Diffstat (limited to 'thirdparty')
285 files changed, 28973 insertions, 15140 deletions
diff --git a/thirdparty/README.md b/thirdparty/README.md index 31b19451b3..818f2f5892 100644 --- a/thirdparty/README.md +++ b/thirdparty/README.md @@ -62,7 +62,7 @@ Files extracted from upstream source: ## doctest - Upstream: https://github.com/onqtam/doctest -- Version: 2.4.8 (7b9885133108ae301ddd16e2651320f54cafeba7, 2022) +- Version: 2.4.9 (b7c21ec5ceeadb4951b00396fc1e4642dd347e5f, 2022) - License: MIT Files extracted from upstream source: @@ -118,7 +118,7 @@ will limit its functionality to IPv4 only. ## etcpak - Upstream: https://github.com/wolfpld/etcpak -- Version: git (f128369e64a5f4715de8125b325e4fe7debb5194, 2022) +- Version: 1.0 (153f0e04a18b93c277684b577365210adcf8e11c, 2022) - License: BSD-3-Clause Files extracted from upstream source: @@ -167,6 +167,11 @@ Files extracted from upstream source: - `include/` folder, minus the `dlg` subfolder - `LICENSE.TXT` and `docs/FTL.TXT` +Some changes have been made in order to prevent LTO from removing code. +They are marked with `// -- GODOT start --` and `// -- GODOT end --` +comments. Apply the patches in the `patches/` folder when syncing on newer upstream +commits. + ## glslang @@ -208,7 +213,7 @@ Files extracted from upstream source: ## harfbuzz - Upstream: https://github.com/harfbuzz/harfbuzz -- Version: 4.3.0 (aee123fc83388b8f5acfb301d87bd92eccc5b843, 2022) +- Version: 4.4.1 (096aaa62a6e0d07c02a4894fc036efc927e5aaf9, 2022) - License: MIT Files extracted from upstream source: @@ -249,6 +254,7 @@ Files generated from upstream source: Files extracted from upstream source: - `jpgd*.{c,h}` +- `jpge*.{c,h}` ## libogg @@ -322,15 +328,15 @@ Files extracted from upstream source: ## mbedtls -- Upstream: https://tls.mbed.org/ -- Version: 2.16.12 (cf4667126010c665341f9e50ef691b7ef8294188, 2021) +- Upstream: https://github.com/Mbed-TLS/mbedtls +- Version: 2.18.1 (dd79db10014d85b26d11fe57218431f2e5ede6f2, 2022) - License: Apache 2.0 File extracted from upstream release tarball: - All `*.h` from `include/mbedtls/` to `thirdparty/mbedtls/include/mbedtls/` except `config_psa.h` and `psa_util.h`. - All `*.c` and `*.h` from `library/` to `thirdparty/mbedtls/library/` except those starting with `psa_*`. -- `LICENSE` and `apache-2.0.txt` files. +- The `LICENSE` file. - Applied the patch in `patches/1453.diff` (upstream PR: https://github.com/ARMmbed/mbedtls/pull/1453). - Added 2 files `godot_core_mbedtls_platform.c` and `godot_core_mbedtls_config.h` @@ -340,7 +346,7 @@ File extracted from upstream release tarball: ## meshoptimizer - Upstream: https://github.com/zeux/meshoptimizer -- Version: git (8a7d69caa68f778cb559f1879b6beb7987c8c6b7, 2022) +- Version: git (ea4558d1c0f217f1d67ed7fe0b07896ece88ae18, 2022) - License: MIT Files extracted from upstream repository: @@ -432,6 +438,15 @@ Collection of single-file libraries used in Godot components. * Upstream: https://github.com/Auburn/FastNoiseLite * Version: git (6be3d6bf7fb408de341285f9ee8a29b67fd953f1, 2022) + custom changes * License: MIT +- `ok_color.h` + * Upstream: https://github.com/bottosson/bottosson.github.io/blob/master/misc/ok_color.h + * Version: git (d69831edb90ffdcd08b7e64da3c5405acd48ad2c, 2022) + * License: MIT + * Modifications: License included in header. +- `ok_color_shader.h` + * https://www.shadertoy.com/view/7sK3D1 + * Version: 2021-09-13 + * License: MIT - `pcg.{cpp,h}` * Upstream: http://www.pcg-random.org * Version: minimal C implementation, http://www.pcg-random.org/download.html @@ -511,7 +526,7 @@ Patch files are provided in `oidn/patches/`. 
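As a minimal sketch of the `// -- GODOT start --` / `// -- GODOT end --` convention described in the freetype note above (the identifiers below are hypothetical, not taken from the actual patches): local modifications are fenced with the two comment markers so they can be located and re-applied when syncing with upstream.

// Hypothetical upstream function carrying a fenced local modification.
// The volatile write is an observable side effect, so link-time
// optimization cannot treat the surrounding code as removable.
volatile int godot_lto_keepalive = 0;

int upstream_helper(int value) {
    // -- GODOT start --
    godot_lto_keepalive = value;
    // -- GODOT end --
    return value;
}

The patch files kept in each library's `patches/` folder restore exactly such fenced regions after an upstream sync.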
## openxr - Upstream: https://github.com/KhronosGroup/OpenXR-SDK -- Version: 1.0.22 (458984d7f59d1ae6dc1b597d94b02e4f7132eaba, 2022) +- Version: 1.0.23 (885a90f8934d84121344ba8e4aa5159d5b496e08, 2022) - License: Apache 2.0 Files extracted from upstream source: @@ -695,7 +710,7 @@ Files extracted from upstream source: SDK release: https://github.com/KhronosGroup/Vulkan-ValidationLayers/blob/master/layers/generated/vk_enum_string_helper.h `vk_mem_alloc.h` is taken from https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator -Version: 3.0.1-development (2022-03-28), commit `5b598e0a359381d7e2a94149210a1b7642024ae5` +Version: 3.0.1 (2022-06-10), commit `cfdc0f8775ab3258a3b9c4e47d8ce4b6f52a5441` `vk_mem_alloc.cpp` is a Godot file and should be preserved on updates. Patches in the `patches` directory should be re-applied after updates. @@ -744,7 +759,7 @@ Files extracted from upstream source: ## zstd - Upstream: https://github.com/facebook/zstd -- Version: 1.5.0 (a488ba114ec17ea1054b9057c26a046fc122b3b6, 2021) +- Version: 1.5.2 (e47e674cd09583ff0503f0f6defd6d23d8b718d3, 2022) - License: BSD-3-Clause Files extracted from upstream source: diff --git a/thirdparty/doctest/doctest.h b/thirdparty/doctest/doctest.h index d25f526827..aa2724c738 100644 --- a/thirdparty/doctest/doctest.h +++ b/thirdparty/doctest/doctest.h @@ -48,7 +48,7 @@ #define DOCTEST_VERSION_MAJOR 2 #define DOCTEST_VERSION_MINOR 4 -#define DOCTEST_VERSION_PATCH 8 +#define DOCTEST_VERSION_PATCH 9 // util we need here #define DOCTEST_TOSTR_IMPL(x) #x @@ -68,6 +68,12 @@ // ideas for the version stuff are taken from here: https://github.com/cxxstuff/cxx_detect +#ifdef _MSC_VER +#define DOCTEST_CPLUSPLUS _MSVC_LANG +#else +#define DOCTEST_CPLUSPLUS __cplusplus +#endif + #define DOCTEST_COMPILER(MAJOR, MINOR, PATCH) ((MAJOR)*10000000 + (MINOR)*100000 + (PATCH)) // GCC/Clang and GCC/MSVC are mutually exclusive, but Clang/MSVC are not because of clang-cl... @@ -153,7 +159,6 @@ DOCTEST_CLANG_SUPPRESS_WARNING("-Wweak-vtables") \ DOCTEST_CLANG_SUPPRESS_WARNING("-Wpadded") \ DOCTEST_CLANG_SUPPRESS_WARNING("-Wmissing-prototypes") \ - DOCTEST_CLANG_SUPPRESS_WARNING("-Wunused-local-typedef") \ DOCTEST_CLANG_SUPPRESS_WARNING("-Wc++98-compat") \ DOCTEST_CLANG_SUPPRESS_WARNING("-Wc++98-compat-pedantic") \ \ @@ -164,7 +169,6 @@ DOCTEST_GCC_SUPPRESS_WARNING("-Wstrict-overflow") \ DOCTEST_GCC_SUPPRESS_WARNING("-Wstrict-aliasing") \ DOCTEST_GCC_SUPPRESS_WARNING("-Wmissing-declarations") \ - DOCTEST_GCC_SUPPRESS_WARNING("-Wunused-local-typedefs") \ DOCTEST_GCC_SUPPRESS_WARNING("-Wuseless-cast") \ DOCTEST_GCC_SUPPRESS_WARNING("-Wnoexcept") \ \ @@ -231,7 +235,8 @@ DOCTEST_MSVC_SUPPRESS_WARNING(4623) // default constructor was implicitly define DOCTEST_MSVC_SUPPRESS_WARNING(4623) /* default constructor was implicitly deleted */ \ DOCTEST_MSVC_SUPPRESS_WARNING(5039) /* pointer to pot. 
throwing function passed to extern C */ \ DOCTEST_MSVC_SUPPRESS_WARNING(5045) /* Spectre mitigation for memory load */ \ - DOCTEST_MSVC_SUPPRESS_WARNING(5105) /* macro producing 'defined' has undefined behavior */ + DOCTEST_MSVC_SUPPRESS_WARNING(5105) /* macro producing 'defined' has undefined behavior */ \ + DOCTEST_MSVC_SUPPRESS_WARNING(4738) /* storing float result in memory, loss of performance */ #define DOCTEST_MAKE_STD_HEADERS_CLEAN_FROM_WARNINGS_ON_WALL_END DOCTEST_MSVC_SUPPRESS_WARNING_POP @@ -266,7 +271,7 @@ DOCTEST_MSVC_SUPPRESS_WARNING(4623) // default constructor was implicitly define #endif // DOCTEST_CONFIG_NO_WINDOWS_SEH #if !defined(_WIN32) && !defined(__QNX__) && !defined(DOCTEST_CONFIG_POSIX_SIGNALS) && \ - !defined(__EMSCRIPTEN__) + !defined(__EMSCRIPTEN__) && !defined(__wasi__) #define DOCTEST_CONFIG_POSIX_SIGNALS #endif // _WIN32 #if defined(DOCTEST_CONFIG_NO_POSIX_SIGNALS) && defined(DOCTEST_CONFIG_POSIX_SIGNALS) @@ -274,7 +279,8 @@ DOCTEST_MSVC_SUPPRESS_WARNING(4623) // default constructor was implicitly define #endif // DOCTEST_CONFIG_NO_POSIX_SIGNALS #ifndef DOCTEST_CONFIG_NO_EXCEPTIONS -#if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) +#if !defined(__cpp_exceptions) && !defined(__EXCEPTIONS) && !defined(_CPPUNWIND) \ + || defined(__wasi__) #define DOCTEST_CONFIG_NO_EXCEPTIONS #endif // no exceptions #endif // DOCTEST_CONFIG_NO_EXCEPTIONS @@ -289,6 +295,10 @@ DOCTEST_MSVC_SUPPRESS_WARNING(4623) // default constructor was implicitly define #define DOCTEST_CONFIG_NO_TRY_CATCH_IN_ASSERTS #endif // DOCTEST_CONFIG_NO_EXCEPTIONS && !DOCTEST_CONFIG_NO_TRY_CATCH_IN_ASSERTS +#ifdef __wasi__ +#define DOCTEST_CONFIG_NO_MULTITHREADING +#endif + #if defined(DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN) && !defined(DOCTEST_CONFIG_IMPLEMENT) #define DOCTEST_CONFIG_IMPLEMENT #endif // DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN @@ -316,6 +326,16 @@ DOCTEST_MSVC_SUPPRESS_WARNING(4623) // default constructor was implicitly define #define DOCTEST_INTERFACE #endif // DOCTEST_CONFIG_IMPLEMENTATION_IN_DLL +// needed for extern template instantiations +// see https://github.com/fmtlib/fmt/issues/2228 +#if DOCTEST_MSVC +#define DOCTEST_INTERFACE_DECL +#define DOCTEST_INTERFACE_DEF DOCTEST_INTERFACE +#else // DOCTEST_MSVC +#define DOCTEST_INTERFACE_DECL DOCTEST_INTERFACE +#define DOCTEST_INTERFACE_DEF +#endif // DOCTEST_MSVC + #define DOCTEST_EMPTY #if DOCTEST_MSVC @@ -351,8 +371,10 @@ DOCTEST_MSVC_SUPPRESS_WARNING(4623) // default constructor was implicitly define #ifndef DOCTEST_CONSTEXPR #if DOCTEST_MSVC && (DOCTEST_MSVC < DOCTEST_COMPILER(19, 0, 0)) #define DOCTEST_CONSTEXPR const +#define DOCTEST_CONSTEXPR_FUNC inline #else // DOCTEST_MSVC #define DOCTEST_CONSTEXPR constexpr +#define DOCTEST_CONSTEXPR_FUNC constexpr #endif // DOCTEST_MSVC #endif // DOCTEST_CONSTEXPR @@ -360,6 +382,17 @@ DOCTEST_MSVC_SUPPRESS_WARNING(4623) // default constructor was implicitly define // == FEATURE DETECTION END ======================================================================== // ================================================================================================= +#define DOCTEST_DECLARE_INTERFACE(name) \ + virtual ~name(); \ + name() = default; \ + name(const name&) = delete; \ + name(name&&) = delete; \ + name& operator=(const name&) = delete; \ + name& operator=(name&&) = delete; + +#define DOCTEST_DEFINE_INTERFACE(name) \ + name::~name() = default; + // internal macros for string concatenation and anonymous variable name generation #define DOCTEST_CAT_IMPL(s1, s2) 
s1##s2 #define DOCTEST_CAT(s1, s2) DOCTEST_CAT_IMPL(s1, s2) @@ -382,17 +415,19 @@ DOCTEST_MSVC_SUPPRESS_WARNING(4623) // default constructor was implicitly define #define DOCTEST_PLATFORM_IPHONE #elif defined(_WIN32) #define DOCTEST_PLATFORM_WINDOWS +#elif defined(__wasi__) +#define DOCTEST_PLATFORM_WASI #else // DOCTEST_PLATFORM #define DOCTEST_PLATFORM_LINUX #endif // DOCTEST_PLATFORM namespace doctest { namespace detail { - static DOCTEST_CONSTEXPR int consume(const int*, int) { return 0; } + static DOCTEST_CONSTEXPR int consume(const int*, int) noexcept { return 0; } }} -#define DOCTEST_GLOBAL_NO_WARNINGS(var, ...) \ - DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wglobal-constructors") \ - static const int var = doctest::detail::consume(&var, __VA_ARGS__); \ +#define DOCTEST_GLOBAL_NO_WARNINGS(var, ...) \ + DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wglobal-constructors") \ + static const int var = doctest::detail::consume(&var, __VA_ARGS__); \ DOCTEST_CLANG_SUPPRESS_WARNING_POP #ifndef DOCTEST_BREAK_INTO_DEBUGGER @@ -400,16 +435,19 @@ namespace doctest { namespace detail { #ifdef DOCTEST_PLATFORM_LINUX #if defined(__GNUC__) && (defined(__i386) || defined(__x86_64)) // Break at the location of the failing check if possible -#define DOCTEST_BREAK_INTO_DEBUGGER() __asm__("int $3\n" : :) // NOLINT (hicpp-no-assembler) +#define DOCTEST_BREAK_INTO_DEBUGGER() __asm__("int $3\n" : :) // NOLINT(hicpp-no-assembler) #else #include <signal.h> #define DOCTEST_BREAK_INTO_DEBUGGER() raise(SIGTRAP) #endif #elif defined(DOCTEST_PLATFORM_MAC) #if defined(__x86_64) || defined(__x86_64__) || defined(__amd64__) || defined(__i386) -#define DOCTEST_BREAK_INTO_DEBUGGER() __asm__("int $3\n" : :) // NOLINT (hicpp-no-assembler) +#define DOCTEST_BREAK_INTO_DEBUGGER() __asm__("int $3\n" : :) // NOLINT(hicpp-no-assembler) +#elif defined(__ppc__) || defined(__ppc64__) +// https://www.cocoawithlove.com/2008/03/break-into-debugger.html +#define DOCTEST_BREAK_INTO_DEBUGGER() __asm__("li r0, 20\nsc\nnop\nli r0, 37\nli r4, 2\nsc\nnop\n": : : "memory","r0","r3","r4") // NOLINT(hicpp-no-assembler) #else -#define DOCTEST_BREAK_INTO_DEBUGGER() __asm__("brk #0"); // NOLINT (hicpp-no-assembler) +#define DOCTEST_BREAK_INTO_DEBUGGER() __asm__("brk #0"); // NOLINT(hicpp-no-assembler) #endif #elif DOCTEST_MSVC #define DOCTEST_BREAK_INTO_DEBUGGER() __debugbreak() @@ -425,7 +463,9 @@ DOCTEST_GCC_SUPPRESS_WARNING_POP // this is kept here for backwards compatibility since the config option was changed #ifdef DOCTEST_CONFIG_USE_IOSFWD +#ifndef DOCTEST_CONFIG_USE_STD_HEADERS #define DOCTEST_CONFIG_USE_STD_HEADERS +#endif #endif // DOCTEST_CONFIG_USE_IOSFWD // for clang - always include ciso646 (which drags some std stuff) because @@ -436,7 +476,9 @@ DOCTEST_GCC_SUPPRESS_WARNING_POP #if DOCTEST_CLANG #include <ciso646> #ifdef _LIBCPP_VERSION +#ifndef DOCTEST_CONFIG_USE_STD_HEADERS #define DOCTEST_CONFIG_USE_STD_HEADERS +#endif #endif // _LIBCPP_VERSION #endif // clang @@ -444,26 +486,32 @@ DOCTEST_GCC_SUPPRESS_WARNING_POP #ifndef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS #define DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS #endif // DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS +DOCTEST_MAKE_STD_HEADERS_CLEAN_FROM_WARNINGS_ON_WALL_BEGIN #include <cstddef> #include <ostream> #include <istream> +DOCTEST_MAKE_STD_HEADERS_CLEAN_FROM_WARNINGS_ON_WALL_END #else // DOCTEST_CONFIG_USE_STD_HEADERS // Forward declaring 'X' in namespace std is not permitted by the C++ Standard. 
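// The DOCTEST_BREAK_INTO_DEBUGGER selection a few hunks above reduces to the
// following self-contained sketch: an inline INT3 where the architecture is
// known to be x86, the MSVC intrinsic under MSVC, and raise(SIGTRAP) as the
// generic fallback (a POSIX environment is assumed for that path; the ARM
// "brk #0" and PowerPC branches carried by the header are omitted here).
#include <csignal>

inline void debug_break() {
#if defined(_MSC_VER)
    __debugbreak();               // compiler intrinsic, no signal involved
#elif defined(__i386__) || defined(__x86_64__)
    __asm__("int $3\n" : :);      // x86 software-breakpoint instruction
#else
    raise(SIGTRAP);               // generic POSIX fallback
#endif
}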
DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4643) -namespace std { // NOLINT (cert-dcl58-cpp) -typedef decltype(nullptr) nullptr_t; +namespace std { // NOLINT(cert-dcl58-cpp) +typedef decltype(nullptr) nullptr_t; // NOLINT(modernize-use-using) +typedef decltype(sizeof(void*)) size_t; // NOLINT(modernize-use-using) template <class charT> struct char_traits; template <> struct char_traits<char>; template <class charT, class traits> -class basic_ostream; -typedef basic_ostream<char, char_traits<char>> ostream; +class basic_ostream; // NOLINT(fuchsia-virtual-inheritance) +typedef basic_ostream<char, char_traits<char>> ostream; // NOLINT(modernize-use-using) +template<class traits> +// NOLINTNEXTLINE +basic_ostream<char, traits>& operator<<(basic_ostream<char, traits>&, const char*); template <class charT, class traits> class basic_istream; -typedef basic_istream<char, char_traits<char>> istream; +typedef basic_istream<char, char_traits<char>> istream; // NOLINT(modernize-use-using) template <class... Types> class tuple; #if DOCTEST_MSVC >= DOCTEST_COMPILER(19, 20, 0) @@ -486,8 +534,14 @@ DOCTEST_MSVC_SUPPRESS_WARNING_POP namespace doctest { +using std::size_t; + DOCTEST_INTERFACE extern bool is_running_in_test; +#ifndef DOCTEST_CONFIG_STRING_SIZE_TYPE +#define DOCTEST_CONFIG_STRING_SIZE_TYPE unsigned +#endif + // A 24 byte string class (can be as small as 17 for x64 and 13 for x86) that can hold strings with length // of up to 23 chars on the stack before going on the heap - the last byte of the buffer is used for: // - "is small" bit - the highest bit - if "0" then it is small - otherwise its "1" (128) @@ -500,7 +554,6 @@ DOCTEST_INTERFACE extern bool is_running_in_test; // TODO: // - optimizations - like not deleting memory unnecessarily in operator= and etc. 
// - resize/reserve/clear -// - substr // - replace // - back/front // - iterator stuff @@ -510,64 +563,80 @@ DOCTEST_INTERFACE extern bool is_running_in_test; // - relational operators as free functions - taking const char* as one of the params class DOCTEST_INTERFACE String { - static const unsigned len = 24; //!OCLINT avoid private static members - static const unsigned last = len - 1; //!OCLINT avoid private static members +public: + using size_type = DOCTEST_CONFIG_STRING_SIZE_TYPE; + +private: + static DOCTEST_CONSTEXPR size_type len = 24; //!OCLINT avoid private static members + static DOCTEST_CONSTEXPR size_type last = len - 1; //!OCLINT avoid private static members struct view // len should be more than sizeof(view) - because of the final byte for flags { char* ptr; - unsigned size; - unsigned capacity; + size_type size; + size_type capacity; }; union { - char buf[len]; + char buf[len]; // NOLINT(*-avoid-c-arrays) view data; }; - char* allocate(unsigned sz); + char* allocate(size_type sz); - bool isOnStack() const { return (buf[last] & 128) == 0; } - void setOnHeap(); - void setLast(unsigned in = last); + bool isOnStack() const noexcept { return (buf[last] & 128) == 0; } + void setOnHeap() noexcept; + void setLast(size_type in = last) noexcept; + void setSize(size_type sz) noexcept; void copy(const String& other); public: - String(); + static DOCTEST_CONSTEXPR size_type npos = static_cast<size_type>(-1); + + String() noexcept; ~String(); // cppcheck-suppress noExplicitConstructor String(const char* in); - String(const char* in, unsigned in_size); + String(const char* in, size_type in_size); - String(std::istream& in, unsigned in_size); + String(std::istream& in, size_type in_size); String(const String& other); String& operator=(const String& other); String& operator+=(const String& other); - String(String&& other); - String& operator=(String&& other); + String(String&& other) noexcept; + String& operator=(String&& other) noexcept; - char operator[](unsigned i) const; - char& operator[](unsigned i); + char operator[](size_type i) const; + char& operator[](size_type i); // the only functions I'm willing to leave in the interface - available for inlining const char* c_str() const { return const_cast<String*>(this)->c_str(); } // NOLINT char* c_str() { - if(isOnStack()) + if (isOnStack()) { return reinterpret_cast<char*>(buf); + } return data.ptr; } - unsigned size() const; - unsigned capacity() const; + size_type size() const; + size_type capacity() const; + + String substr(size_type pos, size_type cnt = npos) &&; + String substr(size_type pos, size_type cnt = npos) const &; + + size_type find(char ch, size_type pos = 0) const; + size_type rfind(char ch, size_type pos = npos) const; int compare(const char* other, bool no_case = false) const; int compare(const String& other, bool no_case = false) const; + +friend DOCTEST_INTERFACE std::ostream& operator<<(std::ostream& s, const String& in); }; DOCTEST_INTERFACE String operator+(const String& lhs, const String& rhs); @@ -579,7 +648,21 @@ DOCTEST_INTERFACE bool operator>(const String& lhs, const String& rhs); DOCTEST_INTERFACE bool operator<=(const String& lhs, const String& rhs); DOCTEST_INTERFACE bool operator>=(const String& lhs, const String& rhs); -DOCTEST_INTERFACE std::ostream& operator<<(std::ostream& s, const String& in); +class DOCTEST_INTERFACE Contains { +public: + explicit Contains(const String& string); + + bool checkWith(const String& other) const; + + String string; +}; + +DOCTEST_INTERFACE String toString(const 
Contains& in); + +DOCTEST_INTERFACE bool operator==(const String& lhs, const Contains& rhs); +DOCTEST_INTERFACE bool operator==(const Contains& lhs, const String& rhs); +DOCTEST_INTERFACE bool operator!=(const String& lhs, const Contains& rhs); +DOCTEST_INTERFACE bool operator!=(const Contains& lhs, const String& rhs); namespace Color { enum Enum @@ -652,7 +735,7 @@ namespace assertType { DT_WARN_THROWS_WITH = is_throws_with | is_warn, DT_CHECK_THROWS_WITH = is_throws_with | is_check, DT_REQUIRE_THROWS_WITH = is_throws_with | is_require, - + DT_WARN_THROWS_WITH_AS = is_throws_with | is_throws_as | is_warn, DT_CHECK_THROWS_WITH_AS = is_throws_with | is_throws_as | is_check, DT_REQUIRE_THROWS_WITH_AS = is_throws_with | is_throws_as | is_require, @@ -733,9 +816,27 @@ struct DOCTEST_INTERFACE AssertData String m_decomp; // for specific exception-related asserts - bool m_threw_as; - const char* m_exception_type; - const char* m_exception_string; + bool m_threw_as; + const char* m_exception_type; + + class DOCTEST_INTERFACE StringContains { + private: + Contains content; + bool isContains; + + public: + StringContains(const String& str) : content(str), isContains(false) { } + StringContains(Contains cntn) : content(static_cast<Contains&&>(cntn)), isContains(true) { } + + bool check(const String& str) { return isContains ? (content == str) : (content.string == str); } + + operator const String&() const { return content.string; } + + const char* c_str() const { return content.string.c_str(); } + } m_exception_string; + + AssertData(assertType::Enum at, const char* file, int line, const char* expr, + const char* exception_type, const StringContains& exception_string); }; struct DOCTEST_INTERFACE MessageData @@ -752,13 +853,13 @@ struct DOCTEST_INTERFACE SubcaseSignature const char* m_file; int m_line; + bool operator==(const SubcaseSignature& other) const; bool operator<(const SubcaseSignature& other) const; }; struct DOCTEST_INTERFACE IContextScope { - IContextScope(); - virtual ~IContextScope(); + DOCTEST_DECLARE_INTERFACE(IContextScope) virtual void stringify(std::ostream*) const = 0; }; @@ -815,200 +916,184 @@ struct ContextOptions //!OCLINT too many fields }; namespace detail { - template <bool CONDITION, typename TYPE = void> - struct enable_if - {}; + namespace types { +#ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS + using namespace std; +#else + template <bool COND, typename T = void> + struct enable_if { }; + + template <typename T> + struct enable_if<true, T> { using type = T; }; - template <typename TYPE> - struct enable_if<true, TYPE> - { typedef TYPE type; }; + struct true_type { static DOCTEST_CONSTEXPR bool value = true; }; + struct false_type { static DOCTEST_CONSTEXPR bool value = false; }; - // clang-format off - template<class T> struct remove_reference { typedef T type; }; - template<class T> struct remove_reference<T&> { typedef T type; }; - template<class T> struct remove_reference<T&&> { typedef T type; }; + template <typename T> struct remove_reference { using type = T; }; + template <typename T> struct remove_reference<T&> { using type = T; }; + template <typename T> struct remove_reference<T&&> { using type = T; }; - template<typename T, typename U = T&&> U declval(int); + template <typename T> struct is_rvalue_reference : false_type { }; + template <typename T> struct is_rvalue_reference<T&&> : true_type { }; - template<typename T> T declval(long); + template<typename T> struct remove_const { using type = T; }; + template <typename T> struct remove_const<const T> { using type 
= T; }; - template<typename T> auto declval() DOCTEST_NOEXCEPT -> decltype(declval<T>(0)) ; + // Compiler intrinsics + template <typename T> struct is_enum { static DOCTEST_CONSTEXPR bool value = __is_enum(T); }; + template <typename T> struct underlying_type { using type = __underlying_type(T); }; - template<class T> struct is_lvalue_reference { const static bool value=false; }; - template<class T> struct is_lvalue_reference<T&> { const static bool value=true; }; + template <typename T> struct is_pointer : false_type { }; + template <typename T> struct is_pointer<T*> : true_type { }; + + template <typename T> struct is_array : false_type { }; + // NOLINTNEXTLINE(*-avoid-c-arrays) + template <typename T, size_t SIZE> struct is_array<T[SIZE]> : true_type { }; +#endif + } - template<class T> struct is_rvalue_reference { const static bool value=false; }; - template<class T> struct is_rvalue_reference<T&&> { const static bool value=true; }; + // <utility> + template <typename T> + T&& declval(); template <class T> - inline T&& forward(typename remove_reference<T>::type& t) DOCTEST_NOEXCEPT - { + DOCTEST_CONSTEXPR_FUNC T&& forward(typename types::remove_reference<T>::type& t) DOCTEST_NOEXCEPT { return static_cast<T&&>(t); } template <class T> - inline T&& forward(typename remove_reference<T>::type&& t) DOCTEST_NOEXCEPT - { - static_assert(!is_lvalue_reference<T>::value, - "Can not forward an rvalue as an lvalue."); + DOCTEST_CONSTEXPR_FUNC T&& forward(typename types::remove_reference<T>::type&& t) DOCTEST_NOEXCEPT { return static_cast<T&&>(t); } - template<class T> struct remove_const { typedef T type; }; - template<class T> struct remove_const<const T> { typedef T type; }; -#ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS - template<class T> struct is_enum : public std::is_enum<T> {}; - template<class T> struct underlying_type : public std::underlying_type<T> {}; -#else - // Use compiler intrinsics - template<class T> struct is_enum { DOCTEST_CONSTEXPR static bool value = __is_enum(T); }; - template<class T> struct underlying_type { typedef __underlying_type(T) type; }; -#endif - // clang-format on + template <typename T> + struct deferred_false : types::false_type { }; + +// MSVS 2015 :( +#if defined(_MSC_VER) && _MSC_VER <= 1900 + template <typename T, typename = void> + struct has_global_insertion_operator : types::false_type { }; template <typename T> - struct deferred_false - // cppcheck-suppress unusedStructMember - { static const bool value = false; }; - - namespace has_insertion_operator_impl { - std::ostream &os(); - template<class T> - DOCTEST_REF_WRAP(T) val(); - - template<class, class = void> - struct check { - static DOCTEST_CONSTEXPR bool value = false; - }; + struct has_global_insertion_operator<T, decltype(::operator<<(declval<std::ostream&>(), declval<const T&>()), void())> : types::true_type { }; - template<class T> - struct check<T, decltype(os() << val<T>(), void())> { - static DOCTEST_CONSTEXPR bool value = true; - }; - } // namespace has_insertion_operator_impl + template <typename T, typename = void> + struct has_insertion_operator { static DOCTEST_CONSTEXPR bool value = has_global_insertion_operator<T>::value; }; + + template <typename T, bool global> + struct insert_hack; + + template <typename T> + struct insert_hack<T, true> { + static void insert(std::ostream& os, const T& t) { ::operator<<(os, t); } + }; - template<class T> - using has_insertion_operator = has_insertion_operator_impl::check<const T>; + template <typename T> + struct insert_hack<T, false> { + static void 
insert(std::ostream& os, const T& t) { operator<<(os, t); } + }; + + template <typename T> + using insert_hack_t = insert_hack<T, has_global_insertion_operator<T>::value>; +#else + template <typename T, typename = void> + struct has_insertion_operator : types::false_type { }; +#endif + +template <typename T> +struct has_insertion_operator<T, decltype(operator<<(declval<std::ostream&>(), declval<const T&>()), void())> : types::true_type { }; DOCTEST_INTERFACE std::ostream* tlssPush(); DOCTEST_INTERFACE String tlssPop(); - template <bool C> - struct StringMakerBase - { + struct StringMakerBase { template <typename T> static String convert(const DOCTEST_REF_WRAP(T)) { +#ifdef DOCTEST_CONFIG_REQUIRE_STRINGIFICATION_FOR_ALL_USED_TYPES + static_assert(deferred_false<T>::value, "No stringification detected for type T. See string conversion manual"); +#endif return "{?}"; } }; - // Vector<int> and various type other than pointer or array. - template<typename T> - struct filldata - { - static void fill(std::ostream* stream, const T &in) { - *stream << in; - } - }; - - template<typename T,unsigned long N> - struct filldata<T[N]> - { - static void fill(std::ostream* stream, const T (&in)[N]) { - for (unsigned long i = 0; i < N; i++) { - *stream << in[i]; - } - } - }; - - // Specialized since we don't want the terminating null byte! - template<unsigned long N> - struct filldata<const char[N]> - { - static void fill(std::ostream* stream, const char(&in)[N]) { - *stream << in; - } - }; + template <typename T> + struct filldata; - template<typename T> + template <typename T> void filloss(std::ostream* stream, const T& in) { filldata<T>::fill(stream, in); } - template<typename T,unsigned long N> - void filloss(std::ostream* stream, const T (&in)[N]) { + template <typename T, size_t N> + void filloss(std::ostream* stream, const T (&in)[N]) { // NOLINT(*-avoid-c-arrays) // T[N], T(&)[N], T(&&)[N] have same behaviour. // Hence remove reference. - filldata<typename remove_reference<decltype(in)>::type>::fill(stream, in); + filloss<typename types::remove_reference<decltype(in)>::type>(stream, in); + } + + template <typename T> + String toStream(const T& in) { + std::ostream* stream = tlssPush(); + filloss(stream, in); + return tlssPop(); } template <> - struct StringMakerBase<true> - { + struct StringMakerBase<true> { template <typename T> static String convert(const DOCTEST_REF_WRAP(T) in) { - /* When parameter "in" is a null terminated const char* it works. - * When parameter "in" is a T arr[N] without '\0' we can fill the - * stringstream with N objects (T=char).If in is char pointer * - * without '\0' , it would cause segfault - * stepping over unaccessible memory. 
- */ - - std::ostream* stream = tlssPush(); - filloss(stream, in); - return tlssPop(); + return toStream(in); } }; - - DOCTEST_INTERFACE String rawMemoryToString(const void* object, unsigned size); - - template <typename T> - String rawMemoryToString(const DOCTEST_REF_WRAP(T) object) { - return rawMemoryToString(&object, sizeof(object)); - } - - template <typename T> - const char* type_to_string() { - return "<>"; - } } // namespace detail template <typename T> -struct StringMaker : public detail::StringMakerBase<detail::has_insertion_operator<T>::value> +struct StringMaker : public detail::StringMakerBase< + detail::has_insertion_operator<T>::value || detail::types::is_pointer<T>::value || detail::types::is_array<T>::value> {}; -template <typename T> -struct StringMaker<T*> -{ - template <typename U> - static String convert(U* p) { - if(p) - return detail::rawMemoryToString(p); - return "NULL"; - } -}; +#ifndef DOCTEST_STRINGIFY +#ifdef DOCTEST_CONFIG_DOUBLE_STRINGIFY +#define DOCTEST_STRINGIFY(...) toString(toString(__VA_ARGS__)) +#else +#define DOCTEST_STRINGIFY(...) toString(__VA_ARGS__) +#endif +#endif -template <typename R, typename C> -struct StringMaker<R C::*> -{ - static String convert(R C::*p) { - if(p) - return detail::rawMemoryToString(p); - return "NULL"; - } -}; +template <typename T> +String toString() { +#if DOCTEST_MSVC >= 0 && DOCTEST_CLANG == 0 && DOCTEST_GCC == 0 + String ret = __FUNCSIG__; // class doctest::String __cdecl doctest::toString<TYPE>(void) + String::size_type beginPos = ret.find('<'); + return ret.substr(beginPos + 1, ret.size() - beginPos - static_cast<String::size_type>(sizeof(">(void)"))); +#else + String ret = __PRETTY_FUNCTION__; // doctest::String toString() [with T = TYPE] + String::size_type begin = ret.find('=') + 2; + return ret.substr(begin, ret.size() - begin - 1); +#endif +} -template <typename T, typename detail::enable_if<!detail::is_enum<T>::value, bool>::type = true> +template <typename T, typename detail::types::enable_if<!detail::types::is_enum<T>::value, bool>::type = true> String toString(const DOCTEST_REF_WRAP(T) value) { return StringMaker<T>::convert(value); } #ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -DOCTEST_INTERFACE String toString(char* in); DOCTEST_INTERFACE String toString(const char* in); #endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING + +#if DOCTEST_MSVC >= DOCTEST_COMPILER(19, 20, 0) +// see this issue on why this is needed: https://github.com/doctest/doctest/issues/183 +DOCTEST_INTERFACE String toString(const std::string& in); +#endif // VS 2019 + +DOCTEST_INTERFACE String toString(String in); + +DOCTEST_INTERFACE String toString(std::nullptr_t); + DOCTEST_INTERFACE String toString(bool in); + DOCTEST_INTERFACE String toString(float in); DOCTEST_INTERFACE String toString(double in); DOCTEST_INTERFACE String toString(double long in); @@ -1016,40 +1101,85 @@ DOCTEST_INTERFACE String toString(double long in); DOCTEST_INTERFACE String toString(char in); DOCTEST_INTERFACE String toString(char signed in); DOCTEST_INTERFACE String toString(char unsigned in); -DOCTEST_INTERFACE String toString(int short in); -DOCTEST_INTERFACE String toString(int short unsigned in); -DOCTEST_INTERFACE String toString(int in); -DOCTEST_INTERFACE String toString(int unsigned in); -DOCTEST_INTERFACE String toString(int long in); -DOCTEST_INTERFACE String toString(int long unsigned in); -DOCTEST_INTERFACE String toString(int long long in); -DOCTEST_INTERFACE String toString(int long long unsigned in); -DOCTEST_INTERFACE String 
toString(std::nullptr_t in); - -template <typename T, typename detail::enable_if<detail::is_enum<T>::value, bool>::type = true> +DOCTEST_INTERFACE String toString(short in); +DOCTEST_INTERFACE String toString(short unsigned in); +DOCTEST_INTERFACE String toString(signed in); +DOCTEST_INTERFACE String toString(unsigned in); +DOCTEST_INTERFACE String toString(long in); +DOCTEST_INTERFACE String toString(long unsigned in); +DOCTEST_INTERFACE String toString(long long in); +DOCTEST_INTERFACE String toString(long long unsigned in); + +template <typename T, typename detail::types::enable_if<detail::types::is_enum<T>::value, bool>::type = true> String toString(const DOCTEST_REF_WRAP(T) value) { - typedef typename detail::underlying_type<T>::type UT; - return toString(static_cast<UT>(value)); + using UT = typename detail::types::underlying_type<T>::type; + return (DOCTEST_STRINGIFY(static_cast<UT>(value))); } -#if DOCTEST_MSVC >= DOCTEST_COMPILER(19, 20, 0) -// see this issue on why this is needed: https://github.com/doctest/doctest/issues/183 -DOCTEST_INTERFACE String toString(const std::string& in); -#endif // VS 2019 +namespace detail { + template <typename T> + struct filldata + { + static void fill(std::ostream* stream, const T& in) { +#if defined(_MSC_VER) && _MSC_VER <= 1900 + insert_hack_t<T>::insert(*stream, in); +#else + operator<<(*stream, in); +#endif + } + }; + +DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4866) +// NOLINTBEGIN(*-avoid-c-arrays) + template <typename T, size_t N> + struct filldata<T[N]> { + static void fill(std::ostream* stream, const T(&in)[N]) { + *stream << "["; + for (size_t i = 0; i < N; i++) { + if (i != 0) { *stream << ", "; } + *stream << (DOCTEST_STRINGIFY(in[i])); + } + *stream << "]"; + } + }; +// NOLINTEND(*-avoid-c-arrays) +DOCTEST_MSVC_SUPPRESS_WARNING_POP -class DOCTEST_INTERFACE Approx + // Specialized since we don't want the terminating null byte! +// NOLINTBEGIN(*-avoid-c-arrays) + template <size_t N> + struct filldata<const char[N]> { + static void fill(std::ostream* stream, const char (&in)[N]) { + *stream << String(in, in[N - 1] ? 
N : N - 1); + } // NOLINT(clang-analyzer-cplusplus.NewDeleteLeaks) + }; +// NOLINTEND(*-avoid-c-arrays) + + template <> + struct filldata<const void*> { + static void fill(std::ostream* stream, const void* in); + }; + + template <typename T> + struct filldata<T*> { + static void fill(std::ostream* stream, const T* in) { + filldata<const void*>::fill(stream, in); + } + }; +} + +struct DOCTEST_INTERFACE Approx { -public: - explicit Approx(double value); + Approx(double value); Approx operator()(double value) const; #ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS template <typename T> explicit Approx(const T& value, - typename detail::enable_if<std::is_constructible<double, T>::value>::type* = + typename detail::types::enable_if<std::is_constructible<double, T>::value>::type* = static_cast<T*>(nullptr)) { - *this = Approx(static_cast<double>(value)); + *this = static_cast<double>(value); } #endif // DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS @@ -1057,7 +1187,7 @@ public: #ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS template <typename T> - typename detail::enable_if<std::is_constructible<double, T>::value, Approx&>::type epsilon( + typename std::enable_if<std::is_constructible<double, T>::value, Approx&>::type epsilon( const T& newEpsilon) { m_epsilon = static_cast<double>(newEpsilon); return *this; @@ -1068,7 +1198,7 @@ public: #ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS template <typename T> - typename detail::enable_if<std::is_constructible<double, T>::value, Approx&>::type scale( + typename std::enable_if<std::is_constructible<double, T>::value, Approx&>::type scale( const T& newScale) { m_scale = static_cast<double>(newScale); return *this; @@ -1089,30 +1219,27 @@ public: DOCTEST_INTERFACE friend bool operator> (double lhs, const Approx & rhs); DOCTEST_INTERFACE friend bool operator> (const Approx & lhs, double rhs); - DOCTEST_INTERFACE friend String toString(const Approx& in); - #ifdef DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS #define DOCTEST_APPROX_PREFIX \ - template <typename T> friend typename detail::enable_if<std::is_constructible<double, T>::value, bool>::type + template <typename T> friend typename std::enable_if<std::is_constructible<double, T>::value, bool>::type - DOCTEST_APPROX_PREFIX operator==(const T& lhs, const Approx& rhs) { return operator==(double(lhs), rhs); } + DOCTEST_APPROX_PREFIX operator==(const T& lhs, const Approx& rhs) { return operator==(static_cast<double>(lhs), rhs); } DOCTEST_APPROX_PREFIX operator==(const Approx& lhs, const T& rhs) { return operator==(rhs, lhs); } DOCTEST_APPROX_PREFIX operator!=(const T& lhs, const Approx& rhs) { return !operator==(lhs, rhs); } DOCTEST_APPROX_PREFIX operator!=(const Approx& lhs, const T& rhs) { return !operator==(rhs, lhs); } - DOCTEST_APPROX_PREFIX operator<=(const T& lhs, const Approx& rhs) { return double(lhs) < rhs.m_value || lhs == rhs; } - DOCTEST_APPROX_PREFIX operator<=(const Approx& lhs, const T& rhs) { return lhs.m_value < double(rhs) || lhs == rhs; } - DOCTEST_APPROX_PREFIX operator>=(const T& lhs, const Approx& rhs) { return double(lhs) > rhs.m_value || lhs == rhs; } - DOCTEST_APPROX_PREFIX operator>=(const Approx& lhs, const T& rhs) { return lhs.m_value > double(rhs) || lhs == rhs; } - DOCTEST_APPROX_PREFIX operator< (const T& lhs, const Approx& rhs) { return double(lhs) < rhs.m_value && lhs != rhs; } - DOCTEST_APPROX_PREFIX operator< (const Approx& lhs, const T& rhs) { return lhs.m_value < double(rhs) && lhs != rhs; } - DOCTEST_APPROX_PREFIX operator> (const T& lhs, const Approx& rhs) { return double(lhs) > rhs.m_value && lhs != 
rhs; } - DOCTEST_APPROX_PREFIX operator> (const Approx& lhs, const T& rhs) { return lhs.m_value > double(rhs) && lhs != rhs; } + DOCTEST_APPROX_PREFIX operator<=(const T& lhs, const Approx& rhs) { return static_cast<double>(lhs) < rhs.m_value || lhs == rhs; } + DOCTEST_APPROX_PREFIX operator<=(const Approx& lhs, const T& rhs) { return lhs.m_value < static_cast<double>(rhs) || lhs == rhs; } + DOCTEST_APPROX_PREFIX operator>=(const T& lhs, const Approx& rhs) { return static_cast<double>(lhs) > rhs.m_value || lhs == rhs; } + DOCTEST_APPROX_PREFIX operator>=(const Approx& lhs, const T& rhs) { return lhs.m_value > static_cast<double>(rhs) || lhs == rhs; } + DOCTEST_APPROX_PREFIX operator< (const T& lhs, const Approx& rhs) { return static_cast<double>(lhs) < rhs.m_value && lhs != rhs; } + DOCTEST_APPROX_PREFIX operator< (const Approx& lhs, const T& rhs) { return lhs.m_value < static_cast<double>(rhs) && lhs != rhs; } + DOCTEST_APPROX_PREFIX operator> (const T& lhs, const Approx& rhs) { return static_cast<double>(lhs) > rhs.m_value && lhs != rhs; } + DOCTEST_APPROX_PREFIX operator> (const Approx& lhs, const T& rhs) { return lhs.m_value > static_cast<double>(rhs) && lhs != rhs; } #undef DOCTEST_APPROX_PREFIX #endif // DOCTEST_CONFIG_INCLUDE_TYPE_TRAITS // clang-format on -private: double m_epsilon; double m_scale; double m_value; @@ -1122,18 +1249,35 @@ DOCTEST_INTERFACE String toString(const Approx& in); DOCTEST_INTERFACE const ContextOptions* getContextOptions(); -#if !defined(DOCTEST_CONFIG_DISABLE) +template <typename F> +struct DOCTEST_INTERFACE_DECL IsNaN +{ + F value; bool flipped; + IsNaN(F f, bool flip = false) : value(f), flipped(flip) { } + IsNaN<F> operator!() const { return { value, !flipped }; } + operator bool() const; +}; +#ifndef __MINGW32__ +extern template struct DOCTEST_INTERFACE_DECL IsNaN<float>; +extern template struct DOCTEST_INTERFACE_DECL IsNaN<double>; +extern template struct DOCTEST_INTERFACE_DECL IsNaN<long double>; +#endif +DOCTEST_INTERFACE String toString(IsNaN<float> in); +DOCTEST_INTERFACE String toString(IsNaN<double> in); +DOCTEST_INTERFACE String toString(IsNaN<double long> in); + +#ifndef DOCTEST_CONFIG_DISABLE namespace detail { // clang-format off #ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING - template<class T> struct decay_array { typedef T type; }; - template<class T, unsigned N> struct decay_array<T[N]> { typedef T* type; }; - template<class T> struct decay_array<T[]> { typedef T* type; }; + template<class T> struct decay_array { using type = T; }; + template<class T, unsigned N> struct decay_array<T[N]> { using type = T*; }; + template<class T> struct decay_array<T[]> { using type = T*; }; - template<class T> struct not_char_pointer { enum { value = 1 }; }; - template<> struct not_char_pointer<char*> { enum { value = 0 }; }; - template<> struct not_char_pointer<const char*> { enum { value = 0 }; }; + template<class T> struct not_char_pointer { static DOCTEST_CONSTEXPR value = 1; }; + template<> struct not_char_pointer<char*> { static DOCTEST_CONSTEXPR value = 0; }; + template<> struct not_char_pointer<const char*> { static DOCTEST_CONSTEXPR value = 0; }; template<class T> struct can_use_op : public not_char_pointer<typename decay_array<T>::type> {}; #endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING @@ -1156,16 +1300,22 @@ namespace detail { bool m_entered = false; Subcase(const String& name, const char* file, int line); + Subcase(const Subcase&) = delete; + Subcase(Subcase&&) = delete; + Subcase& operator=(const Subcase&) = delete; + Subcase& 
operator=(Subcase&&) = delete; ~Subcase(); operator bool() const; + + private: + bool checkFilters(); }; template <typename L, typename R> String stringifyBinaryExpr(const DOCTEST_REF_WRAP(L) lhs, const char* op, const DOCTEST_REF_WRAP(R) rhs) { - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) - return toString(lhs) + op + toString(rhs); + return (DOCTEST_STRINGIFY(lhs)) + op + (DOCTEST_STRINGIFY(rhs)); } #if DOCTEST_CLANG && DOCTEST_CLANG < DOCTEST_COMPILER(3, 6, 0) @@ -1180,17 +1330,8 @@ DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wunused-comparison") #define DOCTEST_DO_BINARY_EXPRESSION_COMPARISON(op, op_str, op_macro) \ template <typename R> \ - DOCTEST_NOINLINE SFINAE_OP(Result,op) operator op(const R&& rhs) { \ - bool res = op_macro(doctest::detail::forward<const L>(lhs), doctest::detail::forward<const R>(rhs)); \ - if(m_at & assertType::is_false) \ - res = !res; \ - if(!res || doctest::getContextOptions()->success) \ - return Result(res, stringifyBinaryExpr(lhs, op_str, rhs)); \ - return Result(res); \ - } \ - template <typename R ,typename enable_if<!doctest::detail::is_rvalue_reference<R>::value, void >::type* = nullptr> \ - DOCTEST_NOINLINE SFINAE_OP(Result,op) operator op(const R& rhs) { \ - bool res = op_macro(doctest::detail::forward<const L>(lhs), rhs); \ + DOCTEST_NOINLINE SFINAE_OP(Result,op) operator op(R&& rhs) { \ + bool res = op_macro(doctest::detail::forward<const L>(lhs), doctest::detail::forward<R>(rhs)); \ if(m_at & assertType::is_false) \ res = !res; \ if(!res || doctest::getContextOptions()->success) \ @@ -1209,12 +1350,12 @@ DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wunused-comparison") return *this; \ } - struct DOCTEST_INTERFACE Result + struct DOCTEST_INTERFACE Result // NOLINT(*-member-init) { bool m_passed; String m_decomp; - Result() = default; + Result() = default; // TODO: Why do we need this? 
(To remove NOLINT) Result(bool passed, const String& decomposition = String()); // forbidding some expressions based on this table: https://en.cppreference.com/w/cpp/language/operator_precedence @@ -1271,8 +1412,7 @@ DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wunused-comparison") #ifndef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING #define DOCTEST_COMPARISON_RETURN_TYPE bool #else // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -#define DOCTEST_COMPARISON_RETURN_TYPE typename enable_if<can_use_op<L>::value || can_use_op<R>::value, bool>::type - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) +#define DOCTEST_COMPARISON_RETURN_TYPE typename types::enable_if<can_use_op<L>::value || can_use_op<R>::value, bool>::type inline bool eq(const char* lhs, const char* rhs) { return String(lhs) == String(rhs); } inline bool ne(const char* lhs, const char* rhs) { return String(lhs) != String(rhs); } inline bool lt(const char* lhs, const char* rhs) { return String(lhs) < String(rhs); } @@ -1320,7 +1460,7 @@ DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wunused-comparison") assertType::Enum m_at; explicit Expression_lhs(L&& in, assertType::Enum at) - : lhs(doctest::detail::forward<L>(in)) + : lhs(static_cast<L&&>(in)) , m_at(at) {} DOCTEST_NOINLINE operator Result() { @@ -1328,12 +1468,14 @@ DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wunused-comparison") DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4800) // 'int': forcing value to bool bool res = static_cast<bool>(lhs); DOCTEST_MSVC_SUPPRESS_WARNING_POP - if(m_at & assertType::is_false) //!OCLINT bitwise operator in conditional + if(m_at & assertType::is_false) { //!OCLINT bitwise operator in conditional res = !res; + } - if(!res || getContextOptions()->success) - return Result(res, toString(lhs)); - return Result(res); + if(!res || getContextOptions()->success) { + return { res, (DOCTEST_STRINGIFY(lhs)) }; + } + return { res }; } /* This is required for user-defined conversions from Expression_lhs to L */ @@ -1394,11 +1536,11 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP // https://github.com/catchorg/Catch2/issues/870 // https://github.com/catchorg/Catch2/issues/565 template <typename L> - Expression_lhs<const L> operator<<(const L &&operand) { - return Expression_lhs<const L>(doctest::detail::forward<const L>(operand), m_at); + Expression_lhs<L> operator<<(L&& operand) { + return Expression_lhs<L>(static_cast<L&&>(operand), m_at); } - template <typename L,typename enable_if<!doctest::detail::is_rvalue_reference<L>::value,void >::type* = nullptr> + template <typename L,typename types::enable_if<!doctest::detail::types::is_rvalue_reference<L>::value,void >::type* = nullptr> Expression_lhs<const L&> operator<<(const L &operand) { return Expression_lhs<const L&>(operand, m_at); } @@ -1425,25 +1567,28 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP } }; - typedef void (*funcType)(); + using funcType = void (*)(); struct DOCTEST_INTERFACE TestCase : public TestCaseData { funcType m_test; // a function pointer to the test case - const char* m_type; // for templated test cases - gets appended to the real name + String m_type; // for templated test cases - gets appended to the real name int m_template_id; // an ID used to distinguish between the different versions of a templated test case String m_full_name; // contains the name (only for templated test cases!) 
+ the template type TestCase(funcType test, const char* file, unsigned line, const TestSuite& test_suite, - const char* type = "", int template_id = -1); + const String& type = String(), int template_id = -1); TestCase(const TestCase& other); + TestCase(TestCase&&) = delete; DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(26434) // hides a non-virtual function TestCase& operator=(const TestCase& other); DOCTEST_MSVC_SUPPRESS_WARNING_POP + TestCase& operator=(TestCase&&) = delete; + TestCase& operator*(const char* in); template <typename T> @@ -1453,6 +1598,8 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP } bool operator<(const TestCase& other) const; + + ~TestCase() = default; }; // forward declarations of functions used by the macros @@ -1492,7 +1639,10 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP struct DOCTEST_INTERFACE ResultBuilder : public AssertData { ResultBuilder(assertType::Enum at, const char* file, int line, const char* expr, - const char* exception_type = "", const char* exception_string = ""); + const char* exception_type = "", const String& exception_string = ""); + + ResultBuilder(assertType::Enum at, const char* file, int line, const char* expr, + const char* exception_type, const Contains& exception_string); void setResult(const Result& res); @@ -1500,8 +1650,9 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP DOCTEST_NOINLINE bool binary_assert(const DOCTEST_REF_WRAP(L) lhs, const DOCTEST_REF_WRAP(R) rhs) { m_failed = !RelationalComparator<comparison, L, R>()(lhs, rhs); - if(m_failed || getContextOptions()->success) + if (m_failed || getContextOptions()->success) { m_decomp = stringifyBinaryExpr(lhs, ", ", rhs); + } return !m_failed; } @@ -1509,11 +1660,13 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP DOCTEST_NOINLINE bool unary_assert(const DOCTEST_REF_WRAP(L) val) { m_failed = !val; - if(m_at & assertType::is_false) //!OCLINT bitwise operator in conditional + if (m_at & assertType::is_false) { //!OCLINT bitwise operator in conditional m_failed = !m_failed; + } - if(m_failed || getContextOptions()->success) - m_decomp = toString(val); + if (m_failed || getContextOptions()->success) { + m_decomp = (DOCTEST_STRINGIFY(val)); + } return !m_failed; } @@ -1536,7 +1689,7 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP DOCTEST_INTERFACE void failed_out_of_a_testing_context(const AssertData& ad); DOCTEST_INTERFACE bool decomp_assert(assertType::Enum at, const char* file, int line, - const char* expr, Result result); + const char* expr, const Result& result); #define DOCTEST_ASSERT_OUT_OF_TESTS(decomp) \ do { \ @@ -1592,15 +1745,14 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP // IF THE DEBUGGER BREAKS HERE - GO 1 LEVEL UP IN THE CALLSTACK FOR THE FAILING ASSERT // THIS IS THE EFFECT OF HAVING 'DOCTEST_CONFIG_SUPER_FAST_ASSERTS' DEFINED // ################################################################################### - DOCTEST_ASSERT_OUT_OF_TESTS(toString(val)); - DOCTEST_ASSERT_IN_TESTS(toString(val)); + DOCTEST_ASSERT_OUT_OF_TESTS((DOCTEST_STRINGIFY(val))); + DOCTEST_ASSERT_IN_TESTS((DOCTEST_STRINGIFY(val))); return !failed; } struct DOCTEST_INTERFACE IExceptionTranslator { - IExceptionTranslator(); - virtual ~IExceptionTranslator(); + DOCTEST_DECLARE_INTERFACE(IExceptionTranslator) virtual bool translate(String&) const = 0; }; @@ -1616,7 +1768,7 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP try { throw; // lgtm [cpp/rethrow-no-exception] // cppcheck-suppress catchExceptionByValue - } catch(T ex) { // NOLINT + } catch(const T& ex) { res = m_translateFunction(ex); //!OCLINT parameter reassignment return true; } catch(...) 
{} //!OCLINT - empty catch statement @@ -1631,64 +1783,19 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP DOCTEST_INTERFACE void registerExceptionTranslatorImpl(const IExceptionTranslator* et); - template <bool C> - struct StringStreamBase - { - template <typename T> - static void convert(std::ostream* s, const T& in) { - *s << toString(in); - } - - // always treat char* as a string in this context - no matter - // if DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING is defined - static void convert(std::ostream* s, const char* in) { *s << String(in); } - }; - - template <> - struct StringStreamBase<true> - { - template <typename T> - static void convert(std::ostream* s, const T& in) { - *s << in; - } - }; + // ContextScope base class used to allow implementing methods of ContextScope + // that don't depend on the template parameter in doctest.cpp. + struct DOCTEST_INTERFACE ContextScopeBase : public IContextScope { + ContextScopeBase(const ContextScopeBase&) = delete; - template <typename T> - struct StringStream : public StringStreamBase<has_insertion_operator<T>::value> - {}; + ContextScopeBase& operator=(const ContextScopeBase&) = delete; + ContextScopeBase& operator=(ContextScopeBase&&) = delete; - template <typename T> - void toStream(std::ostream* s, const T& value) { - StringStream<T>::convert(s, value); - } + ~ContextScopeBase() override = default; -#ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING - DOCTEST_INTERFACE void toStream(std::ostream* s, char* in); - DOCTEST_INTERFACE void toStream(std::ostream* s, const char* in); -#endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING - DOCTEST_INTERFACE void toStream(std::ostream* s, bool in); - DOCTEST_INTERFACE void toStream(std::ostream* s, float in); - DOCTEST_INTERFACE void toStream(std::ostream* s, double in); - DOCTEST_INTERFACE void toStream(std::ostream* s, double long in); - - DOCTEST_INTERFACE void toStream(std::ostream* s, char in); - DOCTEST_INTERFACE void toStream(std::ostream* s, char signed in); - DOCTEST_INTERFACE void toStream(std::ostream* s, char unsigned in); - DOCTEST_INTERFACE void toStream(std::ostream* s, int short in); - DOCTEST_INTERFACE void toStream(std::ostream* s, int short unsigned in); - DOCTEST_INTERFACE void toStream(std::ostream* s, int in); - DOCTEST_INTERFACE void toStream(std::ostream* s, int unsigned in); - DOCTEST_INTERFACE void toStream(std::ostream* s, int long in); - DOCTEST_INTERFACE void toStream(std::ostream* s, int long unsigned in); - DOCTEST_INTERFACE void toStream(std::ostream* s, int long long in); - DOCTEST_INTERFACE void toStream(std::ostream* s, int long long unsigned in); - - // ContextScope base class used to allow implementing methods of ContextScope - // that don't depend on the template parameter in doctest.cpp. 
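// The ContextScopeBase / ContextScope<L> split above follows the usual
// pattern for containing template bloat: methods that do not depend on the
// lambda type L live in a non-template base that can be defined once in
// doctest.cpp, while the class template stays a thin header-only layer.
// A reduced sketch of the idiom, with hypothetical names rather than
// doctest's own:
#include <ostream>

struct ScopeBase {                        // non-template: compiled once
    bool need_to_destroy = true;
    void destroy() { need_to_destroy = false; }   // L-independent logic
};

template <typename L>
struct Scope : ScopeBase {                // template: only stores the lambda
    L lambda;
    explicit Scope(L&& l) : lambda(static_cast<L&&>(l)) {}
    void stringify(std::ostream* s) const { lambda(s); }
};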
- class DOCTEST_INTERFACE ContextScopeBase : public IContextScope { protected: ContextScopeBase(); - ContextScopeBase(ContextScopeBase&& other); + ContextScopeBase(ContextScopeBase&& other) noexcept; void destroy(); bool need_to_destroy{true}; @@ -1696,12 +1803,17 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP template <typename L> class ContextScope : public ContextScopeBase { - const L lambda_; + L lambda_; public: explicit ContextScope(const L &lambda) : lambda_(lambda) {} + explicit ContextScope(L&& lambda) : lambda_(static_cast<L&&>(lambda)) { } - ContextScope(ContextScope &&other) : ContextScopeBase(static_cast<ContextScopeBase&&>(other)), lambda_(other.lambda_) {} + ContextScope(const ContextScope&) = delete; + ContextScope(ContextScope&&) noexcept = default; + + ContextScope& operator=(const ContextScope&) = delete; + ContextScope& operator=(ContextScope&&) = delete; void stringify(std::ostream* s) const override { lambda_(s); } @@ -1718,15 +1830,23 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP bool logged = false; MessageBuilder(const char* file, int line, assertType::Enum severity); - MessageBuilder() = delete; + + MessageBuilder(const MessageBuilder&) = delete; + MessageBuilder(MessageBuilder&&) = delete; + + MessageBuilder& operator=(const MessageBuilder&) = delete; + MessageBuilder& operator=(MessageBuilder&&) = delete; + ~MessageBuilder(); // the preferred way of chaining parameters for stringification +DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4866) template <typename T> MessageBuilder& operator,(const T& in) { - toStream(m_stream, in); + *m_stream << (DOCTEST_STRINGIFY(in)); return *this; } +DOCTEST_MSVC_SUPPRESS_WARNING_POP // kept here just for backwards-compatibility - the comma operator should be preferred now template <typename T> @@ -1742,7 +1862,7 @@ DOCTEST_CLANG_SUPPRESS_WARNING_POP bool log(); void react(); }; - + template <typename L> ContextScope<L> MakeContextScope(const L &lambda) { return ContextScope<L>(lambda); @@ -1795,7 +1915,7 @@ int registerExceptionTranslator(String (*)(T)) { #endif // DOCTEST_CONFIG_DISABLE namespace detail { - typedef void (*assert_handler)(const AssertData&); + using assert_handler = void (*)(const AssertData&); struct ContextState; } // namespace detail @@ -1808,7 +1928,13 @@ class DOCTEST_INTERFACE Context public: explicit Context(int argc = 0, const char* const* argv = nullptr); - ~Context(); + Context(const Context&) = delete; + Context(Context&&) = delete; + + Context& operator=(const Context&) = delete; + Context& operator=(Context&&) = delete; + + ~Context(); // NOLINT(performance-trivially-destructible) void applyCommandLine(int argc, const char* const* argv); @@ -1916,8 +2042,7 @@ struct DOCTEST_INTERFACE IReporter // or isn't in the execution range (between first and last) (safe to cache a pointer to the input) virtual void test_case_skipped(const TestCaseData&) = 0; - // doctest will not be managing the lifetimes of reporters given to it but this would still be nice to have - virtual ~IReporter(); + DOCTEST_DECLARE_INTERFACE(IReporter) // can obtain all currently active contexts and stringify them if one wishes to do so static int get_num_active_contexts(); @@ -1929,7 +2054,7 @@ struct DOCTEST_INTERFACE IReporter }; namespace detail { - typedef IReporter* (*reporterCreatorFunc)(const ContextOptions&); + using reporterCreatorFunc = IReporter* (*)(const ContextOptions&); DOCTEST_INTERFACE void registerReporterImpl(const char* name, int prio, reporterCreatorFunc c, bool isReporter); @@ -1946,15 +2071,30 @@ int registerReporter(const char* 
name, int priority, bool isReporter) { } } // namespace doctest +#ifdef DOCTEST_CONFIG_ASSERTS_RETURN_VALUES +#define DOCTEST_FUNC_EMPTY [] { return false; }() +#else +#define DOCTEST_FUNC_EMPTY (void)0 +#endif + // if registering is not disabled -#if !defined(DOCTEST_CONFIG_DISABLE) +#ifndef DOCTEST_CONFIG_DISABLE + +#ifdef DOCTEST_CONFIG_ASSERTS_RETURN_VALUES +#define DOCTEST_FUNC_SCOPE_BEGIN [&] +#define DOCTEST_FUNC_SCOPE_END () +#define DOCTEST_FUNC_SCOPE_RET(v) return v +#else +#define DOCTEST_FUNC_SCOPE_BEGIN do +#define DOCTEST_FUNC_SCOPE_END while(false) +#define DOCTEST_FUNC_SCOPE_RET(v) (void)0 +#endif // common code in asserts - for convenience #define DOCTEST_ASSERT_LOG_REACT_RETURN(b) \ - if(b.log()) \ - DOCTEST_BREAK_INTO_DEBUGGER(); \ - b.react(); \ - return !b.m_failed + if(b.log()) DOCTEST_BREAK_INTO_DEBUGGER(); \ + b.react(); \ + DOCTEST_FUNC_SCOPE_RET(!b.m_failed) #ifdef DOCTEST_CONFIG_NO_TRY_CATCH_IN_ASSERTS #define DOCTEST_WRAP_IN_TRY(x) x; @@ -1976,7 +2116,7 @@ int registerReporter(const char* name, int priority, bool isReporter) { // registers the test by initializing a dummy var with a function #define DOCTEST_REGISTER_FUNCTION(global_prefix, f, decorators) \ - global_prefix DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_VAR_), \ + global_prefix DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_VAR_), /* NOLINT */ \ doctest::detail::regTest( \ doctest::detail::TestCase( \ f, __FILE__, __LINE__, \ @@ -1984,18 +2124,18 @@ int registerReporter(const char* name, int priority, bool isReporter) { decorators)) #define DOCTEST_IMPLEMENT_FIXTURE(der, base, func, decorators) \ - namespace { \ + namespace { /* NOLINT */ \ struct der : public base \ { \ void f(); \ }; \ - static void func() { \ + static inline DOCTEST_NOINLINE void func() { \ der v; \ v.f(); \ } \ DOCTEST_REGISTER_FUNCTION(DOCTEST_EMPTY, func, decorators) \ } \ - inline DOCTEST_NOINLINE void der::f() + inline DOCTEST_NOINLINE void der::f() // NOLINT(misc-definitions-in-headers) #define DOCTEST_CREATE_AND_REGISTER_FUNCTION(f, decorators) \ static void f(); \ @@ -2004,7 +2144,7 @@ int registerReporter(const char* name, int priority, bool isReporter) { #define DOCTEST_CREATE_AND_REGISTER_FUNCTION_IN_CLASS(f, proxy, decorators) \ static doctest::detail::funcType proxy() { return f; } \ - DOCTEST_REGISTER_FUNCTION(inline, proxy(), decorators) \ + DOCTEST_REGISTER_FUNCTION(inline, proxy(), decorators) \ static void f() // for registering tests @@ -2012,7 +2152,7 @@ int registerReporter(const char* name, int priority, bool isReporter) { DOCTEST_CREATE_AND_REGISTER_FUNCTION(DOCTEST_ANONYMOUS(DOCTEST_ANON_FUNC_), decorators) // for registering tests in classes - requires C++17 for inline variables! -#if __cplusplus >= 201703L || (DOCTEST_MSVC >= DOCTEST_COMPILER(19, 12, 0) && _MSVC_LANG >= 201703L) +#if DOCTEST_CPLUSPLUS >= 201703L #define DOCTEST_TEST_CASE_CLASS(decorators) \ DOCTEST_CREATE_AND_REGISTER_FUNCTION_IN_CLASS(DOCTEST_ANONYMOUS(DOCTEST_ANON_FUNC_), \ DOCTEST_ANONYMOUS(DOCTEST_ANON_PROXY_), \ @@ -2028,22 +2168,21 @@ int registerReporter(const char* name, int priority, bool isReporter) { DOCTEST_ANONYMOUS(DOCTEST_ANON_FUNC_), decorators) // for converting types to strings without the <typeinfo> header and demangling -#define DOCTEST_TYPE_TO_STRING_IMPL(...) \ - template <> \ - inline const char* type_to_string<__VA_ARGS__>() { \ - return "<" #__VA_ARGS__ ">"; \ - } -#define DOCTEST_TYPE_TO_STRING(...) 
\ - namespace doctest { namespace detail { \ - DOCTEST_TYPE_TO_STRING_IMPL(__VA_ARGS__) \ +#define DOCTEST_TYPE_TO_STRING_AS(str, ...) \ + namespace doctest { \ + template <> \ + inline String toString<__VA_ARGS__>() { \ + return str; \ } \ } \ static_assert(true, "") +#define DOCTEST_TYPE_TO_STRING(...) DOCTEST_TYPE_TO_STRING_AS(#__VA_ARGS__, __VA_ARGS__) + #define DOCTEST_TEST_CASE_TEMPLATE_DEFINE_IMPL(dec, T, iter, func) \ template <typename T> \ static void func(); \ - namespace { \ + namespace { /* NOLINT */ \ template <typename Tuple> \ struct iter; \ template <typename Type, typename... Rest> \ @@ -2052,7 +2191,7 @@ int registerReporter(const char* name, int priority, bool isReporter) { iter(const char* file, unsigned line, int index) { \ doctest::detail::regTest(doctest::detail::TestCase(func<Type>, file, line, \ doctest_detail_test_suite_ns::getCurrentTestSuite(), \ - doctest::detail::type_to_string<Type>(), \ + doctest::toString<Type>(), \ int(line) * 1000 + index) \ * dec); \ iter<std::tuple<Rest...>>(file, line, index + 1); \ @@ -2072,7 +2211,7 @@ int registerReporter(const char* name, int priority, bool isReporter) { DOCTEST_ANONYMOUS(DOCTEST_ANON_TMP_)) #define DOCTEST_TEST_CASE_TEMPLATE_INSTANTIATE_IMPL(id, anon, ...) \ - DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_CAT(anon, DUMMY), \ + DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_CAT(anon, DUMMY), /* NOLINT(cert-err58-cpp, fuchsia-statically-constructed-objects) */ \ doctest::detail::instantiationHelper( \ DOCTEST_CAT(id, ITERATOR)<__VA_ARGS__>(__FILE__, __LINE__, 0))) @@ -2101,7 +2240,7 @@ int registerReporter(const char* name, int priority, bool isReporter) { // for grouping tests in test suites by using code blocks #define DOCTEST_TEST_SUITE_IMPL(decorators, ns_name) \ namespace ns_name { namespace doctest_detail_test_suite_ns { \ - static DOCTEST_NOINLINE doctest::detail::TestSuite& getCurrentTestSuite() { \ + static DOCTEST_NOINLINE doctest::detail::TestSuite& getCurrentTestSuite() noexcept { \ DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4640) \ DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wexit-time-destructors") \ DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH("-Wmissing-field-initializers") \ @@ -2125,20 +2264,20 @@ int registerReporter(const char* name, int priority, bool isReporter) { // for starting a testsuite block #define DOCTEST_TEST_SUITE_BEGIN(decorators) \ - DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_VAR_), \ + DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_VAR_), /* NOLINT(cert-err58-cpp) */ \ doctest::detail::setTestSuite(doctest::detail::TestSuite() * decorators)) \ static_assert(true, "") // for ending a testsuite block #define DOCTEST_TEST_SUITE_END \ - DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_VAR_), \ + DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_VAR_), /* NOLINT(cert-err58-cpp) */ \ doctest::detail::setTestSuite(doctest::detail::TestSuite() * "")) \ - typedef int DOCTEST_ANONYMOUS(DOCTEST_ANON_FOR_SEMICOLON_) + using DOCTEST_ANONYMOUS(DOCTEST_ANON_FOR_SEMICOLON_) = int // for registering exception translators #define DOCTEST_REGISTER_EXCEPTION_TRANSLATOR_IMPL(translatorName, signature) \ inline doctest::String translatorName(signature); \ - DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_TRANSLATOR_), \ + DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_TRANSLATOR_), /* NOLINT(cert-err58-cpp) */ \ doctest::registerExceptionTranslator(translatorName)) \ doctest::String translatorName(signature) @@ -2148,13 +2287,13 @@ int registerReporter(const char* name, 
int priority, bool isReporter) { // for registering reporters #define DOCTEST_REGISTER_REPORTER(name, priority, reporter) \ - DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_REPORTER_), \ + DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_REPORTER_), /* NOLINT(cert-err58-cpp) */ \ doctest::registerReporter<reporter>(name, priority, true)) \ static_assert(true, "") // for registering listeners #define DOCTEST_REGISTER_LISTENER(name, priority, reporter) \ - DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_REPORTER_), \ + DOCTEST_GLOBAL_NO_WARNINGS(DOCTEST_ANONYMOUS(DOCTEST_ANON_REPORTER_), /* NOLINT(cert-err58-cpp) */ \ doctest::registerReporter<reporter>(name, priority, false)) \ static_assert(true, "") @@ -2177,13 +2316,13 @@ int registerReporter(const char* name, int priority, bool isReporter) { #define DOCTEST_CAPTURE(x) DOCTEST_INFO(#x " := ", x) #define DOCTEST_ADD_AT_IMPL(type, file, line, mb, ...) \ - [&] { \ + DOCTEST_FUNC_SCOPE_BEGIN { \ doctest::detail::MessageBuilder mb(file, line, doctest::assertType::type); \ mb * __VA_ARGS__; \ if(mb.log()) \ DOCTEST_BREAK_INTO_DEBUGGER(); \ mb.react(); \ - }() + } DOCTEST_FUNC_SCOPE_END // clang-format off #define DOCTEST_ADD_MESSAGE_AT(file, line, ...) DOCTEST_ADD_AT_IMPL(is_warn, file, line, DOCTEST_ANONYMOUS(DOCTEST_MESSAGE_), __VA_ARGS__) @@ -2201,18 +2340,37 @@ int registerReporter(const char* name, int priority, bool isReporter) { #define DOCTEST_ASSERT_IMPLEMENT_2(assert_type, ...) \ DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Woverloaded-shift-op-parentheses") \ + /* NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) */ \ doctest::detail::ResultBuilder DOCTEST_RB(doctest::assertType::assert_type, __FILE__, \ __LINE__, #__VA_ARGS__); \ DOCTEST_WRAP_IN_TRY(DOCTEST_RB.setResult( \ doctest::detail::ExpressionDecomposer(doctest::assertType::assert_type) \ - << __VA_ARGS__)) \ + << __VA_ARGS__)) /* NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) */ \ DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB) \ DOCTEST_CLANG_SUPPRESS_WARNING_POP #define DOCTEST_ASSERT_IMPLEMENT_1(assert_type, ...) \ - [&] { \ + DOCTEST_FUNC_SCOPE_BEGIN { \ DOCTEST_ASSERT_IMPLEMENT_2(assert_type, __VA_ARGS__); \ - }() + } DOCTEST_FUNC_SCOPE_END // NOLINT(clang-analyzer-cplusplus.NewDeleteLeaks) + +#define DOCTEST_BINARY_ASSERT(assert_type, comp, ...) \ + DOCTEST_FUNC_SCOPE_BEGIN { \ + doctest::detail::ResultBuilder DOCTEST_RB(doctest::assertType::assert_type, __FILE__, \ + __LINE__, #__VA_ARGS__); \ + DOCTEST_WRAP_IN_TRY( \ + DOCTEST_RB.binary_assert<doctest::detail::binaryAssertComparison::comp>( \ + __VA_ARGS__)) \ + DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ + } DOCTEST_FUNC_SCOPE_END + +#define DOCTEST_UNARY_ASSERT(assert_type, ...) \ + DOCTEST_FUNC_SCOPE_BEGIN { \ + doctest::detail::ResultBuilder DOCTEST_RB(doctest::assertType::assert_type, __FILE__, \ + __LINE__, #__VA_ARGS__); \ + DOCTEST_WRAP_IN_TRY(DOCTEST_RB.unary_assert(__VA_ARGS__)) \ + DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ + } DOCTEST_FUNC_SCOPE_END #else // DOCTEST_CONFIG_SUPER_FAST_ASSERTS @@ -2226,6 +2384,14 @@ int registerReporter(const char* name, int priority, bool isReporter) { doctest::detail::ExpressionDecomposer(doctest::assertType::assert_type) \ << __VA_ARGS__) DOCTEST_CLANG_SUPPRESS_WARNING_POP +#define DOCTEST_BINARY_ASSERT(assert_type, comparison, ...) 
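
For the `DOCTEST_BINARY_ASSERT` definition starting here (the `DOCTEST_CONFIG_SUPER_FAST_ASSERTS` branch): the `*_EQ`/`*_NE`-style macros pass both operands to a single function, whereas the generic `CHECK(a == b)` form has to route the expression through decomposition proxies so the framework can capture each side. A rough, self-contained sketch of that difference (hypothetical names, not doctest's internals):

```cpp
#include <cstdio>

// CHECK(a == b) style: an inserter object captures the left side, then a
// proxy's operator== captures the right side. Two template instantiations
// per assert, which adds up in large test binaries.
template <typename L>
struct Decomposer {
    L lhs;
    template <typename R>
    bool operator==(const R& rhs) const { return lhs == rhs; }
};
struct Start {
    template <typename L>
    Decomposer<L> operator<<(const L& lhs) const { return Decomposer<L>{lhs}; }
};

// CHECK_EQ(a, b) style: both operands forwarded straight to one function,
// no proxy objects at the call site.
template <typename L, typename R>
bool binary_eq(const char* expr, const L& lhs, const R& rhs) {
    const bool ok = (lhs == rhs);
    if(!ok) std::printf("FAILED: %s\n", expr);
    return ok;
}

int main() {
    bool a = (Start{} << 1) == 1;       // decomposed form
    bool b = binary_eq("1 == 2", 1, 2); // direct "fast assert" form
    return (a && !b) ? 0 : 1;
}
```
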
\ + doctest::detail::binary_assert<doctest::detail::binaryAssertComparison::comparison>( \ + doctest::assertType::assert_type, __FILE__, __LINE__, #__VA_ARGS__, __VA_ARGS__) + +#define DOCTEST_UNARY_ASSERT(assert_type, ...) \ + doctest::detail::unary_assert(doctest::assertType::assert_type, __FILE__, __LINE__, \ + #__VA_ARGS__, __VA_ARGS__) + #endif // DOCTEST_CONFIG_SUPER_FAST_ASSERTS #define DOCTEST_WARN(...) DOCTEST_ASSERT_IMPLEMENT_1(DT_WARN, __VA_ARGS__) @@ -2236,34 +2402,62 @@ int registerReporter(const char* name, int priority, bool isReporter) { #define DOCTEST_REQUIRE_FALSE(...) DOCTEST_ASSERT_IMPLEMENT_1(DT_REQUIRE_FALSE, __VA_ARGS__) // clang-format off -#define DOCTEST_WARN_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_WARN, cond); }() -#define DOCTEST_CHECK_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_CHECK, cond); }() -#define DOCTEST_REQUIRE_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_REQUIRE, cond); }() -#define DOCTEST_WARN_FALSE_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_WARN_FALSE, cond); }() -#define DOCTEST_CHECK_FALSE_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_CHECK_FALSE, cond); }() -#define DOCTEST_REQUIRE_FALSE_MESSAGE(cond, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_REQUIRE_FALSE, cond); }() +#define DOCTEST_WARN_MESSAGE(cond, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_WARN, cond); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_CHECK_MESSAGE(cond, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_CHECK, cond); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_REQUIRE_MESSAGE(cond, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_REQUIRE, cond); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_WARN_FALSE_MESSAGE(cond, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_WARN_FALSE, cond); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_CHECK_FALSE_MESSAGE(cond, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_CHECK_FALSE, cond); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_REQUIRE_FALSE_MESSAGE(cond, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_ASSERT_IMPLEMENT_2(DT_REQUIRE_FALSE, cond); } DOCTEST_FUNC_SCOPE_END // clang-format on +#define DOCTEST_WARN_EQ(...) DOCTEST_BINARY_ASSERT(DT_WARN_EQ, eq, __VA_ARGS__) +#define DOCTEST_CHECK_EQ(...) DOCTEST_BINARY_ASSERT(DT_CHECK_EQ, eq, __VA_ARGS__) +#define DOCTEST_REQUIRE_EQ(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_EQ, eq, __VA_ARGS__) +#define DOCTEST_WARN_NE(...) DOCTEST_BINARY_ASSERT(DT_WARN_NE, ne, __VA_ARGS__) +#define DOCTEST_CHECK_NE(...) DOCTEST_BINARY_ASSERT(DT_CHECK_NE, ne, __VA_ARGS__) +#define DOCTEST_REQUIRE_NE(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_NE, ne, __VA_ARGS__) +#define DOCTEST_WARN_GT(...) DOCTEST_BINARY_ASSERT(DT_WARN_GT, gt, __VA_ARGS__) +#define DOCTEST_CHECK_GT(...) DOCTEST_BINARY_ASSERT(DT_CHECK_GT, gt, __VA_ARGS__) +#define DOCTEST_REQUIRE_GT(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_GT, gt, __VA_ARGS__) +#define DOCTEST_WARN_LT(...) DOCTEST_BINARY_ASSERT(DT_WARN_LT, lt, __VA_ARGS__) +#define DOCTEST_CHECK_LT(...) DOCTEST_BINARY_ASSERT(DT_CHECK_LT, lt, __VA_ARGS__) +#define DOCTEST_REQUIRE_LT(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_LT, lt, __VA_ARGS__) +#define DOCTEST_WARN_GE(...) 
DOCTEST_BINARY_ASSERT(DT_WARN_GE, ge, __VA_ARGS__) +#define DOCTEST_CHECK_GE(...) DOCTEST_BINARY_ASSERT(DT_CHECK_GE, ge, __VA_ARGS__) +#define DOCTEST_REQUIRE_GE(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_GE, ge, __VA_ARGS__) +#define DOCTEST_WARN_LE(...) DOCTEST_BINARY_ASSERT(DT_WARN_LE, le, __VA_ARGS__) +#define DOCTEST_CHECK_LE(...) DOCTEST_BINARY_ASSERT(DT_CHECK_LE, le, __VA_ARGS__) +#define DOCTEST_REQUIRE_LE(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_LE, le, __VA_ARGS__) + +#define DOCTEST_WARN_UNARY(...) DOCTEST_UNARY_ASSERT(DT_WARN_UNARY, __VA_ARGS__) +#define DOCTEST_CHECK_UNARY(...) DOCTEST_UNARY_ASSERT(DT_CHECK_UNARY, __VA_ARGS__) +#define DOCTEST_REQUIRE_UNARY(...) DOCTEST_UNARY_ASSERT(DT_REQUIRE_UNARY, __VA_ARGS__) +#define DOCTEST_WARN_UNARY_FALSE(...) DOCTEST_UNARY_ASSERT(DT_WARN_UNARY_FALSE, __VA_ARGS__) +#define DOCTEST_CHECK_UNARY_FALSE(...) DOCTEST_UNARY_ASSERT(DT_CHECK_UNARY_FALSE, __VA_ARGS__) +#define DOCTEST_REQUIRE_UNARY_FALSE(...) DOCTEST_UNARY_ASSERT(DT_REQUIRE_UNARY_FALSE, __VA_ARGS__) + +#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS + #define DOCTEST_ASSERT_THROWS_AS(expr, assert_type, message, ...) \ - [&] { \ + DOCTEST_FUNC_SCOPE_BEGIN { \ if(!doctest::getContextOptions()->no_throw) { \ doctest::detail::ResultBuilder DOCTEST_RB(doctest::assertType::assert_type, __FILE__, \ __LINE__, #expr, #__VA_ARGS__, message); \ try { \ DOCTEST_CAST_TO_VOID(expr) \ - } catch(const typename doctest::detail::remove_const< \ - typename doctest::detail::remove_reference<__VA_ARGS__>::type>::type&) { \ + } catch(const typename doctest::detail::types::remove_const< \ + typename doctest::detail::types::remove_reference<__VA_ARGS__>::type>::type&) {\ DOCTEST_RB.translateException(); \ DOCTEST_RB.m_threw_as = true; \ } catch(...) { DOCTEST_RB.translateException(); } \ DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ - } else { \ - return false; \ + } else { /* NOLINT(*-else-after-return) */ \ + DOCTEST_FUNC_SCOPE_RET(false); \ } \ - }() + } DOCTEST_FUNC_SCOPE_END #define DOCTEST_ASSERT_THROWS_WITH(expr, expr_str, assert_type, ...) \ - [&] { \ + DOCTEST_FUNC_SCOPE_BEGIN { \ if(!doctest::getContextOptions()->no_throw) { \ doctest::detail::ResultBuilder DOCTEST_RB(doctest::assertType::assert_type, __FILE__, \ __LINE__, expr_str, "", __VA_ARGS__); \ @@ -2271,20 +2465,20 @@ int registerReporter(const char* name, int priority, bool isReporter) { DOCTEST_CAST_TO_VOID(expr) \ } catch(...) { DOCTEST_RB.translateException(); } \ DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ - } else { \ - return false; \ + } else { /* NOLINT(*-else-after-return) */ \ + DOCTEST_FUNC_SCOPE_RET(false); \ } \ - }() + } DOCTEST_FUNC_SCOPE_END #define DOCTEST_ASSERT_NOTHROW(assert_type, ...) \ - [&] { \ + DOCTEST_FUNC_SCOPE_BEGIN { \ doctest::detail::ResultBuilder DOCTEST_RB(doctest::assertType::assert_type, __FILE__, \ __LINE__, #__VA_ARGS__); \ try { \ DOCTEST_CAST_TO_VOID(__VA_ARGS__) \ } catch(...) { DOCTEST_RB.translateException(); } \ DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ - }() + } DOCTEST_FUNC_SCOPE_END // clang-format off #define DOCTEST_WARN_THROWS(...) DOCTEST_ASSERT_THROWS_WITH((__VA_ARGS__), #__VA_ARGS__, DT_WARN_THROWS, "") @@ -2307,166 +2501,23 @@ int registerReporter(const char* name, int priority, bool isReporter) { #define DOCTEST_CHECK_NOTHROW(...) DOCTEST_ASSERT_NOTHROW(DT_CHECK_NOTHROW, __VA_ARGS__) #define DOCTEST_REQUIRE_NOTHROW(...) DOCTEST_ASSERT_NOTHROW(DT_REQUIRE_NOTHROW, __VA_ARGS__) -#define DOCTEST_WARN_THROWS_MESSAGE(expr, ...) 
[&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS(expr); }() -#define DOCTEST_CHECK_THROWS_MESSAGE(expr, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS(expr); }() -#define DOCTEST_REQUIRE_THROWS_MESSAGE(expr, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS(expr); }() -#define DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS_AS(expr, ex); }() -#define DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS_AS(expr, ex); }() -#define DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS_AS(expr, ex); }() -#define DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS_WITH(expr, with); }() -#define DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS_WITH(expr, with); }() -#define DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS_WITH(expr, with); }() -#define DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS_WITH_AS(expr, with, ex); }() -#define DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS_WITH_AS(expr, with, ex); }() -#define DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, ex); }() -#define DOCTEST_WARN_NOTHROW_MESSAGE(expr, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_NOTHROW(expr); }() -#define DOCTEST_CHECK_NOTHROW_MESSAGE(expr, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_NOTHROW(expr); }() -#define DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, ...) [&] {DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_NOTHROW(expr); }() +#define DOCTEST_WARN_THROWS_MESSAGE(expr, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS(expr); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_CHECK_THROWS_MESSAGE(expr, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS(expr); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_REQUIRE_THROWS_MESSAGE(expr, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS(expr); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS_AS(expr, ex); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS_AS(expr, ex); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS_AS(expr, ex); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS_WITH(expr, with); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS_WITH(expr, with); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS_WITH(expr, with); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) 
DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_THROWS_WITH_AS(expr, with, ex); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_THROWS_WITH_AS(expr, with, ex); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, ex); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_WARN_NOTHROW_MESSAGE(expr, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_WARN_NOTHROW(expr); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_CHECK_NOTHROW_MESSAGE(expr, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_CHECK_NOTHROW(expr); } DOCTEST_FUNC_SCOPE_END +#define DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, ...) DOCTEST_FUNC_SCOPE_BEGIN { DOCTEST_INFO(__VA_ARGS__); DOCTEST_REQUIRE_NOTHROW(expr); } DOCTEST_FUNC_SCOPE_END // clang-format on -#ifndef DOCTEST_CONFIG_SUPER_FAST_ASSERTS - -#define DOCTEST_BINARY_ASSERT(assert_type, comp, ...) \ - [&] { \ - doctest::detail::ResultBuilder DOCTEST_RB(doctest::assertType::assert_type, __FILE__, \ - __LINE__, #__VA_ARGS__); \ - DOCTEST_WRAP_IN_TRY( \ - DOCTEST_RB.binary_assert<doctest::detail::binaryAssertComparison::comp>( \ - __VA_ARGS__)) \ - DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ - }() - -#define DOCTEST_UNARY_ASSERT(assert_type, ...) \ - [&] { \ - doctest::detail::ResultBuilder DOCTEST_RB(doctest::assertType::assert_type, __FILE__, \ - __LINE__, #__VA_ARGS__); \ - DOCTEST_WRAP_IN_TRY(DOCTEST_RB.unary_assert(__VA_ARGS__)) \ - DOCTEST_ASSERT_LOG_REACT_RETURN(DOCTEST_RB); \ - }() - -#else // DOCTEST_CONFIG_SUPER_FAST_ASSERTS - -#define DOCTEST_BINARY_ASSERT(assert_type, comparison, ...) \ - doctest::detail::binary_assert<doctest::detail::binaryAssertComparison::comparison>( \ - doctest::assertType::assert_type, __FILE__, __LINE__, #__VA_ARGS__, __VA_ARGS__) - -#define DOCTEST_UNARY_ASSERT(assert_type, ...) \ - doctest::detail::unary_assert(doctest::assertType::assert_type, __FILE__, __LINE__, \ - #__VA_ARGS__, __VA_ARGS__) - -#endif // DOCTEST_CONFIG_SUPER_FAST_ASSERTS - -#define DOCTEST_WARN_EQ(...) DOCTEST_BINARY_ASSERT(DT_WARN_EQ, eq, __VA_ARGS__) -#define DOCTEST_CHECK_EQ(...) DOCTEST_BINARY_ASSERT(DT_CHECK_EQ, eq, __VA_ARGS__) -#define DOCTEST_REQUIRE_EQ(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_EQ, eq, __VA_ARGS__) -#define DOCTEST_WARN_NE(...) DOCTEST_BINARY_ASSERT(DT_WARN_NE, ne, __VA_ARGS__) -#define DOCTEST_CHECK_NE(...) DOCTEST_BINARY_ASSERT(DT_CHECK_NE, ne, __VA_ARGS__) -#define DOCTEST_REQUIRE_NE(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_NE, ne, __VA_ARGS__) -#define DOCTEST_WARN_GT(...) DOCTEST_BINARY_ASSERT(DT_WARN_GT, gt, __VA_ARGS__) -#define DOCTEST_CHECK_GT(...) DOCTEST_BINARY_ASSERT(DT_CHECK_GT, gt, __VA_ARGS__) -#define DOCTEST_REQUIRE_GT(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_GT, gt, __VA_ARGS__) -#define DOCTEST_WARN_LT(...) DOCTEST_BINARY_ASSERT(DT_WARN_LT, lt, __VA_ARGS__) -#define DOCTEST_CHECK_LT(...) DOCTEST_BINARY_ASSERT(DT_CHECK_LT, lt, __VA_ARGS__) -#define DOCTEST_REQUIRE_LT(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_LT, lt, __VA_ARGS__) -#define DOCTEST_WARN_GE(...) DOCTEST_BINARY_ASSERT(DT_WARN_GE, ge, __VA_ARGS__) -#define DOCTEST_CHECK_GE(...) DOCTEST_BINARY_ASSERT(DT_CHECK_GE, ge, __VA_ARGS__) -#define DOCTEST_REQUIRE_GE(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_GE, ge, __VA_ARGS__) -#define DOCTEST_WARN_LE(...) 
DOCTEST_BINARY_ASSERT(DT_WARN_LE, le, __VA_ARGS__) -#define DOCTEST_CHECK_LE(...) DOCTEST_BINARY_ASSERT(DT_CHECK_LE, le, __VA_ARGS__) -#define DOCTEST_REQUIRE_LE(...) DOCTEST_BINARY_ASSERT(DT_REQUIRE_LE, le, __VA_ARGS__) - -#define DOCTEST_WARN_UNARY(...) DOCTEST_UNARY_ASSERT(DT_WARN_UNARY, __VA_ARGS__) -#define DOCTEST_CHECK_UNARY(...) DOCTEST_UNARY_ASSERT(DT_CHECK_UNARY, __VA_ARGS__) -#define DOCTEST_REQUIRE_UNARY(...) DOCTEST_UNARY_ASSERT(DT_REQUIRE_UNARY, __VA_ARGS__) -#define DOCTEST_WARN_UNARY_FALSE(...) DOCTEST_UNARY_ASSERT(DT_WARN_UNARY_FALSE, __VA_ARGS__) -#define DOCTEST_CHECK_UNARY_FALSE(...) DOCTEST_UNARY_ASSERT(DT_CHECK_UNARY_FALSE, __VA_ARGS__) -#define DOCTEST_REQUIRE_UNARY_FALSE(...) DOCTEST_UNARY_ASSERT(DT_REQUIRE_UNARY_FALSE, __VA_ARGS__) - -#ifdef DOCTEST_CONFIG_NO_EXCEPTIONS - -#undef DOCTEST_WARN_THROWS -#undef DOCTEST_CHECK_THROWS -#undef DOCTEST_REQUIRE_THROWS -#undef DOCTEST_WARN_THROWS_AS -#undef DOCTEST_CHECK_THROWS_AS -#undef DOCTEST_REQUIRE_THROWS_AS -#undef DOCTEST_WARN_THROWS_WITH -#undef DOCTEST_CHECK_THROWS_WITH -#undef DOCTEST_REQUIRE_THROWS_WITH -#undef DOCTEST_WARN_THROWS_WITH_AS -#undef DOCTEST_CHECK_THROWS_WITH_AS -#undef DOCTEST_REQUIRE_THROWS_WITH_AS -#undef DOCTEST_WARN_NOTHROW -#undef DOCTEST_CHECK_NOTHROW -#undef DOCTEST_REQUIRE_NOTHROW - -#undef DOCTEST_WARN_THROWS_MESSAGE -#undef DOCTEST_CHECK_THROWS_MESSAGE -#undef DOCTEST_REQUIRE_THROWS_MESSAGE -#undef DOCTEST_WARN_THROWS_AS_MESSAGE -#undef DOCTEST_CHECK_THROWS_AS_MESSAGE -#undef DOCTEST_REQUIRE_THROWS_AS_MESSAGE -#undef DOCTEST_WARN_THROWS_WITH_MESSAGE -#undef DOCTEST_CHECK_THROWS_WITH_MESSAGE -#undef DOCTEST_REQUIRE_THROWS_WITH_MESSAGE -#undef DOCTEST_WARN_THROWS_WITH_AS_MESSAGE -#undef DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE -#undef DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE -#undef DOCTEST_WARN_NOTHROW_MESSAGE -#undef DOCTEST_CHECK_NOTHROW_MESSAGE -#undef DOCTEST_REQUIRE_NOTHROW_MESSAGE - -#ifdef DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS - -#define DOCTEST_WARN_THROWS(...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS(...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS(...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_WARN_NOTHROW(...) ([] { return false; }) -#define DOCTEST_CHECK_NOTHROW(...) ([] { return false; }) -#define DOCTEST_REQUIRE_NOTHROW(...) ([] { return false; }) - -#define DOCTEST_WARN_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, ...) 
([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) ([] { return false; }) -#define DOCTEST_WARN_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) - -#else // DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS - -#undef DOCTEST_REQUIRE -#undef DOCTEST_REQUIRE_FALSE -#undef DOCTEST_REQUIRE_MESSAGE -#undef DOCTEST_REQUIRE_FALSE_MESSAGE -#undef DOCTEST_REQUIRE_EQ -#undef DOCTEST_REQUIRE_NE -#undef DOCTEST_REQUIRE_GT -#undef DOCTEST_REQUIRE_LT -#undef DOCTEST_REQUIRE_GE -#undef DOCTEST_REQUIRE_LE -#undef DOCTEST_REQUIRE_UNARY -#undef DOCTEST_REQUIRE_UNARY_FALSE - -#endif // DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS - #endif // DOCTEST_CONFIG_NO_EXCEPTIONS // ================================================================================================= @@ -2476,7 +2527,7 @@ int registerReporter(const char* name, int priority, bool isReporter) { #else // DOCTEST_CONFIG_DISABLE #define DOCTEST_IMPLEMENT_FIXTURE(der, base, func, name) \ - namespace { \ + namespace /* NOLINT */ { \ template <typename DOCTEST_UNUSED_TEMPLATE_TYPE> \ struct der : public base \ { void f(); }; \ @@ -2502,8 +2553,8 @@ int registerReporter(const char* name, int priority, bool isReporter) { DOCTEST_ANONYMOUS(DOCTEST_ANON_FUNC_), name) // for converting types to strings without the <typeinfo> header and demangling +#define DOCTEST_TYPE_TO_STRING_AS(str, ...) static_assert(true, "") #define DOCTEST_TYPE_TO_STRING(...) static_assert(true, "") -#define DOCTEST_TYPE_TO_STRING_IMPL(...) // for typed tests #define DOCTEST_TEST_CASE_TEMPLATE(name, type, ...) \ @@ -2521,13 +2572,13 @@ int registerReporter(const char* name, int priority, bool isReporter) { #define DOCTEST_SUBCASE(name) // for a testsuite block -#define DOCTEST_TEST_SUITE(name) namespace +#define DOCTEST_TEST_SUITE(name) namespace // NOLINT // for starting a testsuite block #define DOCTEST_TEST_SUITE_BEGIN(name) static_assert(true, "") // for ending a testsuite block -#define DOCTEST_TEST_SUITE_END typedef int DOCTEST_ANONYMOUS(DOCTEST_ANON_FOR_SEMICOLON_) +#define DOCTEST_TEST_SUITE_END using DOCTEST_ANONYMOUS(DOCTEST_ANON_FOR_SEMICOLON_) = int #define DOCTEST_REGISTER_EXCEPTION_TRANSLATOR(signature) \ template <typename DOCTEST_UNUSED_TEMPLATE_TYPE> \ @@ -2545,7 +2596,8 @@ int registerReporter(const char* name, int priority, bool isReporter) { #define DOCTEST_FAIL_CHECK(...) (static_cast<void>(0)) #define DOCTEST_FAIL(...) (static_cast<void>(0)) -#ifdef DOCTEST_CONFIG_EVALUATE_ASSERTS_EVEN_WHEN_DISABLED +#if defined(DOCTEST_CONFIG_EVALUATE_ASSERTS_EVEN_WHEN_DISABLED) \ + && defined(DOCTEST_CONFIG_ASSERTS_RETURN_VALUES) #define DOCTEST_WARN(...) [&] { return __VA_ARGS__; }() #define DOCTEST_CHECK(...) [&] { return __VA_ARGS__; }() @@ -2601,85 +2653,196 @@ namespace detail { #define DOCTEST_CHECK_UNARY_FALSE(...) [&] { return !(__VA_ARGS__); }() #define DOCTEST_REQUIRE_UNARY_FALSE(...) [&] { return !(__VA_ARGS__); }() +#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS + +#define DOCTEST_WARN_THROWS_WITH(expr, with, ...) 
[] { static_assert(false, "Exception translation is not available when doctest is disabled."); return false; }() +#define DOCTEST_CHECK_THROWS_WITH(expr, with, ...) DOCTEST_WARN_THROWS_WITH(,,) +#define DOCTEST_REQUIRE_THROWS_WITH(expr, with, ...) DOCTEST_WARN_THROWS_WITH(,,) +#define DOCTEST_WARN_THROWS_WITH_AS(expr, with, ex, ...) DOCTEST_WARN_THROWS_WITH(,,) +#define DOCTEST_CHECK_THROWS_WITH_AS(expr, with, ex, ...) DOCTEST_WARN_THROWS_WITH(,,) +#define DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, ex, ...) DOCTEST_WARN_THROWS_WITH(,,) + +#define DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_WARN_THROWS_WITH(,,) +#define DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_WARN_THROWS_WITH(,,) +#define DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_WARN_THROWS_WITH(,,) +#define DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_WARN_THROWS_WITH(,,) +#define DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_WARN_THROWS_WITH(,,) +#define DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_WARN_THROWS_WITH(,,) + +#define DOCTEST_WARN_THROWS(...) [&] { try { __VA_ARGS__; return false; } catch (...) { return true; } }() +#define DOCTEST_CHECK_THROWS(...) [&] { try { __VA_ARGS__; return false; } catch (...) { return true; } }() +#define DOCTEST_REQUIRE_THROWS(...) [&] { try { __VA_ARGS__; return false; } catch (...) { return true; } }() +#define DOCTEST_WARN_THROWS_AS(expr, ...) [&] { try { expr; } catch (__VA_ARGS__) { return true; } catch (...) { } return false; }() +#define DOCTEST_CHECK_THROWS_AS(expr, ...) [&] { try { expr; } catch (__VA_ARGS__) { return true; } catch (...) { } return false; }() +#define DOCTEST_REQUIRE_THROWS_AS(expr, ...) [&] { try { expr; } catch (__VA_ARGS__) { return true; } catch (...) { } return false; }() +#define DOCTEST_WARN_NOTHROW(...) [&] { try { __VA_ARGS__; return true; } catch (...) { return false; } }() +#define DOCTEST_CHECK_NOTHROW(...) [&] { try { __VA_ARGS__; return true; } catch (...) { return false; } }() +#define DOCTEST_REQUIRE_NOTHROW(...) [&] { try { __VA_ARGS__; return true; } catch (...) { return false; } }() + +#define DOCTEST_WARN_THROWS_MESSAGE(expr, ...) [&] { try { __VA_ARGS__; return false; } catch (...) { return true; } }() +#define DOCTEST_CHECK_THROWS_MESSAGE(expr, ...) [&] { try { __VA_ARGS__; return false; } catch (...) { return true; } }() +#define DOCTEST_REQUIRE_THROWS_MESSAGE(expr, ...) [&] { try { __VA_ARGS__; return false; } catch (...) { return true; } }() +#define DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, ...) [&] { try { expr; } catch (__VA_ARGS__) { return true; } catch (...) { } return false; }() +#define DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, ...) [&] { try { expr; } catch (__VA_ARGS__) { return true; } catch (...) { } return false; }() +#define DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) [&] { try { expr; } catch (__VA_ARGS__) { return true; } catch (...) { } return false; }() +#define DOCTEST_WARN_NOTHROW_MESSAGE(expr, ...) [&] { try { __VA_ARGS__; return true; } catch (...) { return false; } }() +#define DOCTEST_CHECK_NOTHROW_MESSAGE(expr, ...) [&] { try { __VA_ARGS__; return true; } catch (...) { return false; } }() +#define DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, ...) [&] { try { __VA_ARGS__; return true; } catch (...) { return false; } }() + +#endif // DOCTEST_CONFIG_NO_EXCEPTIONS + #else // DOCTEST_CONFIG_EVALUATE_ASSERTS_EVEN_WHEN_DISABLED -#define DOCTEST_WARN(...) ([] { return false; }) -#define DOCTEST_CHECK(...) 
([] { return false; }) -#define DOCTEST_REQUIRE(...) ([] { return false; }) -#define DOCTEST_WARN_FALSE(...) ([] { return false; }) -#define DOCTEST_CHECK_FALSE(...) ([] { return false; }) -#define DOCTEST_REQUIRE_FALSE(...) ([] { return false; }) - -#define DOCTEST_WARN_MESSAGE(cond, ...) ([] { return false; }) -#define DOCTEST_CHECK_MESSAGE(cond, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_MESSAGE(cond, ...) ([] { return false; }) -#define DOCTEST_WARN_FALSE_MESSAGE(cond, ...) ([] { return false; }) -#define DOCTEST_CHECK_FALSE_MESSAGE(cond, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_FALSE_MESSAGE(cond, ...) ([] { return false; }) - -#define DOCTEST_WARN_EQ(...) ([] { return false; }) -#define DOCTEST_CHECK_EQ(...) ([] { return false; }) -#define DOCTEST_REQUIRE_EQ(...) ([] { return false; }) -#define DOCTEST_WARN_NE(...) ([] { return false; }) -#define DOCTEST_CHECK_NE(...) ([] { return false; }) -#define DOCTEST_REQUIRE_NE(...) ([] { return false; }) -#define DOCTEST_WARN_GT(...) ([] { return false; }) -#define DOCTEST_CHECK_GT(...) ([] { return false; }) -#define DOCTEST_REQUIRE_GT(...) ([] { return false; }) -#define DOCTEST_WARN_LT(...) ([] { return false; }) -#define DOCTEST_CHECK_LT(...) ([] { return false; }) -#define DOCTEST_REQUIRE_LT(...) ([] { return false; }) -#define DOCTEST_WARN_GE(...) ([] { return false; }) -#define DOCTEST_CHECK_GE(...) ([] { return false; }) -#define DOCTEST_REQUIRE_GE(...) ([] { return false; }) -#define DOCTEST_WARN_LE(...) ([] { return false; }) -#define DOCTEST_CHECK_LE(...) ([] { return false; }) -#define DOCTEST_REQUIRE_LE(...) ([] { return false; }) - -#define DOCTEST_WARN_UNARY(...) ([] { return false; }) -#define DOCTEST_CHECK_UNARY(...) ([] { return false; }) -#define DOCTEST_REQUIRE_UNARY(...) ([] { return false; }) -#define DOCTEST_WARN_UNARY_FALSE(...) ([] { return false; }) -#define DOCTEST_CHECK_UNARY_FALSE(...) ([] { return false; }) -#define DOCTEST_REQUIRE_UNARY_FALSE(...) ([] { return false; }) +#define DOCTEST_WARN(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_FALSE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_FALSE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_FALSE(...) DOCTEST_FUNC_EMPTY + +#define DOCTEST_WARN_MESSAGE(cond, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_MESSAGE(cond, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_MESSAGE(cond, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_FALSE_MESSAGE(cond, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_FALSE_MESSAGE(cond, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_FALSE_MESSAGE(cond, ...) DOCTEST_FUNC_EMPTY + +#define DOCTEST_WARN_EQ(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_EQ(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_EQ(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_NE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_NE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_NE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_GT(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_GT(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_GT(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_LT(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_LT(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_LT(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_GE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_GE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_GE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_LE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_LE(...) 
DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_LE(...) DOCTEST_FUNC_EMPTY + +#define DOCTEST_WARN_UNARY(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_UNARY(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_UNARY(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_UNARY_FALSE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_UNARY_FALSE(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_UNARY_FALSE(...) DOCTEST_FUNC_EMPTY -#endif // DOCTEST_CONFIG_EVALUATE_ASSERTS_EVEN_WHEN_DISABLED +#ifndef DOCTEST_CONFIG_NO_EXCEPTIONS -// TODO: think about if these also need to work properly even when doctest is disabled -#define DOCTEST_WARN_THROWS(...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS(...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS(...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_AS(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, ...) ([] { return false; }) -#define DOCTEST_WARN_NOTHROW(...) ([] { return false; }) -#define DOCTEST_CHECK_NOTHROW(...) ([] { return false; }) -#define DOCTEST_REQUIRE_NOTHROW(...) ([] { return false; }) - -#define DOCTEST_WARN_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) ([] { return false; }) -#define DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) ([] { return false; }) -#define DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) ([] { return false; }) -#define DOCTEST_WARN_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_CHECK_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) -#define DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, ...) ([] { return false; }) +#define DOCTEST_WARN_THROWS(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_THROWS(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_THROWS(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_THROWS_AS(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_THROWS_AS(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_THROWS_AS(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_THROWS_WITH(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_THROWS_WITH(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_THROWS_WITH(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_THROWS_WITH_AS(expr, with, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_THROWS_WITH_AS(expr, with, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, ...) 
DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_NOTHROW(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_NOTHROW(...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_NOTHROW(...) DOCTEST_FUNC_EMPTY + +#define DOCTEST_WARN_THROWS_MESSAGE(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_THROWS_MESSAGE(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_THROWS_MESSAGE(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_WARN_NOTHROW_MESSAGE(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_CHECK_NOTHROW_MESSAGE(expr, ...) DOCTEST_FUNC_EMPTY +#define DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, ...) DOCTEST_FUNC_EMPTY + +#endif // DOCTEST_CONFIG_NO_EXCEPTIONS + +#endif // DOCTEST_CONFIG_EVALUATE_ASSERTS_EVEN_WHEN_DISABLED #endif // DOCTEST_CONFIG_DISABLE +#ifdef DOCTEST_CONFIG_NO_EXCEPTIONS + +#ifdef DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS +#define DOCTEST_EXCEPTION_EMPTY_FUNC DOCTEST_FUNC_EMPTY +#else // DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS +#define DOCTEST_EXCEPTION_EMPTY_FUNC [] { static_assert(false, "Exceptions are disabled! " \ + "Use DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS if you want to compile with exceptions disabled."); return false; }() + +#undef DOCTEST_REQUIRE +#undef DOCTEST_REQUIRE_FALSE +#undef DOCTEST_REQUIRE_MESSAGE +#undef DOCTEST_REQUIRE_FALSE_MESSAGE +#undef DOCTEST_REQUIRE_EQ +#undef DOCTEST_REQUIRE_NE +#undef DOCTEST_REQUIRE_GT +#undef DOCTEST_REQUIRE_LT +#undef DOCTEST_REQUIRE_GE +#undef DOCTEST_REQUIRE_LE +#undef DOCTEST_REQUIRE_UNARY +#undef DOCTEST_REQUIRE_UNARY_FALSE + +#define DOCTEST_REQUIRE DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_FALSE DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_MESSAGE DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_FALSE_MESSAGE DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_EQ DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_NE DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_GT DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_LT DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_GE DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_LE DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_UNARY DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_UNARY_FALSE DOCTEST_EXCEPTION_EMPTY_FUNC + +#endif // DOCTEST_CONFIG_NO_EXCEPTIONS_BUT_WITH_ALL_ASSERTS + +#define DOCTEST_WARN_THROWS(...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_CHECK_THROWS(...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_THROWS(...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_WARN_THROWS_AS(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_CHECK_THROWS_AS(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_THROWS_AS(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_WARN_THROWS_WITH(expr, ...) 
DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_CHECK_THROWS_WITH(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_THROWS_WITH(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_WARN_THROWS_WITH_AS(expr, with, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_CHECK_THROWS_WITH_AS(expr, with, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_THROWS_WITH_AS(expr, with, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_WARN_NOTHROW(...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_CHECK_NOTHROW(...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_NOTHROW(...) DOCTEST_EXCEPTION_EMPTY_FUNC + +#define DOCTEST_WARN_THROWS_MESSAGE(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_CHECK_THROWS_MESSAGE(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_THROWS_MESSAGE(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_WARN_THROWS_AS_MESSAGE(expr, ex, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_CHECK_THROWS_AS_MESSAGE(expr, ex, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_THROWS_AS_MESSAGE(expr, ex, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_WARN_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_CHECK_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_THROWS_WITH_MESSAGE(expr, with, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_WARN_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_CHECK_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_THROWS_WITH_AS_MESSAGE(expr, with, ex, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_WARN_NOTHROW_MESSAGE(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_CHECK_NOTHROW_MESSAGE(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC +#define DOCTEST_REQUIRE_NOTHROW_MESSAGE(expr, ...) DOCTEST_EXCEPTION_EMPTY_FUNC + +#endif // DOCTEST_CONFIG_NO_EXCEPTIONS + // clang-format off // KEPT FOR BACKWARDS COMPATIBILITY - FORWARDING TO THE RIGHT MACROS #define DOCTEST_FAST_WARN_EQ DOCTEST_WARN_EQ @@ -2726,11 +2889,12 @@ namespace detail { // clang-format on // == SHORT VERSIONS OF THE MACROS -#if !defined(DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES) +#ifndef DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES #define TEST_CASE(name) DOCTEST_TEST_CASE(name) #define TEST_CASE_CLASS(name) DOCTEST_TEST_CASE_CLASS(name) #define TEST_CASE_FIXTURE(x, name) DOCTEST_TEST_CASE_FIXTURE(x, name) +#define TYPE_TO_STRING_AS(str, ...) DOCTEST_TYPE_TO_STRING_AS(str, __VA_ARGS__) #define TYPE_TO_STRING(...) DOCTEST_TYPE_TO_STRING(__VA_ARGS__) #define TEST_CASE_TEMPLATE(name, T, ...) 
DOCTEST_TEST_CASE_TEMPLATE(name, T, __VA_ARGS__) #define TEST_CASE_TEMPLATE_DEFINE(name, T, id) DOCTEST_TEST_CASE_TEMPLATE_DEFINE(name, T, id) @@ -2863,33 +3027,11 @@ namespace detail { #endif // DOCTEST_CONFIG_NO_SHORT_MACRO_NAMES -#if !defined(DOCTEST_CONFIG_DISABLE) +#ifndef DOCTEST_CONFIG_DISABLE // this is here to clear the 'current test suite' for the current translation unit - at the top DOCTEST_TEST_SUITE_END(); -// add stringification for primitive/fundamental types -namespace doctest { namespace detail { - DOCTEST_TYPE_TO_STRING_IMPL(bool) - DOCTEST_TYPE_TO_STRING_IMPL(float) - DOCTEST_TYPE_TO_STRING_IMPL(double) - DOCTEST_TYPE_TO_STRING_IMPL(long double) - DOCTEST_TYPE_TO_STRING_IMPL(char) - DOCTEST_TYPE_TO_STRING_IMPL(signed char) - DOCTEST_TYPE_TO_STRING_IMPL(unsigned char) -#if !DOCTEST_MSVC || defined(_NATIVE_WCHAR_T_DEFINED) - DOCTEST_TYPE_TO_STRING_IMPL(wchar_t) -#endif // not MSVC or wchar_t support enabled - DOCTEST_TYPE_TO_STRING_IMPL(short int) - DOCTEST_TYPE_TO_STRING_IMPL(unsigned short int) - DOCTEST_TYPE_TO_STRING_IMPL(int) - DOCTEST_TYPE_TO_STRING_IMPL(unsigned int) - DOCTEST_TYPE_TO_STRING_IMPL(long int) - DOCTEST_TYPE_TO_STRING_IMPL(unsigned long int) - DOCTEST_TYPE_TO_STRING_IMPL(long long int) - DOCTEST_TYPE_TO_STRING_IMPL(unsigned long long int) -}} // namespace doctest::detail - #endif // DOCTEST_CONFIG_DISABLE DOCTEST_CLANG_SUPPRESS_WARNING_POP @@ -2981,16 +3123,27 @@ DOCTEST_MAKE_STD_HEADERS_CLEAN_FROM_WARNINGS_ON_WALL_BEGIN #include <algorithm> #include <iomanip> #include <vector> +#ifndef DOCTEST_CONFIG_NO_MULTITHREADING #include <atomic> #include <mutex> +#define DOCTEST_DECLARE_MUTEX(name) std::mutex name; +#define DOCTEST_DECLARE_STATIC_MUTEX(name) static DOCTEST_DECLARE_MUTEX(name) +#define DOCTEST_LOCK_MUTEX(name) std::lock_guard<std::mutex> DOCTEST_ANONYMOUS(DOCTEST_ANON_LOCK_)(name); +#else // DOCTEST_CONFIG_NO_MULTITHREADING +#define DOCTEST_DECLARE_MUTEX(name) +#define DOCTEST_DECLARE_STATIC_MUTEX(name) +#define DOCTEST_LOCK_MUTEX(name) +#endif // DOCTEST_CONFIG_NO_MULTITHREADING #include <set> #include <map> +#include <unordered_set> #include <exception> #include <stdexcept> #include <csignal> #include <cfloat> #include <cctype> #include <cstdint> +#include <string> #ifdef DOCTEST_PLATFORM_MAC #include <sys/types.h> @@ -3045,7 +3198,7 @@ DOCTEST_MAKE_STD_HEADERS_CLEAN_FROM_WARNINGS_ON_WALL_END #endif #ifndef DOCTEST_THREAD_LOCAL -#if DOCTEST_MSVC && (DOCTEST_MSVC < DOCTEST_COMPILER(19, 0, 0)) +#if defined(DOCTEST_CONFIG_NO_MULTITHREADING) || DOCTEST_MSVC && (DOCTEST_MSVC < DOCTEST_COMPILER(19, 0, 0)) #define DOCTEST_THREAD_LOCAL #else // DOCTEST_MSVC #define DOCTEST_THREAD_LOCAL thread_local @@ -3107,20 +3260,6 @@ namespace { } } - template <typename T> - String fpToString(T value, int precision) { - std::ostringstream oss; - oss << std::setprecision(precision) << std::fixed << value; - std::string d = oss.str(); - size_t i = d.find_last_not_of('0'); - if(i != std::string::npos && i != d.size() - 1) { - if(d[i] == '.') - i++; - d = d.substr(0, i + 1); - } - return d.c_str(); - } - struct Endianness { enum Arch @@ -3141,22 +3280,6 @@ namespace { } // namespace namespace detail { - String rawMemoryToString(const void* object, unsigned size) { - // Reverse order for little endian architectures - int i = 0, end = static_cast<int>(size), inc = 1; - if(Endianness::which() == Endianness::Little) { - i = end - 1; - end = inc = -1; - } - - unsigned const char* bytes = static_cast<unsigned const char*>(object); - std::ostream* oss = tlssPush(); - *oss << 
"0x" << std::setfill('0') << std::hex; - for(; i != end; i += inc) - *oss << std::setw(2) << static_cast<unsigned>(bytes[i]); - return tlssPop(); - } - DOCTEST_THREAD_LOCAL class { std::vector<std::streampos> stack; @@ -3194,19 +3317,19 @@ namespace timer_large_integer { #if defined(DOCTEST_PLATFORM_WINDOWS) - typedef ULONGLONG type; + using type = ULONGLONG; #else // DOCTEST_PLATFORM_WINDOWS - typedef std::uint64_t type; + using type = std::uint64_t; #endif // DOCTEST_PLATFORM_WINDOWS } -typedef timer_large_integer::type ticks_t; +using ticks_t = timer_large_integer::type; #ifdef DOCTEST_CONFIG_GETCURRENTTICKS ticks_t getCurrentTicks() { return DOCTEST_CONFIG_GETCURRENTTICKS(); } #elif defined(DOCTEST_PLATFORM_WINDOWS) ticks_t getCurrentTicks() { - static LARGE_INTEGER hz = {0}, hzo = {0}; + static LARGE_INTEGER hz = { {0} }, hzo = { {0} }; if(!hz.QuadPart) { QueryPerformanceFrequency(&hz); QueryPerformanceCounter(&hzo); @@ -3238,9 +3361,17 @@ typedef timer_large_integer::type ticks_t; ticks_t m_ticks = 0; }; -#ifdef DOCTEST_CONFIG_NO_MULTI_LANE_ATOMICS +#ifdef DOCTEST_CONFIG_NO_MULTITHREADING + template <typename T> + using Atomic = T; +#else // DOCTEST_CONFIG_NO_MULTITHREADING template <typename T> - using AtomicOrMultiLaneAtomic = std::atomic<T>; + using Atomic = std::atomic<T>; +#endif // DOCTEST_CONFIG_NO_MULTITHREADING + +#if defined(DOCTEST_CONFIG_NO_MULTI_LANE_ATOMICS) || defined(DOCTEST_CONFIG_NO_MULTITHREADING) + template <typename T> + using MultiLaneAtomic = Atomic<T>; #else // DOCTEST_CONFIG_NO_MULTI_LANE_ATOMICS // Provides a multilane implementation of an atomic variable that supports add, sub, load, // store. Instead of using a single atomic variable, this splits up into multiple ones, @@ -3257,8 +3388,8 @@ typedef timer_large_integer::type ticks_t; { struct CacheLineAlignedAtomic { - std::atomic<T> atomic{}; - char padding[DOCTEST_MULTI_LANE_ATOMICS_CACHE_LINE_SIZE - sizeof(std::atomic<T>)]; + Atomic<T> atomic{}; + char padding[DOCTEST_MULTI_LANE_ATOMICS_CACHE_LINE_SIZE - sizeof(Atomic<T>)]; }; CacheLineAlignedAtomic m_atomics[DOCTEST_MULTI_LANE_ATOMICS_THREAD_LANES]; @@ -3314,24 +3445,21 @@ typedef timer_large_integer::type ticks_t; // assigned in a round-robin fashion. // 3. This tlsLaneIdx is stored in the thread local data, so it is directly available with // little overhead. 
- std::atomic<T>& myAtomic() DOCTEST_NOEXCEPT { - static std::atomic<size_t> laneCounter; + Atomic<T>& myAtomic() DOCTEST_NOEXCEPT { + static Atomic<size_t> laneCounter; DOCTEST_THREAD_LOCAL size_t tlsLaneIdx = laneCounter++ % DOCTEST_MULTI_LANE_ATOMICS_THREAD_LANES; return m_atomics[tlsLaneIdx].atomic; } }; - - template <typename T> - using AtomicOrMultiLaneAtomic = MultiLaneAtomic<T>; #endif // DOCTEST_CONFIG_NO_MULTI_LANE_ATOMICS // this holds both parameters from the command line and runtime data for tests struct ContextState : ContextOptions, TestRunStats, CurrentTestCaseStats { - AtomicOrMultiLaneAtomic<int> numAssertsCurrentTest_atomic; - AtomicOrMultiLaneAtomic<int> numAssertsFailedCurrentTest_atomic; + MultiLaneAtomic<int> numAssertsCurrentTest_atomic; + MultiLaneAtomic<int> numAssertsFailedCurrentTest_atomic; std::vector<std::vector<String>> filters = decltype(filters)(9); // 9 different filters @@ -3344,11 +3472,12 @@ typedef timer_large_integer::type ticks_t; std::vector<String> stringifiedContexts; // logging from INFO() due to an exception // stuff for subcases - std::vector<SubcaseSignature> subcasesStack; - std::set<decltype(subcasesStack)> subcasesPassed; - int subcasesCurrentMaxLevel; - bool should_reenter; - std::atomic<bool> shouldLogCurrentException; + bool reachedLeaf; + std::vector<SubcaseSignature> subcaseStack; + std::vector<SubcaseSignature> nextSubcaseStack; + std::unordered_set<unsigned long long> fullyTraversedSubcases; + size_t currentSubcaseDepth; + Atomic<bool> shouldLogCurrentException; void resetRunData() { numTestCases = 0; @@ -3414,7 +3543,7 @@ typedef timer_large_integer::type ticks_t; #endif // DOCTEST_CONFIG_DISABLE } // namespace detail -char* String::allocate(unsigned sz) { +char* String::allocate(size_type sz) { if (sz <= last) { buf[sz] = '\0'; setLast(last - sz); @@ -3429,8 +3558,12 @@ char* String::allocate(unsigned sz) { } } -void String::setOnHeap() { *reinterpret_cast<unsigned char*>(&buf[last]) = 128; } -void String::setLast(unsigned in) { buf[last] = char(in); } +void String::setOnHeap() noexcept { *reinterpret_cast<unsigned char*>(&buf[last]) = 128; } +void String::setLast(size_type in) noexcept { buf[last] = char(in); } +void String::setSize(size_type sz) noexcept { + if (isOnStack()) { buf[sz] = '\0'; setLast(last - sz); } + else { data.ptr[sz] = '\0'; data.size = sz; } +} void String::copy(const String& other) { if(other.isOnStack()) { @@ -3440,7 +3573,7 @@ void String::copy(const String& other) { } } -String::String() { +String::String() noexcept { buf[0] = '\0'; setLast(); } @@ -3448,17 +3581,16 @@ String::String() { String::~String() { if(!isOnStack()) delete[] data.ptr; - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) -} +} // NOLINT(clang-analyzer-cplusplus.NewDeleteLeaks) String::String(const char* in) : String(in, strlen(in)) {} -String::String(const char* in, unsigned in_size) { +String::String(const char* in, size_type in_size) { memcpy(allocate(in_size), in, in_size); } -String::String(std::istream& in, unsigned in_size) { +String::String(std::istream& in, size_type in_size) { in.read(allocate(in_size), in_size); } @@ -3476,9 +3608,9 @@ String& String::operator=(const String& other) { } String& String::operator+=(const String& other) { - const unsigned my_old_size = size(); - const unsigned other_size = other.size(); - const unsigned total_size = my_old_size + other_size; + const size_type my_old_size = size(); + const size_type other_size = other.size(); + const size_type total_size = my_old_size + other_size; 
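
A note on the `String` methods being reworked above: doctest's `String` keeps short strings in an on-stack buffer and stores the *remaining* capacity in the buffer's final byte, so a maximally full string writes 0 there and that byte doubles as the NUL terminator (the top bit of the same byte flags heap storage, which is why `size()` masks with 31). A simplified stack-only sketch of that encoding (hypothetical class; the heap path is omitted):

```cpp
#include <cstddef>
#include <cstdio>
#include <cstring>

// Hypothetical illustration of the small-buffer encoding; the real
// doctest::String switches to a heap pointer/size/capacity union for
// longer strings and flags that with the top bit of buf[last].
class SmallString {
    enum { len = 24, last = len - 1 };
    char buf[len];

public:
    SmallString(const char* in) {
        std::size_t sz = std::strlen(in);
        if(sz > std::size_t(last)) sz = last; // truncate: no heap path here
        std::memcpy(buf, in, sz);
        buf[sz] = '\0';
        // Store the *remaining* capacity in the final byte. When the
        // string is exactly 23 chars this is 0 and doubles as the NUL.
        buf[last] = char(last - sz);
    }

    std::size_t size() const { return last - std::size_t(buf[last]); }
};

int main() {
    SmallString s("hello");
    std::printf("%zu\n", s.size()); // prints 5
}
```
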
if(isOnStack()) { if(total_size < len) { // append to the current stack space @@ -3525,13 +3657,13 @@ String& String::operator+=(const String& other) { return *this; } -String::String(String&& other) { +String::String(String&& other) noexcept { memcpy(buf, other.buf, len); other.buf[0] = '\0'; other.setLast(); } -String& String::operator=(String&& other) { +String& String::operator=(String&& other) noexcept { if(this != &other) { if(!isOnStack()) delete[] data.ptr; @@ -3542,30 +3674,60 @@ String& String::operator=(String&& other) { return *this; } -char String::operator[](unsigned i) const { - return const_cast<String*>(this)->operator[](i); // NOLINT +char String::operator[](size_type i) const { + return const_cast<String*>(this)->operator[](i); } -char& String::operator[](unsigned i) { +char& String::operator[](size_type i) { if(isOnStack()) return reinterpret_cast<char*>(buf)[i]; return data.ptr[i]; } DOCTEST_GCC_SUPPRESS_WARNING_WITH_PUSH("-Wmaybe-uninitialized") -unsigned String::size() const { +String::size_type String::size() const { if(isOnStack()) - return last - (unsigned(buf[last]) & 31); // using "last" would work only if "len" is 32 + return last - (size_type(buf[last]) & 31); // using "last" would work only if "len" is 32 return data.size; } DOCTEST_GCC_SUPPRESS_WARNING_POP -unsigned String::capacity() const { +String::size_type String::capacity() const { if(isOnStack()) return len; return data.capacity; } +String String::substr(size_type pos, size_type cnt) && { + cnt = std::min(cnt, size() - 1 - pos); + char* cptr = c_str(); + memmove(cptr, cptr + pos, cnt); + setSize(cnt); + return std::move(*this); +} + +String String::substr(size_type pos, size_type cnt) const & { + cnt = std::min(cnt, size() - 1 - pos); + return String{ c_str() + pos, cnt }; +} + +String::size_type String::find(char ch, size_type pos) const { + const char* begin = c_str(); + const char* end = begin + size(); + const char* it = begin + pos; + for (; it < end && *it != ch; it++); + if (it < end) { return static_cast<size_type>(it - begin); } + else { return npos; } +} + +String::size_type String::rfind(char ch, size_type pos) const { + const char* begin = c_str(); + const char* it = begin + std::min(pos, size() - 1); + for (; it >= begin && *it != ch; it--); + if (it >= begin) { return static_cast<size_type>(it - begin); } + else { return npos; } +} + int String::compare(const char* other, bool no_case) const { if(no_case) return doctest::stricmp(c_str(), other); @@ -3576,20 +3738,32 @@ int String::compare(const String& other, bool no_case) const { return compare(other.c_str(), no_case); } -// NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) String operator+(const String& lhs, const String& rhs) { return String(lhs) += rhs; } -// clang-format off bool operator==(const String& lhs, const String& rhs) { return lhs.compare(rhs) == 0; } bool operator!=(const String& lhs, const String& rhs) { return lhs.compare(rhs) != 0; } bool operator< (const String& lhs, const String& rhs) { return lhs.compare(rhs) < 0; } bool operator> (const String& lhs, const String& rhs) { return lhs.compare(rhs) > 0; } bool operator<=(const String& lhs, const String& rhs) { return (lhs != rhs) ? lhs.compare(rhs) < 0 : true; } bool operator>=(const String& lhs, const String& rhs) { return (lhs != rhs) ? 
lhs.compare(rhs) > 0 : true; } -// clang-format on std::ostream& operator<<(std::ostream& s, const String& in) { return s << in.c_str(); } +Contains::Contains(const String& str) : string(str) { } + +bool Contains::checkWith(const String& other) const { + return strstr(other.c_str(), string.c_str()) != nullptr; +} + +String toString(const Contains& in) { + return "Contains( " + in.string + " )"; +} + +bool operator==(const String& lhs, const Contains& rhs) { return rhs.checkWith(lhs); } +bool operator==(const Contains& lhs, const String& rhs) { return lhs.checkWith(rhs); } +bool operator!=(const String& lhs, const Contains& rhs) { return !rhs.checkWith(lhs); } +bool operator!=(const Contains& lhs, const String& rhs) { return !lhs.checkWith(rhs); } + namespace { void color_to_stream(std::ostream&, Color::Enum) DOCTEST_BRANCH_ON_DISABLED({}, ;) } // namespace @@ -3603,64 +3777,42 @@ namespace Color { // clang-format off const char* assertString(assertType::Enum at) { - DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4062) // enum 'x' in switch of enum 'y' is not handled - switch(at) { //!OCLINT missing default in switch statements - case assertType::DT_WARN : return "WARN"; - case assertType::DT_CHECK : return "CHECK"; - case assertType::DT_REQUIRE : return "REQUIRE"; - - case assertType::DT_WARN_FALSE : return "WARN_FALSE"; - case assertType::DT_CHECK_FALSE : return "CHECK_FALSE"; - case assertType::DT_REQUIRE_FALSE : return "REQUIRE_FALSE"; - - case assertType::DT_WARN_THROWS : return "WARN_THROWS"; - case assertType::DT_CHECK_THROWS : return "CHECK_THROWS"; - case assertType::DT_REQUIRE_THROWS : return "REQUIRE_THROWS"; - - case assertType::DT_WARN_THROWS_AS : return "WARN_THROWS_AS"; - case assertType::DT_CHECK_THROWS_AS : return "CHECK_THROWS_AS"; - case assertType::DT_REQUIRE_THROWS_AS : return "REQUIRE_THROWS_AS"; - - case assertType::DT_WARN_THROWS_WITH : return "WARN_THROWS_WITH"; - case assertType::DT_CHECK_THROWS_WITH : return "CHECK_THROWS_WITH"; - case assertType::DT_REQUIRE_THROWS_WITH : return "REQUIRE_THROWS_WITH"; - - case assertType::DT_WARN_THROWS_WITH_AS : return "WARN_THROWS_WITH_AS"; - case assertType::DT_CHECK_THROWS_WITH_AS : return "CHECK_THROWS_WITH_AS"; - case assertType::DT_REQUIRE_THROWS_WITH_AS : return "REQUIRE_THROWS_WITH_AS"; - - case assertType::DT_WARN_NOTHROW : return "WARN_NOTHROW"; - case assertType::DT_CHECK_NOTHROW : return "CHECK_NOTHROW"; - case assertType::DT_REQUIRE_NOTHROW : return "REQUIRE_NOTHROW"; - - case assertType::DT_WARN_EQ : return "WARN_EQ"; - case assertType::DT_CHECK_EQ : return "CHECK_EQ"; - case assertType::DT_REQUIRE_EQ : return "REQUIRE_EQ"; - case assertType::DT_WARN_NE : return "WARN_NE"; - case assertType::DT_CHECK_NE : return "CHECK_NE"; - case assertType::DT_REQUIRE_NE : return "REQUIRE_NE"; - case assertType::DT_WARN_GT : return "WARN_GT"; - case assertType::DT_CHECK_GT : return "CHECK_GT"; - case assertType::DT_REQUIRE_GT : return "REQUIRE_GT"; - case assertType::DT_WARN_LT : return "WARN_LT"; - case assertType::DT_CHECK_LT : return "CHECK_LT"; - case assertType::DT_REQUIRE_LT : return "REQUIRE_LT"; - case assertType::DT_WARN_GE : return "WARN_GE"; - case assertType::DT_CHECK_GE : return "CHECK_GE"; - case assertType::DT_REQUIRE_GE : return "REQUIRE_GE"; - case assertType::DT_WARN_LE : return "WARN_LE"; - case assertType::DT_CHECK_LE : return "CHECK_LE"; - case assertType::DT_REQUIRE_LE : return "REQUIRE_LE"; - - case assertType::DT_WARN_UNARY : return "WARN_UNARY"; - case assertType::DT_CHECK_UNARY : return "CHECK_UNARY"; - case 
assertType::DT_REQUIRE_UNARY : return "REQUIRE_UNARY"; - case assertType::DT_WARN_UNARY_FALSE : return "WARN_UNARY_FALSE"; - case assertType::DT_CHECK_UNARY_FALSE : return "CHECK_UNARY_FALSE"; - case assertType::DT_REQUIRE_UNARY_FALSE : return "REQUIRE_UNARY_FALSE"; + DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4061) // enum 'x' in switch of enum 'y' is not explicitely handled + #define DOCTEST_GENERATE_ASSERT_TYPE_CASE(assert_type) case assertType::DT_ ## assert_type: return #assert_type + #define DOCTEST_GENERATE_ASSERT_TYPE_CASES(assert_type) \ + DOCTEST_GENERATE_ASSERT_TYPE_CASE(WARN_ ## assert_type); \ + DOCTEST_GENERATE_ASSERT_TYPE_CASE(CHECK_ ## assert_type); \ + DOCTEST_GENERATE_ASSERT_TYPE_CASE(REQUIRE_ ## assert_type) + switch(at) { + DOCTEST_GENERATE_ASSERT_TYPE_CASE(WARN); + DOCTEST_GENERATE_ASSERT_TYPE_CASE(CHECK); + DOCTEST_GENERATE_ASSERT_TYPE_CASE(REQUIRE); + + DOCTEST_GENERATE_ASSERT_TYPE_CASES(FALSE); + + DOCTEST_GENERATE_ASSERT_TYPE_CASES(THROWS); + + DOCTEST_GENERATE_ASSERT_TYPE_CASES(THROWS_AS); + + DOCTEST_GENERATE_ASSERT_TYPE_CASES(THROWS_WITH); + + DOCTEST_GENERATE_ASSERT_TYPE_CASES(THROWS_WITH_AS); + + DOCTEST_GENERATE_ASSERT_TYPE_CASES(NOTHROW); + + DOCTEST_GENERATE_ASSERT_TYPE_CASES(EQ); + DOCTEST_GENERATE_ASSERT_TYPE_CASES(NE); + DOCTEST_GENERATE_ASSERT_TYPE_CASES(GT); + DOCTEST_GENERATE_ASSERT_TYPE_CASES(LT); + DOCTEST_GENERATE_ASSERT_TYPE_CASES(GE); + DOCTEST_GENERATE_ASSERT_TYPE_CASES(LE); + + DOCTEST_GENERATE_ASSERT_TYPE_CASES(UNARY); + DOCTEST_GENERATE_ASSERT_TYPE_CASES(UNARY_FALSE); + + default: DOCTEST_INTERNAL_ERROR("Tried stringifying invalid assert type!"); } DOCTEST_MSVC_SUPPRESS_WARNING_POP - return ""; } // clang-format on @@ -3694,6 +3846,12 @@ const char* skipPathFromFilename(const char* file) { DOCTEST_CLANG_SUPPRESS_WARNING_POP DOCTEST_GCC_SUPPRESS_WARNING_POP +bool SubcaseSignature::operator==(const SubcaseSignature& other) const { + return m_line == other.m_line + && std::strcmp(m_file, other.m_file) == 0 + && m_name == other.m_name; +} + bool SubcaseSignature::operator<(const SubcaseSignature& other) const { if(m_line != other.m_line) return m_line < other.m_line; @@ -3702,45 +3860,53 @@ bool SubcaseSignature::operator<(const SubcaseSignature& other) const { return m_name.compare(other.m_name) < 0; } -IContextScope::IContextScope() = default; -IContextScope::~IContextScope() = default; +DOCTEST_DEFINE_INTERFACE(IContextScope) -#ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -String toString(char* in) { return toString(static_cast<const char*>(in)); } -// NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) -String toString(const char* in) { return String("\"") + (in ? in : "{null string}") + "\""; } -#endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING -String toString(bool in) { return in ? 
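The macro-generated switch above relies on two preprocessor operators: `##` pastes `DT_` onto the enumerator name while `#` stringifies it, so every case line is derived from a single token instead of being written out by hand. The same pattern in isolation, with the enum and names invented for the example:

```cpp
#include <cstdio>

namespace level { enum Enum { DT_WARN, DT_CHECK, DT_REQUIRE }; }

// '##' pastes the enumerator name, '#' stringifies it - one line per case.
#define GENERATE_CASE(name) case level::DT_##name: return #name

const char* levelString(level::Enum at) {
    switch (at) {
        GENERATE_CASE(WARN);
        GENERATE_CASE(CHECK);
        GENERATE_CASE(REQUIRE);
        default: return "unknown";
    }
}

int main() { std::puts(levelString(level::DT_CHECK)); } // prints "CHECK"
```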
"true" : "false"; } -String toString(float in) { return fpToString(in, 5) + "f"; } -String toString(double in) { return fpToString(in, 10); } -String toString(double long in) { return fpToString(in, 15); } - -#define DOCTEST_TO_STRING_OVERLOAD(type, fmt) \ - String toString(type in) { \ - char buf[64]; \ - std::sprintf(buf, fmt, in); \ - return buf; \ +namespace detail { + void filldata<const void*>::fill(std::ostream* stream, const void* in) { + if (in) { *stream << in; } + else { *stream << "nullptr"; } } -DOCTEST_TO_STRING_OVERLOAD(char, "%d") -DOCTEST_TO_STRING_OVERLOAD(char signed, "%d") -DOCTEST_TO_STRING_OVERLOAD(char unsigned, "%u") -DOCTEST_TO_STRING_OVERLOAD(int short, "%d") -DOCTEST_TO_STRING_OVERLOAD(int short unsigned, "%u") -DOCTEST_TO_STRING_OVERLOAD(int, "%d") -DOCTEST_TO_STRING_OVERLOAD(unsigned, "%u") -DOCTEST_TO_STRING_OVERLOAD(int long, "%ld") -DOCTEST_TO_STRING_OVERLOAD(int long unsigned, "%lu") -DOCTEST_TO_STRING_OVERLOAD(int long long, "%lld") -DOCTEST_TO_STRING_OVERLOAD(int long long unsigned, "%llu") + template <typename T> + String toStreamLit(T t) { + std::ostream* os = tlssPush(); + os->operator<<(t); + return tlssPop(); + } +} -String toString(std::nullptr_t) { return "NULL"; } +#ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING +String toString(const char* in) { return String("\"") + (in ? in : "{null string}") + "\""; } +#endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING #if DOCTEST_MSVC >= DOCTEST_COMPILER(19, 20, 0) // see this issue on why this is needed: https://github.com/doctest/doctest/issues/183 String toString(const std::string& in) { return in.c_str(); } #endif // VS 2019 +String toString(String in) { return in; } + +String toString(std::nullptr_t) { return "nullptr"; } + +String toString(bool in) { return in ? 
"true" : "false"; } + +String toString(float in) { return toStreamLit(in); } +String toString(double in) { return toStreamLit(in); } +String toString(double long in) { return toStreamLit(in); } + +String toString(char in) { return toStreamLit(static_cast<signed>(in)); } +String toString(char signed in) { return toStreamLit(static_cast<signed>(in)); } +String toString(char unsigned in) { return toStreamLit(static_cast<unsigned>(in)); } +String toString(short in) { return toStreamLit(in); } +String toString(short unsigned in) { return toStreamLit(in); } +String toString(signed in) { return toStreamLit(in); } +String toString(unsigned in) { return toStreamLit(in); } +String toString(long in) { return toStreamLit(in); } +String toString(long unsigned in) { return toStreamLit(in); } +String toString(long long in) { return toStreamLit(in); } +String toString(long long unsigned in) { return toStreamLit(in); } + Approx::Approx(double value) : m_epsilon(static_cast<double>(std::numeric_limits<float>::epsilon()) * 100) , m_scale(1.0) @@ -3780,11 +3946,25 @@ bool operator>(double lhs, const Approx& rhs) { return lhs > rhs.m_value && lhs bool operator>(const Approx& lhs, double rhs) { return lhs.m_value > rhs && lhs != rhs; } String toString(const Approx& in) { - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) return "Approx( " + doctest::toString(in.m_value) + " )"; } const ContextOptions* getContextOptions() { return DOCTEST_BRANCH_ON_DISABLED(nullptr, g_cs); } +DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4738) +template <typename F> +IsNaN<F>::operator bool() const { + return std::isnan(value) ^ flipped; +} +DOCTEST_MSVC_SUPPRESS_WARNING_POP +template struct DOCTEST_INTERFACE_DEF IsNaN<float>; +template struct DOCTEST_INTERFACE_DEF IsNaN<double>; +template struct DOCTEST_INTERFACE_DEF IsNaN<long double>; +template <typename F> +String toString(IsNaN<F> in) { return String(in.flipped ? "! 
" : "") + "IsNaN( " + doctest::toString(in.value) + " )"; } +String toString(IsNaN<float> in) { return toString<float>(in); } +String toString(IsNaN<double> in) { return toString<double>(in); } +String toString(IsNaN<double long> in) { return toString<double long>(in); } + } // namespace doctest #ifdef DOCTEST_CONFIG_DISABLE @@ -3800,11 +3980,9 @@ void Context::setOption(const char*, const char*) {} bool Context::shouldExit() { return false; } void Context::setAsDefaultForAssertsOutOfTestCases() {} void Context::setAssertHandler(detail::assert_handler) {} -void Context::setCout(std::ostream* out) {} +void Context::setCout(std::ostream*) {} int Context::run() { return 0; } -IReporter::~IReporter() = default; - int IReporter::get_num_active_contexts() { return 0; } const IContextScope* const* IReporter::get_active_contexts() { return nullptr; } int IReporter::get_num_stringified_contexts() { return 0; } @@ -3837,7 +4015,7 @@ namespace doctest { namespace { // the int (priority) is part of the key for automatic sorting - sadly one can register a // reporter with a duplicate name and a different priority but hopefully that won't happen often :| - typedef std::map<std::pair<int, String>, reporterCreatorFunc> reporterMap; + using reporterMap = std::map<std::pair<int, String>, reporterCreatorFunc>; reporterMap& getReporters() { static reporterMap data; @@ -3869,8 +4047,8 @@ namespace detail { #ifndef DOCTEST_CONFIG_NO_EXCEPTIONS DOCTEST_NORETURN void throwException() { g_cs->shouldLogCurrentException = false; - throw TestFailureException(); - } // NOLINT(cert-err60-cpp) + throw TestFailureException(); // NOLINT(hicpp-exception-baseclass) + } #else // DOCTEST_CONFIG_NO_EXCEPTIONS void throwException() {} #endif // DOCTEST_CONFIG_NO_EXCEPTIONS @@ -3916,59 +4094,92 @@ namespace { return !*wild; } - //// C string hash function (djb2) - taken from http://www.cse.yorku.ca/~oz/hash.html - //unsigned hashStr(unsigned const char* str) { - // unsigned long hash = 5381; - // char c; - // while((c = *str++)) - // hash = ((hash << 5) + hash) + c; // hash * 33 + c - // return hash; - //} - // checks if the name matches any of the filters (and can be configured what to do when empty) bool matchesAny(const char* name, const std::vector<String>& filters, bool matchEmpty, - bool caseSensitive) { - if(filters.empty() && matchEmpty) + bool caseSensitive) { + if (filters.empty() && matchEmpty) return true; - for(auto& curr : filters) - if(wildcmp(name, curr.c_str(), caseSensitive)) + for (auto& curr : filters) + if (wildcmp(name, curr.c_str(), caseSensitive)) return true; return false; } -} // namespace -namespace detail { - Subcase::Subcase(const String& name, const char* file, int line) - : m_signature({name, file, line}) { - auto* s = g_cs; + unsigned long long hash(unsigned long long a, unsigned long long b) { + return (a << 5) + b; + } - // check subcase filters - if(s->subcasesStack.size() < size_t(s->subcase_filter_levels)) { - if(!matchesAny(m_signature.m_name.c_str(), s->filters[6], true, s->case_sensitive)) - return; - if(matchesAny(m_signature.m_name.c_str(), s->filters[7], false, s->case_sensitive)) - return; - } - - // if a Subcase on the same level has already been entered - if(s->subcasesStack.size() < size_t(s->subcasesCurrentMaxLevel)) { - s->should_reenter = true; - return; - } + // C string hash function (djb2) - taken from http://www.cse.yorku.ca/~oz/hash.html + unsigned long long hash(const char* str) { + unsigned long long hash = 5381; + char c; + while ((c = *str++)) + hash = ((hash << 5) + 
hash) + c; // hash * 33 + c + return hash; + } - // push the current signature to the stack so we can check if the - // current stack + the current new subcase have been traversed - s->subcasesStack.push_back(m_signature); - if(s->subcasesPassed.count(s->subcasesStack) != 0) { - // pop - revert to previous stack since we've already passed this - s->subcasesStack.pop_back(); - return; + unsigned long long hash(const SubcaseSignature& sig) { + return hash(hash(hash(sig.m_file), hash(sig.m_name.c_str())), sig.m_line); + } + + unsigned long long hash(const std::vector<SubcaseSignature>& sigs, size_t count) { + unsigned long long running = 0; + auto end = sigs.begin() + count; + for (auto it = sigs.begin(); it != end; it++) { + running = hash(running, hash(*it)); } + return running; + } - s->subcasesCurrentMaxLevel = s->subcasesStack.size(); - m_entered = true; + unsigned long long hash(const std::vector<SubcaseSignature>& sigs) { + unsigned long long running = 0; + for (const SubcaseSignature& sig : sigs) { + running = hash(running, hash(sig)); + } + return running; + } +} // namespace +namespace detail { + bool Subcase::checkFilters() { + if (g_cs->subcaseStack.size() < size_t(g_cs->subcase_filter_levels)) { + if (!matchesAny(m_signature.m_name.c_str(), g_cs->filters[6], true, g_cs->case_sensitive)) + return true; + if (matchesAny(m_signature.m_name.c_str(), g_cs->filters[7], false, g_cs->case_sensitive)) + return true; + } + return false; + } - DOCTEST_ITERATE_THROUGH_REPORTERS(subcase_start, m_signature); + Subcase::Subcase(const String& name, const char* file, int line) + : m_signature({name, file, line}) { + if (!g_cs->reachedLeaf) { + if (g_cs->nextSubcaseStack.size() <= g_cs->subcaseStack.size() + || g_cs->nextSubcaseStack[g_cs->subcaseStack.size()] == m_signature) { + // Going down. + if (checkFilters()) { return; } + + g_cs->subcaseStack.push_back(m_signature); + g_cs->currentSubcaseDepth++; + m_entered = true; + DOCTEST_ITERATE_THROUGH_REPORTERS(subcase_start, m_signature); + } + } else { + if (g_cs->subcaseStack[g_cs->currentSubcaseDepth] == m_signature) { + // This subcase is reentered via control flow. + g_cs->currentSubcaseDepth++; + m_entered = true; + DOCTEST_ITERATE_THROUGH_REPORTERS(subcase_start, m_signature); + } else if (g_cs->nextSubcaseStack.size() <= g_cs->currentSubcaseDepth + && g_cs->fullyTraversedSubcases.find(hash(hash(g_cs->subcaseStack, g_cs->currentSubcaseDepth), hash(m_signature))) + == g_cs->fullyTraversedSubcases.end()) { + if (checkFilters()) { return; } + // This subcase is part of the one to be executed next. + g_cs->nextSubcaseStack.clear(); + g_cs->nextSubcaseStack.insert(g_cs->nextSubcaseStack.end(), + g_cs->subcaseStack.begin(), g_cs->subcaseStack.begin() + g_cs->currentSubcaseDepth); + g_cs->nextSubcaseStack.push_back(m_signature); + } + } } DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(4996) // std::uncaught_exception is deprecated in C++17 @@ -3976,25 +4187,33 @@ namespace detail { DOCTEST_CLANG_SUPPRESS_WARNING_WITH_PUSH("-Wdeprecated-declarations") Subcase::~Subcase() { - if(m_entered) { - // only mark the subcase stack as passed if no subcases have been skipped - if(g_cs->should_reenter == false) - g_cs->subcasesPassed.insert(g_cs->subcasesStack); - g_cs->subcasesStack.pop_back(); + if (m_entered) { + g_cs->currentSubcaseDepth--; + + if (!g_cs->reachedLeaf) { + // Leaf. 
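The rewritten `Subcase` logic above replaces the old set of fully passed subcase stacks with explicit navigation: each run of the test body descends along `nextSubcaseStack`, and once a leaf has been reached, the first sibling whose subtree is not yet in `fullyTraversedSubcases` is scheduled for the following run. A toy model of that traversal, using string paths where doctest uses the djb2-style hashes above, and omitting the branch for subcases re-entered from loops:

```cpp
#include <cstdio>
#include <set>
#include <string>
#include <vector>

// Toy model of the new traversal: descend along 'next'; on reaching a leaf,
// schedule the first untraversed sibling for the next run of the body.
struct Runner {
    std::vector<std::string> stack, next;     // current path, target path
    std::set<std::vector<std::string>> done;  // fully traversed subtrees
    bool reachedLeaf = false;

    bool enter(const std::string& name) {
        if (!reachedLeaf) {                   // still descending
            if (next.size() <= stack.size() || next[stack.size()] == name) {
                stack.push_back(name);
                return true;
            }
        } else if (next.size() <= stack.size()) {
            std::vector<std::string> candidate = stack;
            candidate.push_back(name);
            if (!done.count(candidate)) next = candidate; // schedule sibling
        }
        return false;
    }

    void leave() {
        if (!reachedLeaf) { done.insert(stack); next.clear(); reachedLeaf = true; }
        else if (next.empty()) done.insert(stack); // all children finished
        stack.pop_back();
    }
};

void body(Runner& r) { // SUBCASE A { SUBCASE A1; SUBCASE A2; } SUBCASE B;
    if (r.enter("A")) {
        if (r.enter("A1")) { std::puts("A/A1"); r.leave(); }
        if (r.enter("A2")) { std::puts("A/A2"); r.leave(); }
        r.leave();
    }
    if (r.enter("B")) { std::puts("B"); r.leave(); }
}

int main() { // re-runs the body until nothing is scheduled: A/A1, A/A2, B
    Runner r;
    do {
        r.stack.clear();
        r.reachedLeaf = false;
        body(r);
    } while (!r.next.empty()); // mirrors the nextSubcaseStack.empty() check
}
```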
+ g_cs->fullyTraversedSubcases.insert(hash(g_cs->subcaseStack)); + g_cs->nextSubcaseStack.clear(); + g_cs->reachedLeaf = true; + } else if (g_cs->nextSubcaseStack.empty()) { + // All children are finished. + g_cs->fullyTraversedSubcases.insert(hash(g_cs->subcaseStack)); + } #if defined(__cpp_lib_uncaught_exceptions) && __cpp_lib_uncaught_exceptions >= 201411L && (!defined(__MAC_OS_X_VERSION_MIN_REQUIRED) || __MAC_OS_X_VERSION_MIN_REQUIRED >= 101200) if(std::uncaught_exceptions() > 0 #else if(std::uncaught_exception() #endif - && g_cs->shouldLogCurrentException) { + && g_cs->shouldLogCurrentException) { DOCTEST_ITERATE_THROUGH_REPORTERS( test_case_exception, {"exception thrown in subcase - will translate later " - "when the whole test case has been exited (cannot " - "translate while there is an active exception)", - false}); + "when the whole test case has been exited (cannot " + "translate while there is an active exception)", + false}); g_cs->shouldLogCurrentException = false; } + DOCTEST_ITERATE_THROUGH_REPORTERS(subcase_end, DOCTEST_EMPTY); } } @@ -4018,7 +4237,7 @@ namespace detail { } TestCase::TestCase(funcType test, const char* file, unsigned line, const TestSuite& test_suite, - const char* type, int template_id) { + const String& type, int template_id) { m_file = file; m_line = line; m_name = nullptr; // will be later overridden in operator* @@ -4043,10 +4262,8 @@ namespace detail { } DOCTEST_MSVC_SUPPRESS_WARNING_WITH_PUSH(26434) // hides a non-virtual function - DOCTEST_MSVC_SUPPRESS_WARNING(26437) // Do not slice TestCase& TestCase::operator=(const TestCase& other) { - static_cast<TestCaseData&>(*this) = static_cast<const TestCaseData&>(other); - + TestCaseData::operator=(other); m_test = other.m_test; m_type = other.m_type; m_template_id = other.m_template_id; @@ -4062,7 +4279,7 @@ namespace detail { m_name = in; // make a new name with an appended type for templated test case if(m_template_id != -1) { - m_full_name = String(m_name) + m_type; + m_full_name = String(m_name) + "<" + m_type + ">"; // redirect the name to point to the newly constructed full name m_name = m_full_name.c_str(); } @@ -4304,34 +4521,13 @@ namespace detail { getExceptionTranslators().push_back(et); } -#ifdef DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING - void toStream(std::ostream* s, char* in) { *s << in; } - void toStream(std::ostream* s, const char* in) { *s << in; } -#endif // DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING - void toStream(std::ostream* s, bool in) { *s << std::boolalpha << in << std::noboolalpha; } - void toStream(std::ostream* s, float in) { *s << in; } - void toStream(std::ostream* s, double in) { *s << in; } - void toStream(std::ostream* s, double long in) { *s << in; } - - void toStream(std::ostream* s, char in) { *s << in; } - void toStream(std::ostream* s, char signed in) { *s << in; } - void toStream(std::ostream* s, char unsigned in) { *s << in; } - void toStream(std::ostream* s, int short in) { *s << in; } - void toStream(std::ostream* s, int short unsigned in) { *s << in; } - void toStream(std::ostream* s, int in) { *s << in; } - void toStream(std::ostream* s, int unsigned in) { *s << in; } - void toStream(std::ostream* s, int long in) { *s << in; } - void toStream(std::ostream* s, int long unsigned in) { *s << in; } - void toStream(std::ostream* s, int long long in) { *s << in; } - void toStream(std::ostream* s, int long long unsigned in) { *s << in; } - DOCTEST_THREAD_LOCAL std::vector<IContextScope*> g_infoContexts; // for logging with INFO() ContextScopeBase::ContextScopeBase() 
{ g_infoContexts.push_back(this); } - ContextScopeBase::ContextScopeBase(ContextScopeBase&& other) { + ContextScopeBase::ContextScopeBase(ContextScopeBase&& other) noexcept { if (other.need_to_destroy) { other.destroy(); } @@ -4401,10 +4597,10 @@ namespace { static LONG CALLBACK handleException(PEXCEPTION_POINTERS ExceptionInfo) { // Multiple threads may enter this filter/handler at once. We want the error message to be printed on the // console just once no matter how many threads have crashed. - static std::mutex mutex; + DOCTEST_DECLARE_STATIC_MUTEX(mutex) static bool execute = true; { - std::lock_guard<std::mutex> lock(mutex); + DOCTEST_LOCK_MUTEX(mutex) if(execute) { bool reported = false; for(size_t i = 0; i < DOCTEST_COUNTOF(signalDefs); ++i) { @@ -4577,7 +4773,7 @@ namespace { sigStack.ss_flags = 0; sigaltstack(&sigStack, &oldSigStack); struct sigaction sa = {}; - sa.sa_handler = handleSignal; // NOLINT + sa.sa_handler = handleSignal; sa.sa_flags = SA_ONSTACK; for(std::size_t i = 0; i < DOCTEST_COUNTOF(signalDefs); ++i) { sigaction(signalDefs[i].id, &sa, &oldSigActions[i]); @@ -4616,7 +4812,7 @@ namespace { #define DOCTEST_OUTPUT_DEBUG_STRING(text) ::OutputDebugStringA(text) #else // TODO: integration with XCode and other IDEs -#define DOCTEST_OUTPUT_DEBUG_STRING(text) // NOLINT(clang-diagnostic-unused-macros) +#define DOCTEST_OUTPUT_DEBUG_STRING(text) #endif // Platform void addAssert(assertType::Enum at) { @@ -4635,8 +4831,8 @@ namespace { DOCTEST_ITERATE_THROUGH_REPORTERS(test_case_exception, {message.c_str(), true}); - while(g_cs->subcasesStack.size()) { - g_cs->subcasesStack.pop_back(); + while (g_cs->subcaseStack.size()) { + g_cs->subcaseStack.pop_back(); DOCTEST_ITERATE_THROUGH_REPORTERS(subcase_end, DOCTEST_EMPTY); } @@ -4648,25 +4844,26 @@ namespace { } #endif // DOCTEST_CONFIG_POSIX_SIGNALS || DOCTEST_CONFIG_WINDOWS_SEH } // namespace -namespace detail { - ResultBuilder::ResultBuilder(assertType::Enum at, const char* file, int line, const char* expr, - const char* exception_type, const char* exception_string) { - m_test_case = g_cs->currentTest; - m_at = at; - m_file = file; - m_line = line; - m_expr = expr; - m_failed = true; - m_threw = false; - m_threw_as = false; - m_exception_type = exception_type; - m_exception_string = exception_string; +AssertData::AssertData(assertType::Enum at, const char* file, int line, const char* expr, + const char* exception_type, const StringContains& exception_string) + : m_test_case(g_cs->currentTest), m_at(at), m_file(file), m_line(line), m_expr(expr), + m_failed(true), m_threw(false), m_threw_as(false), m_exception_type(exception_type), + m_exception_string(exception_string) { #if DOCTEST_MSVC - if(m_expr[0] == ' ') // this happens when variadic macros are disabled under MSVC - ++m_expr; + if (m_expr[0] == ' ') // this happens when variadic macros are disabled under MSVC + ++m_expr; #endif // MSVC - } +} + +namespace detail { + ResultBuilder::ResultBuilder(assertType::Enum at, const char* file, int line, const char* expr, + const char* exception_type, const String& exception_string) + : AssertData(at, file, line, expr, exception_type, exception_string) { } + + ResultBuilder::ResultBuilder(assertType::Enum at, const char* file, int line, const char* expr, + const char* exception_type, const Contains& exception_string) + : AssertData(at, file, line, expr, exception_type, exception_string) { } void ResultBuilder::setResult(const Result& res) { m_decomp = res.m_decomp; @@ -4682,11 +4879,11 @@ namespace detail { if(m_at & 
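The `DOCTEST_DECLARE_STATIC_MUTEX`/`DOCTEST_LOCK_MUTEX` macros above abstract `std::mutex` and `std::lock_guard`, presumably so configurations without threading can compile them out; the handler itself follows a print-once pattern across concurrently crashing threads. A distilled sketch of that pattern, with names invented:

```cpp
#include <cstdio>
#include <mutex>

// Several threads may crash at once, but only the first one through the
// lock emits the report; everyone else sees 'execute' already cleared.
void reportCrashOnce(const char* what) {
    static std::mutex mutex;
    static bool execute = true;
    std::lock_guard<std::mutex> lock(mutex);
    if (execute) {
        execute = false;
        std::fprintf(stderr, "fatal error: %s\n", what);
    }
}
```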
assertType::is_throws) { //!OCLINT bitwise operator in conditional m_failed = !m_threw; } else if((m_at & assertType::is_throws_as) && (m_at & assertType::is_throws_with)) { //!OCLINT - m_failed = !m_threw_as || (m_exception != m_exception_string); + m_failed = !m_threw_as || !m_exception_string.check(m_exception); } else if(m_at & assertType::is_throws_as) { //!OCLINT bitwise operator in conditional m_failed = !m_threw_as; } else if(m_at & assertType::is_throws_with) { //!OCLINT bitwise operator in conditional - m_failed = m_exception != m_exception_string; + m_failed = !m_exception_string.check(m_exception); } else if(m_at & assertType::is_nothrow) { //!OCLINT bitwise operator in conditional m_failed = m_threw; } @@ -4721,7 +4918,7 @@ namespace detail { } bool decomp_assert(assertType::Enum at, const char* file, int line, const char* expr, - Result result) { + const Result& result) { bool failed = !result.m_passed; // ################################################################################### @@ -4730,7 +4927,6 @@ namespace detail { // ################################################################################### DOCTEST_ASSERT_OUT_OF_TESTS(result.m_decomp); DOCTEST_ASSERT_IN_TESTS(result.m_decomp); - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) return !failed; } @@ -4746,8 +4942,7 @@ namespace detail { tlssPop(); } - IExceptionTranslator::IExceptionTranslator() = default; - IExceptionTranslator::~IExceptionTranslator() = default; + DOCTEST_DEFINE_INTERFACE(IExceptionTranslator) bool MessageBuilder::log() { if (!logged) { @@ -4858,10 +5053,10 @@ namespace { void ensureTagClosed(); - private: - void writeDeclaration(); + private: + void newlineIfNecessary(); bool m_tagIsOpen = false; @@ -5050,7 +5245,7 @@ namespace { XmlWriter::XmlWriter( std::ostream& os ) : m_os( os ) { - writeDeclaration(); + // writeDeclaration(); // called explicitly by the reporters that use the writer class - see issue #627 } XmlWriter::~XmlWriter() { @@ -5161,8 +5356,8 @@ namespace { struct XmlReporter : public IReporter { - XmlWriter xml; - std::mutex mutex; + XmlWriter xml; + DOCTEST_DECLARE_MUTEX(mutex) // caching pointers/references to objects of these types - safe to do const ContextOptions& opt; @@ -5256,6 +5451,8 @@ namespace { } void test_run_start() override { + xml.writeDeclaration(); + // remove .exe extension - mainly to have the same output on UNIX and Windows std::string binary_name = skipPathFromFilename(opt.binary_name.c_str()); #ifdef DOCTEST_PLATFORM_WINDOWS @@ -5322,7 +5519,7 @@ namespace { } void test_case_exception(const TestCaseException& e) override { - std::lock_guard<std::mutex> lock(mutex); + DOCTEST_LOCK_MUTEX(mutex) xml.scopedElement("Exception") .writeAttribute("crash", e.is_crash) @@ -5343,7 +5540,7 @@ namespace { if(!rb.m_failed && !opt.success) return; - std::lock_guard<std::mutex> lock(mutex); + DOCTEST_LOCK_MUTEX(mutex) xml.startElement("Expression") .writeAttribute("success", !rb.m_failed) @@ -5359,7 +5556,7 @@ namespace { if(rb.m_at & assertType::is_throws_as) xml.scopedElement("ExpectedException").writeText(rb.m_exception_type); if(rb.m_at & assertType::is_throws_with) - xml.scopedElement("ExpectedExceptionString").writeText(rb.m_exception_string); + xml.scopedElement("ExpectedExceptionString").writeText(rb.m_exception_string.c_str()); if((rb.m_at & assertType::is_normal) && !rb.m_threw) xml.scopedElement("Expanded").writeText(rb.m_decomp.c_str()); @@ -5369,7 +5566,7 @@ namespace { } void log_message(const MessageData& mb) override { - 
std::lock_guard<std::mutex> lock(mutex); + DOCTEST_LOCK_MUTEX(mutex) xml.startElement("Message") .writeAttribute("type", failureString(mb.m_severity)) @@ -5405,7 +5602,8 @@ namespace { } else if((rb.m_at & assertType::is_throws_as) && (rb.m_at & assertType::is_throws_with)) { //!OCLINT s << Color::Cyan << assertString(rb.m_at) << "( " << rb.m_expr << ", \"" - << rb.m_exception_string << "\", " << rb.m_exception_type << " ) " << Color::None; + << rb.m_exception_string.c_str() + << "\", " << rb.m_exception_type << " ) " << Color::None; if(rb.m_threw) { if(!rb.m_failed) { s << "threw as expected!\n"; @@ -5426,7 +5624,8 @@ namespace { } else if(rb.m_at & assertType::is_throws_with) { //!OCLINT bitwise operator in conditional s << Color::Cyan << assertString(rb.m_at) << "( " << rb.m_expr << ", \"" - << rb.m_exception_string << "\" ) " << Color::None + << rb.m_exception_string.c_str() + << "\" ) " << Color::None << (rb.m_threw ? (!rb.m_failed ? "threw as expected!" : "threw a DIFFERENT exception: ") : "did NOT throw at all!") @@ -5451,8 +5650,8 @@ namespace { // - more attributes in tags struct JUnitReporter : public IReporter { - XmlWriter xml; - std::mutex mutex; + XmlWriter xml; + DOCTEST_DECLARE_MUTEX(mutex) Timer timer; std::vector<String> deepestSubcaseStackNames; @@ -5548,9 +5747,13 @@ namespace { // WHAT FOLLOWS ARE OVERRIDES OF THE VIRTUAL METHODS OF THE REPORTER INTERFACE // ========================================================================================= - void report_query(const QueryData&) override {} + void report_query(const QueryData&) override { + xml.writeDeclaration(); + } - void test_run_start() override {} + void test_run_start() override { + xml.writeDeclaration(); + } void test_run_end(const TestRunStats& p) override { // remove .exe extension - mainly to have the same output on UNIX and Windows @@ -5620,7 +5823,7 @@ namespace { } void test_case_exception(const TestCaseException& e) override { - std::lock_guard<std::mutex> lock(mutex); + DOCTEST_LOCK_MUTEX(mutex) testCaseData.addError("exception", e.error_string.c_str()); } @@ -5634,7 +5837,7 @@ namespace { if(!rb.m_failed) // report only failures & ignore the `success` option return; - std::lock_guard<std::mutex> lock(mutex); + DOCTEST_LOCK_MUTEX(mutex) std::ostringstream os; os << skipPathFromFilename(rb.m_file) << (opt.gnu_file_line ? 
":" : "(") @@ -5685,7 +5888,7 @@ namespace { bool hasLoggedCurrentTestStart; std::vector<SubcaseSignature> subcasesStack; size_t currentSubcaseLevel; - std::mutex mutex; + DOCTEST_DECLARE_MUTEX(mutex) // caching pointers/references to objects of these types - safe to do const ContextOptions& opt; @@ -6031,7 +6234,7 @@ namespace { // log the preamble of the test case only if there is something // else to print - something other than that an assert has failed if(opt.duration || - (st.failure_flags && st.failure_flags != TestCaseFailureReason::AssertFailure)) + (st.failure_flags && st.failure_flags != static_cast<int>(TestCaseFailureReason::AssertFailure))) logTestStart(); if(opt.duration) @@ -6062,7 +6265,7 @@ namespace { } void test_case_exception(const TestCaseException& e) override { - std::lock_guard<std::mutex> lock(mutex); + DOCTEST_LOCK_MUTEX(mutex) if(tc->m_no_output) return; @@ -6101,7 +6304,7 @@ namespace { if((!rb.m_failed && !opt.success) || tc->m_no_output) return; - std::lock_guard<std::mutex> lock(mutex); + DOCTEST_LOCK_MUTEX(mutex) logTestStart(); @@ -6117,7 +6320,7 @@ namespace { if(tc->m_no_output) return; - std::lock_guard<std::mutex> lock(mutex); + DOCTEST_LOCK_MUTEX(mutex) logTestStart(); @@ -6245,8 +6448,8 @@ namespace { char character = *current++; if(seenBackslash) { seenBackslash = false; - if(character == ',') { - s.put(','); + if(character == ',' || character == '\\') { + s.put(character); continue; } s.put('\\'); @@ -6282,30 +6485,30 @@ namespace { if(!parseOption(argc, argv, pattern, &parsedValue)) return false; - if(type == 0) { + if(type) { + // integer + // TODO: change this to use std::stoi or something else! currently it uses undefined behavior - assumes '0' on failed parse... + int theInt = std::atoi(parsedValue.c_str()); + if (theInt != 0) { + res = theInt; //!OCLINT parameter reassignment + return true; + } + } else { // boolean - const char positive[][5] = {"1", "true", "on", "yes"}; // 5 - strlen("true") + 1 - const char negative[][6] = {"0", "false", "off", "no"}; // 6 - strlen("false") + 1 + const char positive[][5] = { "1", "true", "on", "yes" }; // 5 - strlen("true") + 1 + const char negative[][6] = { "0", "false", "off", "no" }; // 6 - strlen("false") + 1 // if the value matches any of the positive/negative possibilities - for(unsigned i = 0; i < 4; i++) { - if(parsedValue.compare(positive[i], true) == 0) { + for (unsigned i = 0; i < 4; i++) { + if (parsedValue.compare(positive[i], true) == 0) { res = 1; //!OCLINT parameter reassignment return true; } - if(parsedValue.compare(negative[i], true) == 0) { + if (parsedValue.compare(negative[i], true) == 0) { res = 0; //!OCLINT parameter reassignment return true; } } - } else { - // integer - // TODO: change this to use std::stoi or something else! currently it uses undefined behavior - assumes '0' on failed parse... 
- int theInt = std::atoi(parsedValue.c_str()); // NOLINT - if(theInt != 0) { - res = theInt; //!OCLINT parameter reassignment - return true; - } } return false; } @@ -6473,7 +6676,6 @@ void Context::setOption(const char* option, bool value) { // allows the user to override procedurally the int options from the command line void Context::setOption(const char* option, int value) { setOption(option, toString(value).c_str()); - // NOLINTNEXTLINE(clang-analyzer-cplusplus.NewDeleteLeaks) } // allows the user to override procedurally the string options from the command line @@ -6611,7 +6813,7 @@ int Context::run() { // random_shuffle implementation const auto first = &testArray[0]; for(size_t i = testArray.size() - 1; i > 0; --i) { - int idxToSwap = std::rand() % (i + 1); // NOLINT + int idxToSwap = std::rand() % (i + 1); const auto temp = first[i]; @@ -6698,7 +6900,7 @@ int Context::run() { p->numAssertsFailedCurrentTest_atomic = 0; p->numAssertsCurrentTest_atomic = 0; - p->subcasesPassed.clear(); + p->fullyTraversedSubcases.clear(); DOCTEST_ITERATE_THROUGH_REPORTERS(test_case_start, tc); @@ -6708,9 +6910,10 @@ int Context::run() { do { // reset some of the fields for subcases (except for the set of fully passed ones) - p->should_reenter = false; - p->subcasesCurrentMaxLevel = 0; - p->subcasesStack.clear(); + p->reachedLeaf = false; + // May not be empty if previous subcase exited via exception. + p->subcaseStack.clear(); + p->currentSubcaseDepth = 0; p->shouldLogCurrentException = true; @@ -6744,9 +6947,9 @@ DOCTEST_MSVC_SUPPRESS_WARNING_POP p->failure_flags |= TestCaseFailureReason::TooManyFailedAsserts; } - if(p->should_reenter && run_test) + if(!p->nextSubcaseStack.empty() && run_test) DOCTEST_ITERATE_THROUGH_REPORTERS(test_case_reenter, tc); - if(!p->should_reenter) + if(p->nextSubcaseStack.empty()) run_test = false; } while(run_test); @@ -6775,7 +6978,7 @@ DOCTEST_MSVC_SUPPRESS_WARNING_POP return cleanup_and_return(); } -IReporter::~IReporter() = default; +DOCTEST_DEFINE_INTERFACE(IReporter) int IReporter::get_num_active_contexts() { return detail::g_infoContexts.size(); } const IContextScope* const* IReporter::get_active_contexts() { diff --git a/thirdparty/etcpak/AUTHORS.txt b/thirdparty/etcpak/AUTHORS.txt index e7bae62c85..675b4eb2a9 100644 --- a/thirdparty/etcpak/AUTHORS.txt +++ b/thirdparty/etcpak/AUTHORS.txt @@ -1,3 +1,5 @@ Bartosz Taudul <wolf@nereid.pl> Daniel Jungmann <el.3d.source@gmail.com> Florian Penzkofer <fp@nullptr.de> +Jae-Ho Nah <nahjaeho@gmail.com> +Marcin Ławicki <marcin.lawicki@gmail.com> diff --git a/thirdparty/etcpak/LICENSE.txt b/thirdparty/etcpak/LICENSE.txt index 59e85d6ea5..9c71039b9b 100644 --- a/thirdparty/etcpak/LICENSE.txt +++ b/thirdparty/etcpak/LICENSE.txt @@ -1,6 +1,6 @@ etcpak, an extremely fast ETC compression utility (https://github.com/wolfpld/etcpak) -Copyright (c) 2013-2021, Bartosz Taudul <wolf@nereid.pl> +Copyright (c) 2013-2022, Bartosz Taudul <wolf@nereid.pl> All rights reserved. 
Redistribution and use in source and binary forms, with or without diff --git a/thirdparty/etcpak/ProcessRGB.cpp b/thirdparty/etcpak/ProcessRGB.cpp index fdb0967ce7..4dc3bf23af 100644 --- a/thirdparty/etcpak/ProcessRGB.cpp +++ b/thirdparty/etcpak/ProcessRGB.cpp @@ -3216,9 +3216,9 @@ etcpak_force_inline static uint16x8_t ErrorProbe_EAC_NEON( uint8x8_t recVal, uin uint8x8_t srcValWide; #ifndef __aarch64__ if( Index < 8 ) - srcValWide = vdup_lane_u8( vget_low_u8( alphaBlock ), ClampConstant( Index, 0, 8 ) ); + srcValWide = vdup_lane_u8( vget_low_u8( alphaBlock ), ClampConstant( Index, 0, 7 ) ); else - srcValWide = vdup_lane_u8( vget_high_u8( alphaBlock ), ClampConstant( Index - 8, 0, 8 ) ); + srcValWide = vdup_lane_u8( vget_high_u8( alphaBlock ), ClampConstant( Index - 8, 0, 7 ) ); #else srcValWide = vdup_laneq_u8( alphaBlock, Index ); #endif @@ -3256,9 +3256,9 @@ etcpak_force_inline static int16x8_t WidenMultiplier_EAC_NEON( int16x8_t multipl constexpr int Lane = GetMulSel( Index ); #ifndef __aarch64__ if( Lane < 4 ) - return vdupq_lane_s16( vget_low_s16( multipliers ), ClampConstant( Lane, 0, 4 ) ); + return vdupq_lane_s16( vget_low_s16( multipliers ), ClampConstant( Lane, 0, 3 ) ); else - return vdupq_lane_s16( vget_high_s16( multipliers ), ClampConstant( Lane - 4, 0, 4 ) ); + return vdupq_lane_s16( vget_high_s16( multipliers ), ClampConstant( Lane - 4, 0, 3 ) ); #else return vdupq_laneq_s16( multipliers, Lane ); #endif diff --git a/thirdparty/freetype/patches/fix_gcc_lto_build.diff b/thirdparty/freetype/patches/fix_gcc_lto_build.diff new file mode 100644 index 0000000000..3c22b464c2 --- /dev/null +++ b/thirdparty/freetype/patches/fix_gcc_lto_build.diff @@ -0,0 +1,34 @@ +diff --git a/thirdparty/freetype/src/smooth/ftgrays.c b/thirdparty/freetype/src/smooth/ftgrays.c +index 622035aa79..5d9e1600b7 100644 +--- a/thirdparty/freetype/src/smooth/ftgrays.c ++++ b/thirdparty/freetype/src/smooth/ftgrays.c +@@ -1907,6 +1907,9 @@ typedef ptrdiff_t FT_PtrDist; + 0 /* delta */ + ) + ++// -- GODOT start -- ++ static volatile int _lto_dummy = 0; ++// -- GODOT end -- + + static int + gray_convert_glyph_inner( RAS_ARG, +@@ -1928,6 +1931,9 @@ typedef ptrdiff_t FT_PtrDist; + ras.max_ey, + ras.cell_null - ras.cell_free, + ras.cell_null - ras.cell_free == 1 ? "" : "s" )); ++// -- GODOT start -- ++ _lto_dummy = error; // Prevents LTO from removing this branch. ++// -- GODOT end -- + } + else + { +@@ -1935,6 +1941,9 @@ typedef ptrdiff_t FT_PtrDist; + + FT_TRACE7(( "band [%d..%d]: to be bisected\n", + ras.min_ey, ras.max_ey )); ++// -- GODOT start -- ++ _lto_dummy = error; // Prevents LTO from removing this branch. ++// -- GODOT end -- + } + + return error; diff --git a/thirdparty/freetype/src/smooth/ftgrays.c b/thirdparty/freetype/src/smooth/ftgrays.c index 622035aa79..5d9e1600b7 100644 --- a/thirdparty/freetype/src/smooth/ftgrays.c +++ b/thirdparty/freetype/src/smooth/ftgrays.c @@ -1907,6 +1907,9 @@ typedef ptrdiff_t FT_PtrDist; 0 /* delta */ ) +// -- GODOT start -- + static volatile int _lto_dummy = 0; +// -- GODOT end -- static int gray_convert_glyph_inner( RAS_ARG, @@ -1928,6 +1931,9 @@ typedef ptrdiff_t FT_PtrDist; ras.max_ey, ras.cell_null - ras.cell_free, ras.cell_null - ras.cell_free == 1 ? "" : "s" )); +// -- GODOT start -- + _lto_dummy = error; // Prevents LTO from removing this branch. 
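The patch works because a store to a `volatile` object is an observable side effect the optimizer must preserve, so assigning `error` to `_lto_dummy` in each branch keeps both branches alive under GCC's link-time optimization. The same volatile-sink trick in isolation, with illustrative names:

```cpp
// A volatile store is an observable side effect, so the optimizer cannot
// fold away the branches that perform it, even across LTO.
static volatile int g_keep_alive = 0;

int compute(int input) {
    int error;
    if (input > 0) {
        error = input * 2;
        g_keep_alive = error; // keeps this branch from being removed
    } else {
        error = -1;
        g_keep_alive = error; // likewise for the other branch
    }
    return error;
}
```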
+// -- GODOT end -- } else { @@ -1935,6 +1941,9 @@ typedef ptrdiff_t FT_PtrDist; FT_TRACE7(( "band [%d..%d]: to be bisected\n", ras.min_ey, ras.max_ey )); +// -- GODOT start -- + _lto_dummy = error; // Prevents LTO from removing this branch. +// -- GODOT end -- } return error; diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS.hh new file mode 100644 index 0000000000..224d6b746b --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS.hh @@ -0,0 +1,165 @@ +#ifndef OT_LAYOUT_GPOS_HH +#define OT_LAYOUT_GPOS_HH + +#include "../../hb-ot-layout-common.hh" +#include "../../hb-ot-layout-gsubgpos.hh" +#include "GPOS/Common.hh" +#include "GPOS/PosLookup.hh" + +namespace OT { +namespace Layout { + +static void +propagate_attachment_offsets (hb_glyph_position_t *pos, + unsigned int len, + unsigned int i, + hb_direction_t direction, + unsigned nesting_level = HB_MAX_NESTING_LEVEL); + +/* + * GPOS -- Glyph Positioning + * https://docs.microsoft.com/en-us/typography/opentype/spec/gpos + */ + +struct GPOS : GSUBGPOS +{ + static constexpr hb_tag_t tableTag = HB_OT_TAG_GPOS; + + using Lookup = GPOS_impl::PosLookup; + + const GPOS_impl::PosLookup& get_lookup (unsigned int i) const + { return static_cast<const GPOS_impl::PosLookup &> (GSUBGPOS::get_lookup (i)); } + + static inline void position_start (hb_font_t *font, hb_buffer_t *buffer); + static inline void position_finish_advances (hb_font_t *font, hb_buffer_t *buffer); + static inline void position_finish_offsets (hb_font_t *font, hb_buffer_t *buffer); + + bool subset (hb_subset_context_t *c) const + { + hb_subset_layout_context_t l (c, tableTag, c->plan->gpos_lookups, c->plan->gpos_langsys, c->plan->gpos_features); + return GSUBGPOS::subset<GPOS_impl::PosLookup> (&l); + } + + bool sanitize (hb_sanitize_context_t *c) const + { return GSUBGPOS::sanitize<GPOS_impl::PosLookup> (c); } + + HB_INTERNAL bool is_blocklisted (hb_blob_t *blob, + hb_face_t *face) const; + + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + for (unsigned i = 0; i < GSUBGPOS::get_lookup_count (); i++) + { + if (!c->gpos_lookups->has (i)) continue; + const GPOS_impl::PosLookup &l = get_lookup (i); + l.dispatch (c); + } + } + + void closure_lookups (hb_face_t *face, + const hb_set_t *glyphs, + hb_set_t *lookup_indexes /* IN/OUT */) const + { GSUBGPOS::closure_lookups<GPOS_impl::PosLookup> (face, glyphs, lookup_indexes); } + + typedef GSUBGPOS::accelerator_t<GPOS> accelerator_t; +}; + + +static void +propagate_attachment_offsets (hb_glyph_position_t *pos, + unsigned int len, + unsigned int i, + hb_direction_t direction, + unsigned nesting_level) +{ + /* Adjusts offsets of attached glyphs (both cursive and mark) to accumulate + * offset of glyph they are attached to. 
*/ + int chain = pos[i].attach_chain(), type = pos[i].attach_type(); + if (likely (!chain)) + return; + + pos[i].attach_chain() = 0; + + unsigned int j = (int) i + chain; + + if (unlikely (j >= len)) + return; + + if (unlikely (!nesting_level)) + return; + + propagate_attachment_offsets (pos, len, j, direction, nesting_level - 1); + + assert (!!(type & GPOS_impl::ATTACH_TYPE_MARK) ^ !!(type & GPOS_impl::ATTACH_TYPE_CURSIVE)); + + if (type & GPOS_impl::ATTACH_TYPE_CURSIVE) + { + if (HB_DIRECTION_IS_HORIZONTAL (direction)) + pos[i].y_offset += pos[j].y_offset; + else + pos[i].x_offset += pos[j].x_offset; + } + else /*if (type & GPOS_impl::ATTACH_TYPE_MARK)*/ + { + pos[i].x_offset += pos[j].x_offset; + pos[i].y_offset += pos[j].y_offset; + + assert (j < i); + if (HB_DIRECTION_IS_FORWARD (direction)) + for (unsigned int k = j; k < i; k++) { + pos[i].x_offset -= pos[k].x_advance; + pos[i].y_offset -= pos[k].y_advance; + } + else + for (unsigned int k = j + 1; k < i + 1; k++) { + pos[i].x_offset += pos[k].x_advance; + pos[i].y_offset += pos[k].y_advance; + } + } +} + +void +GPOS::position_start (hb_font_t *font HB_UNUSED, hb_buffer_t *buffer) +{ + unsigned int count = buffer->len; + for (unsigned int i = 0; i < count; i++) + buffer->pos[i].attach_chain() = buffer->pos[i].attach_type() = 0; +} + +void +GPOS::position_finish_advances (hb_font_t *font HB_UNUSED, hb_buffer_t *buffer HB_UNUSED) +{ + //_hb_buffer_assert_gsubgpos_vars (buffer); +} + +void +GPOS::position_finish_offsets (hb_font_t *font, hb_buffer_t *buffer) +{ + _hb_buffer_assert_gsubgpos_vars (buffer); + + unsigned int len; + hb_glyph_position_t *pos = hb_buffer_get_glyph_positions (buffer, &len); + hb_direction_t direction = buffer->props.direction; + + /* Handle attachments */ + if (buffer->scratch_flags & HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT) + for (unsigned i = 0; i < len; i++) + propagate_attachment_offsets (pos, len, i, direction); + + if (unlikely (font->slant)) + { + for (unsigned i = 0; i < len; i++) + if (unlikely (pos[i].y_offset)) + pos[i].x_offset += _hb_roundf (font->slant_xy * pos[i].y_offset); + } +} + +} + +struct GPOS_accelerator_t : Layout::GPOS::accelerator_t { + GPOS_accelerator_t (hb_face_t *face) : Layout::GPOS::accelerator_t (face) {} +}; + +} + +#endif /* OT_LAYOUT_GPOS_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/Anchor.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/Anchor.hh new file mode 100644 index 0000000000..bfe6b36afd --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/Anchor.hh @@ -0,0 +1,84 @@ +#ifndef OT_LAYOUT_GPOS_ANCHOR_HH +#define OT_LAYOUT_GPOS_ANCHOR_HH + +#include "AnchorFormat1.hh" +#include "AnchorFormat2.hh" +#include "AnchorFormat3.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct Anchor +{ + protected: + union { + HBUINT16 format; /* Format identifier */ + AnchorFormat1 format1; + AnchorFormat2 format2; + AnchorFormat3 format3; + } u; + public: + DEFINE_SIZE_UNION (2, format); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + if (!u.format.sanitize (c)) return_trace (false); + switch (u.format) { + case 1: return_trace (u.format1.sanitize (c)); + case 2: return_trace (u.format2.sanitize (c)); + case 3: return_trace (u.format3.sanitize (c)); + default:return_trace (true); + } + } + + void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id, + float *x, float *y) const + { + *x = *y = 0; + switch (u.format) { + case 1: u.format1.get_anchor (c, glyph_id, x, y); return; + case 2: u.format2.get_anchor 
(c, glyph_id, x, y); return; + case 3: u.format3.get_anchor (c, glyph_id, x, y); return; + default: return; + } + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + switch (u.format) { + case 1: return_trace (bool (reinterpret_cast<Anchor *> (u.format1.copy (c->serializer)))); + case 2: + if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING) + { + // AnchorFormat 2 just containins extra hinting information, so + // if hints are being dropped convert to format 1. + return_trace (bool (reinterpret_cast<Anchor *> (u.format1.copy (c->serializer)))); + } + return_trace (bool (reinterpret_cast<Anchor *> (u.format2.copy (c->serializer)))); + case 3: return_trace (bool (reinterpret_cast<Anchor *> (u.format3.copy (c->serializer, + c->plan->layout_variation_idx_map)))); + default:return_trace (false); + } + } + + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + switch (u.format) { + case 1: case 2: + return; + case 3: + u.format3.collect_variation_indices (c); + return; + default: return; + } + } +}; + +} +} +} + +#endif // OT_LAYOUT_GPOS_ANCHOR_HH diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorFormat1.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorFormat1.hh new file mode 100644 index 0000000000..738cc31bbf --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorFormat1.hh @@ -0,0 +1,46 @@ +#ifndef OT_LAYOUT_GPOS_ANCHORFORMAT1_HH +#define OT_LAYOUT_GPOS_ANCHORFORMAT1_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct AnchorFormat1 +{ + protected: + HBUINT16 format; /* Format identifier--format = 1 */ + FWORD xCoordinate; /* Horizontal value--in design units */ + FWORD yCoordinate; /* Vertical value--in design units */ + public: + DEFINE_SIZE_STATIC (6); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + return_trace (c->check_struct (this)); + } + + void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id HB_UNUSED, + float *x, float *y) const + { + hb_font_t *font = c->font; + *x = font->em_fscale_x (xCoordinate); + *y = font->em_fscale_y (yCoordinate); + } + + AnchorFormat1* copy (hb_serialize_context_t *c) const + { + TRACE_SERIALIZE (this); + AnchorFormat1* out = c->embed<AnchorFormat1> (this); + if (!out) return_trace (out); + out->format = 1; + return_trace (out); + } +}; + + +} +} +} + +#endif // OT_LAYOUT_GPOS_ANCHORFORMAT1_HH diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorFormat2.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorFormat2.hh new file mode 100644 index 0000000000..70b4d19f53 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorFormat2.hh @@ -0,0 +1,58 @@ +#ifndef OT_LAYOUT_GPOS_ANCHORFORMAT2_HH +#define OT_LAYOUT_GPOS_ANCHORFORMAT2_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct AnchorFormat2 +{ + + protected: + HBUINT16 format; /* Format identifier--format = 2 */ + FWORD xCoordinate; /* Horizontal value--in design units */ + FWORD yCoordinate; /* Vertical value--in design units */ + HBUINT16 anchorPoint; /* Index to glyph contour point */ + public: + DEFINE_SIZE_STATIC (8); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + return_trace (c->check_struct (this)); + } + + void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id, + float *x, float *y) const + { + hb_font_t *font = c->font; + +#ifdef HB_NO_HINTING + *x = font->em_fscale_x (xCoordinate); + *y = font->em_fscale_y (yCoordinate); + return; +#endif + + unsigned int x_ppem = 
font->x_ppem; + unsigned int y_ppem = font->y_ppem; + hb_position_t cx = 0, cy = 0; + bool ret; + + ret = (x_ppem || y_ppem) && + font->get_glyph_contour_point_for_origin (glyph_id, anchorPoint, HB_DIRECTION_LTR, &cx, &cy); + *x = ret && x_ppem ? cx : font->em_fscale_x (xCoordinate); + *y = ret && y_ppem ? cy : font->em_fscale_y (yCoordinate); + } + + AnchorFormat2* copy (hb_serialize_context_t *c) const + { + TRACE_SERIALIZE (this); + return_trace (c->embed<AnchorFormat2> (this)); + } +}; + +} +} +} + +#endif // OT_LAYOUT_GPOS_ANCHORFORMAT2_HH diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorFormat3.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorFormat3.hh new file mode 100644 index 0000000000..d77b4699be --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorFormat3.hh @@ -0,0 +1,70 @@ +#ifndef OT_LAYOUT_GPOS_ANCHORFORMAT3_HH +#define OT_LAYOUT_GPOS_ANCHORFORMAT3_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct AnchorFormat3 +{ + protected: + HBUINT16 format; /* Format identifier--format = 3 */ + FWORD xCoordinate; /* Horizontal value--in design units */ + FWORD yCoordinate; /* Vertical value--in design units */ + Offset16To<Device> + xDeviceTable; /* Offset to Device table for X + * coordinate-- from beginning of + * Anchor table (may be NULL) */ + Offset16To<Device> + yDeviceTable; /* Offset to Device table for Y + * coordinate-- from beginning of + * Anchor table (may be NULL) */ + public: + DEFINE_SIZE_STATIC (10); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + return_trace (c->check_struct (this) && xDeviceTable.sanitize (c, this) && yDeviceTable.sanitize (c, this)); + } + + void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id HB_UNUSED, + float *x, float *y) const + { + hb_font_t *font = c->font; + *x = font->em_fscale_x (xCoordinate); + *y = font->em_fscale_y (yCoordinate); + + if (font->x_ppem || font->num_coords) + *x += (this+xDeviceTable).get_x_delta (font, c->var_store, c->var_store_cache); + if (font->y_ppem || font->num_coords) + *y += (this+yDeviceTable).get_y_delta (font, c->var_store, c->var_store_cache); + } + + AnchorFormat3* copy (hb_serialize_context_t *c, + const hb_map_t *layout_variation_idx_map) const + { + TRACE_SERIALIZE (this); + if (!layout_variation_idx_map) return_trace (nullptr); + + auto *out = c->embed<AnchorFormat3> (this); + if (unlikely (!out)) return_trace (nullptr); + + out->xDeviceTable.serialize_copy (c, xDeviceTable, this, 0, hb_serialize_context_t::Head, layout_variation_idx_map); + out->yDeviceTable.serialize_copy (c, yDeviceTable, this, 0, hb_serialize_context_t::Head, layout_variation_idx_map); + return_trace (out); + } + + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + (this+xDeviceTable).collect_variation_indices (c->layout_variation_indices); + (this+yDeviceTable).collect_variation_indices (c->layout_variation_indices); + } +}; + + +} +} +} + +#endif // OT_LAYOUT_GPOS_ANCHORFORMAT3_HH diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorMatrix.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorMatrix.hh new file mode 100644 index 0000000000..c442efa1ea --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/AnchorMatrix.hh @@ -0,0 +1,77 @@ +#ifndef OT_LAYOUT_GPOS_ANCHORMATRIX_HH +#define OT_LAYOUT_GPOS_ANCHORMATRIX_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct AnchorMatrix +{ + HBUINT16 rows; /* Number of rows */ + UnsizedArrayOf<Offset16To<Anchor>> + matrixZ; /* Matrix 
of offsets to Anchor tables-- + * from beginning of AnchorMatrix table */ + public: + DEFINE_SIZE_ARRAY (2, matrixZ); + + bool sanitize (hb_sanitize_context_t *c, unsigned int cols) const + { + TRACE_SANITIZE (this); + if (!c->check_struct (this)) return_trace (false); + if (unlikely (hb_unsigned_mul_overflows (rows, cols))) return_trace (false); + unsigned int count = rows * cols; + if (!c->check_array (matrixZ.arrayZ, count)) return_trace (false); + for (unsigned int i = 0; i < count; i++) + if (!matrixZ[i].sanitize (c, this)) return_trace (false); + return_trace (true); + } + + const Anchor& get_anchor (unsigned int row, unsigned int col, + unsigned int cols, bool *found) const + { + *found = false; + if (unlikely (row >= rows || col >= cols)) return Null (Anchor); + *found = !matrixZ[row * cols + col].is_null (); + return this+matrixZ[row * cols + col]; + } + + template <typename Iterator, + hb_requires (hb_is_iterator (Iterator))> + void collect_variation_indices (hb_collect_variation_indices_context_t *c, + Iterator index_iter) const + { + for (unsigned i : index_iter) + (this+matrixZ[i]).collect_variation_indices (c); + } + + template <typename Iterator, + hb_requires (hb_is_iterator (Iterator))> + bool subset (hb_subset_context_t *c, + unsigned num_rows, + Iterator index_iter) const + { + TRACE_SUBSET (this); + + auto *out = c->serializer->start_embed (this); + + if (!index_iter) return_trace (false); + if (unlikely (!c->serializer->extend_min (out))) return_trace (false); + + out->rows = num_rows; + for (const unsigned i : index_iter) + { + auto *offset = c->serializer->embed (matrixZ[i]); + if (!offset) return_trace (false); + offset->serialize_subset (c, matrixZ[i], this); + } + + return_trace (true); + } +}; + + +} +} +} + +#endif /* OT_LAYOUT_GPOS_ANCHORMATRIX_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/ChainContextPos.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/ChainContextPos.hh new file mode 100644 index 0000000000..d551ac2a2b --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/ChainContextPos.hh @@ -0,0 +1,14 @@ +#ifndef OT_LAYOUT_GPOS_CHAINCONTEXTPOS_HH +#define OT_LAYOUT_GPOS_CHAINCONTEXTPOS_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct ChainContextPos : ChainContext {}; + +} +} +} + +#endif /* OT_LAYOUT_GPOS_CHAINCONTEXTPOS_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/Common.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/Common.hh new file mode 100644 index 0000000000..e16c06729d --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/Common.hh @@ -0,0 +1,32 @@ +#ifndef OT_LAYOUT_GPOS_COMMON_HH +#define OT_LAYOUT_GPOS_COMMON_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +enum attach_type_t { + ATTACH_TYPE_NONE = 0X00, + + /* Each attachment should be either a mark or a cursive; can't be both. */ + ATTACH_TYPE_MARK = 0X01, + ATTACH_TYPE_CURSIVE = 0X02, +}; + +/* buffer **position** var allocations */ +#define attach_chain() var.i16[0] /* glyph to which this attaches to, relative to current glyphs; negative for going back, positive for forward. */ +#define attach_type() var.u8[2] /* attachment type */ +/* Note! if attach_chain() is zero, the value of attach_type() is irrelevant. 
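The two macros above pack per-glyph attachment state into the 4-byte scratch `var` of `hb_glyph_position_t` (HarfBuzz's `hb_var_int_t` union): `i16[0]` holds the signed relative chain, and `u8[2]` holds the type byte, which overlaps `i16[1]` rather than the chain. A standalone illustration of that layout; reading union members this way follows the HarfBuzz convention, though it is formally implementation-defined in strict C++:

```cpp
#include <cassert>
#include <cstdint>

// Shape of the scratch 'var' that attach_chain()/attach_type() index into:
// i16[0] is the relative chain, u8[2] the type byte; they never alias
// because u8[2] lives in the bytes of i16[1].
union GlyphVar {
    int16_t i16[2];
    uint8_t u8[4];
    int32_t i32;
};

int main() {
    GlyphVar var{};            // zero-initialized
    var.i16[0] = -3;           // attach_chain(): attach to the glyph 3 back
    var.u8[2]  = 0x01;         // attach_type(): mark attachment
    assert(var.i16[0] == -3);  // chain is untouched by the type byte
}
```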
*/ + +template<typename Iterator, typename SrcLookup> +static void SinglePos_serialize (hb_serialize_context_t *c, + const SrcLookup *src, + Iterator it, + const hb_map_t *layout_variation_idx_map); + + +} +} +} + +#endif // OT_LAYOUT_GPOS_COMMON_HH diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/ContextPos.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/ContextPos.hh new file mode 100644 index 0000000000..2a01eaa3a6 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/ContextPos.hh @@ -0,0 +1,14 @@ +#ifndef OT_LAYOUT_GPOS_CONTEXTPOS_HH +#define OT_LAYOUT_GPOS_CONTEXTPOS_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct ContextPos : Context {}; + +} +} +} + +#endif /* OT_LAYOUT_GPOS_CONTEXTPOS_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/CursivePos.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/CursivePos.hh new file mode 100644 index 0000000000..c105cfb091 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/CursivePos.hh @@ -0,0 +1,35 @@ +#ifndef OT_LAYOUT_GPOS_CURSIVEPOS_HH +#define OT_LAYOUT_GPOS_CURSIVEPOS_HH + +#include "CursivePosFormat1.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct CursivePos +{ + protected: + union { + HBUINT16 format; /* Format identifier */ + CursivePosFormat1 format1; + } u; + + public: + template <typename context_t, typename ...Ts> + typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const + { + TRACE_DISPATCH (this, u.format); + if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); + switch (u.format) { + case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); + default:return_trace (c->default_return_value ()); + } + } +}; + +} +} +} + +#endif /* OT_LAYOUT_GPOS_CURSIVEPOS_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/CursivePosFormat1.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/CursivePosFormat1.hh new file mode 100644 index 0000000000..e212fab976 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/CursivePosFormat1.hh @@ -0,0 +1,281 @@ +#ifndef OT_LAYOUT_GPOS_CURSIVEPOSFORMAT1_HH +#define OT_LAYOUT_GPOS_CURSIVEPOSFORMAT1_HH + +#include "Anchor.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct EntryExitRecord +{ + friend struct CursivePosFormat1; + + bool sanitize (hb_sanitize_context_t *c, const void *base) const + { + TRACE_SANITIZE (this); + return_trace (entryAnchor.sanitize (c, base) && exitAnchor.sanitize (c, base)); + } + + void collect_variation_indices (hb_collect_variation_indices_context_t *c, + const void *src_base) const + { + (src_base+entryAnchor).collect_variation_indices (c); + (src_base+exitAnchor).collect_variation_indices (c); + } + + EntryExitRecord* subset (hb_subset_context_t *c, + const void *src_base) const + { + TRACE_SERIALIZE (this); + auto *out = c->serializer->embed (this); + if (unlikely (!out)) return_trace (nullptr); + + out->entryAnchor.serialize_subset (c, entryAnchor, src_base); + out->exitAnchor.serialize_subset (c, exitAnchor, src_base); + return_trace (out); + } + + protected: + Offset16To<Anchor> + entryAnchor; /* Offset to EntryAnchor table--from + * beginning of CursivePos + * subtable--may be NULL */ + Offset16To<Anchor> + exitAnchor; /* Offset to ExitAnchor table--from + * beginning of CursivePos + * subtable--may be NULL */ + public: + DEFINE_SIZE_STATIC (4); +}; + +static void +reverse_cursive_minor_offset (hb_glyph_position_t *pos, unsigned int i, hb_direction_t direction, unsigned int new_parent) { + int chain = 
pos[i].attach_chain(), type = pos[i].attach_type(); + if (likely (!chain || 0 == (type & ATTACH_TYPE_CURSIVE))) + return; + + pos[i].attach_chain() = 0; + + unsigned int j = (int) i + chain; + + /* Stop if we see new parent in the chain. */ + if (j == new_parent) + return; + + reverse_cursive_minor_offset (pos, j, direction, new_parent); + + if (HB_DIRECTION_IS_HORIZONTAL (direction)) + pos[j].y_offset = -pos[i].y_offset; + else + pos[j].x_offset = -pos[i].x_offset; + + pos[j].attach_chain() = -chain; + pos[j].attach_type() = type; +} + + +struct CursivePosFormat1 +{ + protected: + HBUINT16 format; /* Format identifier--format = 1 */ + Offset16To<Coverage> + coverage; /* Offset to Coverage table--from + * beginning of subtable */ + Array16Of<EntryExitRecord> + entryExitRecord; /* Array of EntryExit records--in + * Coverage Index order */ + public: + DEFINE_SIZE_ARRAY (6, entryExitRecord); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + return_trace (coverage.sanitize (c, this) && entryExitRecord.sanitize (c, this)); + } + + bool intersects (const hb_set_t *glyphs) const + { return (this+coverage).intersects (glyphs); } + + void closure_lookups (hb_closure_lookups_context_t *c) const {} + + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + + hb_zip (this+coverage, entryExitRecord) + | hb_filter (c->glyph_set, hb_first) + | hb_map (hb_second) + | hb_apply ([&] (const EntryExitRecord& record) { record.collect_variation_indices (c, this); }) + ; + } + + void collect_glyphs (hb_collect_glyphs_context_t *c) const + { if (unlikely (!(this+coverage).collect_coverage (c->input))) return; } + + const Coverage &get_coverage () const { return this+coverage; } + + bool apply (hb_ot_apply_context_t *c) const + { + TRACE_APPLY (this); + hb_buffer_t *buffer = c->buffer; + + const EntryExitRecord &this_record = entryExitRecord[(this+coverage).get_coverage (buffer->cur().codepoint)]; + if (!this_record.entryAnchor) return_trace (false); + + hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; + skippy_iter.reset (buffer->idx, 1); + unsigned unsafe_from; + if (!skippy_iter.prev (&unsafe_from)) + { + buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1); + return_trace (false); + } + + const EntryExitRecord &prev_record = entryExitRecord[(this+coverage).get_coverage (buffer->info[skippy_iter.idx].codepoint)]; + if (!prev_record.exitAnchor) + { + buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); + return_trace (false); + } + + unsigned int i = skippy_iter.idx; + unsigned int j = buffer->idx; + + buffer->unsafe_to_break (i, j); + float entry_x, entry_y, exit_x, exit_y; + (this+prev_record.exitAnchor).get_anchor (c, buffer->info[i].codepoint, &exit_x, &exit_y); + (this+this_record.entryAnchor).get_anchor (c, buffer->info[j].codepoint, &entry_x, &entry_y); + + hb_glyph_position_t *pos = buffer->pos; + + hb_position_t d; + /* Main-direction adjustment */ + switch (c->direction) { + case HB_DIRECTION_LTR: + pos[i].x_advance = roundf (exit_x) + pos[i].x_offset; + + d = roundf (entry_x) + pos[j].x_offset; + pos[j].x_advance -= d; + pos[j].x_offset -= d; + break; + case HB_DIRECTION_RTL: + d = roundf (exit_x) + pos[i].x_offset; + pos[i].x_advance -= d; + pos[i].x_offset -= d; + + pos[j].x_advance = roundf (entry_x) + pos[j].x_offset; + break; + case HB_DIRECTION_TTB: + pos[i].y_advance = roundf (exit_y) + pos[i].y_offset; + + d = roundf (entry_y) + pos[j].y_offset; + pos[j].y_advance -= 
d; + pos[j].y_offset -= d; + break; + case HB_DIRECTION_BTT: + d = roundf (exit_y) + pos[i].y_offset; + pos[i].y_advance -= d; + pos[i].y_offset -= d; + + pos[j].y_advance = roundf (entry_y); + break; + case HB_DIRECTION_INVALID: + default: + break; + } + + /* Cross-direction adjustment */ + + /* We attach child to parent (think graph theory and rooted trees, where + * the root stays on the baseline and each node aligns itself against its + * parent). + * + * Optimize things for the case of RightToLeft, as that's most common in + * Arabic. */ + unsigned int child = i; + unsigned int parent = j; + hb_position_t x_offset = entry_x - exit_x; + hb_position_t y_offset = entry_y - exit_y; + if (!(c->lookup_props & LookupFlag::RightToLeft)) + { + unsigned int k = child; + child = parent; + parent = k; + x_offset = -x_offset; + y_offset = -y_offset; + } + + /* If the child was already connected to someone else, walk through its old + * chain and reverse the link direction, such that the whole tree of its + * previous connection now attaches to the new parent. Watch out for the case + * where the new parent is on the path from the old chain... + */ + reverse_cursive_minor_offset (pos, child, c->direction, parent); + + pos[child].attach_type() = ATTACH_TYPE_CURSIVE; + pos[child].attach_chain() = (int) parent - (int) child; + buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT; + if (likely (HB_DIRECTION_IS_HORIZONTAL (c->direction))) + pos[child].y_offset = y_offset; + else + pos[child].x_offset = x_offset; + + /* If the parent was attached to the child, separate them. + * https://github.com/harfbuzz/harfbuzz/issues/2469 + */ + if (unlikely (pos[parent].attach_chain() == -pos[child].attach_chain())) + pos[parent].attach_chain() = 0; + + buffer->idx++; + return_trace (true); + } + + template <typename Iterator, + hb_requires (hb_is_iterator (Iterator))> + void serialize (hb_subset_context_t *c, + Iterator it, + const void *src_base) + { + if (unlikely (!c->serializer->extend_min ((*this)))) return; + this->format = 1; + this->entryExitRecord.len = it.len (); + + for (const EntryExitRecord& entry_record : + it + | hb_map (hb_second)) + entry_record.subset (c, src_base); + + auto glyphs = + + it + | hb_map_retains_sorting (hb_first) + ; + + coverage.serialize_serialize (c->serializer, glyphs); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + const hb_map_t &glyph_map = *c->plan->glyph_map; + + auto *out = c->serializer->start_embed (*this); + if (unlikely (!out)) return_trace (false); + + auto it = + + hb_zip (this+coverage, entryExitRecord) + | hb_filter (glyphset, hb_first) + | hb_map_retains_sorting ([&] (hb_pair_t<hb_codepoint_t, const EntryExitRecord&> p) -> hb_pair_t<hb_codepoint_t, const EntryExitRecord&> + { return hb_pair (glyph_map[p.first], p.second);}) + ; + + bool ret = bool (it); + out->serialize (c, it, this); + return_trace (ret); + } +}; + + +} +} +} + +#endif /* OT_LAYOUT_GPOS_CURSIVEPOSFORMAT1_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/ExtensionPos.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/ExtensionPos.hh new file mode 100644 index 0000000000..d1808adab4 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/ExtensionPos.hh @@ -0,0 +1,17 @@ +#ifndef OT_LAYOUT_GPOS_EXTENSIONPOS_HH +#define OT_LAYOUT_GPOS_EXTENSIONPOS_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct ExtensionPos : Extension<ExtensionPos> +{ + typedef struct PosLookupSubTable SubTable; +}; + +} +} +} +
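The attach_type()/attach_chain() fields written by CursivePosFormat1::apply above are only bookkeeping: a later pass over the buffer walks each attachment chain and adds every parent's cross-axis offset to its children, so the whole cursive tree hangs off a root glyph that stays on the baseline. The following standalone sketch illustrates that consumption step; the `Pos` struct and `propagate` function are simplified stand-ins for illustration, not the upstream pass (which lives in harfbuzz's GPOS table code, outside this diff):

#include <cstdio>
#include <vector>

// attach_chain is a relative parent index; 0 means this glyph is a root.
struct Pos { int y_offset; int attach_chain; };

static void propagate (std::vector<Pos> &pos, std::vector<bool> &done, unsigned i)
{
  if (done[i]) return;
  done[i] = true;
  if (!pos[i].attach_chain) return;            // root: stays on the baseline
  unsigned parent = i + pos[i].attach_chain;   // negative chain -> earlier glyph
  propagate (pos, done, parent);               // resolve the parent first
  pos[i].y_offset += pos[parent].y_offset;     // child aligns against its parent
}

int main ()
{
  // Horizontal text: glyph 1 attaches to glyph 0, glyph 2 attaches to glyph 1.
  std::vector<Pos> pos = { {0, 0}, {30, -1}, {20, -1} };
  std::vector<bool> done (pos.size (), false);
  for (unsigned i = 0; i < pos.size (); i++)
    propagate (pos, done, i);
  for (const Pos &p : pos)
    printf ("y_offset = %d\n", p.y_offset);    // prints 0, 30, 50
}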
+#endif /* OT_LAYOUT_GPOS_EXTENSIONPOS_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkArray.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkArray.hh new file mode 100644 index 0000000000..f8cddd1991 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkArray.hh @@ -0,0 +1,113 @@ +#ifndef OT_LAYOUT_GPOS_MARKARRAY_HH +#define OT_LAYOUT_GPOS_MARKARRAY_HH + +#include "AnchorMatrix.hh" +#include "MarkRecord.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Coverage order */ +{ + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + return_trace (Array16Of<MarkRecord>::sanitize (c, this)); + } + + bool apply (hb_ot_apply_context_t *c, + unsigned int mark_index, unsigned int glyph_index, + const AnchorMatrix &anchors, unsigned int class_count, + unsigned int glyph_pos) const + { + TRACE_APPLY (this); + hb_buffer_t *buffer = c->buffer; + const MarkRecord &record = Array16Of<MarkRecord>::operator[](mark_index); + unsigned int mark_class = record.klass; + + const Anchor& mark_anchor = this + record.markAnchor; + bool found; + const Anchor& glyph_anchor = anchors.get_anchor (glyph_index, mark_class, class_count, &found); + /* If this subtable doesn't have an anchor for this base and this class, + * return false such that the subsequent subtables have a chance at it. */ + if (unlikely (!found)) return_trace (false); + + float mark_x, mark_y, base_x, base_y; + + buffer->unsafe_to_break (glyph_pos, buffer->idx + 1); + mark_anchor.get_anchor (c, buffer->cur().codepoint, &mark_x, &mark_y); + glyph_anchor.get_anchor (c, buffer->info[glyph_pos].codepoint, &base_x, &base_y); + + hb_glyph_position_t &o = buffer->cur_pos(); + o.x_offset = roundf (base_x - mark_x); + o.y_offset = roundf (base_y - mark_y); + o.attach_type() = ATTACH_TYPE_MARK; + o.attach_chain() = (int) glyph_pos - (int) buffer->idx; + buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT; + + buffer->idx++; + return_trace (true); + } + + template <typename Iterator, + hb_requires (hb_is_iterator (Iterator))> + bool subset (hb_subset_context_t *c, + Iterator coverage, + const hb_map_t *klass_mapping) const + { + TRACE_SUBSET (this); + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + + auto* out = c->serializer->start_embed (this); + if (unlikely (!c->serializer->extend_min (out))) return_trace (false); + + auto mark_iter = + + hb_zip (coverage, this->iter ()) + | hb_filter (glyphset, hb_first) + | hb_map (hb_second) + ; + + unsigned new_length = 0; + for (const auto& mark_record : mark_iter) { + if (unlikely (!mark_record.subset (c, this, klass_mapping))) + return_trace (false); + new_length++; + } + + if (unlikely (!c->serializer->check_assign (out->len, new_length, + HB_SERIALIZE_ERROR_ARRAY_OVERFLOW))) + return_trace (false); + + return_trace (true); + } +}; + +static void Markclass_closure_and_remap_indexes (const Coverage &mark_coverage, + const MarkArray &mark_array, + const hb_set_t &glyphset, + hb_map_t* klass_mapping /* INOUT */) +{ + hb_set_t orig_classes; + + + hb_zip (mark_coverage, mark_array) + | hb_filter (glyphset, hb_first) + | hb_map (hb_second) + | hb_map (&MarkRecord::get_class) + | hb_sink (orig_classes) + ; + + unsigned idx = 0; + for (auto klass : orig_classes.iter ()) + { + if (klass_mapping->has (klass)) continue; + klass_mapping->set (klass, idx); + idx++; + } +} + +} +} +} + +#endif /* OT_LAYOUT_GPOS_MARKARRAY_HH */ diff --git 
a/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkBasePos.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkBasePos.hh new file mode 100644 index 0000000000..e99e13ff84 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkBasePos.hh @@ -0,0 +1,35 @@ +#ifndef OT_LAYOUT_GPOS_MARKBASEPOS_HH +#define OT_LAYOUT_GPOS_MARKBASEPOS_HH + +#include "MarkBasePosFormat1.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct MarkBasePos +{ + protected: + union { + HBUINT16 format; /* Format identifier */ + MarkBasePosFormat1 format1; + } u; + + public: + template <typename context_t, typename ...Ts> + typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const + { + TRACE_DISPATCH (this, u.format); + if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); + switch (u.format) { + case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); + default:return_trace (c->default_return_value ()); + } + } +}; + +} +} +} + +#endif /* OT_LAYOUT_GPOS_MARKBASEPOS_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkBasePosFormat1.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkBasePosFormat1.hh new file mode 100644 index 0000000000..a10b806fe5 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkBasePosFormat1.hh @@ -0,0 +1,217 @@ +#ifndef OT_LAYOUT_GPOS_MARKBASEPOSFORMAT1_HH +#define OT_LAYOUT_GPOS_MARKBASEPOSFORMAT1_HH + +#include "MarkArray.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +typedef AnchorMatrix BaseArray; /* base-major-- + * in order of BaseCoverage Index--, + * mark-minor-- + * ordered by class--zero-based. */ + +struct MarkBasePosFormat1 +{ + protected: + HBUINT16 format; /* Format identifier--format = 1 */ + Offset16To<Coverage> + markCoverage; /* Offset to MarkCoverage table--from + * beginning of MarkBasePos subtable */ + Offset16To<Coverage> + baseCoverage; /* Offset to BaseCoverage table--from + * beginning of MarkBasePos subtable */ + HBUINT16 classCount; /* Number of classes defined for marks */ + Offset16To<MarkArray> + markArray; /* Offset to MarkArray table--from + * beginning of MarkBasePos subtable */ + Offset16To<BaseArray> + baseArray; /* Offset to BaseArray table--from + * beginning of MarkBasePos subtable */ + + public: + DEFINE_SIZE_STATIC (12); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + return_trace (c->check_struct (this) && + markCoverage.sanitize (c, this) && + baseCoverage.sanitize (c, this) && + markArray.sanitize (c, this) && + baseArray.sanitize (c, this, (unsigned int) classCount)); + } + + bool intersects (const hb_set_t *glyphs) const + { + return (this+markCoverage).intersects (glyphs) && + (this+baseCoverage).intersects (glyphs); + } + + void closure_lookups (hb_closure_lookups_context_t *c) const {} + + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + + hb_zip (this+markCoverage, this+markArray) + | hb_filter (c->glyph_set, hb_first) + | hb_map (hb_second) + | hb_apply ([&] (const MarkRecord& record) { record.collect_variation_indices (c, &(this+markArray)); }) + ; + + hb_map_t klass_mapping; + Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, *c->glyph_set, &klass_mapping); + + unsigned basecount = (this+baseArray).rows; + auto base_iter = + + hb_zip (this+baseCoverage, hb_range (basecount)) + | hb_filter (c->glyph_set, hb_first) + | hb_map (hb_second) + ; + + hb_sorted_vector_t<unsigned> base_indexes; + for (const unsigned row : base_iter) 
+ { + + hb_range ((unsigned) classCount) + | hb_filter (klass_mapping) + | hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; }) + | hb_sink (base_indexes) + ; + } + (this+baseArray).collect_variation_indices (c, base_indexes.iter ()); + } + + void collect_glyphs (hb_collect_glyphs_context_t *c) const + { + if (unlikely (!(this+markCoverage).collect_coverage (c->input))) return; + if (unlikely (!(this+baseCoverage).collect_coverage (c->input))) return; + } + + const Coverage &get_coverage () const { return this+markCoverage; } + + bool apply (hb_ot_apply_context_t *c) const + { + TRACE_APPLY (this); + hb_buffer_t *buffer = c->buffer; + unsigned int mark_index = (this+markCoverage).get_coverage (buffer->cur().codepoint); + if (likely (mark_index == NOT_COVERED)) return_trace (false); + + /* Now we search backwards for a non-mark glyph */ + hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; + skippy_iter.reset (buffer->idx, 1); + skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks); + do { + unsigned unsafe_from; + if (!skippy_iter.prev (&unsafe_from)) + { + buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1); + return_trace (false); + } + + /* We only want to attach to the first of a MultipleSubst sequence. + * https://github.com/harfbuzz/harfbuzz/issues/740 + * Reject others... + * ...but stop if we find a mark in the MultipleSubst sequence: + * https://github.com/harfbuzz/harfbuzz/issues/1020 */ + if (!_hb_glyph_info_multiplied (&buffer->info[skippy_iter.idx]) || + 0 == _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) || + (skippy_iter.idx == 0 || + _hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx - 1]) || + _hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx]) != + _hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx - 1]) || + _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) != + _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx - 1]) + 1 + )) + break; + skippy_iter.reject (); + } while (true); + + /* Checking that matched glyph is actually a base glyph by GDEF is too strong; disabled */ + //if (!_hb_glyph_info_is_base_glyph (&buffer->info[skippy_iter.idx])) { return_trace (false); } + + unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[skippy_iter.idx].codepoint); + if (base_index == NOT_COVERED) + { + buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); + return_trace (false); + } + + return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, skippy_iter.idx)); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + const hb_map_t &glyph_map = *c->plan->glyph_map; + + auto *out = c->serializer->start_embed (*this); + if (unlikely (!c->serializer->extend_min (out))) return_trace (false); + out->format = format; + + hb_map_t klass_mapping; + Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, glyphset, &klass_mapping); + + if (!klass_mapping.get_population ()) return_trace (false); + out->classCount = klass_mapping.get_population (); + + auto mark_iter = + + hb_zip (this+markCoverage, this+markArray) + | hb_filter (glyphset, hb_first) + ; + + hb_sorted_vector_t<hb_codepoint_t> new_coverage; + + mark_iter + | hb_map (hb_first) + | hb_map (glyph_map) + | hb_sink (new_coverage) + ; + + if (!out->markCoverage.serialize_serialize (c->serializer, new_coverage.iter ())) + return_trace (false); + + 
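Both the collect_variation_indices code above and the rest of this subset method prune a BaseArray (an AnchorMatrix) by flattening (base row, mark class) pairs into offsets, because the matrix stores its anchor offsets row-major: one row per covered base glyph, one column per mark class. A minimal sketch of that index arithmetic, with invented example counts:

#include <cstdio>
#include <vector>

int main ()
{
  unsigned class_count = 4;                     // columns: one per original mark class
  std::vector<unsigned> kept_bases   = {0, 2};  // rows that survive the subset
  std::vector<unsigned> kept_classes = {1, 3};  // classes that survive remapping

  for (unsigned row : kept_bases)
    for (unsigned col : kept_classes)
      printf ("keep flat anchor index %u\n", row * class_count + col);  // 1, 3, 9, 11
}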
out->markArray.serialize_subset (c, markArray, this, + (this+markCoverage).iter (), + &klass_mapping); + + unsigned basecount = (this+baseArray).rows; + auto base_iter = + + hb_zip (this+baseCoverage, hb_range (basecount)) + | hb_filter (glyphset, hb_first) + ; + + new_coverage.reset (); + + base_iter + | hb_map (hb_first) + | hb_map (glyph_map) + | hb_sink (new_coverage) + ; + + if (!out->baseCoverage.serialize_serialize (c->serializer, new_coverage.iter ())) + return_trace (false); + + hb_sorted_vector_t<unsigned> base_indexes; + for (const unsigned row : + base_iter + | hb_map (hb_second)) + { + + hb_range ((unsigned) classCount) + | hb_filter (klass_mapping) + | hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; }) + | hb_sink (base_indexes) + ; + } + + out->baseArray.serialize_subset (c, baseArray, this, + base_iter.len (), + base_indexes.iter ()); + + return_trace (true); + } +}; + + +} +} +} + +#endif /* OT_LAYOUT_GPOS_MARKBASEPOSFORMAT1_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkLigPos.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkLigPos.hh new file mode 100644 index 0000000000..7e74aa73e0 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkLigPos.hh @@ -0,0 +1,35 @@ +#ifndef OT_LAYOUT_GPOS_MARKLIGPOS_HH +#define OT_LAYOUT_GPOS_MARKLIGPOS_HH + +#include "MarkLigPosFormat1.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct MarkLigPos +{ + protected: + union { + HBUINT16 format; /* Format identifier */ + MarkLigPosFormat1 format1; + } u; + + public: + template <typename context_t, typename ...Ts> + typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const + { + TRACE_DISPATCH (this, u.format); + if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); + switch (u.format) { + case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); + default:return_trace (c->default_return_value ()); + } + } +}; + +} +} +} + +#endif /* OT_LAYOUT_GPOS_MARKLIGPOS_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkLigPosFormat1.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkLigPosFormat1.hh new file mode 100644 index 0000000000..4382aa6c6c --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkLigPosFormat1.hh @@ -0,0 +1,244 @@ +#ifndef OT_LAYOUT_GPOS_MARKLIGPOSFORMAT1_HH +#define OT_LAYOUT_GPOS_MARKLIGPOSFORMAT1_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +typedef AnchorMatrix LigatureAttach; /* component-major-- + * in order of writing direction--, + * mark-minor-- + * ordered by class--zero-based. 
*/ + +/* Array of LigatureAttach tables ordered by LigatureCoverage Index */ +struct LigatureArray : List16OfOffset16To<LigatureAttach> +{ + template <typename Iterator, + hb_requires (hb_is_iterator (Iterator))> + bool subset (hb_subset_context_t *c, + Iterator coverage, + unsigned class_count, + const hb_map_t *klass_mapping) const + { + TRACE_SUBSET (this); + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + + auto *out = c->serializer->start_embed (this); + if (unlikely (!c->serializer->extend_min (out))) return_trace (false); + + for (const auto _ : + hb_zip (coverage, *this) + | hb_filter (glyphset, hb_first)) + { + auto *matrix = out->serialize_append (c->serializer); + if (unlikely (!matrix)) return_trace (false); + + const LigatureAttach& src = (this + _.second); + auto indexes = + + hb_range (src.rows * class_count) + | hb_filter ([=] (unsigned index) { return klass_mapping->has (index % class_count); }) + ; + matrix->serialize_subset (c, + _.second, + this, + src.rows, + indexes); + } + return_trace (this->len); + } +}; + +struct MarkLigPosFormat1 +{ + protected: + HBUINT16 format; /* Format identifier--format = 1 */ + Offset16To<Coverage> + markCoverage; /* Offset to Mark Coverage table--from + * beginning of MarkLigPos subtable */ + Offset16To<Coverage> + ligatureCoverage; /* Offset to Ligature Coverage + * table--from beginning of MarkLigPos + * subtable */ + HBUINT16 classCount; /* Number of defined mark classes */ + Offset16To<MarkArray> + markArray; /* Offset to MarkArray table--from + * beginning of MarkLigPos subtable */ + Offset16To<LigatureArray> + ligatureArray; /* Offset to LigatureArray table--from + * beginning of MarkLigPos subtable */ + public: + DEFINE_SIZE_STATIC (12); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + return_trace (c->check_struct (this) && + markCoverage.sanitize (c, this) && + ligatureCoverage.sanitize (c, this) && + markArray.sanitize (c, this) && + ligatureArray.sanitize (c, this, (unsigned int) classCount)); + } + + bool intersects (const hb_set_t *glyphs) const + { + return (this+markCoverage).intersects (glyphs) && + (this+ligatureCoverage).intersects (glyphs); + } + + void closure_lookups (hb_closure_lookups_context_t *c) const {} + + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + + hb_zip (this+markCoverage, this+markArray) + | hb_filter (c->glyph_set, hb_first) + | hb_map (hb_second) + | hb_apply ([&] (const MarkRecord& record) { record.collect_variation_indices (c, &(this+markArray)); }) + ; + + hb_map_t klass_mapping; + Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, *c->glyph_set, &klass_mapping); + + unsigned ligcount = (this+ligatureArray).len; + auto lig_iter = + + hb_zip (this+ligatureCoverage, hb_range (ligcount)) + | hb_filter (c->glyph_set, hb_first) + | hb_map (hb_second) + ; + + const LigatureArray& lig_array = this+ligatureArray; + for (const unsigned i : lig_iter) + { + hb_sorted_vector_t<unsigned> lig_indexes; + unsigned row_count = lig_array[i].rows; + for (unsigned row : + hb_range (row_count)) + { + + hb_range ((unsigned) classCount) + | hb_filter (klass_mapping) + | hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; }) + | hb_sink (lig_indexes) + ; + } + + lig_array[i].collect_variation_indices (c, lig_indexes.iter ()); + } + } + + void collect_glyphs (hb_collect_glyphs_context_t *c) const + { + if (unlikely (!(this+markCoverage).collect_coverage (c->input))) return; + if (unlikely 
(!(this+ligatureCoverage).collect_coverage (c->input))) return; + } + + const Coverage &get_coverage () const { return this+markCoverage; } + + bool apply (hb_ot_apply_context_t *c) const + { + TRACE_APPLY (this); + hb_buffer_t *buffer = c->buffer; + unsigned int mark_index = (this+markCoverage).get_coverage (buffer->cur().codepoint); + if (likely (mark_index == NOT_COVERED)) return_trace (false); + + /* Now we search backwards for a non-mark glyph */ + hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; + skippy_iter.reset (buffer->idx, 1); + skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks); + unsigned unsafe_from; + if (!skippy_iter.prev (&unsafe_from)) + { + buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1); + return_trace (false); + } + + /* Checking that matched glyph is actually a ligature by GDEF is too strong; disabled */ + //if (!_hb_glyph_info_is_ligature (&buffer->info[skippy_iter.idx])) { return_trace (false); } + + unsigned int j = skippy_iter.idx; + unsigned int lig_index = (this+ligatureCoverage).get_coverage (buffer->info[j].codepoint); + if (lig_index == NOT_COVERED) + { + buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); + return_trace (false); + } + + const LigatureArray& lig_array = this+ligatureArray; + const LigatureAttach& lig_attach = lig_array[lig_index]; + + /* Find component to attach to */ + unsigned int comp_count = lig_attach.rows; + if (unlikely (!comp_count)) + { + buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); + return_trace (false); + } + + /* We must now check whether the ligature ID of the current mark glyph + * is identical to the ligature ID of the found ligature. If yes, we + * can directly use the component index. If not, we attach the mark + * glyph to the last component of the ligature. 
*/ + unsigned int comp_index; + unsigned int lig_id = _hb_glyph_info_get_lig_id (&buffer->info[j]); + unsigned int mark_id = _hb_glyph_info_get_lig_id (&buffer->cur()); + unsigned int mark_comp = _hb_glyph_info_get_lig_comp (&buffer->cur()); + if (lig_id && lig_id == mark_id && mark_comp > 0) + comp_index = hb_min (comp_count, _hb_glyph_info_get_lig_comp (&buffer->cur())) - 1; + else + comp_index = comp_count - 1; + + return_trace ((this+markArray).apply (c, mark_index, comp_index, lig_attach, classCount, j)); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + const hb_map_t &glyph_map = *c->plan->glyph_map; + + auto *out = c->serializer->start_embed (*this); + if (unlikely (!c->serializer->extend_min (out))) return_trace (false); + out->format = format; + + hb_map_t klass_mapping; + Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, glyphset, &klass_mapping); + + if (!klass_mapping.get_population ()) return_trace (false); + out->classCount = klass_mapping.get_population (); + + auto mark_iter = + + hb_zip (this+markCoverage, this+markArray) + | hb_filter (glyphset, hb_first) + ; + + auto new_mark_coverage = + + mark_iter + | hb_map_retains_sorting (hb_first) + | hb_map_retains_sorting (glyph_map) + ; + + if (!out->markCoverage.serialize_serialize (c->serializer, new_mark_coverage)) + return_trace (false); + + out->markArray.serialize_subset (c, markArray, this, + (this+markCoverage).iter (), + &klass_mapping); + + auto new_ligature_coverage = + + hb_iter (this + ligatureCoverage) + | hb_filter (glyphset) + | hb_map_retains_sorting (glyph_map) + ; + + if (!out->ligatureCoverage.serialize_serialize (c->serializer, new_ligature_coverage)) + return_trace (false); + + out->ligatureArray.serialize_subset (c, ligatureArray, this, + hb_iter (this+ligatureCoverage), classCount, &klass_mapping); + + return_trace (true); + } + +}; + +} +} +} + +#endif /* OT_LAYOUT_GPOS_MARKLIGPOSFORMAT1_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkMarkPos.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkMarkPos.hh new file mode 100644 index 0000000000..c0eee6d54c --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkMarkPos.hh @@ -0,0 +1,36 @@ +#ifndef OT_LAYOUT_GPOS_MARKMARKPOS_HH +#define OT_LAYOUT_GPOS_MARKMARKPOS_HH + +#include "MarkMarkPosFormat1.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct MarkMarkPos +{ + protected: + union { + HBUINT16 format; /* Format identifier */ + MarkMarkPosFormat1 format1; + } u; + + public: + template <typename context_t, typename ...Ts> + typename context_t::return_t dispatch (context_t *c, Ts&&... 
ds) const + { + TRACE_DISPATCH (this, u.format); + if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); + switch (u.format) { + case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); + default:return_trace (c->default_return_value ()); + } + } +}; + + +} +} +} + +#endif /* OT_LAYOUT_GPOS_MARKMARKPOS_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkMarkPosFormat1.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkMarkPosFormat1.hh new file mode 100644 index 0000000000..c48a74f773 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkMarkPosFormat1.hh @@ -0,0 +1,227 @@ +#ifndef OT_LAYOUT_GPOS_MARKMARKPOSFORMAT1_HH +#define OT_LAYOUT_GPOS_MARKMARKPOSFORMAT1_HH + +#include "MarkArray.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +typedef AnchorMatrix Mark2Array; /* mark2-major-- + * in order of Mark2Coverage Index--, + * mark1-minor-- + * ordered by class--zero-based. */ + +struct MarkMarkPosFormat1 +{ + protected: + HBUINT16 format; /* Format identifier--format = 1 */ + Offset16To<Coverage> + mark1Coverage; /* Offset to Combining Mark1 Coverage + * table--from beginning of MarkMarkPos + * subtable */ + Offset16To<Coverage> + mark2Coverage; /* Offset to Combining Mark2 Coverage + * table--from beginning of MarkMarkPos + * subtable */ + HBUINT16 classCount; /* Number of defined mark classes */ + Offset16To<MarkArray> + mark1Array; /* Offset to Mark1Array table--from + * beginning of MarkMarkPos subtable */ + Offset16To<Mark2Array> + mark2Array; /* Offset to Mark2Array table--from + * beginning of MarkMarkPos subtable */ + public: + DEFINE_SIZE_STATIC (12); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + return_trace (c->check_struct (this) && + mark1Coverage.sanitize (c, this) && + mark2Coverage.sanitize (c, this) && + mark1Array.sanitize (c, this) && + mark2Array.sanitize (c, this, (unsigned int) classCount)); + } + + bool intersects (const hb_set_t *glyphs) const + { + return (this+mark1Coverage).intersects (glyphs) && + (this+mark2Coverage).intersects (glyphs); + } + + void closure_lookups (hb_closure_lookups_context_t *c) const {} + + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + + hb_zip (this+mark1Coverage, this+mark1Array) + | hb_filter (c->glyph_set, hb_first) + | hb_map (hb_second) + | hb_apply ([&] (const MarkRecord& record) { record.collect_variation_indices (c, &(this+mark1Array)); }) + ; + + hb_map_t klass_mapping; + Markclass_closure_and_remap_indexes (this+mark1Coverage, this+mark1Array, *c->glyph_set, &klass_mapping); + + unsigned mark2_count = (this+mark2Array).rows; + auto mark2_iter = + + hb_zip (this+mark2Coverage, hb_range (mark2_count)) + | hb_filter (c->glyph_set, hb_first) + | hb_map (hb_second) + ; + + hb_sorted_vector_t<unsigned> mark2_indexes; + for (const unsigned row : mark2_iter) + { + + hb_range ((unsigned) classCount) + | hb_filter (klass_mapping) + | hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; }) + | hb_sink (mark2_indexes) + ; + } + (this+mark2Array).collect_variation_indices (c, mark2_indexes.iter ()); + } + + void collect_glyphs (hb_collect_glyphs_context_t *c) const + { + if (unlikely (!(this+mark1Coverage).collect_coverage (c->input))) return; + if (unlikely (!(this+mark2Coverage).collect_coverage (c->input))) return; + } + + const Coverage &get_coverage () const { return this+mark1Coverage; } + + bool apply (hb_ot_apply_context_t *c) const
+ { + TRACE_APPLY (this); + hb_buffer_t *buffer = c->buffer; + unsigned int mark1_index = (this+mark1Coverage).get_coverage (buffer->cur().codepoint); + if (likely (mark1_index == NOT_COVERED)) return_trace (false); + + /* now we search backwards for a suitable mark glyph until a non-mark glyph */ + hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; + skippy_iter.reset (buffer->idx, 1); + skippy_iter.set_lookup_props (c->lookup_props & ~LookupFlag::IgnoreFlags); + unsigned unsafe_from; + if (!skippy_iter.prev (&unsafe_from)) + { + buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1); + return_trace (false); + } + + if (!_hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx])) + { + buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); + return_trace (false); + } + + unsigned int j = skippy_iter.idx; + + unsigned int id1 = _hb_glyph_info_get_lig_id (&buffer->cur()); + unsigned int id2 = _hb_glyph_info_get_lig_id (&buffer->info[j]); + unsigned int comp1 = _hb_glyph_info_get_lig_comp (&buffer->cur()); + unsigned int comp2 = _hb_glyph_info_get_lig_comp (&buffer->info[j]); + + if (likely (id1 == id2)) + { + if (id1 == 0) /* Marks belonging to the same base. */ + goto good; + else if (comp1 == comp2) /* Marks belonging to the same ligature component. */ + goto good; + } + else + { + /* If ligature ids don't match, it may be the case that one of the marks + * itself is a ligature. In which case match. */ + if ((id1 > 0 && !comp1) || (id2 > 0 && !comp2)) + goto good; + } + + /* Didn't match. */ + buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); + return_trace (false); + + good: + unsigned int mark2_index = (this+mark2Coverage).get_coverage (buffer->info[j].codepoint); + if (mark2_index == NOT_COVERED) + { + buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); + return_trace (false); + } + + return_trace ((this+mark1Array).apply (c, mark1_index, mark2_index, this+mark2Array, classCount, j)); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + const hb_map_t &glyph_map = *c->plan->glyph_map; + + auto *out = c->serializer->start_embed (*this); + if (unlikely (!c->serializer->extend_min (out))) return_trace (false); + out->format = format; + + hb_map_t klass_mapping; + Markclass_closure_and_remap_indexes (this+mark1Coverage, this+mark1Array, glyphset, &klass_mapping); + + if (!klass_mapping.get_population ()) return_trace (false); + out->classCount = klass_mapping.get_population (); + + auto mark1_iter = + + hb_zip (this+mark1Coverage, this+mark1Array) + | hb_filter (glyphset, hb_first) + ; + + hb_sorted_vector_t<hb_codepoint_t> new_coverage; + + mark1_iter + | hb_map (hb_first) + | hb_map (glyph_map) + | hb_sink (new_coverage) + ; + + if (!out->mark1Coverage.serialize_serialize (c->serializer, new_coverage.iter ())) + return_trace (false); + + out->mark1Array.serialize_subset (c, mark1Array, this, + (this+mark1Coverage).iter (), + &klass_mapping); + + unsigned mark2count = (this+mark2Array).rows; + auto mark2_iter = + + hb_zip (this+mark2Coverage, hb_range (mark2count)) + | hb_filter (glyphset, hb_first) + ; + + new_coverage.reset (); + + mark2_iter + | hb_map (hb_first) + | hb_map (glyph_map) + | hb_sink (new_coverage) + ; + + if (!out->mark2Coverage.serialize_serialize (c->serializer, new_coverage.iter ())) + return_trace (false); + + hb_sorted_vector_t<unsigned> mark2_indexes; + for (const unsigned row : 
+ mark2_iter + | hb_map (hb_second)) + { + + hb_range ((unsigned) classCount) + | hb_filter (klass_mapping) + | hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; }) + | hb_sink (mark2_indexes) + ; + } + + out->mark2Array.serialize_subset (c, mark2Array, this, mark2_iter.len (), mark2_indexes.iter ()); + + return_trace (true); + } +}; + + +} +} +} + +#endif /* OT_LAYOUT_GPOS_MARKMARKPOSFORMAT1_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkRecord.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkRecord.hh new file mode 100644 index 0000000000..7a514453ae --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/MarkRecord.hh @@ -0,0 +1,52 @@ +#ifndef OT_LAYOUT_GPOS_MARKRECORD_HH +#define OT_LAYOUT_GPOS_MARKRECORD_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct MarkRecord +{ + friend struct MarkArray; + + protected: + HBUINT16 klass; /* Class defined for this mark */ + Offset16To<Anchor> + markAnchor; /* Offset to Anchor table--from + * beginning of MarkArray table */ + public: + DEFINE_SIZE_STATIC (4); + + unsigned get_class () const { return (unsigned) klass; } + bool sanitize (hb_sanitize_context_t *c, const void *base) const + { + TRACE_SANITIZE (this); + return_trace (c->check_struct (this) && markAnchor.sanitize (c, base)); + } + + MarkRecord *subset (hb_subset_context_t *c, + const void *src_base, + const hb_map_t *klass_mapping) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->embed (this); + if (unlikely (!out)) return_trace (nullptr); + + out->klass = klass_mapping->get (klass); + out->markAnchor.serialize_subset (c, markAnchor, src_base); + return_trace (out); + } + + void collect_variation_indices (hb_collect_variation_indices_context_t *c, + const void *src_base) const + { + (src_base+markAnchor).collect_variation_indices (c); + } +}; + + +} +} +} + +#endif /* OT_LAYOUT_GPOS_MARKRECORD_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPos.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPos.hh new file mode 100644 index 0000000000..8479178d38 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPos.hh @@ -0,0 +1,38 @@ +#ifndef OT_LAYOUT_GPOS_PAIRPOS_HH +#define OT_LAYOUT_GPOS_PAIRPOS_HH + +#include "PairPosFormat1.hh" +#include "PairPosFormat2.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct PairPos +{ + protected: + union { + HBUINT16 format; /* Format identifier */ + PairPosFormat1 format1; + PairPosFormat2 format2; + } u; + + public: + template <typename context_t, typename ...Ts> + typename context_t::return_t dispatch (context_t *c, Ts&&... 
ds) const + { + TRACE_DISPATCH (this, u.format); + if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); + switch (u.format) { + case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); + case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...)); + default:return_trace (c->default_return_value ()); + } + } +}; + +} +} +} + +#endif // OT_LAYOUT_GPOS_PAIRPOS_HH diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPosFormat1.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPosFormat1.hh new file mode 100644 index 0000000000..35a2db2d45 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPosFormat1.hh @@ -0,0 +1,420 @@ +#ifndef OT_LAYOUT_GPOS_PAIRPOSFORMAT1_HH +#define OT_LAYOUT_GPOS_PAIRPOSFORMAT1_HH + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct PairValueRecord +{ + friend struct PairSet; + + int cmp (hb_codepoint_t k) const + { return secondGlyph.cmp (k); } + + struct context_t + { + const void *base; + const ValueFormat *valueFormats; + const ValueFormat *newFormats; + unsigned len1; /* valueFormats[0].get_len() */ + const hb_map_t *glyph_map; + const hb_map_t *layout_variation_idx_map; + }; + + bool subset (hb_subset_context_t *c, + context_t *closure) const + { + TRACE_SERIALIZE (this); + auto *s = c->serializer; + auto *out = s->start_embed (*this); + if (unlikely (!s->extend_min (out))) return_trace (false); + + out->secondGlyph = (*closure->glyph_map)[secondGlyph]; + + closure->valueFormats[0].copy_values (s, + closure->newFormats[0], + closure->base, &values[0], + closure->layout_variation_idx_map); + closure->valueFormats[1].copy_values (s, + closure->newFormats[1], + closure->base, + &values[closure->len1], + closure->layout_variation_idx_map); + + return_trace (true); + } + + void collect_variation_indices (hb_collect_variation_indices_context_t *c, + const ValueFormat *valueFormats, + const void *base) const + { + unsigned record1_len = valueFormats[0].get_len (); + unsigned record2_len = valueFormats[1].get_len (); + const hb_array_t<const Value> values_array = values.as_array (record1_len + record2_len); + + if (valueFormats[0].has_device ()) + valueFormats[0].collect_variation_indices (c, base, values_array.sub_array (0, record1_len)); + + if (valueFormats[1].has_device ()) + valueFormats[1].collect_variation_indices (c, base, values_array.sub_array (record1_len, record2_len)); + } + + bool intersects (const hb_set_t& glyphset) const + { + return glyphset.has(secondGlyph); + } + + const Value* get_values_1 () const + { + return &values[0]; + } + + const Value* get_values_2 (ValueFormat format1) const + { + return &values[format1.get_len ()]; + } + + protected: + HBGlyphID16 secondGlyph; /* GlyphID of second glyph in the + * pair--first glyph is listed in the + * Coverage table */ + ValueRecord values; /* Positioning data for the first glyph + * followed by for second glyph */ + public: + DEFINE_SIZE_ARRAY (2, values); +}; + +struct PairSet +{ + friend struct PairPosFormat1; + + bool intersects (const hb_set_t *glyphs, + const ValueFormat *valueFormats) const + { + unsigned int len1 = valueFormats[0].get_len (); + unsigned int len2 = valueFormats[1].get_len (); + unsigned int record_size = HBUINT16::static_size * (1 + len1 + len2); + + const PairValueRecord *record = &firstPairValueRecord; + unsigned int count = len; + for (unsigned int i = 0; i < count; i++) + { + if (glyphs->has (record->secondGlyph)) + return true; + record = &StructAtOffset<const PairValueRecord> 
(record, record_size); + } + return false; + } + + void collect_glyphs (hb_collect_glyphs_context_t *c, + const ValueFormat *valueFormats) const + { + unsigned int len1 = valueFormats[0].get_len (); + unsigned int len2 = valueFormats[1].get_len (); + unsigned int record_size = HBUINT16::static_size * (1 + len1 + len2); + + const PairValueRecord *record = &firstPairValueRecord; + c->input->add_array (&record->secondGlyph, len, record_size); + } + + void collect_variation_indices (hb_collect_variation_indices_context_t *c, + const ValueFormat *valueFormats) const + { + unsigned len1 = valueFormats[0].get_len (); + unsigned len2 = valueFormats[1].get_len (); + unsigned record_size = HBUINT16::static_size * (1 + len1 + len2); + + const PairValueRecord *record = &firstPairValueRecord; + unsigned count = len; + for (unsigned i = 0; i < count; i++) + { + if (c->glyph_set->has (record->secondGlyph)) + { record->collect_variation_indices (c, valueFormats, this); } + + record = &StructAtOffset<const PairValueRecord> (record, record_size); + } + } + + bool apply (hb_ot_apply_context_t *c, + const ValueFormat *valueFormats, + unsigned int pos) const + { + TRACE_APPLY (this); + hb_buffer_t *buffer = c->buffer; + unsigned int len1 = valueFormats[0].get_len (); + unsigned int len2 = valueFormats[1].get_len (); + unsigned int record_size = HBUINT16::static_size * (1 + len1 + len2); + + const PairValueRecord *record = hb_bsearch (buffer->info[pos].codepoint, + &firstPairValueRecord, + len, + record_size); + if (record) + { + bool applied_first = valueFormats[0].apply_value (c, this, &record->values[0], buffer->cur_pos()); + bool applied_second = valueFormats[1].apply_value (c, this, &record->values[len1], buffer->pos[pos]); + if (applied_first || applied_second) + buffer->unsafe_to_break (buffer->idx, pos + 1); + if (len2) + pos++; + buffer->idx = pos; + return_trace (true); + } + buffer->unsafe_to_concat (buffer->idx, pos + 1); + return_trace (false); + } + + bool subset (hb_subset_context_t *c, + const ValueFormat valueFormats[2], + const ValueFormat newFormats[2]) const + { + TRACE_SUBSET (this); + auto snap = c->serializer->snapshot (); + + auto *out = c->serializer->start_embed (*this); + if (unlikely (!c->serializer->extend_min (out))) return_trace (false); + out->len = 0; + + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + const hb_map_t &glyph_map = *c->plan->glyph_map; + + unsigned len1 = valueFormats[0].get_len (); + unsigned len2 = valueFormats[1].get_len (); + unsigned record_size = HBUINT16::static_size + Value::static_size * (len1 + len2); + + PairValueRecord::context_t context = + { + this, + valueFormats, + newFormats, + len1, + &glyph_map, + c->plan->layout_variation_idx_map + }; + + const PairValueRecord *record = &firstPairValueRecord; + unsigned count = len, num = 0; + for (unsigned i = 0; i < count; i++) + { + if (glyphset.has (record->secondGlyph) + && record->subset (c, &context)) num++; + record = &StructAtOffset<const PairValueRecord> (record, record_size); + } + + out->len = num; + if (!num) c->serializer->revert (snap); + return_trace (num); + } + + struct sanitize_closure_t + { + const ValueFormat *valueFormats; + unsigned int len1; /* valueFormats[0].get_len() */ + unsigned int stride; /* 1 + len1 + len2 */ + }; + + bool sanitize (hb_sanitize_context_t *c, const sanitize_closure_t *closure) const + { + TRACE_SANITIZE (this); + if (!(c->check_struct (this) + && c->check_range (&firstPairValueRecord, + len, + HBUINT16::static_size, + closure->stride))) return_trace 
(false); + + unsigned int count = len; + const PairValueRecord *record = &firstPairValueRecord; + return_trace (closure->valueFormats[0].sanitize_values_stride_unsafe (c, this, &record->values[0], count, closure->stride) && + closure->valueFormats[1].sanitize_values_stride_unsafe (c, this, &record->values[closure->len1], count, closure->stride)); + } + + protected: + HBUINT16 len; /* Number of PairValueRecords */ + PairValueRecord firstPairValueRecord; + /* Array of PairValueRecords--ordered + * by GlyphID of the second glyph */ + public: + DEFINE_SIZE_MIN (2); +}; + +struct PairPosFormat1 +{ + protected: + HBUINT16 format; /* Format identifier--format = 1 */ + Offset16To<Coverage> + coverage; /* Offset to Coverage table--from + * beginning of subtable */ + ValueFormat valueFormat[2]; /* [0] Defines the types of data in + * ValueRecord1--for the first glyph + * in the pair--may be zero (0) */ + /* [1] Defines the types of data in + * ValueRecord2--for the second glyph + * in the pair--may be zero (0) */ + Array16OfOffset16To<PairSet> + pairSet; /* Array of PairSet tables + * ordered by Coverage Index */ + public: + DEFINE_SIZE_ARRAY (10, pairSet); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + + if (!c->check_struct (this)) return_trace (false); + + unsigned int len1 = valueFormat[0].get_len (); + unsigned int len2 = valueFormat[1].get_len (); + PairSet::sanitize_closure_t closure = + { + valueFormat, + len1, + 1 + len1 + len2 + }; + + return_trace (coverage.sanitize (c, this) && pairSet.sanitize (c, this, &closure)); + } + + + bool intersects (const hb_set_t *glyphs) const + { + return + + hb_zip (this+coverage, pairSet) + | hb_filter (*glyphs, hb_first) + | hb_map (hb_second) + | hb_map ([glyphs, this] (const Offset16To<PairSet> &_) + { return (this+_).intersects (glyphs, valueFormat); }) + | hb_any + ; + } + + void closure_lookups (hb_closure_lookups_context_t *c) const {} + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + if ((!valueFormat[0].has_device ()) && (!valueFormat[1].has_device ())) return; + + auto it = + + hb_zip (this+coverage, pairSet) + | hb_filter (c->glyph_set, hb_first) + | hb_map (hb_second) + ; + + if (!it) return; + + it + | hb_map (hb_add (this)) + | hb_apply ([&] (const PairSet& _) { _.collect_variation_indices (c, valueFormat); }) + ; + } + + void collect_glyphs (hb_collect_glyphs_context_t *c) const + { + if (unlikely (!(this+coverage).collect_coverage (c->input))) return; + unsigned int count = pairSet.len; + for (unsigned int i = 0; i < count; i++) + (this+pairSet[i]).collect_glyphs (c, valueFormat); + } + + const Coverage &get_coverage () const { return this+coverage; } + + bool apply (hb_ot_apply_context_t *c) const + { + TRACE_APPLY (this); + hb_buffer_t *buffer = c->buffer; + unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint); + if (likely (index == NOT_COVERED)) return_trace (false); + + hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; + skippy_iter.reset (buffer->idx, 1); + unsigned unsafe_to; + if (!skippy_iter.next (&unsafe_to)) + { + buffer->unsafe_to_concat (buffer->idx, unsafe_to); + return_trace (false); + } + + return_trace ((this+pairSet[index]).apply (c, valueFormat, skippy_iter.idx)); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + const hb_map_t &glyph_map = *c->plan->glyph_map; + + auto *out = c->serializer->start_embed (*this); + 
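A PairSet, as walked above, is a length-prefixed run of variable-sized PairValueRecords: a 16-bit second-glyph ID followed by len1 + len2 16-bit value fields. That is why every traversal recomputes record_size and steps with StructAtOffset (or bsearches by that stride) rather than indexing a plain array. A self-contained sketch of the same layout over raw uint16_t storage; the glyph IDs and value counts are invented for illustration:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main ()
{
  unsigned len1 = 2, len2 = 1;   // value fields for the first and second glyph
  unsigned record_size = sizeof (uint16_t) * (1 + len1 + len2);

  // Two records, each laid out as: secondGlyph, len1 values, len2 values.
  const uint16_t data[] = { 5, 10, 0, (uint16_t) -3,
                            9, 12, 1, 0 };
  unsigned count = sizeof (data) / record_size;

  const unsigned char *base = (const unsigned char *) data;
  for (unsigned i = 0; i < count; i++)   // upstream bsearches by this stride
  {
    uint16_t second;
    memcpy (&second, base + i * record_size, sizeof second);
    if (second == 9)
      printf ("record %u: values1 at field 1, values2 at field %u\n", i, 1 + len1);
  }
}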
if (unlikely (!c->serializer->extend_min (out))) return_trace (false); + out->format = format; + out->valueFormat[0] = valueFormat[0]; + out->valueFormat[1] = valueFormat[1]; + if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING) + { + hb_pair_t<unsigned, unsigned> newFormats = compute_effective_value_formats (glyphset); + out->valueFormat[0] = newFormats.first; + out->valueFormat[1] = newFormats.second; + } + + hb_sorted_vector_t<hb_codepoint_t> new_coverage; + + + hb_zip (this+coverage, pairSet) + | hb_filter (glyphset, hb_first) + | hb_filter ([this, c, out] (const Offset16To<PairSet>& _) + { + auto snap = c->serializer->snapshot (); + auto *o = out->pairSet.serialize_append (c->serializer); + if (unlikely (!o)) return false; + bool ret = o->serialize_subset (c, _, this, valueFormat, out->valueFormat); + if (!ret) + { + out->pairSet.pop (); + c->serializer->revert (snap); + } + return ret; + }, + hb_second) + | hb_map (hb_first) + | hb_map (glyph_map) + | hb_sink (new_coverage) + ; + + out->coverage.serialize_serialize (c->serializer, new_coverage.iter ()); + + return_trace (bool (new_coverage)); + } + + + hb_pair_t<unsigned, unsigned> compute_effective_value_formats (const hb_set_t& glyphset) const + { + unsigned len1 = valueFormat[0].get_len (); + unsigned len2 = valueFormat[1].get_len (); + unsigned record_size = HBUINT16::static_size + Value::static_size * (len1 + len2); + + unsigned format1 = 0; + unsigned format2 = 0; + for (const Offset16To<PairSet>& _ : + + hb_zip (this+coverage, pairSet) | hb_filter (glyphset, hb_first) | hb_map (hb_second)) + { + const PairSet& set = (this + _); + const PairValueRecord *record = &set.firstPairValueRecord; + + for (unsigned i = 0; i < set.len; i++) + { + if (record->intersects (glyphset)) + { + format1 = format1 | valueFormat[0].get_effective_format (record->get_values_1 ()); + format2 = format2 | valueFormat[1].get_effective_format (record->get_values_2 (valueFormat[0])); + } + record = &StructAtOffset<const PairValueRecord> (record, record_size); + } + } + + return hb_pair (format1, format2); + } +}; + + +} +} +} + +#endif // OT_LAYOUT_GPOS_PAIRPOSFORMAT1_HH diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPosFormat2.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPosFormat2.hh new file mode 100644 index 0000000000..3f5f9959c4 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/PairPosFormat2.hh @@ -0,0 +1,314 @@ +#ifndef OT_LAYOUT_GPOS_PAIRPOSFORMAT2_HH +#define OT_LAYOUT_GPOS_PAIRPOSFORMAT2_HH + +#include "ValueFormat.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct PairPosFormat2 +{ + protected: + HBUINT16 format; /* Format identifier--format = 2 */ + Offset16To<Coverage> + coverage; /* Offset to Coverage table--from + * beginning of subtable */ + ValueFormat valueFormat1; /* ValueRecord definition--for the + * first glyph of the pair--may be zero + * (0) */ + ValueFormat valueFormat2; /* ValueRecord definition--for the + * second glyph of the pair--may be + * zero (0) */ + Offset16To<ClassDef> + classDef1; /* Offset to ClassDef table--from + * beginning of PairPos subtable--for + * the first glyph of the pair */ + Offset16To<ClassDef> + classDef2; /* Offset to ClassDef table--from + * beginning of PairPos subtable--for + * the second glyph of the pair */ + HBUINT16 class1Count; /* Number of classes in ClassDef1 + * table--includes Class0 */ + HBUINT16 class2Count; /* Number of classes in ClassDef2 + * table--includes Class0 */ + ValueRecord values; /* Matrix of value pairs: + * class1-major, 
class2-minor, + * Each entry has value1 and value2 */ + public: + DEFINE_SIZE_ARRAY (16, values); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + if (!(c->check_struct (this) + && coverage.sanitize (c, this) + && classDef1.sanitize (c, this) + && classDef2.sanitize (c, this))) return_trace (false); + + unsigned int len1 = valueFormat1.get_len (); + unsigned int len2 = valueFormat2.get_len (); + unsigned int stride = len1 + len2; + unsigned int record_size = valueFormat1.get_size () + valueFormat2.get_size (); + unsigned int count = (unsigned int) class1Count * (unsigned int) class2Count; + return_trace (c->check_range ((const void *) values, + count, + record_size) && + valueFormat1.sanitize_values_stride_unsafe (c, this, &values[0], count, stride) && + valueFormat2.sanitize_values_stride_unsafe (c, this, &values[len1], count, stride)); + } + + bool intersects (const hb_set_t *glyphs) const + { + return (this+coverage).intersects (glyphs) && + (this+classDef2).intersects (glyphs); + } + + void closure_lookups (hb_closure_lookups_context_t *c) const {} + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + if (!intersects (c->glyph_set)) return; + if ((!valueFormat1.has_device ()) && (!valueFormat2.has_device ())) return; + + hb_set_t klass1_glyphs, klass2_glyphs; + if (!(this+classDef1).collect_coverage (&klass1_glyphs)) return; + if (!(this+classDef2).collect_coverage (&klass2_glyphs)) return; + + hb_set_t class1_set, class2_set; + for (const unsigned cp : + c->glyph_set->iter () | hb_filter (this + coverage)) + { + if (!klass1_glyphs.has (cp)) class1_set.add (0); + else + { + unsigned klass1 = (this+classDef1).get (cp); + class1_set.add (klass1); + } + } + + class2_set.add (0); + for (const unsigned cp : + c->glyph_set->iter () | hb_filter (klass2_glyphs)) + { + unsigned klass2 = (this+classDef2).get (cp); + class2_set.add (klass2); + } + + if (class1_set.is_empty () + || class2_set.is_empty () + || (class2_set.get_population() == 1 && class2_set.has(0))) + return; + + unsigned len1 = valueFormat1.get_len (); + unsigned len2 = valueFormat2.get_len (); + const hb_array_t<const Value> values_array = values.as_array ((unsigned)class1Count * (unsigned) class2Count * (len1 + len2)); + for (const unsigned class1_idx : class1_set.iter ()) + { + for (const unsigned class2_idx : class2_set.iter ()) + { + unsigned start_offset = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2); + if (valueFormat1.has_device ()) + valueFormat1.collect_variation_indices (c, this, values_array.sub_array (start_offset, len1)); + + if (valueFormat2.has_device ()) + valueFormat2.collect_variation_indices (c, this, values_array.sub_array (start_offset+len1, len2)); + } + } + } + + void collect_glyphs (hb_collect_glyphs_context_t *c) const + { + if (unlikely (!(this+coverage).collect_coverage (c->input))) return; + if (unlikely (!(this+classDef2).collect_coverage (c->input))) return; + } + + const Coverage &get_coverage () const { return this+coverage; } + + bool apply (hb_ot_apply_context_t *c) const + { + TRACE_APPLY (this); + hb_buffer_t *buffer = c->buffer; + unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint); + if (likely (index == NOT_COVERED)) return_trace (false); + + hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; + skippy_iter.reset (buffer->idx, 1); + unsigned unsafe_to; + if (!skippy_iter.next (&unsafe_to)) + { + buffer->unsafe_to_concat (buffer->idx, unsafe_to); + 
return_trace (false); + } + + unsigned int len1 = valueFormat1.get_len (); + unsigned int len2 = valueFormat2.get_len (); + unsigned int record_len = len1 + len2; + + unsigned int klass1 = (this+classDef1).get_class (buffer->cur().codepoint); + unsigned int klass2 = (this+classDef2).get_class (buffer->info[skippy_iter.idx].codepoint); + if (unlikely (klass1 >= class1Count || klass2 >= class2Count)) + { + buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1); + return_trace (false); + } + + const Value *v = &values[record_len * (klass1 * class2Count + klass2)]; + + bool applied_first = false, applied_second = false; + + + /* Isolate simple kerning and apply it half to each side. + * Results in better cursor positioning / underline drawing. + * + * Disabled, because it causes issues... :-( + * https://github.com/harfbuzz/harfbuzz/issues/3408 + * https://github.com/harfbuzz/harfbuzz/pull/3235#issuecomment-1029814978 + */ +#ifndef HB_SPLIT_KERN + if (0) +#endif + { + if (!len2) + { + const hb_direction_t dir = buffer->props.direction; + const bool horizontal = HB_DIRECTION_IS_HORIZONTAL (dir); + const bool backward = HB_DIRECTION_IS_BACKWARD (dir); + unsigned mask = horizontal ? ValueFormat::xAdvance : ValueFormat::yAdvance; + if (backward) + mask |= mask >> 2; /* Add e.g. xPlacement in RTL. */ + /* Add Devices. */ + mask |= mask << 4; + + if (valueFormat1 & ~mask) + goto bail; + + /* Is simple kern. Apply value on an empty position slot, + * then split it between sides. */ + + hb_glyph_position_t pos{}; + if (valueFormat1.apply_value (c, this, v, pos)) + { + hb_position_t *src = &pos.x_advance; + hb_position_t *dst1 = &buffer->cur_pos().x_advance; + hb_position_t *dst2 = &buffer->pos[skippy_iter.idx].x_advance; + unsigned i = horizontal ? 0 : 1; + + hb_position_t kern = src[i]; + hb_position_t kern1 = kern >> 1; + hb_position_t kern2 = kern - kern1; + + if (!backward) + { + dst1[i] += kern1; + dst2[i] += kern2; + dst2[i + 2] += kern2; + } + else + { + dst1[i] += kern1; + dst1[i + 2] += src[i + 2] - kern2; + dst2[i] += kern2; + } + + applied_first = applied_second = kern != 0; + goto success; + } + goto boring; + } + } + bail: + + + applied_first = valueFormat1.apply_value (c, this, v, buffer->cur_pos()); + applied_second = valueFormat2.apply_value (c, this, v + len1, buffer->pos[skippy_iter.idx]); + + success: + if (applied_first || applied_second) + buffer->unsafe_to_break (buffer->idx, skippy_iter.idx + 1); + else + boring: + buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1); + + + buffer->idx = skippy_iter.idx; + if (len2) + buffer->idx++; + + return_trace (true); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + auto *out = c->serializer->start_embed (*this); + if (unlikely (!c->serializer->extend_min (out))) return_trace (false); + out->format = format; + + hb_map_t klass1_map; + out->classDef1.serialize_subset (c, classDef1, this, &klass1_map, true, true, &(this + coverage)); + out->class1Count = klass1_map.get_population (); + + hb_map_t klass2_map; + out->classDef2.serialize_subset (c, classDef2, this, &klass2_map, true, false); + out->class2Count = klass2_map.get_population (); + + unsigned len1 = valueFormat1.get_len (); + unsigned len2 = valueFormat2.get_len (); + + hb_pair_t<unsigned, unsigned> newFormats = hb_pair (valueFormat1, valueFormat2); + if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING) + newFormats = compute_effective_value_formats (klass1_map, klass2_map); + + out->valueFormat1 = newFormats.first; + out->valueFormat2 =
newFormats.second; + + for (unsigned class1_idx : + hb_range ((unsigned) class1Count) | hb_filter (klass1_map)) + { + for (unsigned class2_idx : + hb_range ((unsigned) class2Count) | hb_filter (klass2_map)) + { + unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2); + valueFormat1.copy_values (c->serializer, newFormats.first, this, &values[idx], c->plan->layout_variation_idx_map); + valueFormat2.copy_values (c->serializer, newFormats.second, this, &values[idx + len1], c->plan->layout_variation_idx_map); + } + } + + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + const hb_map_t &glyph_map = *c->plan->glyph_map; + + auto it = + + hb_iter (this+coverage) + | hb_filter (glyphset) + | hb_map_retains_sorting (glyph_map) + ; + + out->coverage.serialize_serialize (c->serializer, it); + return_trace (out->class1Count && out->class2Count && bool (it)); + } + + + hb_pair_t<unsigned, unsigned> compute_effective_value_formats (const hb_map_t& klass1_map, + const hb_map_t& klass2_map) const + { + unsigned len1 = valueFormat1.get_len (); + unsigned len2 = valueFormat2.get_len (); + + unsigned format1 = 0; + unsigned format2 = 0; + + for (unsigned class1_idx : + hb_range ((unsigned) class1Count) | hb_filter (klass1_map)) + { + for (unsigned class2_idx : + hb_range ((unsigned) class2Count) | hb_filter (klass2_map)) + { + unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2); + format1 = format1 | valueFormat1.get_effective_format (&values[idx]); + format2 = format2 | valueFormat2.get_effective_format (&values[idx + len1]); + } + } + + return hb_pair (format1, format2); + } +}; + +} +} +} + +#endif // OT_LAYOUT_GPOS_PAIRPOSFORMAT2_HH diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/PosLookup.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/PosLookup.hh new file mode 100644 index 0000000000..c4e57bb543 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/PosLookup.hh @@ -0,0 +1,79 @@ +#ifndef OT_LAYOUT_GPOS_POSLOOKUP_HH +#define OT_LAYOUT_GPOS_POSLOOKUP_HH + +#include "PosLookupSubTable.hh" +#include "../../../hb-ot-layout-common.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct PosLookup : Lookup +{ + using SubTable = PosLookupSubTable; + + const SubTable& get_subtable (unsigned int i) const + { return Lookup::get_subtable<SubTable> (i); } + + bool is_reverse () const + { + return false; + } + + bool apply (hb_ot_apply_context_t *c) const + { + TRACE_APPLY (this); + return_trace (dispatch (c)); + } + + bool intersects (const hb_set_t *glyphs) const + { + hb_intersects_context_t c (glyphs); + return dispatch (&c); + } + + hb_collect_glyphs_context_t::return_t collect_glyphs (hb_collect_glyphs_context_t *c) const + { return dispatch (c); } + + hb_closure_lookups_context_t::return_t closure_lookups (hb_closure_lookups_context_t *c, unsigned this_index) const + { + if (c->is_lookup_visited (this_index)) + return hb_closure_lookups_context_t::default_return_value (); + + c->set_lookup_visited (this_index); + if (!intersects (c->glyphs)) + { + c->set_lookup_inactive (this_index); + return hb_closure_lookups_context_t::default_return_value (); + } + + hb_closure_lookups_context_t::return_t ret = dispatch (c); + return ret; + } + + template <typename set_t> + void collect_coverage (set_t *glyphs) const + { + hb_collect_coverage_context_t<set_t> c (glyphs); + dispatch (&c); + } + + template <typename context_t> + static typename context_t::return_t dispatch_recurse_func (context_t *c, unsigned int lookup_index); + + 
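PairPosFormat2 stores its values as a dense class1Count × class2Count matrix of records, each record holding len1 + len2 16-bit values; both subset() and compute_effective_value_formats() above index into it with the same expression. A self-contained sketch of that addressing (counts and lengths here are made up for illustration):

    #include <cstdio>

    // Index of the first 16-bit value of the (class1, class2) record in the
    // flattened PairPosFormat2 value matrix, mirroring the expression above.
    static unsigned record_index (unsigned class1, unsigned class2,
                                  unsigned class2_count,
                                  unsigned len1, unsigned len2)
    {
      return (class1 * class2_count + class2) * (len1 + len2);
    }

    int main ()
    {
      // Hypothetical table: 3 first-glyph classes, 4 second-glyph classes,
      // valueFormat1 has 2 fields, valueFormat2 has 1 field.
      unsigned idx = record_index (2, 1, 4, 2, 1);
      printf ("record at value[%u]; second value record at value[%u]\n",
              idx, idx + 2 /* skip the len1 values of the first record */);
    }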
template <typename context_t, typename ...Ts> + typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const + { return Lookup::dispatch<SubTable> (c, std::forward<Ts> (ds)...); } + + bool subset (hb_subset_context_t *c) const + { return Lookup::subset<SubTable> (c); } + + bool sanitize (hb_sanitize_context_t *c) const + { return Lookup::sanitize<SubTable> (c); } +}; + +} +} +} + +#endif /* OT_LAYOUT_GPOS_POSLOOKUP_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/PosLookupSubTable.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/PosLookupSubTable.hh new file mode 100644 index 0000000000..c19fbc323f --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/PosLookupSubTable.hh @@ -0,0 +1,79 @@ +#ifndef OT_LAYOUT_GPOS_POSLOOKUPSUBTABLE_HH +#define OT_LAYOUT_GPOS_POSLOOKUPSUBTABLE_HH + +#include "SinglePos.hh" +#include "PairPos.hh" +#include "CursivePos.hh" +#include "MarkBasePos.hh" +#include "MarkLigPos.hh" +#include "MarkMarkPos.hh" +#include "ContextPos.hh" +#include "ChainContextPos.hh" +#include "ExtensionPos.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct PosLookupSubTable +{ + friend struct ::OT::Lookup; + friend struct PosLookup; + + enum Type { + Single = 1, + Pair = 2, + Cursive = 3, + MarkBase = 4, + MarkLig = 5, + MarkMark = 6, + Context = 7, + ChainContext = 8, + Extension = 9 + }; + + template <typename context_t, typename ...Ts> + typename context_t::return_t dispatch (context_t *c, unsigned int lookup_type, Ts&&... ds) const + { + TRACE_DISPATCH (this, lookup_type); + switch (lookup_type) { + case Single: return_trace (u.single.dispatch (c, std::forward<Ts> (ds)...)); + case Pair: return_trace (u.pair.dispatch (c, std::forward<Ts> (ds)...)); + case Cursive: return_trace (u.cursive.dispatch (c, std::forward<Ts> (ds)...)); + case MarkBase: return_trace (u.markBase.dispatch (c, std::forward<Ts> (ds)...)); + case MarkLig: return_trace (u.markLig.dispatch (c, std::forward<Ts> (ds)...)); + case MarkMark: return_trace (u.markMark.dispatch (c, std::forward<Ts> (ds)...)); + case Context: return_trace (u.context.dispatch (c, std::forward<Ts> (ds)...)); + case ChainContext: return_trace (u.chainContext.dispatch (c, std::forward<Ts> (ds)...)); + case Extension: return_trace (u.extension.dispatch (c, std::forward<Ts> (ds)...)); + default: return_trace (c->default_return_value ()); + } + } + + bool intersects (const hb_set_t *glyphs, unsigned int lookup_type) const + { + hb_intersects_context_t c (glyphs); + return dispatch (&c, lookup_type); + } + + protected: + union { + SinglePos single; + PairPos pair; + CursivePos cursive; + MarkBasePos markBase; + MarkLigPos markLig; + MarkMarkPos markMark; + ContextPos context; + ChainContextPos chainContext; + ExtensionPos extension; + } u; + public: + DEFINE_SIZE_MIN (0); +}; + +} +} +} + +#endif /* HB_OT_LAYOUT_GPOS_POSLOOKUPSUBTABLE_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/SinglePos.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/SinglePos.hh new file mode 100644 index 0000000000..57e146befd --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/SinglePos.hh @@ -0,0 +1,98 @@ +#ifndef OT_LAYOUT_GPOS_SINGLEPOS_HH +#define OT_LAYOUT_GPOS_SINGLEPOS_HH + +#include "SinglePosFormat1.hh" +#include "SinglePosFormat2.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct SinglePos +{ + protected: + union { + HBUINT16 format; /* Format identifier */ + SinglePosFormat1 format1; + SinglePosFormat2 format2; + } u; + + public: + template<typename Iterator, + hb_requires 
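PosLookupSubTable above is the usual OpenType tagged-union pattern: the lookup type acts as a discriminator that selects which union member to dispatch into, with unknown types falling through to a harmless default. Stripped of the HarfBuzz context machinery, the shape is roughly this (a toy sketch; HarfBuzz relies on the members being layout-compatible views over the same bytes):

    #include <cstdio>

    struct Single { void apply () const { puts ("single pos"); } };
    struct Pair   { void apply () const { puts ("pair pos"); } };

    struct SubTable
    {
      enum Type { TYPE_SINGLE = 1, TYPE_PAIR = 2 };
      union { Single single; Pair pair; } u;

      void dispatch (unsigned lookup_type) const
      {
        switch (lookup_type) {
        case TYPE_SINGLE: u.single.apply (); break;
        case TYPE_PAIR:   u.pair.apply ();   break;
        default:          break;  // unknown type: no-op, like default_return_value()
        }
      }
    };

    int main () { SubTable t; t.dispatch (SubTable::TYPE_PAIR); }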
(hb_is_iterator (Iterator))> + unsigned get_format (Iterator glyph_val_iter_pairs) + { + hb_array_t<const Value> first_val_iter = hb_second (*glyph_val_iter_pairs); + + for (const auto iter : glyph_val_iter_pairs) + for (const auto _ : hb_zip (iter.second, first_val_iter)) + if (_.first != _.second) + return 2; + + return 1; + } + + template<typename Iterator, + typename SrcLookup, + hb_requires (hb_is_iterator (Iterator))> + void serialize (hb_serialize_context_t *c, + const SrcLookup* src, + Iterator glyph_val_iter_pairs, + const hb_map_t *layout_variation_idx_map) + { + if (unlikely (!c->extend_min (u.format))) return; + unsigned format = 2; + ValueFormat new_format = src->get_value_format (); + + if (glyph_val_iter_pairs) + { + format = get_format (glyph_val_iter_pairs); + new_format = src->get_value_format ().get_effective_format (+ glyph_val_iter_pairs | hb_map (hb_second)); + } + + u.format = format; + switch (u.format) { + case 1: u.format1.serialize (c, + src, + glyph_val_iter_pairs, + new_format, + layout_variation_idx_map); + return; + case 2: u.format2.serialize (c, + src, + glyph_val_iter_pairs, + new_format, + layout_variation_idx_map); + return; + default:return; + } + } + + template <typename context_t, typename ...Ts> + typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const + { + TRACE_DISPATCH (this, u.format); + if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); + switch (u.format) { + case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); + case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...)); + default:return_trace (c->default_return_value ()); + } + } +}; + + +template<typename Iterator, typename SrcLookup> +static void +SinglePos_serialize (hb_serialize_context_t *c, + const SrcLookup *src, + Iterator it, + const hb_map_t *layout_variation_idx_map) +{ c->start_embed<SinglePos> ()->serialize (c, src, it, layout_variation_idx_map); } + + +} +} +} + +#endif /* OT_LAYOUT_GPOS_SINGLEPOS_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/SinglePosFormat1.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/SinglePosFormat1.hh new file mode 100644 index 0000000000..8b7840ed0e --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/SinglePosFormat1.hh @@ -0,0 +1,124 @@ +#ifndef OT_LAYOUT_GPOS_SINGLEPOSFORMAT1_HH +#define OT_LAYOUT_GPOS_SINGLEPOSFORMAT1_HH + +#include "Common.hh" +#include "ValueFormat.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct SinglePosFormat1 +{ + protected: + HBUINT16 format; /* Format identifier--format = 1 */ + Offset16To<Coverage> + coverage; /* Offset to Coverage table--from + * beginning of subtable */ + ValueFormat valueFormat; /* Defines the types of data in the + * ValueRecord */ + ValueRecord values; /* Defines positioning + * value(s)--applied to all glyphs in + * the Coverage table */ + public: + DEFINE_SIZE_ARRAY (6, values); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + return_trace (c->check_struct (this) && + coverage.sanitize (c, this) && + valueFormat.sanitize_value (c, this, values)); + } + + bool intersects (const hb_set_t *glyphs) const + { return (this+coverage).intersects (glyphs); } + + void closure_lookups (hb_closure_lookups_context_t *c) const {} + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + if (!valueFormat.has_device ()) return; + + auto it = + + hb_iter (this+coverage) + | hb_filter (c->glyph_set) + ; + + if (!it) 
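SinglePos::get_format above chooses the compact format 1 only when every glyph's value record is identical to the first one; any mismatch forces the per-glyph format 2. The same decision in isolation, with plain containers standing in for the HarfBuzz iterators:

    #include <vector>
    #include <cstdio>

    // Returns 1 if all value records are identical (one shared record suffices,
    // as in SinglePosFormat1), else 2 (one record per glyph, SinglePosFormat2).
    static unsigned choose_single_pos_format (const std::vector<std::vector<int>> &records)
    {
      if (records.empty ()) return 1;
      for (const auto &rec : records)
        if (rec != records.front ())
          return 2;
      return 1;
    }

    int main ()
    {
      printf ("%u\n", choose_single_pos_format ({{10, 0}, {10, 0}}));  // 1
      printf ("%u\n", choose_single_pos_format ({{10, 0}, {12, 0}}));  // 2
    }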
return; + valueFormat.collect_variation_indices (c, this, values.as_array (valueFormat.get_len ())); + } + + void collect_glyphs (hb_collect_glyphs_context_t *c) const + { if (unlikely (!(this+coverage).collect_coverage (c->input))) return; } + + const Coverage &get_coverage () const { return this+coverage; } + + ValueFormat get_value_format () const { return valueFormat; } + + bool apply (hb_ot_apply_context_t *c) const + { + TRACE_APPLY (this); + hb_buffer_t *buffer = c->buffer; + unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint); + if (likely (index == NOT_COVERED)) return_trace (false); + + valueFormat.apply_value (c, this, values, buffer->cur_pos()); + + buffer->idx++; + return_trace (true); + } + + template<typename Iterator, + typename SrcLookup, + hb_requires (hb_is_iterator (Iterator))> + void serialize (hb_serialize_context_t *c, + const SrcLookup *src, + Iterator it, + ValueFormat newFormat, + const hb_map_t *layout_variation_idx_map) + { + if (unlikely (!c->extend_min (this))) return; + if (unlikely (!c->check_assign (valueFormat, + newFormat, + HB_SERIALIZE_ERROR_INT_OVERFLOW))) return; + + for (const hb_array_t<const Value>& _ : + it | hb_map (hb_second)) + { + src->get_value_format ().copy_values (c, newFormat, src, &_, layout_variation_idx_map); + // Only serialize the first entry in the iterator, the rest are assumed to + // be the same. + break; + } + + auto glyphs = + + it + | hb_map_retains_sorting (hb_first) + ; + + coverage.serialize_serialize (c, glyphs); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + const hb_map_t &glyph_map = *c->plan->glyph_map; + + auto it = + + hb_iter (this+coverage) + | hb_filter (glyphset) + | hb_map_retains_sorting (glyph_map) + | hb_zip (hb_repeat (values.as_array (valueFormat.get_len ()))) + ; + + bool ret = bool (it); + SinglePos_serialize (c->serializer, this, it, c->plan->layout_variation_idx_map); + return_trace (ret); + } +}; + +} +} +} + +#endif /* OT_LAYOUT_GPOS_SINGLEPOSFORMAT1_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/SinglePosFormat2.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/SinglePosFormat2.hh new file mode 100644 index 0000000000..0d038b4422 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/SinglePosFormat2.hh @@ -0,0 +1,140 @@ +#ifndef OT_LAYOUT_GPOS_SINGLEPOSFORMAT2_HH +#define OT_LAYOUT_GPOS_SINGLEPOSFORMAT2_HH + +#include "Common.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +struct SinglePosFormat2 +{ + protected: + HBUINT16 format; /* Format identifier--format = 2 */ + Offset16To<Coverage> + coverage; /* Offset to Coverage table--from + * beginning of subtable */ + ValueFormat valueFormat; /* Defines the types of data in the + * ValueRecord */ + HBUINT16 valueCount; /* Number of ValueRecords */ + ValueRecord values; /* Array of ValueRecords--positioning + * values applied to glyphs */ + public: + DEFINE_SIZE_ARRAY (8, values); + + bool sanitize (hb_sanitize_context_t *c) const + { + TRACE_SANITIZE (this); + return_trace (c->check_struct (this) && + coverage.sanitize (c, this) && + valueFormat.sanitize_values (c, this, values, valueCount)); + } + + bool intersects (const hb_set_t *glyphs) const + { return (this+coverage).intersects (glyphs); } + + void closure_lookups (hb_closure_lookups_context_t *c) const {} + void collect_variation_indices (hb_collect_variation_indices_context_t *c) const + { + if (!valueFormat.has_device ()) return; + + auto it = + + hb_zip 
(this+coverage, hb_range ((unsigned) valueCount)) + | hb_filter (c->glyph_set, hb_first) + ; + + if (!it) return; + + unsigned sub_length = valueFormat.get_len (); + const hb_array_t<const Value> values_array = values.as_array (valueCount * sub_length); + + for (unsigned i : + it + | hb_map (hb_second)) + valueFormat.collect_variation_indices (c, this, values_array.sub_array (i * sub_length, sub_length)); + + } + + void collect_glyphs (hb_collect_glyphs_context_t *c) const + { if (unlikely (!(this+coverage).collect_coverage (c->input))) return; } + + const Coverage &get_coverage () const { return this+coverage; } + + ValueFormat get_value_format () const { return valueFormat; } + + bool apply (hb_ot_apply_context_t *c) const + { + TRACE_APPLY (this); + hb_buffer_t *buffer = c->buffer; + unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint); + if (likely (index == NOT_COVERED)) return_trace (false); + + if (likely (index >= valueCount)) return_trace (false); + + valueFormat.apply_value (c, this, + &values[index * valueFormat.get_len ()], + buffer->cur_pos()); + + buffer->idx++; + return_trace (true); + } + + template<typename Iterator, + typename SrcLookup, + hb_requires (hb_is_iterator (Iterator))> + void serialize (hb_serialize_context_t *c, + const SrcLookup *src, + Iterator it, + ValueFormat newFormat, + const hb_map_t *layout_variation_idx_map) + { + auto out = c->extend_min (this); + if (unlikely (!out)) return; + if (unlikely (!c->check_assign (valueFormat, newFormat, HB_SERIALIZE_ERROR_INT_OVERFLOW))) return; + if (unlikely (!c->check_assign (valueCount, it.len (), HB_SERIALIZE_ERROR_ARRAY_OVERFLOW))) return; + + + it + | hb_map (hb_second) + | hb_apply ([&] (hb_array_t<const Value> _) + { src->get_value_format ().copy_values (c, newFormat, src, &_, layout_variation_idx_map); }) + ; + + auto glyphs = + + it + | hb_map_retains_sorting (hb_first) + ; + + coverage.serialize_serialize (c, glyphs); + } + + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + const hb_set_t &glyphset = *c->plan->glyphset_gsub (); + const hb_map_t &glyph_map = *c->plan->glyph_map; + + unsigned sub_length = valueFormat.get_len (); + auto values_array = values.as_array (valueCount * sub_length); + + auto it = + + hb_zip (this+coverage, hb_range ((unsigned) valueCount)) + | hb_filter (glyphset, hb_first) + | hb_map_retains_sorting ([&] (const hb_pair_t<hb_codepoint_t, unsigned>& _) + { + return hb_pair (glyph_map[_.first], + values_array.sub_array (_.second * sub_length, + sub_length)); + }) + ; + + bool ret = bool (it); + SinglePos_serialize (c->serializer, this, it, c->plan->layout_variation_idx_map); + return_trace (ret); + } +}; + + +} +} +} + +#endif /* OT_LAYOUT_GPOS_SINGLEPOSFORMAT2_HH */ diff --git a/thirdparty/harfbuzz/src/OT/Layout/GPOS/ValueFormat.hh b/thirdparty/harfbuzz/src/OT/Layout/GPOS/ValueFormat.hh new file mode 100644 index 0000000000..b29f287bce --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/Layout/GPOS/ValueFormat.hh @@ -0,0 +1,329 @@ +#ifndef OT_LAYOUT_GPOS_VALUEFORMAT_HH +#define OT_LAYOUT_GPOS_VALUEFORMAT_HH + +#include "../../../hb-ot-layout-gsubgpos.hh" + +namespace OT { +namespace Layout { +namespace GPOS_impl { + +typedef HBUINT16 Value; + +typedef UnsizedArrayOf<Value> ValueRecord; + +struct ValueFormat : HBUINT16 +{ + enum Flags { + xPlacement = 0x0001u, /* Includes horizontal adjustment for placement */ + yPlacement = 0x0002u, /* Includes vertical adjustment for placement */ + xAdvance = 0x0004u, /* Includes horizontal adjustment for 
advance */ + yAdvance = 0x0008u, /* Includes vertical adjustment for advance */ + xPlaDevice = 0x0010u, /* Includes horizontal Device table for placement */ + yPlaDevice = 0x0020u, /* Includes vertical Device table for placement */ + xAdvDevice = 0x0040u, /* Includes horizontal Device table for advance */ + yAdvDevice = 0x0080u, /* Includes vertical Device table for advance */ + ignored = 0x0F00u, /* Was used in TrueType Open for MM fonts */ + reserved = 0xF000u, /* For future use */ + + devices = 0x00F0u /* Mask for having any Device table */ + }; + +/* All fields are optional. Only those available advance the value pointer. */ +#if 0 + HBINT16 xPlacement; /* Horizontal adjustment for + * placement--in design units */ + HBINT16 yPlacement; /* Vertical adjustment for + * placement--in design units */ + HBINT16 xAdvance; /* Horizontal adjustment for + * advance--in design units (only used + * for horizontal writing) */ + HBINT16 yAdvance; /* Vertical adjustment for advance--in + * design units (only used for vertical + * writing) */ + Offset16To<Device> xPlaDevice; /* Offset to Device table for + * horizontal placement--measured from + * beginning of PosTable (may be NULL) */ + Offset16To<Device> yPlaDevice; /* Offset to Device table for vertical + * placement--measured from beginning + * of PosTable (may be NULL) */ + Offset16To<Device> xAdvDevice; /* Offset to Device table for + * horizontal advance--measured from + * beginning of PosTable (may be NULL) */ + Offset16To<Device> yAdvDevice; /* Offset to Device table for vertical + * advance--measured from beginning of + * PosTable (may be NULL) */ +#endif + + IntType& operator = (uint16_t i) { v = i; return *this; } + + unsigned int get_len () const { return hb_popcount ((unsigned int) *this); } + unsigned int get_size () const { return get_len () * Value::static_size; } + + bool apply_value (hb_ot_apply_context_t *c, + const void *base, + const Value *values, + hb_glyph_position_t &glyph_pos) const + { + bool ret = false; + unsigned int format = *this; + if (!format) return ret; + + hb_font_t *font = c->font; + bool horizontal = +#ifndef HB_NO_VERTICAL + HB_DIRECTION_IS_HORIZONTAL (c->direction) +#else + true +#endif + ; + + if (format & xPlacement) glyph_pos.x_offset += font->em_scale_x (get_short (values++, &ret)); + if (format & yPlacement) glyph_pos.y_offset += font->em_scale_y (get_short (values++, &ret)); + if (format & xAdvance) { + if (likely (horizontal)) glyph_pos.x_advance += font->em_scale_x (get_short (values, &ret)); + values++; + } + /* y_advance values grow downward but font-space grows upward, hence negation */ + if (format & yAdvance) { + if (unlikely (!horizontal)) glyph_pos.y_advance -= font->em_scale_y (get_short (values, &ret)); + values++; + } + + if (!has_device ()) return ret; + + bool use_x_device = font->x_ppem || font->num_coords; + bool use_y_device = font->y_ppem || font->num_coords; + + if (!use_x_device && !use_y_device) return ret; + + const VariationStore &store = c->var_store; + auto *cache = c->var_store_cache; + + /* pixel -> fractional pixel */ + if (format & xPlaDevice) { + if (use_x_device) glyph_pos.x_offset += (base + get_device (values, &ret)).get_x_delta (font, store, cache); + values++; + } + if (format & yPlaDevice) { + if (use_y_device) glyph_pos.y_offset += (base + get_device (values, &ret)).get_y_delta (font, store, cache); + values++; + } + if (format & xAdvDevice) { + if (horizontal && use_x_device) glyph_pos.x_advance += (base + get_device (values, &ret)).get_x_delta (font, store, cache);
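ValueFormat is just a 16-bit flag word: get_len() above is a popcount of the set flags, and get_size() scales it by the 2-byte Value size. The same computation standalone (flag values copied from the enum above; std::popcount needs C++20):

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    enum : uint16_t {
      xPlacement = 0x0001u,
      xAdvance   = 0x0004u,
      xPlaDevice = 0x0010u,
    };

    int main ()
    {
      uint16_t format = xPlacement | xAdvance | xPlaDevice;
      unsigned len  = std::popcount (format);  // 3 fields present
      unsigned size = len * 2;                 // each Value is a 16-bit slot
      printf ("len=%u size=%u bytes\n", len, size);
    }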
+ values++; + } + if (format & yAdvDevice) { + /* y_advance values grow downward but font-space grows upward, hence negation */ + if (!horizontal && use_y_device) glyph_pos.y_advance -= (base + get_device (values, &ret)).get_y_delta (font, store, cache); + values++; + } + return ret; + } + + unsigned int get_effective_format (const Value *values) const + { + unsigned int format = *this; + for (unsigned flag = xPlacement; flag <= yAdvDevice; flag = flag << 1) { + if (format & flag) should_drop (*values++, (Flags) flag, &format); + } + + return format; + } + + template<typename Iterator, + hb_requires (hb_is_iterator (Iterator))> + unsigned int get_effective_format (Iterator it) const { + unsigned int new_format = 0; + + for (const hb_array_t<const Value>& values : it) + new_format = new_format | get_effective_format (&values); + + return new_format; + } + + void copy_values (hb_serialize_context_t *c, + unsigned int new_format, + const void *base, + const Value *values, + const hb_map_t *layout_variation_idx_map) const + { + unsigned int format = *this; + if (!format) return; + + if (format & xPlacement) copy_value (c, new_format, xPlacement, *values++); + if (format & yPlacement) copy_value (c, new_format, yPlacement, *values++); + if (format & xAdvance) copy_value (c, new_format, xAdvance, *values++); + if (format & yAdvance) copy_value (c, new_format, yAdvance, *values++); + + if (format & xPlaDevice) copy_device (c, base, values++, layout_variation_idx_map); + if (format & yPlaDevice) copy_device (c, base, values++, layout_variation_idx_map); + if (format & xAdvDevice) copy_device (c, base, values++, layout_variation_idx_map); + if (format & yAdvDevice) copy_device (c, base, values++, layout_variation_idx_map); + } + + void copy_value (hb_serialize_context_t *c, + unsigned int new_format, + Flags flag, + Value value) const + { + // Filter by new format. 
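get_effective_format() above walks the value record once in flag order, clearing each flag whose stored value is zero, so the subsetter can serialize a narrower format. The core loop in isolation (a sketch; the real code folds the drop into should_drop()):

    #include <cstdint>
    #include <cstdio>

    // Drop flags whose corresponding stored value is 0, walking flags in the
    // same bit order the values are laid out in.
    static unsigned effective_format (unsigned format, const uint16_t *values,
                                      unsigned first_flag, unsigned last_flag)
    {
      for (unsigned flag = first_flag; flag <= last_flag; flag <<= 1)
        if (format & flag)
        {
          if (!*values) format &= ~flag;  // value is zero: field not needed
          values++;                       // field was present, advance regardless
        }
      return format;
    }

    int main ()
    {
      uint16_t values[] = { 0, 50 };  // xPlacement = 0, yPlacement = 50
      unsigned fmt = effective_format (0x0003u, values, 0x0001u, 0x0002u);
      printf ("0x%04x\n", fmt);       // 0x0002: xPlacement dropped
    }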
+ if (!(new_format & flag)) return; + c->copy (value); + } + + void collect_variation_indices (hb_collect_variation_indices_context_t *c, + const void *base, + const hb_array_t<const Value>& values) const + { + unsigned format = *this; + unsigned i = 0; + if (format & xPlacement) i++; + if (format & yPlacement) i++; + if (format & xAdvance) i++; + if (format & yAdvance) i++; + if (format & xPlaDevice) + { + (base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices); + i++; + } + + if (format & ValueFormat::yPlaDevice) + { + (base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices); + i++; + } + + if (format & ValueFormat::xAdvDevice) + { + + (base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices); + i++; + } + + if (format & ValueFormat::yAdvDevice) + { + + (base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices); + i++; + } + } + + private: + bool sanitize_value_devices (hb_sanitize_context_t *c, const void *base, const Value *values) const + { + unsigned int format = *this; + + if (format & xPlacement) values++; + if (format & yPlacement) values++; + if (format & xAdvance) values++; + if (format & yAdvance) values++; + + if ((format & xPlaDevice) && !get_device (values++).sanitize (c, base)) return false; + if ((format & yPlaDevice) && !get_device (values++).sanitize (c, base)) return false; + if ((format & xAdvDevice) && !get_device (values++).sanitize (c, base)) return false; + if ((format & yAdvDevice) && !get_device (values++).sanitize (c, base)) return false; + + return true; + } + + static inline Offset16To<Device>& get_device (Value* value) + { + return *static_cast<Offset16To<Device> *> (value); + } + static inline const Offset16To<Device>& get_device (const Value* value, bool *worked=nullptr) + { + if (worked) *worked |= bool (*value); + return *static_cast<const Offset16To<Device> *> (value); + } + + bool copy_device (hb_serialize_context_t *c, const void *base, + const Value *src_value, const hb_map_t *layout_variation_idx_map) const + { + Value *dst_value = c->copy (*src_value); + + if (!dst_value) return false; + if (*dst_value == 0) return true; + + *dst_value = 0; + c->push (); + if ((base + get_device (src_value)).copy (c, layout_variation_idx_map)) + { + c->add_link (*dst_value, c->pop_pack ()); + return true; + } + else + { + c->pop_discard (); + return false; + } + } + + static inline const HBINT16& get_short (const Value* value, bool *worked=nullptr) + { + if (worked) *worked |= bool (*value); + return *reinterpret_cast<const HBINT16 *> (value); + } + + public: + + bool has_device () const + { + unsigned int format = *this; + return (format & devices) != 0; + } + + bool sanitize_value (hb_sanitize_context_t *c, const void *base, const Value *values) const + { + TRACE_SANITIZE (this); + return_trace (c->check_range (values, get_size ()) && (!has_device () || sanitize_value_devices (c, base, values))); + } + + bool sanitize_values (hb_sanitize_context_t *c, const void *base, const Value *values, unsigned int count) const + { + TRACE_SANITIZE (this); + unsigned int len = get_len (); + + if (!c->check_range (values, count, get_size ())) return_trace (false); + + if (!has_device ()) return_trace (true); + + for (unsigned int i = 0; i < count; i++) { + if (!sanitize_value_devices (c, base, values)) + return_trace (false); + values += len; + } + + return_trace (true); + } + + /* Just sanitize referenced Device tables. 
Doesn't check the values themselves. */ + bool sanitize_values_stride_unsafe (hb_sanitize_context_t *c, const void *base, const Value *values, unsigned int count, unsigned int stride) const + { + TRACE_SANITIZE (this); + + if (!has_device ()) return_trace (true); + + for (unsigned int i = 0; i < count; i++) { + if (!sanitize_value_devices (c, base, values)) + return_trace (false); + values += stride; + } + + return_trace (true); + } + + private: + + void should_drop (Value value, Flags flag, unsigned int* format) const + { + if (value) return; + *format = *format & ~flag; + } + +}; + +} +} +} + +#endif // #ifndef OT_LAYOUT_GPOS_VALUEFORMAT_HH diff --git a/thirdparty/harfbuzz/src/OT/Layout/GSUB/GSUB.hh b/thirdparty/harfbuzz/src/OT/Layout/GSUB/GSUB.hh index ad153ce8d7..3f0c4b2ad9 100644 --- a/thirdparty/harfbuzz/src/OT/Layout/GSUB/GSUB.hh +++ b/thirdparty/harfbuzz/src/OT/Layout/GSUB/GSUB.hh @@ -19,6 +19,8 @@ namespace GSUB { struct GSUB : GSUBGPOS { + using Lookup = SubstLookup; + static constexpr hb_tag_t tableTag = HB_OT_TAG_GSUB; const SubstLookup& get_lookup (unsigned int i) const diff --git a/thirdparty/harfbuzz/src/OT/Layout/GSUB/SubstLookup.hh b/thirdparty/harfbuzz/src/OT/Layout/GSUB/SubstLookup.hh index 3419b5a734..8fb3b55097 100644 --- a/thirdparty/harfbuzz/src/OT/Layout/GSUB/SubstLookup.hh +++ b/thirdparty/harfbuzz/src/OT/Layout/GSUB/SubstLookup.hh @@ -10,7 +10,7 @@ namespace GSUB { struct SubstLookup : Lookup { - typedef SubstLookupSubTable SubTable; + using SubTable = SubstLookupSubTable; bool sanitize (hb_sanitize_context_t *c) const { return Lookup::sanitize<SubTable> (c); } @@ -73,8 +73,6 @@ struct SubstLookup : Lookup return hb_closure_lookups_context_t::default_return_value (); } - c->set_recurse_func (dispatch_closure_lookups_recurse_func); - hb_closure_lookups_context_t::return_t ret = dispatch (c); return ret; } @@ -100,8 +98,6 @@ struct SubstLookup : Lookup return dispatch (c); } - static inline bool apply_recurse_func (hb_ot_apply_context_t *c, unsigned int lookup_index); - bool serialize_single (hb_serialize_context_t *c, uint32_t lookup_props, hb_sorted_array_t<const HBGlyphID16> glyphs, @@ -206,8 +202,6 @@ struct SubstLookup : Lookup return ret; } - HB_INTERNAL static hb_closure_lookups_context_t::return_t dispatch_closure_lookups_recurse_func (hb_closure_lookups_context_t *c, unsigned lookup_index); - template <typename context_t, typename ...Ts> typename context_t::return_t dispatch (context_t *c, Ts&&... 
ds) const { return Lookup::dispatch<SubTable> (c, std::forward<Ts> (ds)...); } diff --git a/thirdparty/harfbuzz/src/OT/glyf/CompositeGlyph.hh b/thirdparty/harfbuzz/src/OT/glyf/CompositeGlyph.hh new file mode 100644 index 0000000000..c145beaa49 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/glyf/CompositeGlyph.hh @@ -0,0 +1,258 @@ +#ifndef OT_GLYF_COMPOSITEGLYPH_HH +#define OT_GLYF_COMPOSITEGLYPH_HH + + +#include "../../hb-open-type.hh" + + +namespace OT { +namespace glyf_impl { + + +struct CompositeGlyphRecord +{ + protected: + enum composite_glyph_flag_t + { + ARG_1_AND_2_ARE_WORDS = 0x0001, + ARGS_ARE_XY_VALUES = 0x0002, + ROUND_XY_TO_GRID = 0x0004, + WE_HAVE_A_SCALE = 0x0008, + MORE_COMPONENTS = 0x0020, + WE_HAVE_AN_X_AND_Y_SCALE = 0x0040, + WE_HAVE_A_TWO_BY_TWO = 0x0080, + WE_HAVE_INSTRUCTIONS = 0x0100, + USE_MY_METRICS = 0x0200, + OVERLAP_COMPOUND = 0x0400, + SCALED_COMPONENT_OFFSET = 0x0800, + UNSCALED_COMPONENT_OFFSET = 0x1000 + }; + + public: + unsigned int get_size () const + { + unsigned int size = min_size; + /* arg1 and 2 are int16 */ + if (flags & ARG_1_AND_2_ARE_WORDS) size += 4; + /* arg1 and 2 are int8 */ + else size += 2; + + /* One x 16 bit (scale) */ + if (flags & WE_HAVE_A_SCALE) size += 2; + /* Two x 16 bit (xscale, yscale) */ + else if (flags & WE_HAVE_AN_X_AND_Y_SCALE) size += 4; + /* Four x 16 bit (xscale, scale01, scale10, yscale) */ + else if (flags & WE_HAVE_A_TWO_BY_TWO) size += 8; + + return size; + } + + void drop_instructions_flag () { flags = (uint16_t) flags & ~WE_HAVE_INSTRUCTIONS; } + void set_overlaps_flag () + { + flags = (uint16_t) flags | OVERLAP_COMPOUND; + } + + bool has_instructions () const { return flags & WE_HAVE_INSTRUCTIONS; } + + bool has_more () const { return flags & MORE_COMPONENTS; } + bool is_use_my_metrics () const { return flags & USE_MY_METRICS; } + bool is_anchored () const { return !(flags & ARGS_ARE_XY_VALUES); } + void get_anchor_points (unsigned int &point1, unsigned int &point2) const + { + const HBUINT8 *p = &StructAfter<const HBUINT8> (glyphIndex); + if (flags & ARG_1_AND_2_ARE_WORDS) + { + point1 = ((const HBUINT16 *) p)[0]; + point2 = ((const HBUINT16 *) p)[1]; + } + else + { + point1 = p[0]; + point2 = p[1]; + } + } + + void transform_points (contour_point_vector_t &points) const + { + float matrix[4]; + contour_point_t trans; + if (get_transformation (matrix, trans)) + { + if (scaled_offsets ()) + { + points.translate (trans); + points.transform (matrix); + } + else + { + points.transform (matrix); + points.translate (trans); + } + } + } + + protected: + bool scaled_offsets () const + { return (flags & (SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET)) == SCALED_COMPONENT_OFFSET; } + + bool get_transformation (float (&matrix)[4], contour_point_t &trans) const + { + matrix[0] = matrix[3] = 1.f; + matrix[1] = matrix[2] = 0.f; + + int tx, ty; + const HBINT8 *p = &StructAfter<const HBINT8> (glyphIndex); + if (flags & ARG_1_AND_2_ARE_WORDS) + { + tx = *(const HBINT16 *) p; + p += HBINT16::static_size; + ty = *(const HBINT16 *) p; + p += HBINT16::static_size; + } + else + { + tx = *p++; + ty = *p++; + } + if (is_anchored ()) tx = ty = 0; + + trans.init ((float) tx, (float) ty); + + { + const F2DOT14 *points = (const F2DOT14 *) p; + if (flags & WE_HAVE_A_SCALE) + { + matrix[0] = matrix[3] = points[0].to_float (); + return true; + } + else if (flags & WE_HAVE_AN_X_AND_Y_SCALE) + { + matrix[0] = points[0].to_float (); + matrix[3] = points[1].to_float (); + return true; + } + else if (flags & WE_HAVE_A_TWO_BY_TWO) + { + 
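CompositeGlyphRecord::get_size() above derives the record length purely from the flag word: 4 bytes of header (flags + glyphIndex), 2 or 4 bytes of arguments, plus 0/2/4/8 bytes of transform data. The same computation standalone:

    #include <cstdio>

    enum {
      ARG_1_AND_2_ARE_WORDS    = 0x0001,
      WE_HAVE_A_SCALE          = 0x0008,
      WE_HAVE_AN_X_AND_Y_SCALE = 0x0040,
      WE_HAVE_A_TWO_BY_TWO     = 0x0080,
    };

    // Size in bytes of one composite component record, from its flags alone.
    static unsigned composite_record_size (unsigned flags)
    {
      unsigned size = 4;  // flags + glyphIndex, as in DEFINE_SIZE_MIN (4)
      size += (flags & ARG_1_AND_2_ARE_WORDS) ? 4 : 2;       // int16 vs int8 args
      if      (flags & WE_HAVE_A_SCALE)          size += 2;  // one F2DOT14
      else if (flags & WE_HAVE_AN_X_AND_Y_SCALE) size += 4;  // two F2DOT14
      else if (flags & WE_HAVE_A_TWO_BY_TWO)     size += 8;  // four F2DOT14
      return size;
    }

    int main ()
    {
      printf ("%u\n", composite_record_size (ARG_1_AND_2_ARE_WORDS | WE_HAVE_A_SCALE)); // 10
    }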
matrix[0] = points[0].to_float (); + matrix[1] = points[1].to_float (); + matrix[2] = points[2].to_float (); + matrix[3] = points[3].to_float (); + return true; + } + } + return tx || ty; + } + + public: + HBUINT16 flags; + HBGlyphID16 glyphIndex; + public: + DEFINE_SIZE_MIN (4); +}; + +struct composite_iter_t : hb_iter_with_fallback_t<composite_iter_t, const CompositeGlyphRecord &> +{ + typedef const CompositeGlyphRecord *__item_t__; + composite_iter_t (hb_bytes_t glyph_, __item_t__ current_) : + glyph (glyph_), current (nullptr), current_size (0) + { + set_current (current_); + } + + composite_iter_t () : glyph (hb_bytes_t ()), current (nullptr), current_size (0) {} + + item_t __item__ () const { return *current; } + bool __more__ () const { return current; } + void __next__ () + { + if (!current->has_more ()) { current = nullptr; return; } + + set_current (&StructAtOffset<CompositeGlyphRecord> (current, current_size)); + } + composite_iter_t __end__ () const { return composite_iter_t (); } + bool operator != (const composite_iter_t& o) const + { return current != o.current; } + + + void set_current (__item_t__ current_) + { + if (!glyph.check_range (current_, CompositeGlyphRecord::min_size)) + { + current = nullptr; + current_size = 0; + return; + } + unsigned size = current_->get_size (); + if (!glyph.check_range (current_, size)) + { + current = nullptr; + current_size = 0; + return; + } + + current = current_; + current_size = size; + } + + private: + hb_bytes_t glyph; + __item_t__ current; + unsigned current_size; +}; + +struct CompositeGlyph +{ + const GlyphHeader &header; + hb_bytes_t bytes; + CompositeGlyph (const GlyphHeader &header_, hb_bytes_t bytes_) : + header (header_), bytes (bytes_) {} + + composite_iter_t iter () const + { return composite_iter_t (bytes, &StructAfter<CompositeGlyphRecord, GlyphHeader> (header)); } + + unsigned int instructions_length (hb_bytes_t bytes) const + { + unsigned int start = bytes.length; + unsigned int end = bytes.length; + const CompositeGlyphRecord *last = nullptr; + for (auto &item : iter ()) + last = &item; + if (unlikely (!last)) return 0; + + if (last->has_instructions ()) + start = (char *) last - &bytes + last->get_size (); + if (unlikely (start > end)) return 0; + return end - start; + } + + /* Trimming for composites not implemented. + * If removing hints it falls out of that. 
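transform_points() above applies the component translation either before or after the 2×2 matrix, depending on whether SCALED_COMPONENT_OFFSET (and not UNSCALED_COMPONENT_OFFSET) is set; the two orders differ whenever the matrix is not the identity. A sketch with a single point, using the same column layout as contour_point_t's transform:

    #include <cstdio>

    struct Pt { float x, y; };

    static Pt transform (const float (&m)[4], Pt p)
    { return { m[0] * p.x + m[2] * p.y, m[1] * p.x + m[3] * p.y }; }

    static Pt translate (Pt p, Pt t) { return { p.x + t.x, p.y + t.y }; }

    int main ()
    {
      float scale_2x[4] = { 2.f, 0.f, 0.f, 2.f };
      Pt p = { 10.f, 0.f }, trans = { 5.f, 0.f };

      // Scaled component offset: translate first, then transform (offset scaled too).
      Pt a = transform (scale_2x, translate (p, trans));  // x = 30
      // Unscaled component offset: transform first, then translate.
      Pt b = translate (transform (scale_2x, p), trans);  // x = 25

      printf ("scaled offset x=%g, unscaled offset x=%g\n", a.x, b.x);
    }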
*/ + const hb_bytes_t trim_padding () const { return bytes; } + + void drop_hints () + { + for (const auto &_ : iter ()) + const_cast<CompositeGlyphRecord &> (_).drop_instructions_flag (); + } + + /* Chop instructions off the end */ + void drop_hints_bytes (hb_bytes_t &dest_start) const + { dest_start = bytes.sub_array (0, bytes.length - instructions_length (bytes)); } + + void set_overlaps_flag () + { + CompositeGlyphRecord& glyph_chain = const_cast<CompositeGlyphRecord &> ( + StructAfter<CompositeGlyphRecord, GlyphHeader> (header)); + if (!bytes.check_range(&glyph_chain, CompositeGlyphRecord::min_size)) + return; + glyph_chain.set_overlaps_flag (); + } +}; + + +} /* namespace glyf_impl */ +} /* namespace OT */ + + +#endif /* OT_GLYF_COMPOSITEGLYPH_HH */ diff --git a/thirdparty/harfbuzz/src/OT/glyf/Glyph.hh b/thirdparty/harfbuzz/src/OT/glyf/Glyph.hh new file mode 100644 index 0000000000..2199d2c48b --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/glyf/Glyph.hh @@ -0,0 +1,233 @@ +#ifndef OT_GLYF_GLYPH_HH +#define OT_GLYF_GLYPH_HH + + +#include "../../hb-open-type.hh" + +#include "GlyphHeader.hh" +#include "SimpleGlyph.hh" +#include "CompositeGlyph.hh" + + +namespace OT { + +struct glyf_accelerator_t; + +namespace glyf_impl { + + +enum phantom_point_index_t +{ + PHANTOM_LEFT = 0, + PHANTOM_RIGHT = 1, + PHANTOM_TOP = 2, + PHANTOM_BOTTOM = 3, + PHANTOM_COUNT = 4 +}; + +struct Glyph +{ + enum glyph_type_t { EMPTY, SIMPLE, COMPOSITE }; + + public: + composite_iter_t get_composite_iterator () const + { + if (type != COMPOSITE) return composite_iter_t (); + return CompositeGlyph (*header, bytes).iter (); + } + + const hb_bytes_t trim_padding () const + { + switch (type) { + case COMPOSITE: return CompositeGlyph (*header, bytes).trim_padding (); + case SIMPLE: return SimpleGlyph (*header, bytes).trim_padding (); + default: return bytes; + } + } + + void drop_hints () + { + switch (type) { + case COMPOSITE: CompositeGlyph (*header, bytes).drop_hints (); return; + case SIMPLE: SimpleGlyph (*header, bytes).drop_hints (); return; + default: return; + } + } + + void set_overlaps_flag () + { + switch (type) { + case COMPOSITE: CompositeGlyph (*header, bytes).set_overlaps_flag (); return; + case SIMPLE: SimpleGlyph (*header, bytes).set_overlaps_flag (); return; + default: return; + } + } + + void drop_hints_bytes (hb_bytes_t &dest_start, hb_bytes_t &dest_end) const + { + switch (type) { + case COMPOSITE: CompositeGlyph (*header, bytes).drop_hints_bytes (dest_start); return; + case SIMPLE: SimpleGlyph (*header, bytes).drop_hints_bytes (dest_start, dest_end); return; + default: return; + } + } + + /* Note: Recursively calls itself. + * all_points includes phantom points + */ + template <typename accelerator_t> + bool get_points (hb_font_t *font, const accelerator_t &glyf_accelerator, + contour_point_vector_t &all_points /* OUT */, + bool phantom_only = false, + unsigned int depth = 0) const + { + if (unlikely (depth > HB_MAX_NESTING_LEVEL)) return false; + contour_point_vector_t stack_points; + bool inplace = type == SIMPLE && all_points.length == 0; + /* Load into all_points if it's empty, as an optimization. */ + contour_point_vector_t &points = inplace ? 
all_points : stack_points; + + switch (type) { + case COMPOSITE: + { + /* pseudo component points for each component in composite glyph */ + unsigned num_points = hb_len (CompositeGlyph (*header, bytes).iter ()); + if (unlikely (!points.resize (num_points))) return false; + break; + } + case SIMPLE: + if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (points, phantom_only))) + return false; + break; + } + + /* Init phantom points */ + if (unlikely (!points.resize (points.length + PHANTOM_COUNT))) return false; + hb_array_t<contour_point_t> phantoms = points.sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT); + { + int h_delta = (int) header->xMin - + glyf_accelerator.hmtx->get_side_bearing (gid); + int v_orig = (int) header->yMax + +#ifndef HB_NO_VERTICAL + glyf_accelerator.vmtx->get_side_bearing (gid) +#else + 0 +#endif + ; + unsigned h_adv = glyf_accelerator.hmtx->get_advance (gid); + unsigned v_adv = +#ifndef HB_NO_VERTICAL + glyf_accelerator.vmtx->get_advance (gid) +#else + - font->face->get_upem () +#endif + ; + phantoms[PHANTOM_LEFT].x = h_delta; + phantoms[PHANTOM_RIGHT].x = h_adv + h_delta; + phantoms[PHANTOM_TOP].y = v_orig; + phantoms[PHANTOM_BOTTOM].y = v_orig - (int) v_adv; + } + +#ifndef HB_NO_VAR + glyf_accelerator.gvar->apply_deltas_to_points (gid, font, points.as_array ()); +#endif + + switch (type) { + case SIMPLE: + if (!inplace) + all_points.extend (points.as_array ()); + break; + case COMPOSITE: + { + contour_point_vector_t comp_points; + unsigned int comp_index = 0; + for (auto &item : get_composite_iterator ()) + { + comp_points.reset (); + if (unlikely (!glyf_accelerator.glyph_for_gid (item.glyphIndex) + .get_points (font, glyf_accelerator, comp_points, + phantom_only, depth + 1))) + return false; + + /* Copy phantom points from component if USE_MY_METRICS flag set */ + if (item.is_use_my_metrics ()) + for (unsigned int i = 0; i < PHANTOM_COUNT; i++) + phantoms[i] = comp_points[comp_points.length - PHANTOM_COUNT + i]; + + /* Apply component transformation & translation */ + item.transform_points (comp_points); + + /* Apply translation from gvar */ + comp_points.translate (points[comp_index]); + + if (item.is_anchored ()) + { + unsigned int p1, p2; + item.get_anchor_points (p1, p2); + if (likely (p1 < all_points.length && p2 < comp_points.length)) + { + contour_point_t delta; + delta.init (all_points[p1].x - comp_points[p2].x, + all_points[p1].y - comp_points[p2].y); + + comp_points.translate (delta); + } + } + + all_points.extend (comp_points.sub_array (0, comp_points.length - PHANTOM_COUNT)); + + comp_index++; + } + + all_points.extend (phantoms); + } break; + default: + all_points.extend (phantoms); + } + + if (depth == 0) /* Apply at top level */ + { + /* Undocumented rasterizer behavior: + * Shift points horizontally by the updated left side bearing + */ + contour_point_t delta; + delta.init (-phantoms[PHANTOM_LEFT].x, 0.f); + if (delta.x) all_points.translate (delta); + } + + return !all_points.in_error (); + } + + bool get_extents (hb_font_t *font, const glyf_accelerator_t &glyf_accelerator, + hb_glyph_extents_t *extents) const + { + if (type == EMPTY) return true; /* Empty glyph; zero extents. 
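The phantom-point block above synthesizes four extra points from the glyph's metrics (left/right encode the horizontal origin and advance, top/bottom the vertical ones), so gvar deltas can later move them like any contour point. The initial values computed standalone (the metrics are made-up numbers, not from a real font):

    #include <cstdio>

    struct Pt { float x = 0.f, y = 0.f; };

    int main ()
    {
      // Hypothetical metrics for one glyph, in font units.
      int x_min = 50, y_max = 700;         // from the glyph header bbox
      int lsb = 40, tsb = 30;              // side bearings from hmtx/vmtx
      unsigned h_adv = 600, v_adv = 1000;  // advances from hmtx/vmtx

      Pt phantoms[4];  // LEFT, RIGHT, TOP, BOTTOM
      int h_delta = x_min - lsb;
      int v_orig  = y_max + tsb;
      phantoms[0].x = (float) h_delta;                 // PHANTOM_LEFT
      phantoms[1].x = (float) (h_adv + h_delta);       // PHANTOM_RIGHT
      phantoms[2].y = (float) v_orig;                  // PHANTOM_TOP
      phantoms[3].y = (float) (v_orig - (int) v_adv);  // PHANTOM_BOTTOM

      printf ("L=%g R=%g T=%g B=%g\n",
              phantoms[0].x, phantoms[1].x, phantoms[2].y, phantoms[3].y);
    }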
*/ + return header->get_extents (font, glyf_accelerator, gid, extents); + } + + hb_bytes_t get_bytes () const { return bytes; } + + Glyph (hb_bytes_t bytes_ = hb_bytes_t (), + hb_codepoint_t gid_ = (hb_codepoint_t) -1) : bytes (bytes_), + header (bytes.as<GlyphHeader> ()), + gid (gid_) + { + int num_contours = header->numberOfContours; + if (unlikely (num_contours == 0)) type = EMPTY; + else if (num_contours > 0) type = SIMPLE; + else type = COMPOSITE; /* negative numbers */ + } + + protected: + hb_bytes_t bytes; + const GlyphHeader *header; + hb_codepoint_t gid; + unsigned type; +}; + + +} /* namespace glyf_impl */ +} /* namespace OT */ + + +#endif /* OT_GLYF_GLYPH_HH */ diff --git a/thirdparty/harfbuzz/src/OT/glyf/GlyphHeader.hh b/thirdparty/harfbuzz/src/OT/glyf/GlyphHeader.hh new file mode 100644 index 0000000000..792bd5478f --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/glyf/GlyphHeader.hh @@ -0,0 +1,48 @@ +#ifndef OT_GLYF_GLYPHHEADER_HH +#define OT_GLYF_GLYPHHEADER_HH + + +#include "../../hb-open-type.hh" + + +namespace OT { +namespace glyf_impl { + + +struct GlyphHeader +{ + bool has_data () const { return numberOfContours; } + + template <typename accelerator_t> + bool get_extents (hb_font_t *font, const accelerator_t &glyf_accelerator, + hb_codepoint_t gid, hb_glyph_extents_t *extents) const + { + /* Undocumented rasterizer behavior: shift glyph to the left by (lsb - xMin), i.e., xMin = lsb */ + /* extents->x_bearing = hb_min (glyph_header.xMin, glyph_header.xMax); */ + extents->x_bearing = font->em_scale_x (glyf_accelerator.hmtx->get_side_bearing (gid)); + extents->y_bearing = font->em_scale_y (hb_max (yMin, yMax)); + extents->width = font->em_scale_x (hb_max (xMin, xMax) - hb_min (xMin, xMax)); + extents->height = font->em_scale_y (hb_min (yMin, yMax) - hb_max (yMin, yMax)); + + return true; + } + + HBINT16 numberOfContours; + /* If the number of contours is + * greater than or equal to zero, + * this is a simple glyph; if negative, + * this is a composite glyph. */ + FWORD xMin; /* Minimum x for coordinate data. */ + FWORD yMin; /* Minimum y for coordinate data. */ + FWORD xMax; /* Maximum x for coordinate data. */ + FWORD yMax; /* Maximum y for coordinate data. 
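get_extents() above maps the glyph bounding box onto hb_glyph_extents_t conventions: y_bearing is the top edge and height runs top-to-bottom, so height comes out negative for a normal box. Ignoring the em-scaling and the lsb substitution for x_bearing (the rasterizer quirk noted in the comment), the mapping is:

    #include <algorithm>
    #include <cstdio>

    int main ()
    {
      // Bounding box in font units (min/max ordering not trusted, as above).
      int x_min = 50, x_max = 450, y_min = -120, y_max = 700;

      int x_bearing = std::min (x_min, x_max);              // left edge
      int y_bearing = std::max (y_min, y_max);              // top edge
      int width     = std::max (x_min, x_max) - x_bearing;
      int height    = std::min (y_min, y_max) - y_bearing;  // negative: extends downward

      printf ("bearing=(%d,%d) size=(%d,%d)\n", x_bearing, y_bearing, width, height);
    }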
*/ + public: + DEFINE_SIZE_STATIC (10); +}; + + +} /* namespace glyf_impl */ +} /* namespace OT */ + + +#endif /* OT_GLYF_GLYPHHEADER_HH */ diff --git a/thirdparty/harfbuzz/src/OT/glyf/SimpleGlyph.hh b/thirdparty/harfbuzz/src/OT/glyf/SimpleGlyph.hh new file mode 100644 index 0000000000..6df978cf13 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/glyf/SimpleGlyph.hh @@ -0,0 +1,216 @@ +#ifndef OT_GLYF_SIMPLEGLYPH_HH +#define OT_GLYF_SIMPLEGLYPH_HH + + +#include "../../hb-open-type.hh" + + +namespace OT { +namespace glyf_impl { + + +struct SimpleGlyph +{ + enum simple_glyph_flag_t + { + FLAG_ON_CURVE = 0x01, + FLAG_X_SHORT = 0x02, + FLAG_Y_SHORT = 0x04, + FLAG_REPEAT = 0x08, + FLAG_X_SAME = 0x10, + FLAG_Y_SAME = 0x20, + FLAG_OVERLAP_SIMPLE = 0x40, + FLAG_RESERVED2 = 0x80 + }; + + const GlyphHeader &header; + hb_bytes_t bytes; + SimpleGlyph (const GlyphHeader &header_, hb_bytes_t bytes_) : + header (header_), bytes (bytes_) {} + + unsigned int instruction_len_offset () const + { return GlyphHeader::static_size + 2 * header.numberOfContours; } + + unsigned int length (unsigned int instruction_len) const + { return instruction_len_offset () + 2 + instruction_len; } + + unsigned int instructions_length () const + { + unsigned int instruction_length_offset = instruction_len_offset (); + if (unlikely (instruction_length_offset + 2 > bytes.length)) return 0; + + const HBUINT16 &instructionLength = StructAtOffset<HBUINT16> (&bytes, instruction_length_offset); + /* Out of bounds of the current glyph */ + if (unlikely (length (instructionLength) > bytes.length)) return 0; + return instructionLength; + } + + const hb_bytes_t trim_padding () const + { + /* based on FontTools _g_l_y_f.py::trim */ + const uint8_t *glyph = (uint8_t*) bytes.arrayZ; + const uint8_t *glyph_end = glyph + bytes.length; + /* simple glyph w/contours, possibly trimmable */ + glyph += instruction_len_offset (); + + if (unlikely (glyph + 2 >= glyph_end)) return hb_bytes_t (); + unsigned int num_coordinates = StructAtOffset<HBUINT16> (glyph - 2, 0) + 1; + unsigned int num_instructions = StructAtOffset<HBUINT16> (glyph, 0); + + glyph += 2 + num_instructions; + + unsigned int coord_bytes = 0; + unsigned int coords_with_flags = 0; + while (glyph < glyph_end) + { + uint8_t flag = *glyph; + glyph++; + + unsigned int repeat = 1; + if (flag & FLAG_REPEAT) + { + if (unlikely (glyph >= glyph_end)) return hb_bytes_t (); + repeat = *glyph + 1; + glyph++; + } + + unsigned int xBytes, yBytes; + xBytes = yBytes = 0; + if (flag & FLAG_X_SHORT) xBytes = 1; + else if ((flag & FLAG_X_SAME) == 0) xBytes = 2; + + if (flag & FLAG_Y_SHORT) yBytes = 1; + else if ((flag & FLAG_Y_SAME) == 0) yBytes = 2; + + coord_bytes += (xBytes + yBytes) * repeat; + coords_with_flags += repeat; + if (coords_with_flags >= num_coordinates) break; + } + + if (unlikely (coords_with_flags != num_coordinates)) return hb_bytes_t (); + return bytes.sub_array (0, bytes.length + coord_bytes - (glyph_end - glyph)); + } + + /* zero instruction length */ + void drop_hints () + { + GlyphHeader &glyph_header = const_cast<GlyphHeader &> (header); + (HBUINT16 &) StructAtOffset<HBUINT16> (&glyph_header, instruction_len_offset ()) = 0; + } + + void drop_hints_bytes (hb_bytes_t &dest_start, hb_bytes_t &dest_end) const + { + unsigned int instructions_len = instructions_length (); + unsigned int glyph_length = length (instructions_len); + dest_start = bytes.sub_array (0, glyph_length - instructions_len); + dest_end = bytes.sub_array (glyph_length, bytes.length - glyph_length); + } + + void 
set_overlaps_flag () + { + if (unlikely (!header.numberOfContours)) return; + + unsigned flags_offset = length (instructions_length ()); + if (unlikely (flags_offset + 1 > bytes.length)) return; + + HBUINT8 &first_flag = (HBUINT8 &) StructAtOffset<HBUINT16> (&bytes, flags_offset); + first_flag = (uint8_t) first_flag | FLAG_OVERLAP_SIMPLE; + } + + static bool read_flags (const HBUINT8 *&p /* IN/OUT */, + contour_point_vector_t &points_ /* IN/OUT */, + const HBUINT8 *end) + { + unsigned count = points_.length; + for (unsigned int i = 0; i < count;) + { + if (unlikely (p + 1 > end)) return false; + uint8_t flag = *p++; + points_.arrayZ[i++].flag = flag; + if (flag & FLAG_REPEAT) + { + if (unlikely (p + 1 > end)) return false; + unsigned int repeat_count = *p++; + unsigned stop = hb_min (i + repeat_count, count); + for (; i < stop;) + points_.arrayZ[i++].flag = flag; + } + } + return true; + } + + static bool read_points (const HBUINT8 *&p /* IN/OUT */, + contour_point_vector_t &points_ /* IN/OUT */, + const HBUINT8 *end, + float contour_point_t::*m, + const simple_glyph_flag_t short_flag, + const simple_glyph_flag_t same_flag) + { + int v = 0; + + unsigned count = points_.length; + for (unsigned i = 0; i < count; i++) + { + unsigned flag = points_[i].flag; + if (flag & short_flag) + { + if (unlikely (p + 1 > end)) return false; + if (flag & same_flag) + v += *p++; + else + v -= *p++; + } + else + { + if (!(flag & same_flag)) + { + if (unlikely (p + HBINT16::static_size > end)) return false; + v += *(const HBINT16 *) p; + p += HBINT16::static_size; + } + } + points_.arrayZ[i].*m = v; + } + return true; + } + + bool get_contour_points (contour_point_vector_t &points_ /* OUT */, + bool phantom_only = false) const + { + const HBUINT16 *endPtsOfContours = &StructAfter<HBUINT16> (header); + int num_contours = header.numberOfContours; + assert (num_contours); + /* One extra item at the end, for the instruction-count below. 
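read_points() above decodes one coordinate stream of the simple-glyph format: per point, the flag selects a 1-byte delta whose sign comes from the SAME bit, a repeated coordinate (delta 0, no bytes consumed), or a full big-endian int16 delta, all accumulated into a running value. A compact standalone decoder over a byte buffer (a sketch, x axis only):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    enum { X_SHORT = 0x02, X_SAME = 0x10 };  // x-axis flag bits from above

    static std::vector<int> decode_xs (const std::vector<uint8_t> &flags,
                                       const uint8_t *p)
    {
      std::vector<int> xs;
      int v = 0;
      for (uint8_t flag : flags)
      {
        if (flag & X_SHORT)
          v += (flag & X_SAME) ? *p++ : -(int) *p++;  // 1 byte, SAME bit = sign
        else if (!(flag & X_SAME))                    // full signed delta
        {
          v += (int16_t) ((p[0] << 8) | p[1]);        // big-endian int16
          p += 2;
        }
        // else: X_SAME set and not short -> delta 0, nothing consumed
        xs.push_back (v);
      }
      return xs;
    }

    int main ()
    {
      std::vector<uint8_t> flags = { X_SHORT | X_SAME, X_SHORT, X_SAME };
      uint8_t data[] = { 100, 30 };  // +100, then -30, then repeat
      for (int x : decode_xs (flags, data))
        printf ("%d ", x);           // 100 70 70
      printf ("\n");
    }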
*/ + if (unlikely (!bytes.check_range (&endPtsOfContours[num_contours]))) return false; + unsigned int num_points = endPtsOfContours[num_contours - 1] + 1; + + points_.alloc (num_points + 4); // Allocate for phantom points, to avoid a possible copy + if (!points_.resize (num_points)) return false; + if (phantom_only) return true; + + for (int i = 0; i < num_contours; i++) + points_[endPtsOfContours[i]].is_end_point = true; + + /* Skip instructions */ + const HBUINT8 *p = &StructAtOffset<HBUINT8> (&endPtsOfContours[num_contours + 1], + endPtsOfContours[num_contours]); + + if (unlikely ((const char *) p < bytes.arrayZ)) return false; /* Unlikely overflow */ + const HBUINT8 *end = (const HBUINT8 *) (bytes.arrayZ + bytes.length); + if (unlikely (p >= end)) return false; + + /* Read x & y coordinates */ + return read_flags (p, points_, end) + && read_points (p, points_, end, &contour_point_t::x, + FLAG_X_SHORT, FLAG_X_SAME) + && read_points (p, points_, end, &contour_point_t::y, + FLAG_Y_SHORT, FLAG_Y_SAME); + } +}; + + +} /* namespace glyf_impl */ +} /* namespace OT */ + + +#endif /* OT_GLYF_SIMPLEGLYPH_HH */ diff --git a/thirdparty/harfbuzz/src/OT/glyf/SubsetGlyph.hh b/thirdparty/harfbuzz/src/OT/glyf/SubsetGlyph.hh new file mode 100644 index 0000000000..ebe4372047 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/glyf/SubsetGlyph.hh @@ -0,0 +1,72 @@ +#ifndef OT_GLYF_SUBSETGLYPH_HH +#define OT_GLYF_SUBSETGLYPH_HH + + +#include "../../hb-open-type.hh" + + +namespace OT { +namespace glyf_impl { + + +struct SubsetGlyph +{ + hb_codepoint_t new_gid; + hb_codepoint_t old_gid; + Glyph source_glyph; + hb_bytes_t dest_start; /* region of source_glyph to copy first */ + hb_bytes_t dest_end; /* region of source_glyph to copy second */ + + bool serialize (hb_serialize_context_t *c, + bool use_short_loca, + const hb_subset_plan_t *plan) const + { + TRACE_SERIALIZE (this); + + hb_bytes_t dest_glyph = dest_start.copy (c); + dest_glyph = hb_bytes_t (&dest_glyph, dest_glyph.length + dest_end.copy (c).length); + unsigned int pad_length = use_short_loca ? 
padding () : 0; + DEBUG_MSG (SUBSET, nullptr, "serialize %d byte glyph, width %d pad %d", dest_glyph.length, dest_glyph.length + pad_length, pad_length); + + HBUINT8 pad; + pad = 0; + while (pad_length > 0) + { + c->embed (pad); + pad_length--; + } + + if (unlikely (!dest_glyph.length)) return_trace (true); + + /* update components gids */ + for (auto &_ : Glyph (dest_glyph).get_composite_iterator ()) + { + hb_codepoint_t new_gid; + if (plan->new_gid_for_old_gid (_.glyphIndex, &new_gid)) + const_cast<CompositeGlyphRecord &> (_).glyphIndex = new_gid; + } + + if (plan->flags & HB_SUBSET_FLAGS_NO_HINTING) + Glyph (dest_glyph).drop_hints (); + + if (plan->flags & HB_SUBSET_FLAGS_SET_OVERLAPS_FLAG) + Glyph (dest_glyph).set_overlaps_flag (); + + return_trace (true); + } + + void drop_hints_bytes () + { source_glyph.drop_hints_bytes (dest_start, dest_end); } + + unsigned int length () const { return dest_start.length + dest_end.length; } + /* pad to 2 to ensure 2-byte loca will be ok */ + unsigned int padding () const { return length () % 2; } + unsigned int padded_size () const { return length () + padding (); } +}; + + +} /* namespace glyf_impl */ +} /* namespace OT */ + + +#endif /* OT_GLYF_SUBSETGLYPH_HH */ diff --git a/thirdparty/harfbuzz/src/OT/glyf/glyf-helpers.hh b/thirdparty/harfbuzz/src/OT/glyf/glyf-helpers.hh new file mode 100644 index 0000000000..f51f7a81fc --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/glyf/glyf-helpers.hh @@ -0,0 +1,90 @@ +#ifndef OT_GLYF_GLYF_HELPERS_HH +#define OT_GLYF_GLYF_HELPERS_HH + + +#include "../../hb-open-type.hh" +#include "../../hb-subset-plan.hh" + +#include "loca.hh" + + +namespace OT { +namespace glyf_impl { + + +template<typename IteratorIn, typename IteratorOut, + hb_requires (hb_is_source_of (IteratorIn, unsigned int)), + hb_requires (hb_is_sink_of (IteratorOut, unsigned))> +static void +_write_loca (IteratorIn it, bool short_offsets, IteratorOut dest) +{ + unsigned right_shift = short_offsets ? 1 : 0; + unsigned int offset = 0; + dest << 0; + + it + | hb_map ([=, &offset] (unsigned int padded_size) + { + offset += padded_size; + DEBUG_MSG (SUBSET, nullptr, "loca entry offset %d", offset); + return offset >> right_shift; + }) + | hb_sink (dest) + ; +} + +static bool +_add_head_and_set_loca_version (hb_subset_plan_t *plan, bool use_short_loca) +{ + hb_blob_t *head_blob = hb_sanitize_context_t ().reference_table<head> (plan->source); + hb_blob_t *head_prime_blob = hb_blob_copy_writable_or_fail (head_blob); + hb_blob_destroy (head_blob); + + if (unlikely (!head_prime_blob)) + return false; + + head *head_prime = (head *) hb_blob_get_data_writable (head_prime_blob, nullptr); + head_prime->indexToLocFormat = use_short_loca ? 0 : 1; + bool success = plan->add_table (HB_OT_TAG_head, head_prime_blob); + + hb_blob_destroy (head_prime_blob); + return success; +} + +template<typename Iterator, + hb_requires (hb_is_source_of (Iterator, unsigned int))> +static bool +_add_loca_and_head (hb_subset_plan_t * plan, Iterator padded_offsets, bool use_short_loca) +{ + unsigned num_offsets = padded_offsets.len () + 1; + unsigned entry_size = use_short_loca ? 
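_write_loca() above emits a running byte offset per glyph, right-shifted by one when the short format is in use; that is why SubsetGlyph pads every glyph to an even length (padding() = length % 2), so the shifted offsets stay exact. The same logic in isolation:

    #include <cstdio>
    #include <vector>

    // Build loca entries from padded glyph sizes: a leading 0, then a running
    // total, each stored >> 1 when the short (16-bit) format is used.
    static std::vector<unsigned> write_loca (const std::vector<unsigned> &padded_sizes,
                                             bool short_offsets)
    {
      unsigned shift = short_offsets ? 1 : 0;
      std::vector<unsigned> loca = { 0 };
      unsigned offset = 0;
      for (unsigned size : padded_sizes)
      {
        offset += size;                    // sizes are pre-padded to even,
        loca.push_back (offset >> shift);  // so the shift never loses a bit
      }
      return loca;
    }

    int main ()
    {
      for (unsigned v : write_loca ({ 12, 0, 26 }, true))
        printf ("%u ", v);  // 0 6 6 19
      printf ("\n");
    }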
2 : 4; + char *loca_prime_data = (char *) hb_calloc (entry_size, num_offsets); + + if (unlikely (!loca_prime_data)) return false; + + DEBUG_MSG (SUBSET, nullptr, "loca entry_size %d num_offsets %d size %d", + entry_size, num_offsets, entry_size * num_offsets); + + if (use_short_loca) + _write_loca (padded_offsets, true, hb_array ((HBUINT16 *) loca_prime_data, num_offsets)); + else + _write_loca (padded_offsets, false, hb_array ((HBUINT32 *) loca_prime_data, num_offsets)); + + hb_blob_t *loca_blob = hb_blob_create (loca_prime_data, + entry_size * num_offsets, + HB_MEMORY_MODE_WRITABLE, + loca_prime_data, + hb_free); + + bool result = plan->add_table (HB_OT_TAG_loca, loca_blob) + && _add_head_and_set_loca_version (plan, use_short_loca); + + hb_blob_destroy (loca_blob); + return result; +} + + +} /* namespace glyf_impl */ +} /* namespace OT */ + + +#endif /* OT_GLYF_GLYF_HELPERS_HH */ diff --git a/thirdparty/harfbuzz/src/OT/glyf/glyf.hh b/thirdparty/harfbuzz/src/OT/glyf/glyf.hh new file mode 100644 index 0000000000..f74513cb96 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/glyf/glyf.hh @@ -0,0 +1,388 @@ +#ifndef OT_GLYF_GLYF_HH +#define OT_GLYF_GLYF_HH + + +#include "../../hb-open-type.hh" +#include "../../hb-ot-head-table.hh" +#include "../../hb-ot-hmtx-table.hh" +#include "../../hb-ot-var-gvar-table.hh" +#include "../../hb-draw.hh" + +#include "glyf-helpers.hh" +#include "Glyph.hh" +#include "SubsetGlyph.hh" +#include "loca.hh" +#include "path-builder.hh" + + +namespace OT { + + +/* + * glyf -- TrueType Glyph Data + * https://docs.microsoft.com/en-us/typography/opentype/spec/glyf + */ +#define HB_OT_TAG_glyf HB_TAG('g','l','y','f') + + +struct glyf +{ + friend struct glyf_accelerator_t; + + static constexpr hb_tag_t tableTag = HB_OT_TAG_glyf; + + bool sanitize (hb_sanitize_context_t *c HB_UNUSED) const + { + TRACE_SANITIZE (this); + /* Runtime checks as eager sanitizing each glyph is costly */ + return_trace (true); + } + + /* requires source of SubsetGlyph complains the identifier isn't declared */ + template <typename Iterator> + bool serialize (hb_serialize_context_t *c, + Iterator it, + bool use_short_loca, + const hb_subset_plan_t *plan) + { + TRACE_SERIALIZE (this); + unsigned init_len = c->length (); + for (const auto &_ : it) _.serialize (c, use_short_loca, plan); + + /* As a special case when all glyphs in the font are empty, add a zero byte + * to the table, so that OTS doesn’t reject it, and to make the table work + * on Windows as well.
+ * See https://github.com/khaledhosny/ots/issues/52 */ + if (init_len == c->length ()) + { + HBUINT8 empty_byte; + empty_byte = 0; + c->copy (empty_byte); + } + return_trace (true); + } + + /* Byte region(s) per glyph to output + unpadded, hints removed if so requested + If we fail to process a glyph we produce an empty (0-length) glyph */ + bool subset (hb_subset_context_t *c) const + { + TRACE_SUBSET (this); + + glyf *glyf_prime = c->serializer->start_embed <glyf> (); + if (unlikely (!c->serializer->check_success (glyf_prime))) return_trace (false); + + hb_vector_t<glyf_impl::SubsetGlyph> glyphs; + _populate_subset_glyphs (c->plan, &glyphs); + + auto padded_offsets = + + hb_iter (glyphs) + | hb_map (&glyf_impl::SubsetGlyph::padded_size) + ; + + unsigned max_offset = + padded_offsets | hb_reduce (hb_add, 0); + bool use_short_loca = max_offset < 0x1FFFF; + + + glyf_prime->serialize (c->serializer, hb_iter (glyphs), use_short_loca, c->plan); + if (!use_short_loca) { + padded_offsets = + + hb_iter (glyphs) + | hb_map (&glyf_impl::SubsetGlyph::length) + ; + } + + + if (unlikely (c->serializer->in_error ())) return_trace (false); + return_trace (c->serializer->check_success (glyf_impl::_add_loca_and_head (c->plan, + padded_offsets, + use_short_loca))); + } + + void + _populate_subset_glyphs (const hb_subset_plan_t *plan, + hb_vector_t<glyf_impl::SubsetGlyph> *glyphs /* OUT */) const; + + protected: + UnsizedArrayOf<HBUINT8> + dataZ; /* Glyphs data. */ + public: + DEFINE_SIZE_MIN (0); /* In reality, this is UNBOUNDED() type; but since we always + * check the size externally, allow Null() object of it by + * defining it _MIN instead. */ +}; + +struct glyf_accelerator_t +{ + glyf_accelerator_t (hb_face_t *face) + { + short_offset = false; + num_glyphs = 0; + loca_table = nullptr; + glyf_table = nullptr; +#ifndef HB_NO_VAR + gvar = nullptr; +#endif + hmtx = nullptr; +#ifndef HB_NO_VERTICAL + vmtx = nullptr; +#endif + const OT::head &head = *face->table.head; + if (head.indexToLocFormat > 1 || head.glyphDataFormat > 0) + /* Unknown format. Leave num_glyphs=0, that takes care of disabling us. */ + return; + short_offset = 0 == head.indexToLocFormat; + + loca_table = face->table.loca.get_blob (); // Needs no destruct! + glyf_table = hb_sanitize_context_t ().reference_table<glyf> (face); +#ifndef HB_NO_VAR + gvar = face->table.gvar; +#endif + hmtx = face->table.hmtx; +#ifndef HB_NO_VERTICAL + vmtx = face->table.vmtx; +#endif + + num_glyphs = hb_max (1u, loca_table.get_length () / (short_offset ? 
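+ /* (Editorial note: loca holds num_glyphs + 1 entries, one extra to delimit the end of the last glyph, hence the - 1 below.) */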
2 : 4)) - 1; + num_glyphs = hb_min (num_glyphs, face->get_num_glyphs ()); + } + ~glyf_accelerator_t () + { + glyf_table.destroy (); + } + + bool has_data () const { return num_glyphs; } + + protected: + template<typename T> + bool get_points (hb_font_t *font, hb_codepoint_t gid, T consumer) const + { + if (gid >= num_glyphs) return false; + + /* Making this allocfree is not that easy + https://github.com/harfbuzz/harfbuzz/issues/2095 + mostly because of gvar handling in VF fonts, + perhaps a separate path for non-VF fonts can be considered */ + contour_point_vector_t all_points; + + bool phantom_only = !consumer.is_consuming_contour_points (); + if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, phantom_only))) + return false; + + if (consumer.is_consuming_contour_points ()) + { + unsigned count = all_points.length; + assert (count >= glyf_impl::PHANTOM_COUNT); + count -= glyf_impl::PHANTOM_COUNT; + for (unsigned point_index = 0; point_index < count; point_index++) + consumer.consume_point (all_points[point_index]); + consumer.points_end (); + } + + /* Where to write phantoms, nullptr if not requested */ + contour_point_t *phantoms = consumer.get_phantoms_sink (); + if (phantoms) + for (unsigned i = 0; i < glyf_impl::PHANTOM_COUNT; ++i) + phantoms[i] = all_points[all_points.length - glyf_impl::PHANTOM_COUNT + i]; + + return true; + } + +#ifndef HB_NO_VAR + struct points_aggregator_t + { + hb_font_t *font; + hb_glyph_extents_t *extents; + contour_point_t *phantoms; + + struct contour_bounds_t + { + contour_bounds_t () { min_x = min_y = FLT_MAX; max_x = max_y = -FLT_MAX; } + + void add (const contour_point_t &p) + { + min_x = hb_min (min_x, p.x); + min_y = hb_min (min_y, p.y); + max_x = hb_max (max_x, p.x); + max_y = hb_max (max_y, p.y); + } + + bool empty () const { return (min_x >= max_x) || (min_y >= max_y); } + + void get_extents (hb_font_t *font, hb_glyph_extents_t *extents) + { + if (unlikely (empty ())) + { + extents->width = 0; + extents->x_bearing = 0; + extents->height = 0; + extents->y_bearing = 0; + return; + } + extents->x_bearing = font->em_scalef_x (min_x); + extents->width = font->em_scalef_x (max_x) - extents->x_bearing; + extents->y_bearing = font->em_scalef_y (max_y); + extents->height = font->em_scalef_y (min_y) - extents->y_bearing; + } + + protected: + float min_x, min_y, max_x, max_y; + } bounds; + + points_aggregator_t (hb_font_t *font_, hb_glyph_extents_t *extents_, contour_point_t *phantoms_) + { + font = font_; + extents = extents_; + phantoms = phantoms_; + if (extents) bounds = contour_bounds_t (); + } + + void consume_point (const contour_point_t &point) { bounds.add (point); } + void points_end () { bounds.get_extents (font, extents); } + + bool is_consuming_contour_points () { return extents; } + contour_point_t *get_phantoms_sink () { return phantoms; } + }; + + public: + unsigned + get_advance_var (hb_font_t *font, hb_codepoint_t gid, bool is_vertical) const + { + if (unlikely (gid >= num_glyphs)) return 0; + + bool success = false; + + contour_point_t phantoms[glyf_impl::PHANTOM_COUNT]; + if (likely (font->num_coords == gvar->get_axis_count ())) + success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms)); + + if (unlikely (!success)) + return +#ifndef HB_NO_VERTICAL + is_vertical ? vmtx->get_advance (gid) : +#endif + hmtx->get_advance (gid); + + float result = is_vertical + ? 
phantoms[glyf_impl::PHANTOM_TOP].y - phantoms[glyf_impl::PHANTOM_BOTTOM].y + : phantoms[glyf_impl::PHANTOM_RIGHT].x - phantoms[glyf_impl::PHANTOM_LEFT].x; + return hb_clamp (roundf (result), 0.f, (float) UINT_MAX / 2); + } + + int get_side_bearing_var (hb_font_t *font, hb_codepoint_t gid, bool is_vertical) const + { + if (unlikely (gid >= num_glyphs)) return 0; + + hb_glyph_extents_t extents; + + contour_point_t phantoms[glyf_impl::PHANTOM_COUNT]; + if (unlikely (!get_points (font, gid, points_aggregator_t (font, &extents, phantoms)))) + return +#ifndef HB_NO_VERTICAL + is_vertical ? vmtx->get_side_bearing (gid) : +#endif + hmtx->get_side_bearing (gid); + + return is_vertical + ? ceilf (phantoms[glyf_impl::PHANTOM_TOP].y) - extents.y_bearing + : floorf (phantoms[glyf_impl::PHANTOM_LEFT].x); + } +#endif + + public: + bool get_extents (hb_font_t *font, hb_codepoint_t gid, hb_glyph_extents_t *extents) const + { + if (unlikely (gid >= num_glyphs)) return false; + +#ifndef HB_NO_VAR + if (font->num_coords) + return get_points (font, gid, points_aggregator_t (font, extents, nullptr)); +#endif + return glyph_for_gid (gid).get_extents (font, *this, extents); + } + + const glyf_impl::Glyph + glyph_for_gid (hb_codepoint_t gid, bool needs_padding_removal = false) const + { + if (unlikely (gid >= num_glyphs)) return glyf_impl::Glyph (); + + unsigned int start_offset, end_offset; + + if (short_offset) + { + const HBUINT16 *offsets = (const HBUINT16 *) loca_table->dataZ.arrayZ; + start_offset = 2 * offsets[gid]; + end_offset = 2 * offsets[gid + 1]; + } + else + { + const HBUINT32 *offsets = (const HBUINT32 *) loca_table->dataZ.arrayZ; + start_offset = offsets[gid]; + end_offset = offsets[gid + 1]; + } + + if (unlikely (start_offset > end_offset || end_offset > glyf_table.get_length ())) + return glyf_impl::Glyph (); + + glyf_impl::Glyph glyph (hb_bytes_t ((const char *) this->glyf_table + start_offset, + end_offset - start_offset), gid); + return needs_padding_removal ? 
glyf_impl::Glyph (glyph.trim_padding (), gid) : glyph; + } + + bool + get_path (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session) const + { return get_points (font, gid, glyf_impl::path_builder_t (font, draw_session)); } + +#ifndef HB_NO_VAR + const gvar_accelerator_t *gvar; +#endif + const hmtx_accelerator_t *hmtx; +#ifndef HB_NO_VERTICAL + const vmtx_accelerator_t *vmtx; +#endif + + private: + bool short_offset; + unsigned int num_glyphs; + hb_blob_ptr_t<loca> loca_table; + hb_blob_ptr_t<glyf> glyf_table; +}; + + +inline void +glyf::_populate_subset_glyphs (const hb_subset_plan_t *plan, + hb_vector_t<glyf_impl::SubsetGlyph> *glyphs /* OUT */) const +{ + OT::glyf_accelerator_t glyf (plan->source); + + + hb_range (plan->num_output_glyphs ()) + | hb_map ([&] (hb_codepoint_t new_gid) + { + glyf_impl::SubsetGlyph subset_glyph = {0}; + subset_glyph.new_gid = new_gid; + + /* should never fail: all old gids should be mapped */ + if (!plan->old_gid_for_new_gid (new_gid, &subset_glyph.old_gid)) + return subset_glyph; + + if (new_gid == 0 && + !(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE)) + subset_glyph.source_glyph = glyf_impl::Glyph (); + else + subset_glyph.source_glyph = glyf.glyph_for_gid (subset_glyph.old_gid, true); + if (plan->flags & HB_SUBSET_FLAGS_NO_HINTING) + subset_glyph.drop_hints_bytes (); + else + subset_glyph.dest_start = subset_glyph.source_glyph.get_bytes (); + return subset_glyph; + }) + | hb_sink (glyphs) + ; +} + + + +} /* namespace OT */ + + +#endif /* OT_GLYF_GLYF_HH */ diff --git a/thirdparty/harfbuzz/src/OT/glyf/loca.hh b/thirdparty/harfbuzz/src/OT/glyf/loca.hh new file mode 100644 index 0000000000..4481cba8ed --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/glyf/loca.hh @@ -0,0 +1,43 @@ +#ifndef OT_GLYF_LOCA_HH +#define OT_GLYF_LOCA_HH + + +#include "../../hb-open-type.hh" + + +namespace OT { + + +/* + * loca -- Index to Location + * https://docs.microsoft.com/en-us/typography/opentype/spec/loca + */ +#define HB_OT_TAG_loca HB_TAG('l','o','c','a') + +struct loca +{ + friend struct glyf; + friend struct glyf_accelerator_t; + + static constexpr hb_tag_t tableTag = HB_OT_TAG_loca; + + bool sanitize (hb_sanitize_context_t *c HB_UNUSED) const + { + TRACE_SANITIZE (this); + return_trace (true); + } + + protected: + UnsizedArrayOf<HBUINT8> + dataZ; /* Location data. */ + public: + DEFINE_SIZE_MIN (0); /* In reality, this is UNBOUNDED() type; but since we always + * check the size externally, allow Null() object of it by + * defining it _MIN instead. 
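+ * (Editorial note: same convention as the glyf table above, whose length is likewise only validated externally.)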
*/ +}; + + +} /* namespace OT */ + + +#endif /* OT_GLYF_LOCA_HH */ diff --git a/thirdparty/harfbuzz/src/OT/glyf/path-builder.hh b/thirdparty/harfbuzz/src/OT/glyf/path-builder.hh new file mode 100644 index 0000000000..9bfc45a1a6 --- /dev/null +++ b/thirdparty/harfbuzz/src/OT/glyf/path-builder.hh @@ -0,0 +1,135 @@ +#ifndef OT_GLYF_PATH_BUILDER_HH +#define OT_GLYF_PATH_BUILDER_HH + + +#include "../../hb.hh" + + +namespace OT { +namespace glyf_impl { + + +struct path_builder_t +{ + hb_font_t *font; + hb_draw_session_t *draw_session; + + struct optional_point_t + { + optional_point_t () {} + optional_point_t (float x_, float y_) : has_data (true), x (x_), y (y_) {} + operator bool () const { return has_data; } + + bool has_data = false; + float x = 0.; + float y = 0.; + + optional_point_t lerp (optional_point_t p, float t) + { return optional_point_t (x + t * (p.x - x), y + t * (p.y - y)); } + } first_oncurve, first_offcurve, last_offcurve; + + path_builder_t (hb_font_t *font_, hb_draw_session_t &draw_session_) + { + font = font_; + draw_session = &draw_session_; + first_oncurve = first_offcurve = last_offcurve = optional_point_t (); + } + + /* based on https://github.com/RazrFalcon/ttf-parser/blob/4f32821/src/glyf.rs#L287 + See also: + * https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html + * https://stackoverflow.com/a/20772557 */ + void consume_point (const contour_point_t &point) + { + bool is_on_curve = point.flag & glyf_impl::SimpleGlyph::FLAG_ON_CURVE; + optional_point_t p (font->em_fscalef_x (point.x), font->em_fscalef_y (point.y)); + if (!first_oncurve) + { + if (is_on_curve) + { + first_oncurve = p; + draw_session->move_to (p.x, p.y); + } + else + { + if (first_offcurve) + { + optional_point_t mid = first_offcurve.lerp (p, .5f); + first_oncurve = mid; + last_offcurve = p; + draw_session->move_to (mid.x, mid.y); + } + else + first_offcurve = p; + } + } + else + { + if (last_offcurve) + { + if (is_on_curve) + { + draw_session->quadratic_to (last_offcurve.x, last_offcurve.y, + p.x, p.y); + last_offcurve = optional_point_t (); + } + else + { + optional_point_t mid = last_offcurve.lerp (p, .5f); + draw_session->quadratic_to (last_offcurve.x, last_offcurve.y, + mid.x, mid.y); + last_offcurve = p; + } + } + else + { + if (is_on_curve) + draw_session->line_to (p.x, p.y); + else + last_offcurve = p; + } + } + + if (point.is_end_point) + { + if (first_offcurve && last_offcurve) + { + optional_point_t mid = last_offcurve.lerp (first_offcurve, .5f); + draw_session->quadratic_to (last_offcurve.x, last_offcurve.y, + mid.x, mid.y); + last_offcurve = optional_point_t (); + /* now check the rest */ + } + + if (first_offcurve && first_oncurve) + draw_session->quadratic_to (first_offcurve.x, first_offcurve.y, + first_oncurve.x, first_oncurve.y); + else if (last_offcurve && first_oncurve) + draw_session->quadratic_to (last_offcurve.x, last_offcurve.y, + first_oncurve.x, first_oncurve.y); + else if (first_oncurve) + draw_session->line_to (first_oncurve.x, first_oncurve.y); + else if (first_offcurve) + { + float x = first_offcurve.x, y = first_offcurve.y; + draw_session->move_to (x, y); + draw_session->quadratic_to (x, y, x, y); + } + + /* Getting ready for the next contour */ + first_oncurve = first_offcurve = last_offcurve = optional_point_t (); + draw_session->close_path (); + } + } + void points_end () {} + + bool is_consuming_contour_points () { return true; } + contour_point_t *get_phantoms_sink () { return nullptr; } +}; + + +} /* namespace glyf_impl */ +} /* namespace OT */ 
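+/* Illustrative sketch (editorial, not upstream code): TrueType outlines are quadratic splines in which two consecutive off-curve points imply an on-curve point midway between them; consume_point () above synthesizes those implied points with lerp, roughly: + * + * optional_point_t mid = prev_off.lerp (next_off, .5f); // implied on-curve point + * draw_session->quadratic_to (prev_off.x, prev_off.y, mid.x, mid.y); + * + * where prev_off and next_off are hypothetical names for two consecutive off-curve points. */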
+ + +#endif /* OT_GLYF_PATH_BUILDER_HH */ diff --git a/thirdparty/harfbuzz/src/graph/graph.hh b/thirdparty/harfbuzz/src/graph/graph.hh new file mode 100644 index 0000000000..52ca9dd142 --- /dev/null +++ b/thirdparty/harfbuzz/src/graph/graph.hh @@ -0,0 +1,860 @@ +/* + * Copyright © 2022 Google, Inc. + * + * This is part of HarfBuzz, a text shaping library. + * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. + * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + * + * Google Author(s): Garret Rieger + */ + +#ifndef GRAPH_GRAPH_HH +#define GRAPH_GRAPH_HH + +namespace graph { + +/** + * Represents a serialized table in the form of a graph. + * Provides methods for modifying and reordering the graph. + */ +struct graph_t +{ + struct vertex_t + { + hb_serialize_context_t::object_t obj; + int64_t distance = 0 ; + int64_t space = 0 ; + hb_vector_t<unsigned> parents; + unsigned start = 0; + unsigned end = 0; + unsigned priority = 0; + + friend void swap (vertex_t& a, vertex_t& b) + { + hb_swap (a.obj, b.obj); + hb_swap (a.distance, b.distance); + hb_swap (a.space, b.space); + hb_swap (a.parents, b.parents); + hb_swap (a.start, b.start); + hb_swap (a.end, b.end); + hb_swap (a.priority, b.priority); + } + + bool is_shared () const + { + return parents.length > 1; + } + + unsigned incoming_edges () const + { + return parents.length; + } + + void remove_parent (unsigned parent_index) + { + for (unsigned i = 0; i < parents.length; i++) + { + if (parents[i] != parent_index) continue; + parents.remove (i); + break; + } + } + + void remap_parents (const hb_vector_t<unsigned>& id_map) + { + for (unsigned i = 0; i < parents.length; i++) + parents[i] = id_map[parents[i]]; + } + + void remap_parent (unsigned old_index, unsigned new_index) + { + for (unsigned i = 0; i < parents.length; i++) + { + if (parents[i] == old_index) + parents[i] = new_index; + } + } + + bool is_leaf () const + { + return !obj.real_links.length && !obj.virtual_links.length; + } + + bool raise_priority () + { + if (has_max_priority ()) return false; + priority++; + return true; + } + + bool has_max_priority () const { + return priority >= 3; + } + + int64_t modified_distance (unsigned order) const + { + // TODO(garretrieger): once priority is high enough, should try + // setting distance = 0 which will force to sort immediately after + // its parent where possible. 
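+ // (Editorial note: the packing below keeps the clamped distance in the high bits and the 18-bit encounter order in the low bits, so objects at equal distance retain a stable, encounter-ordered sort.)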
+ + int64_t modified_distance = + hb_min (hb_max(distance + distance_modifier (), 0), 0x7FFFFFFFFFF); + if (has_max_priority ()) { + modified_distance = 0; + } + return (modified_distance << 18) | (0x003FFFF & order); + } + + int64_t distance_modifier () const + { + if (!priority) return 0; + int64_t table_size = obj.tail - obj.head; + + if (priority == 1) + return -table_size / 2; + + return -table_size; + } + }; + + /* + * A topological sorting of an object graph. Ordered + * in reverse serialization order (first object in the + * serialization is at the end of the list). This matches + * the 'packed' object stack used internally in the + * serializer + */ + template<typename T> + graph_t (const T& objects) + : parents_invalid (true), + distance_invalid (true), + positions_invalid (true), + successful (true) + { + num_roots_for_space_.push (1); + bool removed_nil = false; + vertices_.alloc (objects.length); + vertices_scratch_.alloc (objects.length); + for (unsigned i = 0; i < objects.length; i++) + { + // TODO(grieger): check all links point to valid objects. + + // If this graph came from a serialization buffer object 0 is the + // nil object. We don't need it for our purposes here so drop it. + if (i == 0 && !objects[i]) + { + removed_nil = true; + continue; + } + + vertex_t* v = vertices_.push (); + if (check_success (!vertices_.in_error ())) + v->obj = *objects[i]; + if (!removed_nil) continue; + // Fix indices to account for removed nil object. + for (auto& l : v->obj.all_links_writer ()) { + l.objidx--; + } + } + } + + ~graph_t () + { + vertices_.fini (); + } + + bool in_error () const + { + return !successful || + vertices_.in_error () || + num_roots_for_space_.in_error (); + } + + const vertex_t& root () const + { + return vertices_[root_idx ()]; + } + + unsigned root_idx () const + { + // Object graphs are in reverse order, the first object is at the end + // of the vector. Since the graph is topologically sorted it's safe to + // assume the first object has no incoming edges. + return vertices_.length - 1; + } + + const hb_serialize_context_t::object_t& object(unsigned i) const + { + return vertices_[i].obj; + } + + /* + * Generates a new topological sorting of graph ordered by the shortest + * distance to each node. + */ + void sort_shortest_distance () + { + positions_invalid = true; + + if (vertices_.length <= 1) { + // Graph of 1 or less doesn't need sorting. + return; + } + + update_distances (); + + hb_priority_queue_t queue; + hb_vector_t<vertex_t> &sorted_graph = vertices_scratch_; + if (unlikely (!check_success (sorted_graph.resize (vertices_.length)))) return; + hb_vector_t<unsigned> id_map; + if (unlikely (!check_success (id_map.resize (vertices_.length)))) return; + + hb_vector_t<unsigned> removed_edges; + if (unlikely (!check_success (removed_edges.resize (vertices_.length)))) return; + update_parents (); + + queue.insert (root ().modified_distance (0), root_idx ()); + int new_id = root_idx (); + unsigned order = 1; + while (!queue.in_error () && !queue.is_empty ()) + { + unsigned next_id = queue.pop_minimum().second; + + hb_swap (sorted_graph[new_id], vertices_[next_id]); + const vertex_t& next = sorted_graph[new_id]; + + id_map[next_id] = new_id--; + + for (const auto& link : next.obj.all_links ()) { + removed_edges[link.objidx]++; + if (!(vertices_[link.objidx].incoming_edges () - removed_edges[link.objidx])) + // Add the order that the links were encountered to the priority. 
+ // This ensures that ties between objects' priorities are broken in a consistent + // way. More specifically this is set up so that if a set of objects have the same + // distance they'll be added to the topological order in the order that they are + // referenced from the parent object. + queue.insert (vertices_[link.objidx].modified_distance (order++), + link.objidx); + } + } + + check_success (!queue.in_error ()); + check_success (!sorted_graph.in_error ()); + if (!check_success (new_id == -1)) + print_orphaned_nodes (); + + remap_all_obj_indices (id_map, &sorted_graph); + + hb_swap (vertices_, sorted_graph); + } + + /* + * Assign unique space numbers to each connected subgraph of 32 bit offset(s). + */ + bool assign_32bit_spaces () + { + unsigned root_index = root_idx (); + hb_set_t visited; + hb_set_t roots; + for (unsigned i = 0; i <= root_index; i++) + { + // Only real links can form 32 bit spaces + for (auto& l : vertices_[i].obj.real_links) + { + if (l.width == 4 && !l.is_signed) + { + roots.add (l.objidx); + find_subgraph (l.objidx, visited); + } + } + } + + // Mark everything not in the subgraphs of 32 bit roots as visited. + // This prevents 32 bit subgraphs from being connected via nodes not in the 32 bit subgraphs. + visited.invert (); + + if (!roots) return false; + + while (roots) + { + unsigned next = HB_SET_VALUE_INVALID; + if (unlikely (!check_success (!roots.in_error ()))) break; + if (!roots.next (&next)) break; + + hb_set_t connected_roots; + find_connected_nodes (next, roots, visited, connected_roots); + if (unlikely (!check_success (!connected_roots.in_error ()))) break; + + isolate_subgraph (connected_roots); + if (unlikely (!check_success (!connected_roots.in_error ()))) break; + + unsigned next_space = this->next_space (); + num_roots_for_space_.push (0); + for (unsigned root : connected_roots) + { + DEBUG_MSG (SUBSET_REPACK, nullptr, "Subgraph %u gets space %u", root, next_space); + vertices_[root].space = next_space; + num_roots_for_space_[next_space] = num_roots_for_space_[next_space] + 1; + distance_invalid = true; + positions_invalid = true; + } + + // TODO(grieger): special case for GSUB/GPOS use extension promotions to move 16 bit space + // into the 32 bit space as needed, instead of using isolation. + } + + + + return true; + } + + /* + * Isolates the subgraph of nodes reachable from root. Any links to nodes in the subgraph + * that originate from outside of the subgraph will be removed by duplicating the linked-to + * object. + * + * Indices stored in roots will be updated if any of the roots are duplicated to new indices. + */ + bool isolate_subgraph (hb_set_t& roots) + { + update_parents (); + hb_map_t subgraph; + + // incoming edges to root_idx should be all 32 bit in length so we don't need to de-dup these + // set the subgraph incoming edge count to match all of root_idx's incoming edges + hb_set_t parents; + for (unsigned root_idx : roots) + { + subgraph.set (root_idx, wide_parents (root_idx, parents)); + find_subgraph (root_idx, subgraph); + } + + unsigned original_root_idx = root_idx (); + hb_map_t index_map; + bool made_changes = false; + for (auto entry : subgraph.iter ()) + { + const auto& node = vertices_[entry.first]; + unsigned subgraph_incoming_edges = entry.second; + + if (subgraph_incoming_edges < node.incoming_edges ()) + { + // Only de-dup objects with incoming links from outside the subgraph. 
+ made_changes = true; + duplicate_subgraph (entry.first, index_map); + } + } + + if (!made_changes) + return false; + + if (original_root_idx != root_idx () + && parents.has (original_root_idx)) + { + // If the root idx has changed since parents was determined, update root idx in parents + parents.add (root_idx ()); + parents.del (original_root_idx); + } + + auto new_subgraph = + + subgraph.keys () + | hb_map([&] (unsigned node_idx) { + const unsigned *v; + if (index_map.has (node_idx, &v)) return *v; + return node_idx; + }) + ; + + remap_obj_indices (index_map, new_subgraph); + remap_obj_indices (index_map, parents.iter (), true); + + // Update roots set with new indices as needed. + unsigned next = HB_SET_VALUE_INVALID; + while (roots.next (&next)) + { + const unsigned *v; + if (index_map.has (next, &v)) + { + roots.del (next); + roots.add (*v); + } + } + + return true; + } + + void find_subgraph (unsigned node_idx, hb_map_t& subgraph) + { + for (const auto& link : vertices_[node_idx].obj.all_links ()) + { + const unsigned *v; + if (subgraph.has (link.objidx, &v)) + { + subgraph.set (link.objidx, *v + 1); + continue; + } + subgraph.set (link.objidx, 1); + find_subgraph (link.objidx, subgraph); + } + } + + void find_subgraph (unsigned node_idx, hb_set_t& subgraph) + { + if (subgraph.has (node_idx)) return; + subgraph.add (node_idx); + for (const auto& link : vertices_[node_idx].obj.all_links ()) + find_subgraph (link.objidx, subgraph); + } + + /* + * Duplicates all nodes in the subgraph reachable from node_idx. Does not re-assign + * links. index_map is updated with mappings from old id to new id. If a duplication has already + * been performed for a given index, then it will be skipped. + */ + void duplicate_subgraph (unsigned node_idx, hb_map_t& index_map) + { + if (index_map.has (node_idx)) + return; + + index_map.set (node_idx, duplicate (node_idx)); + for (const auto& l : object (node_idx).all_links ()) { + duplicate_subgraph (l.objidx, index_map); + } + } + + /* + * Creates a copy of node_idx and returns its new index. + */ + unsigned duplicate (unsigned node_idx) + { + positions_invalid = true; + distance_invalid = true; + + auto* clone = vertices_.push (); + auto& child = vertices_[node_idx]; + if (vertices_.in_error ()) { + return -1; + } + + clone->obj.head = child.obj.head; + clone->obj.tail = child.obj.tail; + clone->distance = child.distance; + clone->space = child.space; + clone->parents.reset (); + + unsigned clone_idx = vertices_.length - 2; + for (const auto& l : child.obj.real_links) + { + clone->obj.real_links.push (l); + vertices_[l.objidx].parents.push (clone_idx); + } + for (const auto& l : child.obj.virtual_links) + { + clone->obj.virtual_links.push (l); + vertices_[l.objidx].parents.push (clone_idx); + } + + check_success (!clone->obj.real_links.in_error ()); + check_success (!clone->obj.virtual_links.in_error ()); + + // The last object is the root of the graph, so swap back the root to the end. + // The root's obj idx does change, however since it's the root nothing else refers to it. + // All other obj idx's will be unaffected. + hb_swap (vertices_[vertices_.length - 2], *clone); + + // Since the root moved, update the parents arrays of all children on the root. + for (const auto& l : root ().obj.all_links ()) + vertices_[l.objidx].remap_parent (root_idx () - 1, root_idx ()); + + return clone_idx; + } + + /* + * Creates a copy of child and re-assigns the link from + * parent to the clone. 
The copy is a shallow copy; objects + * linked from child are not duplicated. + */ + bool duplicate (unsigned parent_idx, unsigned child_idx) + { + update_parents (); + + unsigned links_to_child = 0; + for (const auto& l : vertices_[parent_idx].obj.all_links ()) + { + if (l.objidx == child_idx) links_to_child++; + } + + if (vertices_[child_idx].incoming_edges () <= links_to_child) + { + // Can't duplicate this node, doing so would orphan the original one as all remaining links + // to child are from parent. + DEBUG_MSG (SUBSET_REPACK, nullptr, " Not duplicating %d => %d", + parent_idx, child_idx); + return false; + } + + DEBUG_MSG (SUBSET_REPACK, nullptr, " Duplicating %d => %d", + parent_idx, child_idx); + + unsigned clone_idx = duplicate (child_idx); + if (clone_idx == (unsigned) -1) return false; + // duplicate shifts the root node idx, so if parent_idx was root update it. + if (parent_idx == clone_idx) parent_idx++; + + auto& parent = vertices_[parent_idx]; + for (auto& l : parent.obj.all_links_writer ()) + { + if (l.objidx != child_idx) + continue; + + reassign_link (l, parent_idx, clone_idx); + } + + return true; + } + + /* + * Raises the sorting priority of all children. + */ + bool raise_childrens_priority (unsigned parent_idx) + { + DEBUG_MSG (SUBSET_REPACK, nullptr, " Raising priority of all children of %d", + parent_idx); + // This operation doesn't change ordering until a sort is run, so no need + // to invalidate positions. It does not change graph structure so no need + // to update distances or edge counts. + auto& parent = vertices_[parent_idx].obj; + bool made_change = false; + for (auto& l : parent.all_links_writer ()) + made_change |= vertices_[l.objidx].raise_priority (); + return made_change; + } + + void print_orphaned_nodes () + { + if (!DEBUG_ENABLED(SUBSET_REPACK)) return; + + DEBUG_MSG (SUBSET_REPACK, nullptr, "Graph is not fully connected."); + parents_invalid = true; + update_parents(); + + for (unsigned i = 0; i < root_idx (); i++) + { + const auto& v = vertices_[i]; + if (!v.parents) + DEBUG_MSG (SUBSET_REPACK, nullptr, "Node %u is orphaned.", i); + } + } + + unsigned num_roots_for_space (unsigned space) const + { + return num_roots_for_space_[space]; + } + + unsigned next_space () const + { + return num_roots_for_space_.length; + } + + void move_to_new_space (const hb_set_t& indices) + { + num_roots_for_space_.push (0); + unsigned new_space = num_roots_for_space_.length - 1; + + for (unsigned index : indices) { + auto& node = vertices_[index]; + num_roots_for_space_[node.space] = num_roots_for_space_[node.space] - 1; + num_roots_for_space_[new_space] = num_roots_for_space_[new_space] + 1; + node.space = new_space; + distance_invalid = true; + positions_invalid = true; + } + } + + unsigned space_for (unsigned index, unsigned* root = nullptr) const + { + const auto& node = vertices_[index]; + if (node.space) + { + if (root != nullptr) + *root = index; + return node.space; + } + + if (!node.parents) + { + if (root) + *root = index; + return 0; + } + + return space_for (node.parents[0], root); + } + + void err_other_error () { this->successful = false; } + + size_t total_size_in_bytes () const { + size_t total_size = 0; + for (unsigned i = 0; i < vertices_.length; i++) { + size_t size = vertices_[i].obj.tail - vertices_[i].obj.head; + total_size += size; + } + return total_size; + } + + + private: + + /* + * Returns the number of incoming edges that are 32 bits wide. 
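+ * (Editorial note: the matching parent indices are also collected into the parents set supplied by the caller.)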
+ */ + unsigned wide_parents (unsigned node_idx, hb_set_t& parents) const + { + unsigned count = 0; + hb_set_t visited; + for (unsigned p : vertices_[node_idx].parents) + { + if (visited.has (p)) continue; + visited.add (p); + + // Only real links can be wide + for (const auto& l : vertices_[p].obj.real_links) + { + if (l.objidx == node_idx && l.width == 4 && !l.is_signed) + { + count++; + parents.add (p); + } + } + } + return count; + } + + bool check_success (bool success) + { return this->successful && (success || ((void) err_other_error (), false)); } + + public: + /* + * Rebuilds the parents list of each vertex from the links in the graph. + */ + void update_parents () + { + if (!parents_invalid) return; + + for (unsigned i = 0; i < vertices_.length; i++) + vertices_[i].parents.reset (); + + for (unsigned p = 0; p < vertices_.length; p++) + { + for (auto& l : vertices_[p].obj.all_links ()) + { + vertices_[l.objidx].parents.push (p); + } + } + + parents_invalid = false; + } + + /* + * Compute the serialized start and end positions for each vertex. + */ + void update_positions () + { + if (!positions_invalid) return; + + unsigned current_pos = 0; + for (int i = root_idx (); i >= 0; i--) + { + auto& v = vertices_[i]; + v.start = current_pos; + current_pos += v.obj.tail - v.obj.head; + v.end = current_pos; + } + + positions_invalid = false; + } + + /* + * Finds the distance to each object in the graph + * from the initial node. + */ + void update_distances () + { + if (!distance_invalid) return; + + // Uses Dijkstra's algorithm to find all of the shortest distances. + // https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm + // + // Implementation Note: + // Since our priority queue doesn't support fast priority decreases + // we instead just add new entries into the queue when a priority changes. + // Redundant ones are filtered out later on by the visited set. + // According to https://www3.cs.stonybrook.edu/~rezaul/papers/TR-07-54.pdf + // for practical performance this is faster than using a more advanced queue + // (such as a Fibonacci heap) with a fast decrease-priority operation. + for (unsigned i = 0; i < vertices_.length; i++) + { + if (i == vertices_.length - 1) + vertices_[i].distance = 0; + else + vertices_[i].distance = hb_int_max (int64_t); + } + + hb_priority_queue_t queue; + queue.insert (0, vertices_.length - 1); + + hb_vector_t<bool> visited; + visited.resize (vertices_.length); + + while (!queue.in_error () && !queue.is_empty ()) + { + unsigned next_idx = queue.pop_minimum ().second; + if (visited[next_idx]) continue; + const auto& next = vertices_[next_idx]; + int64_t next_distance = vertices_[next_idx].distance; + visited[next_idx] = true; + + for (const auto& link : next.obj.all_links ()) + { + if (visited[link.objidx]) continue; + + const auto& child = vertices_[link.objidx].obj; + unsigned link_width = link.width ? link.width : 4; // treat virtual offsets as 32 bits wide + int64_t child_weight = (child.tail - child.head) + + ((int64_t) 1 << (link_width * 8)) * (vertices_[link.objidx].space + 1); + int64_t child_distance = next_distance + child_weight; + + if (child_distance < vertices_[link.objidx].distance) + { + vertices_[link.objidx].distance = child_distance; + queue.insert (child_distance, link.objidx); + } + } + } + + check_success (!queue.in_error ()); + if (!check_success (queue.is_empty ())) + { + print_orphaned_nodes (); + return; + } + + distance_invalid = false; + } + + private: + /* + * Updates a link in the graph to point to a different object. 
Corrects the + * parents vector on the previous and new child nodes. + */ + void reassign_link (hb_serialize_context_t::object_t::link_t& link, + unsigned parent_idx, + unsigned new_idx) + { + unsigned old_idx = link.objidx; + link.objidx = new_idx; + vertices_[old_idx].remove_parent (parent_idx); + vertices_[new_idx].parents.push (parent_idx); + } + + /* + * Updates all objidx's in all links using the provided mapping. Corrects incoming edge counts. + */ + template<typename Iterator, hb_requires (hb_is_iterator (Iterator))> + void remap_obj_indices (const hb_map_t& id_map, + Iterator subgraph, + bool only_wide = false) + { + if (!id_map) return; + for (unsigned i : subgraph) + { + for (auto& link : vertices_[i].obj.all_links_writer ()) + { + const unsigned *v; + if (!id_map.has (link.objidx, &v)) continue; + if (only_wide && !(link.width == 4 && !link.is_signed)) continue; + + reassign_link (link, i, *v); + } + } + } + + /* + * Updates all objidx's in all links using the provided mapping. + */ + void remap_all_obj_indices (const hb_vector_t<unsigned>& id_map, + hb_vector_t<vertex_t>* sorted_graph) const + { + for (unsigned i = 0; i < sorted_graph->length; i++) + { + (*sorted_graph)[i].remap_parents (id_map); + for (auto& link : (*sorted_graph)[i].obj.all_links_writer ()) + { + link.objidx = id_map[link.objidx]; + } + } + } + + /* + * Finds all nodes in targets that are reachable from start_idx, nodes in visited will be skipped. + * For this search the graph is treated as being undirected. + * + * Connected targets will be added to connected and removed from targets. All visited nodes + * will be added to visited. + */ + void find_connected_nodes (unsigned start_idx, + hb_set_t& targets, + hb_set_t& visited, + hb_set_t& connected) + { + if (unlikely (!check_success (!visited.in_error ()))) return; + if (visited.has (start_idx)) return; + visited.add (start_idx); + + if (targets.has (start_idx)) + { + targets.del (start_idx); + connected.add (start_idx); + } + + const auto& v = vertices_[start_idx]; + + // Graph is treated as undirected so search children and parents of start_idx + for (const auto& l : v.obj.all_links ()) + find_connected_nodes (l.objidx, targets, visited, connected); + + for (unsigned p : v.parents) + find_connected_nodes (p, targets, visited, connected); + } + + public: + // TODO(garretrieger): make private, will need to move most of offset overflow code into graph. + hb_vector_t<vertex_t> vertices_; + hb_vector_t<vertex_t> vertices_scratch_; + private: + bool parents_invalid; + bool distance_invalid; + bool positions_invalid; + bool successful; + hb_vector_t<unsigned> num_roots_for_space_; +}; + +} + +#endif // GRAPH_GRAPH_HH diff --git a/thirdparty/harfbuzz/src/graph/serialize.hh b/thirdparty/harfbuzz/src/graph/serialize.hh new file mode 100644 index 0000000000..ecc6cc5aea --- /dev/null +++ b/thirdparty/harfbuzz/src/graph/serialize.hh @@ -0,0 +1,249 @@ +/* + * Copyright © 2022 Google, Inc. + * + * This is part of HarfBuzz, a text shaping library. + * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. 
+ * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + * + * Google Author(s): Garret Rieger + */ + +#ifndef GRAPH_SERIALIZE_HH +#define GRAPH_SERIALIZE_HH + +namespace graph { + +struct overflow_record_t +{ + unsigned parent; + unsigned child; +}; + +inline +int64_t compute_offset ( + const graph_t& graph, + unsigned parent_idx, + const hb_serialize_context_t::object_t::link_t& link) +{ + const auto& parent = graph.vertices_[parent_idx]; + const auto& child = graph.vertices_[link.objidx]; + int64_t offset = 0; + switch ((hb_serialize_context_t::whence_t) link.whence) { + case hb_serialize_context_t::whence_t::Head: + offset = child.start - parent.start; break; + case hb_serialize_context_t::whence_t::Tail: + offset = child.start - parent.end; break; + case hb_serialize_context_t::whence_t::Absolute: + offset = child.start; break; + } + + assert (offset >= link.bias); + offset -= link.bias; + return offset; +} + +inline +bool is_valid_offset (int64_t offset, + const hb_serialize_context_t::object_t::link_t& link) +{ + if (unlikely (!link.width)) + // Virtual links can't overflow. + return link.is_signed || offset >= 0; + + if (link.is_signed) + { + if (link.width == 4) + return offset >= -((int64_t) 1 << 31) && offset < ((int64_t) 1 << 31); + else + return offset >= -(1 << 15) && offset < (1 << 15); + } + else + { + if (link.width == 4) + return offset >= 0 && offset < ((int64_t) 1 << 32); + else if (link.width == 3) + return offset >= 0 && offset < ((int32_t) 1 << 24); + else + return offset >= 0 && offset < (1 << 16); + } +} + +/* + * Will any offsets overflow on graph when it's serialized? 
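+ * (Editorial note: when overflows is non-null, every offending parent/child pair is collected into it rather than returning at the first overflow found.)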
+ */ +inline bool +will_overflow (graph_t& graph, + hb_vector_t<overflow_record_t>* overflows = nullptr) +{ + if (overflows) overflows->resize (0); + graph.update_positions (); + + const auto& vertices = graph.vertices_; + for (int parent_idx = vertices.length - 1; parent_idx >= 0; parent_idx--) + { + // Don't need to check virtual links for overflow + for (const auto& link : vertices[parent_idx].obj.real_links) + { + int64_t offset = compute_offset (graph, parent_idx, link); + if (is_valid_offset (offset, link)) + continue; + + if (!overflows) return true; + + overflow_record_t r; + r.parent = parent_idx; + r.child = link.objidx; + overflows->push (r); + } + } + + if (!overflows) return false; + return overflows->length; +} + +inline +void print_overflows (graph_t& graph, + const hb_vector_t<overflow_record_t>& overflows) +{ + if (!DEBUG_ENABLED(SUBSET_REPACK)) return; + + graph.update_parents (); + int limit = 10; + for (const auto& o : overflows) + { + if (!limit--) break; + const auto& parent = graph.vertices_[o.parent]; + const auto& child = graph.vertices_[o.child]; + DEBUG_MSG (SUBSET_REPACK, nullptr, + " overflow from " + "%4d (%4d in, %4d out, space %2d) => " + "%4d (%4d in, %4d out, space %2d)", + o.parent, + parent.incoming_edges (), + parent.obj.real_links.length + parent.obj.virtual_links.length, + graph.space_for (o.parent), + o.child, + child.incoming_edges (), + child.obj.real_links.length + child.obj.virtual_links.length, + graph.space_for (o.child)); + } + if (overflows.length > 10) { + DEBUG_MSG (SUBSET_REPACK, nullptr, " ... plus %d more overflows.", overflows.length - 10); + } +} + +template <typename O> inline void +serialize_link_of_type (const hb_serialize_context_t::object_t::link_t& link, + char* head, + hb_serialize_context_t* c) +{ + OT::Offset<O>* offset = reinterpret_cast<OT::Offset<O>*> (head + link.position); + *offset = 0; + c->add_link (*offset, + // serializer has an extra nil object at the start of the + // object array. So all id's are +1 of what our id's are. + link.objidx + 1, + (hb_serialize_context_t::whence_t) link.whence, + link.bias); +} + +inline +void serialize_link (const hb_serialize_context_t::object_t::link_t& link, + char* head, + hb_serialize_context_t* c) +{ + switch (link.width) + { + case 0: + // Virtual links aren't serialized. + return; + case 4: + if (link.is_signed) + { + serialize_link_of_type<OT::HBINT32> (link, head, c); + } else { + serialize_link_of_type<OT::HBUINT32> (link, head, c); + } + return; + case 2: + if (link.is_signed) + { + serialize_link_of_type<OT::HBINT16> (link, head, c); + } else { + serialize_link_of_type<OT::HBUINT16> (link, head, c); + } + return; + case 3: + serialize_link_of_type<OT::HBUINT24> (link, head, c); + return; + default: + // Unexpected link width. + assert (0); + } +} + +/* + * serialize graph into the provided serialization buffer. 
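+ * (Editorial note: returns nullptr if the output buffer cannot be allocated, if it runs out of space, or if the serializer ends in error; on success the packed bytes are returned as a new blob.)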
+ */ +inline hb_blob_t* serialize (const graph_t& graph) +{ + hb_vector_t<char> buffer; + size_t size = graph.total_size_in_bytes (); + if (!buffer.alloc (size)) { + DEBUG_MSG (SUBSET_REPACK, nullptr, "Unable to allocate output buffer."); + return nullptr; + } + hb_serialize_context_t c((void *) buffer, size); + + c.start_serialize<void> (); + const auto& vertices = graph.vertices_; + for (unsigned i = 0; i < vertices.length; i++) { + c.push (); + + size_t size = vertices[i].obj.tail - vertices[i].obj.head; + char* start = c.allocate_size <char> (size); + if (!start) { + DEBUG_MSG (SUBSET_REPACK, nullptr, "Buffer out of space."); + return nullptr; + } + + memcpy (start, vertices[i].obj.head, size); + + // Only real links need to be serialized. + for (const auto& link : vertices[i].obj.real_links) + serialize_link (link, start, &c); + + // All duplications are already encoded in the graph, so don't + // enable sharing during packing. + c.pop_pack (false); + } + c.end_serialize (); + + if (c.in_error ()) { + DEBUG_MSG (SUBSET_REPACK, nullptr, "Error during serialization. Err flag: %d", + c.errors); + return nullptr; + } + + return c.copy_blob (); +} + +} // namespace graph + +#endif // GRAPH_SERIALIZE_HH diff --git a/thirdparty/harfbuzz/src/hb-aat-layout-kerx-table.hh b/thirdparty/harfbuzz/src/hb-aat-layout-kerx-table.hh index 0354b47d5a..6a24c90c31 100644 --- a/thirdparty/harfbuzz/src/hb-aat-layout-kerx-table.hh +++ b/thirdparty/harfbuzz/src/hb-aat-layout-kerx-table.hh @@ -287,7 +287,7 @@ struct KerxSubTableFormat1 * in the 'kern' table example. */ if (v == -0x8000) { - o.attach_type() = ATTACH_TYPE_NONE; + o.attach_type() = OT::Layout::GPOS_impl::ATTACH_TYPE_NONE; o.attach_chain() = 0; o.y_offset = 0; } @@ -310,7 +310,7 @@ struct KerxSubTableFormat1 /* CoreText doesn't do crossStream kerning in vertical. We do. */ if (v == -0x8000) { - o.attach_type() = ATTACH_TYPE_NONE; + o.attach_type() = OT::Layout::GPOS_impl::ATTACH_TYPE_NONE; o.attach_chain() = 0; o.x_offset = 0; } @@ -567,7 +567,7 @@ struct KerxSubTableFormat4 } break; } - o.attach_type() = ATTACH_TYPE_MARK; + o.attach_type() = OT::Layout::GPOS_impl::ATTACH_TYPE_MARK; o.attach_chain() = (int) mark - (int) buffer->idx; buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT; } @@ -901,7 +901,7 @@ struct KerxTable unsigned int count = c->buffer->len; for (unsigned int i = 0; i < count; i++) { - pos[i].attach_type() = ATTACH_TYPE_CURSIVE; + pos[i].attach_type() = OT::Layout::GPOS_impl::ATTACH_TYPE_CURSIVE; + pos[i].attach_chain() = HB_DIRECTION_IS_FORWARD (c->buffer->props.direction) ? 
-1 : +1; /* We intentionally don't set HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT, * since there needs to be a non-zero attachment for post-positioning to diff --git a/thirdparty/harfbuzz/src/hb-aat-layout-morx-table.hh b/thirdparty/harfbuzz/src/hb-aat-layout-morx-table.hh index b77c1f4d44..3d053cb13e 100644 --- a/thirdparty/harfbuzz/src/hb-aat-layout-morx-table.hh +++ b/thirdparty/harfbuzz/src/hb-aat-layout-morx-table.hh @@ -123,7 +123,7 @@ struct RearrangementSubtable bool reverse_l = 3 == (m >> 4); bool reverse_r = 3 == (m & 0x0F); - if (end - start >= l + r) + if (end - start >= l + r && end-start <= HB_MAX_CONTEXT_LENGTH) { buffer->merge_clusters (start, hb_min (buffer->idx + 1, buffer->len)); buffer->merge_clusters (start, end); diff --git a/thirdparty/harfbuzz/src/hb-algs.hh b/thirdparty/harfbuzz/src/hb-algs.hh index 05b4df52f1..f1633d886a 100644 --- a/thirdparty/harfbuzz/src/hb-algs.hh +++ b/thirdparty/harfbuzz/src/hb-algs.hh @@ -59,7 +59,7 @@ static inline constexpr T operator | (T l, T r) { return T ((unsigned) l | (unsigned) r); } \ static inline constexpr T operator & (T l, T r) { return T ((unsigned) l & (unsigned) r); } \ static inline constexpr T operator ^ (T l, T r) { return T ((unsigned) l ^ (unsigned) r); } \ - static inline constexpr T operator ~ (T r) { return T (~(unsigned int) r); } \ + static inline constexpr unsigned operator ~ (T r) { return (~(unsigned) r); } \ static inline T& operator |= (T &l, T r) { l = l | r; return l; } \ static inline T& operator &= (T& l, T r) { l = l & r; return l; } \ static inline T& operator ^= (T& l, T r) { l = l ^ r; return l; } \ @@ -227,31 +227,26 @@ struct } HB_FUNCOBJ (hb_bool); -template <typename T> -static inline -constexpr T hb_coerce (const T v) { return v; } -template <typename T, typename V, - hb_enable_if (!hb_is_same (hb_decay<T>, hb_decay<V>) && std::is_pointer<V>::value)> -static inline -constexpr T hb_coerce (const V v) { return *v; } - struct { private: template <typename T> constexpr auto - impl (const T& v, hb_priority<2>) const HB_RETURN (uint32_t, hb_deref (v).hash ()) + impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, hb_deref (v).hash ()) - template <typename T> constexpr auto - impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, std::hash<hb_decay<decltype (hb_deref (v))>>{} (hb_deref (v))) + template <typename T> constexpr uint32_t + impl (const hb::shared_ptr<T>& v, hb_priority<1>) const + { + return v.get () ? v.get ()->hash () : 0; + } + template <typename T> constexpr uint32_t + impl (const hb::unique_ptr<T>& v, hb_priority<1>) const + { + return v.get () ? 
v.get ()->hash () : 0; + } - template <typename T, - hb_enable_if (std::is_integral<T>::value)> constexpr auto - impl (const T& v, hb_priority<0>) const HB_AUTO_RETURN - ( - /* Knuth's multiplicative method: */ - (uint32_t) v * 2654435761u - ) + template <typename T> constexpr auto + impl (const T& v, hb_priority<0>) const HB_RETURN (uint32_t, std::hash<hb_decay<decltype (hb_deref (v))>>{} (hb_deref (v))) public: @@ -862,6 +857,11 @@ hb_in_ranges (T u, T lo1, T hi1, T lo2, T hi2, T lo3, T hi3) { return hb_in_range (u, lo1, hi1) || hb_in_range (u, lo2, hi2) || hb_in_range (u, lo3, hi3); } +template <typename T> static inline bool +hb_in_ranges (T u, T lo1, T hi1, T lo2, T hi2, T lo3, T hi3, T lo4, T hi4) +{ + return hb_in_range (u, lo1, hi1) || hb_in_range (u, lo2, hi2) || hb_in_range (u, lo3, hi3) || hb_in_range (u, lo4, hi4); +} /* diff --git a/thirdparty/harfbuzz/src/hb-array.hh b/thirdparty/harfbuzz/src/hb-array.hh index 1963698cf7..5baeb6f7fe 100644 --- a/thirdparty/harfbuzz/src/hb-array.hh +++ b/thirdparty/harfbuzz/src/hb-array.hh @@ -56,7 +56,6 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&> hb_array_t& operator= (const hb_array_t&) = default; hb_array_t& operator= (hb_array_t&&) = default; - constexpr hb_array_t (std::nullptr_t) : hb_array_t () {} constexpr hb_array_t (Type *array_, unsigned int length_) : arrayZ (array_), length (length_) {} template <unsigned int length_> constexpr hb_array_t (Type (&array_)[length_]) : hb_array_t (array_, length_) {} @@ -314,7 +313,6 @@ struct hb_sorted_array_t : hb_sorted_array_t& operator= (const hb_sorted_array_t&) = default; hb_sorted_array_t& operator= (hb_sorted_array_t&&) = default; - constexpr hb_sorted_array_t (std::nullptr_t) : hb_sorted_array_t () {} constexpr hb_sorted_array_t (Type *array_, unsigned int length_) : hb_array_t<Type> (array_, length_) {} template <unsigned int length_> constexpr hb_sorted_array_t (Type (&array_)[length_]) : hb_array_t<Type> (array_) {} diff --git a/thirdparty/harfbuzz/src/hb-bimap.hh b/thirdparty/harfbuzz/src/hb-bimap.hh index 5b313bf59c..8e8c988716 100644 --- a/thirdparty/harfbuzz/src/hb-bimap.hh +++ b/thirdparty/harfbuzz/src/hb-bimap.hh @@ -54,17 +54,18 @@ struct hb_bimap_t if (unlikely (rhs == HB_MAP_VALUE_INVALID)) { del (lhs); return; } forw_map.set (lhs, rhs); - if (in_error ()) return; + if (unlikely (in_error ())) return; back_map.set (rhs, lhs); - if (in_error ()) forw_map.del (lhs); + if (unlikely (in_error ())) forw_map.del (lhs); } hb_codepoint_t get (hb_codepoint_t lhs) const { return forw_map.get (lhs); } hb_codepoint_t backward (hb_codepoint_t rhs) const { return back_map.get (rhs); } hb_codepoint_t operator [] (hb_codepoint_t lhs) const { return get (lhs); } - bool has (hb_codepoint_t lhs, hb_codepoint_t *vp = nullptr) const { return forw_map.has (lhs, vp); } + bool has (hb_codepoint_t lhs) const { return forw_map.has (lhs); } + void del (hb_codepoint_t lhs) { @@ -78,7 +79,7 @@ struct hb_bimap_t back_map.clear (); } - bool is_empty () const { return get_population () == 0; } + bool is_empty () const { return forw_map.is_empty (); } unsigned int get_population () const { return forw_map.get_population (); } diff --git a/thirdparty/harfbuzz/src/hb-bit-set-invertible.hh b/thirdparty/harfbuzz/src/hb-bit-set-invertible.hh index caea47d8d3..27fb0732ea 100644 --- a/thirdparty/harfbuzz/src/hb-bit-set-invertible.hh +++ b/thirdparty/harfbuzz/src/hb-bit-set-invertible.hh @@ -80,7 +80,7 @@ struct hb_bit_set_invertible_t next (&v); return v == INVALID; } - uint32_t hash () 
const { return s.hash () ^ inverted; } + uint32_t hash () const { return s.hash () ^ (uint32_t) inverted; } hb_codepoint_t get_min () const { @@ -100,7 +100,7 @@ struct hb_bit_set_invertible_t void add (hb_codepoint_t g) { unlikely (inverted) ? s.del (g) : s.add (g); } bool add_range (hb_codepoint_t a, hb_codepoint_t b) - { return unlikely (inverted) ? (s.del_range (a, b), true) : s.add_range (a, b); } + { return unlikely (inverted) ? ((void) s.del_range (a, b), true) : s.add_range (a, b); } template <typename T> void add_array (const T *array, unsigned int count, unsigned int stride=sizeof(T)) diff --git a/thirdparty/harfbuzz/src/hb-bit-set.hh b/thirdparty/harfbuzz/src/hb-bit-set.hh index 11a4359dc9..438fb66a2c 100644 --- a/thirdparty/harfbuzz/src/hb-bit-set.hh +++ b/thirdparty/harfbuzz/src/hb-bit-set.hh @@ -56,7 +56,7 @@ struct hb_bit_set_t { successful = true; population = 0; - last_page_lookup = 0; + last_page_lookup.set_relaxed (0); page_map.init (); pages.init (); } @@ -78,7 +78,7 @@ struct hb_bit_set_t bool successful = true; /* Allocations successful */ mutable unsigned int population = 0; - mutable unsigned int last_page_lookup = 0; + mutable hb_atomic_int_t last_page_lookup = 0; hb_sorted_vector_t<page_map_t> page_map; hb_vector_t<page_t> pages; @@ -607,7 +607,7 @@ struct hb_bit_set_t const auto* page_map_array = page_map.arrayZ; unsigned int major = get_major (*codepoint); - unsigned int i = last_page_lookup; + unsigned int i = last_page_lookup.get_relaxed (); if (unlikely (i >= page_map.length || page_map_array[i].major != major)) { @@ -625,7 +625,7 @@ struct hb_bit_set_t if (pages_array[current.index].next (codepoint)) { *codepoint += current.major * page_t::PAGE_BITS; - last_page_lookup = i; + last_page_lookup.set_relaxed (i); return true; } i++; @@ -638,11 +638,11 @@ struct hb_bit_set_t if (m != INVALID) { *codepoint = current.major * page_t::PAGE_BITS + m; - last_page_lookup = i; + last_page_lookup.set_relaxed (i); return true; } } - last_page_lookup = 0; + last_page_lookup.set_relaxed (0); *codepoint = INVALID; return false; } @@ -725,7 +725,7 @@ struct hb_bit_set_t { const auto* page_map_array = page_map.arrayZ; unsigned int major = get_major (codepoint); - unsigned int i = last_page_lookup; + unsigned int i = last_page_lookup.get_relaxed (); if (unlikely (i >= page_map.length || page_map_array[i].major != major)) { page_map.bfind (major, &i, HB_NOT_FOUND_STORE_CLOSEST); @@ -766,7 +766,7 @@ struct hb_bit_set_t { const auto* page_map_array = page_map.arrayZ; unsigned int major = get_major (codepoint); - unsigned int i = last_page_lookup; + unsigned int i = last_page_lookup.get_relaxed (); if (unlikely (i >= page_map.length || page_map_array[i].major != major)) { page_map.bfind(major, &i, HB_NOT_FOUND_STORE_CLOSEST); @@ -893,15 +893,15 @@ struct hb_bit_set_t /* The extra page_map length is necessary; can't just rely on vector here, * since the next check would be tricked because a null page also has * major==0, which we can't distinguish from an actualy major==0 page... 
*/ - if (likely (last_page_lookup < page_map.length)) + unsigned i = last_page_lookup.get_relaxed (); + if (likely (i < page_map.length)) { - auto &cached_page = page_map.arrayZ[last_page_lookup]; + auto &cached_page = page_map.arrayZ[i]; if (cached_page.major == major) return &pages[cached_page.index]; } page_map_t map = {major, pages.length}; - unsigned int i; if (!page_map.bfind (map, &i, HB_NOT_FOUND_STORE_CLOSEST)) { if (!insert) @@ -917,7 +917,7 @@ struct hb_bit_set_t page_map[i] = map; } - last_page_lookup = i; + last_page_lookup.set_relaxed (i); return &pages[page_map[i].index]; } const page_t *page_for (hb_codepoint_t g) const @@ -927,19 +927,19 @@ struct hb_bit_set_t /* The extra page_map length is necessary; can't just rely on vector here, * since the next check would be tricked because a null page also has * major==0, which we can't distinguish from an actualy major==0 page... */ - if (likely (last_page_lookup < page_map.length)) + unsigned i = last_page_lookup.get_relaxed (); + if (likely (i < page_map.length)) { - auto &cached_page = page_map.arrayZ[last_page_lookup]; + auto &cached_page = page_map.arrayZ[i]; if (cached_page.major == major) return &pages[cached_page.index]; } page_map_t key = {major}; - unsigned int i; if (!page_map.bfind (key, &i)) return nullptr; - last_page_lookup = i; + last_page_lookup.set_relaxed (i); return &pages[page_map[i].index]; } page_t &page_at (unsigned int i) { return pages[page_map[i].index]; } diff --git a/thirdparty/harfbuzz/src/hb-blob.cc b/thirdparty/harfbuzz/src/hb-blob.cc index 65e44c7f6a..b561a9374e 100644 --- a/thirdparty/harfbuzz/src/hb-blob.cc +++ b/thirdparty/harfbuzz/src/hb-blob.cc @@ -369,7 +369,7 @@ hb_blob_get_length (hb_blob_t *blob) * * Fetches the data from a blob. * - * Returns: (transfer none) (array length=length): the byte data of @blob. + * Returns: (nullable) (transfer none) (array length=length): the byte data of @blob. * * Since: 0.9.2 **/ @@ -572,7 +572,7 @@ _open_resource_fork (const char *file_name, hb_mapped_file_t *file) strncpy (rsrc_name, file_name, name_len); strncpy (rsrc_name + name_len, _PATH_RSRCFORKSPEC, - sizeof (_PATH_RSRCFORKSPEC) - 1); + sizeof (_PATH_RSRCFORKSPEC)); int fd = open (rsrc_name, O_RDONLY | O_BINARY, 0); hb_free (rsrc_name); diff --git a/thirdparty/harfbuzz/src/hb-buffer-serialize.cc b/thirdparty/harfbuzz/src/hb-buffer-serialize.cc index 6539b89640..3f619a113e 100644 --- a/thirdparty/harfbuzz/src/hb-buffer-serialize.cc +++ b/thirdparty/harfbuzz/src/hb-buffer-serialize.cc @@ -31,7 +31,7 @@ #include "hb-buffer.hh" -static const char *serialize_formats[] = { +static const char *_hb_buffer_serialize_formats[] = { "text", "json", nullptr @@ -50,7 +50,7 @@ static const char *serialize_formats[] = { const char ** hb_buffer_serialize_list_formats () { - return serialize_formats; + return _hb_buffer_serialize_formats; } /** @@ -91,8 +91,8 @@ hb_buffer_serialize_format_to_string (hb_buffer_serialize_format_t format) { switch ((unsigned) format) { - case HB_BUFFER_SERIALIZE_FORMAT_TEXT: return serialize_formats[0]; - case HB_BUFFER_SERIALIZE_FORMAT_JSON: return serialize_formats[1]; + case HB_BUFFER_SERIALIZE_FORMAT_TEXT: return _hb_buffer_serialize_formats[0]; + case HB_BUFFER_SERIALIZE_FORMAT_JSON: return _hb_buffer_serialize_formats[1]; default: case HB_BUFFER_SERIALIZE_FORMAT_INVALID: return nullptr; } @@ -400,9 +400,9 @@ _hb_buffer_serialize_unicode_text (hb_buffer_t *buffer, * @buf: (out) (array length=buf_size) (element-type uint8_t): output string to * write serialized buffer into. 
* @buf_size: the size of @buf. - * @buf_consumed: (out) (optional): if not %NULL, will be set to the number of byes written into @buf. + * @buf_consumed: (out) (optional): if not %NULL, will be set to the number of bytes written into @buf. * @font: (nullable): the #hb_font_t used to shape this buffer, needed to - * read glyph names and extents. If %NULL, and empty font will be used. + * read glyph names and extents. If %NULL, an empty font will be used. * @format: the #hb_buffer_serialize_format_t to use for formatting the output. * @flags: the #hb_buffer_serialize_flags_t that control what glyph properties * to serialize. @@ -514,7 +514,7 @@ hb_buffer_serialize_glyphs (hb_buffer_t *buffer, * @buf: (out) (array length=buf_size) (element-type uint8_t): output string to * write serialized buffer into. * @buf_size: the size of @buf. - * @buf_consumed: (out) (optional): if not %NULL, will be set to the number of byes written into @buf. + * @buf_consumed: (out) (optional): if not %NULL, will be set to the number of bytes written into @buf. * @format: the #hb_buffer_serialize_format_t to use for formatting the output. * @flags: the #hb_buffer_serialize_flags_t that control what glyph properties * to serialize. @@ -637,9 +637,9 @@ _hb_buffer_serialize_invalid (hb_buffer_t *buffer, * @buf: (out) (array length=buf_size) (element-type uint8_t): output string to * write serialized buffer into. * @buf_size: the size of @buf. - * @buf_consumed: (out) (optional): if not %NULL, will be set to the number of byes written into @buf. + * @buf_consumed: (out) (optional): if not %NULL, will be set to the number of bytes written into @buf. * @font: (nullable): the #hb_font_t used to shape this buffer, needed to - * read glyph names and extents. If %NULL, and empty font will be used. + * read glyph names and extents. If %NULL, an empty font will be used. * @format: the #hb_buffer_serialize_format_t to use for formatting the output. * @flags: the #hb_buffer_serialize_flags_t that control what glyph properties * to serialize. diff --git a/thirdparty/harfbuzz/src/hb-buffer-verify.cc b/thirdparty/harfbuzz/src/hb-buffer-verify.cc index dea2c11c35..5453e1ca94 100644 --- a/thirdparty/harfbuzz/src/hb-buffer-verify.cc +++ b/thirdparty/harfbuzz/src/hb-buffer-verify.cc @@ -102,9 +102,9 @@ buffer_verify_unsafe_to_break (hb_buffer_t *buffer, /* Check that breaking up shaping at safe-to-break is indeed safe. 
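The typo fixes above ("byes" → "bytes", "and empty" → "an empty") belong to the `hb_buffer_serialize_glyphs ()` documentation. For context, the function serializes as many glyphs as fit in `buf` and returns how many items it consumed, so the documented idiom is a loop:

```cpp
char buf[1024];
unsigned int consumed;
unsigned int start = 0, end = hb_buffer_get_length (buffer);
while (start < end)
{
  start += hb_buffer_serialize_glyphs (buffer, start, end,
                                       buf, sizeof (buf), &consumed,
                                       font,  /* may be NULL: an empty font is used */
                                       HB_BUFFER_SERIALIZE_FORMAT_TEXT,
                                       HB_BUFFER_SERIALIZE_FLAG_DEFAULT);
  printf ("%s", buf);  /* buf is nul-terminated by the call */
}
```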
*/ hb_buffer_t *fragment = hb_buffer_create_similar (buffer); - hb_buffer_set_flags (fragment, hb_buffer_get_flags (fragment) & ~HB_BUFFER_FLAG_VERIFY); + hb_buffer_set_flags (fragment, (hb_buffer_flags_t (hb_buffer_get_flags (fragment) & ~HB_BUFFER_FLAG_VERIFY))); hb_buffer_t *reconstruction = hb_buffer_create_similar (buffer); - hb_buffer_set_flags (reconstruction, hb_buffer_get_flags (reconstruction) & ~HB_BUFFER_FLAG_VERIFY); + hb_buffer_set_flags (reconstruction, (hb_buffer_flags_t (hb_buffer_get_flags (reconstruction) & ~HB_BUFFER_FLAG_VERIFY))); unsigned int num_glyphs; hb_glyph_info_t *info = hb_buffer_get_glyph_infos (buffer, &num_glyphs); @@ -169,6 +169,12 @@ buffer_verify_unsafe_to_break (hb_buffer_t *buffer, hb_buffer_destroy (fragment); return false; } + else if (!fragment->successful || fragment->shaping_failed) + { + hb_buffer_destroy (reconstruction); + hb_buffer_destroy (fragment); + return true; + } hb_buffer_append (reconstruction, fragment, 0, -1); start = end; @@ -238,10 +244,10 @@ buffer_verify_unsafe_to_concat (hb_buffer_t *buffer, hb_buffer_t *fragments[2] {hb_buffer_create_similar (buffer), hb_buffer_create_similar (buffer)}; - hb_buffer_set_flags (fragments[0], hb_buffer_get_flags (fragments[0]) & ~HB_BUFFER_FLAG_VERIFY); - hb_buffer_set_flags (fragments[1], hb_buffer_get_flags (fragments[1]) & ~HB_BUFFER_FLAG_VERIFY); + hb_buffer_set_flags (fragments[0], (hb_buffer_flags_t (hb_buffer_get_flags (fragments[0]) & ~HB_BUFFER_FLAG_VERIFY))); + hb_buffer_set_flags (fragments[1], (hb_buffer_flags_t (hb_buffer_get_flags (fragments[1]) & ~HB_BUFFER_FLAG_VERIFY))); hb_buffer_t *reconstruction = hb_buffer_create_similar (buffer); - hb_buffer_set_flags (reconstruction, hb_buffer_get_flags (reconstruction) & ~HB_BUFFER_FLAG_VERIFY); + hb_buffer_set_flags (reconstruction, (hb_buffer_flags_t (hb_buffer_get_flags (reconstruction) & ~HB_BUFFER_FLAG_VERIFY))); hb_segment_properties_t props; hb_buffer_get_segment_properties (buffer, &props); hb_buffer_set_segment_properties (fragments[0], &props); @@ -317,12 +323,22 @@ buffer_verify_unsafe_to_concat (hb_buffer_t *buffer, ret = false; goto out; } + else if (!fragments[0]->successful || fragments[0]->shaping_failed) + { + ret = true; + goto out; + } if (!hb_shape_full (font, fragments[1], features, num_features, shapers)) { buffer_verify_error (buffer, font, BUFFER_VERIFY_ERROR "shaping failed while shaping fragment."); ret = false; goto out; } + else if (!fragments[1]->successful || fragments[1]->shaping_failed) + { + ret = true; + goto out; + } if (!forward) { @@ -402,6 +418,7 @@ hb_buffer_t::verify (hb_buffer_t *text_buffer, ret = false; if (!ret) { +#ifndef HB_NO_BUFFER_SERIALIZE unsigned len = text_buffer->len; hb_vector_t<char> bytes; if (likely (bytes.resize (len * 10 + 16))) @@ -414,6 +431,7 @@ hb_buffer_t::verify (hb_buffer_t *text_buffer, HB_BUFFER_SERIALIZE_FLAG_NO_CLUSTERS); buffer_verify_error (this, font, BUFFER_VERIFY_ERROR "text was: %s.", bytes.arrayZ); } +#endif } return ret; } diff --git a/thirdparty/harfbuzz/src/hb-buffer.cc b/thirdparty/harfbuzz/src/hb-buffer.cc index 6a9ee3ccc8..2272ecf09b 100644 --- a/thirdparty/harfbuzz/src/hb-buffer.cc +++ b/thirdparty/harfbuzz/src/hb-buffer.cc @@ -81,8 +81,8 @@ hb_segment_properties_equal (const hb_segment_properties_t *a, unsigned int hb_segment_properties_hash (const hb_segment_properties_t *p) { - return (unsigned int) p->direction ^ - (unsigned int) p->script ^ + return ((unsigned int) p->direction * 31 + + (unsigned int) p->script) * 31 + (intptr_t) (p->language); } @@ 
-289,6 +289,7 @@ hb_buffer_t::clear () props = default_props; successful = true; + shaping_failed = false; have_output = false; have_positions = false; @@ -310,6 +311,7 @@ hb_buffer_t::enter () { deallocate_var_all (); serial = 0; + shaping_failed = false; scratch_flags = HB_BUFFER_SCRATCH_FLAG_DEFAULT; if (likely (!hb_unsigned_mul_overflows (len, HB_BUFFER_MAX_LEN_FACTOR))) { @@ -329,6 +331,7 @@ hb_buffer_t::leave () max_ops = HB_BUFFER_MAX_OPS_DEFAULT; deallocate_var_all (); serial = 0; + // Intentionally not reseting shaping_failed, such that it can be inspected. } @@ -542,7 +545,8 @@ hb_buffer_t::delete_glyph () /* The logic here is duplicated in hb_ot_hide_default_ignorables(). */ unsigned int cluster = info[idx].cluster; - if (idx + 1 < len && cluster == info[idx + 1].cluster) + if ((idx + 1 < len && cluster == info[idx + 1].cluster) || + (out_len && cluster == out_info[out_len - 1].cluster)) { /* Cluster survives; do nothing. */ goto done; @@ -623,6 +627,7 @@ DEFINE_NULL_INSTANCE (hb_buffer_t) = HB_SEGMENT_PROPERTIES_DEFAULT, false, /* successful */ + true, /* shaping_failed */ false, /* have_output */ true /* have_positions */ @@ -631,7 +636,7 @@ DEFINE_NULL_INSTANCE (hb_buffer_t) = /** - * hb_buffer_create: (Xconstructor) + * hb_buffer_create: * * Creates a new #hb_buffer_t with all properties to defaults. * @@ -834,7 +839,7 @@ hb_buffer_set_content_type (hb_buffer_t *buffer, * Since: 0.9.5 **/ hb_buffer_content_type_t -hb_buffer_get_content_type (hb_buffer_t *buffer) +hb_buffer_get_content_type (const hb_buffer_t *buffer) { return buffer->content_type; } @@ -876,7 +881,7 @@ hb_buffer_set_unicode_funcs (hb_buffer_t *buffer, * Since: 0.9.2 **/ hb_unicode_funcs_t * -hb_buffer_get_unicode_funcs (hb_buffer_t *buffer) +hb_buffer_get_unicode_funcs (const hb_buffer_t *buffer) { return buffer->unicode; } @@ -919,7 +924,7 @@ hb_buffer_set_direction (hb_buffer_t *buffer, * Since: 0.9.2 **/ hb_direction_t -hb_buffer_get_direction (hb_buffer_t *buffer) +hb_buffer_get_direction (const hb_buffer_t *buffer) { return buffer->props.direction; } @@ -963,7 +968,7 @@ hb_buffer_set_script (hb_buffer_t *buffer, * Since: 0.9.2 **/ hb_script_t -hb_buffer_get_script (hb_buffer_t *buffer) +hb_buffer_get_script (const hb_buffer_t *buffer) { return buffer->props.script; } @@ -1007,7 +1012,7 @@ hb_buffer_set_language (hb_buffer_t *buffer, * Since: 0.9.2 **/ hb_language_t -hb_buffer_get_language (hb_buffer_t *buffer) +hb_buffer_get_language (const hb_buffer_t *buffer) { return buffer->props.language; } @@ -1043,7 +1048,7 @@ hb_buffer_set_segment_properties (hb_buffer_t *buffer, * Since: 0.9.7 **/ void -hb_buffer_get_segment_properties (hb_buffer_t *buffer, +hb_buffer_get_segment_properties (const hb_buffer_t *buffer, hb_segment_properties_t *props) { *props = buffer->props; @@ -1081,7 +1086,7 @@ hb_buffer_set_flags (hb_buffer_t *buffer, * Since: 0.9.7 **/ hb_buffer_flags_t -hb_buffer_get_flags (hb_buffer_t *buffer) +hb_buffer_get_flags (const hb_buffer_t *buffer) { return buffer->flags; } @@ -1120,7 +1125,7 @@ hb_buffer_set_cluster_level (hb_buffer_t *buffer, * Since: 0.9.42 **/ hb_buffer_cluster_level_t -hb_buffer_get_cluster_level (hb_buffer_t *buffer) +hb_buffer_get_cluster_level (const hb_buffer_t *buffer) { return buffer->cluster_level; } @@ -1161,7 +1166,7 @@ hb_buffer_set_replacement_codepoint (hb_buffer_t *buffer, * Since: 0.9.31 **/ hb_codepoint_t -hb_buffer_get_replacement_codepoint (hb_buffer_t *buffer) +hb_buffer_get_replacement_codepoint (const hb_buffer_t *buffer) { return buffer->replacement; } 
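Among the hb-buffer.cc changes above, `hb_segment_properties_hash ()` moves from XOR-ing the fields to a multiply-and-add combiner: XOR lets distinct `(direction, script)` pairs cancel each other out, while multiplying by 31 between fields keeps each field in its own "digit" position. The new hash is equivalent to:

```cpp
static unsigned int
segment_properties_hash (unsigned int direction, unsigned int script, intptr_t language)
{
  /* Horner-style combining; 31 is the classic small odd multiplier. */
  return (direction * 31u + script) * 31u + (unsigned int) language;
}
```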
@@ -1201,7 +1206,7 @@ hb_buffer_set_invisible_glyph (hb_buffer_t *buffer, * Since: 2.0.0 **/ hb_codepoint_t -hb_buffer_get_invisible_glyph (hb_buffer_t *buffer) +hb_buffer_get_invisible_glyph (const hb_buffer_t *buffer) { return buffer->invisible; } @@ -1241,7 +1246,7 @@ hb_buffer_set_not_found_glyph (hb_buffer_t *buffer, * Since: 3.1.0 **/ hb_codepoint_t -hb_buffer_get_not_found_glyph (hb_buffer_t *buffer) +hb_buffer_get_not_found_glyph (const hb_buffer_t *buffer) { return buffer->not_found; } @@ -1381,7 +1386,7 @@ hb_buffer_set_length (hb_buffer_t *buffer, * Since: 0.9.2 **/ unsigned int -hb_buffer_get_length (hb_buffer_t *buffer) +hb_buffer_get_length (const hb_buffer_t *buffer) { return buffer->len; } diff --git a/thirdparty/harfbuzz/src/hb-buffer.h b/thirdparty/harfbuzz/src/hb-buffer.h index 14aaa5e1f5..22fb3496f2 100644 --- a/thirdparty/harfbuzz/src/hb-buffer.h +++ b/thirdparty/harfbuzz/src/hb-buffer.h @@ -289,7 +289,7 @@ hb_buffer_set_content_type (hb_buffer_t *buffer, hb_buffer_content_type_t content_type); HB_EXTERN hb_buffer_content_type_t -hb_buffer_get_content_type (hb_buffer_t *buffer); +hb_buffer_get_content_type (const hb_buffer_t *buffer); HB_EXTERN void @@ -297,21 +297,21 @@ hb_buffer_set_unicode_funcs (hb_buffer_t *buffer, hb_unicode_funcs_t *unicode_funcs); HB_EXTERN hb_unicode_funcs_t * -hb_buffer_get_unicode_funcs (hb_buffer_t *buffer); +hb_buffer_get_unicode_funcs (const hb_buffer_t *buffer); HB_EXTERN void hb_buffer_set_direction (hb_buffer_t *buffer, hb_direction_t direction); HB_EXTERN hb_direction_t -hb_buffer_get_direction (hb_buffer_t *buffer); +hb_buffer_get_direction (const hb_buffer_t *buffer); HB_EXTERN void hb_buffer_set_script (hb_buffer_t *buffer, hb_script_t script); HB_EXTERN hb_script_t -hb_buffer_get_script (hb_buffer_t *buffer); +hb_buffer_get_script (const hb_buffer_t *buffer); HB_EXTERN void hb_buffer_set_language (hb_buffer_t *buffer, @@ -319,14 +319,14 @@ hb_buffer_set_language (hb_buffer_t *buffer, HB_EXTERN hb_language_t -hb_buffer_get_language (hb_buffer_t *buffer); +hb_buffer_get_language (const hb_buffer_t *buffer); HB_EXTERN void hb_buffer_set_segment_properties (hb_buffer_t *buffer, const hb_segment_properties_t *props); HB_EXTERN void -hb_buffer_get_segment_properties (hb_buffer_t *buffer, +hb_buffer_get_segment_properties (const hb_buffer_t *buffer, hb_segment_properties_t *props); HB_EXTERN void @@ -373,6 +373,7 @@ hb_buffer_guess_segment_properties (hb_buffer_t *buffer); * flag indicating that the @HB_GLYPH_FLAG_UNSAFE_TO_CONCAT * glyph-flag should be produced by the shaper. By default * it will not be produced since it incurs a cost. Since: 4.0.0 + * @HB_BUFFER_FLAG_DEFINED: All currently defined flags: Since: 4.4.0 * * Flags for #hb_buffer_t. 
* @@ -386,7 +387,9 @@ typedef enum { /*< flags >*/ HB_BUFFER_FLAG_REMOVE_DEFAULT_IGNORABLES = 0x00000008u, HB_BUFFER_FLAG_DO_NOT_INSERT_DOTTED_CIRCLE = 0x00000010u, HB_BUFFER_FLAG_VERIFY = 0x00000020u, - HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT = 0x00000040u + HB_BUFFER_FLAG_PRODUCE_UNSAFE_TO_CONCAT = 0x00000040u, + + HB_BUFFER_FLAG_DEFINED = 0x0000007Fu } hb_buffer_flags_t; HB_EXTERN void @@ -394,7 +397,7 @@ hb_buffer_set_flags (hb_buffer_t *buffer, hb_buffer_flags_t flags); HB_EXTERN hb_buffer_flags_t -hb_buffer_get_flags (hb_buffer_t *buffer); +hb_buffer_get_flags (const hb_buffer_t *buffer); /** * hb_buffer_cluster_level_t: @@ -436,7 +439,7 @@ hb_buffer_set_cluster_level (hb_buffer_t *buffer, hb_buffer_cluster_level_t cluster_level); HB_EXTERN hb_buffer_cluster_level_t -hb_buffer_get_cluster_level (hb_buffer_t *buffer); +hb_buffer_get_cluster_level (const hb_buffer_t *buffer); /** * HB_BUFFER_REPLACEMENT_CODEPOINT_DEFAULT: @@ -453,21 +456,21 @@ hb_buffer_set_replacement_codepoint (hb_buffer_t *buffer, hb_codepoint_t replacement); HB_EXTERN hb_codepoint_t -hb_buffer_get_replacement_codepoint (hb_buffer_t *buffer); +hb_buffer_get_replacement_codepoint (const hb_buffer_t *buffer); HB_EXTERN void hb_buffer_set_invisible_glyph (hb_buffer_t *buffer, hb_codepoint_t invisible); HB_EXTERN hb_codepoint_t -hb_buffer_get_invisible_glyph (hb_buffer_t *buffer); +hb_buffer_get_invisible_glyph (const hb_buffer_t *buffer); HB_EXTERN void hb_buffer_set_not_found_glyph (hb_buffer_t *buffer, hb_codepoint_t not_found); HB_EXTERN hb_codepoint_t -hb_buffer_get_not_found_glyph (hb_buffer_t *buffer); +hb_buffer_get_not_found_glyph (const hb_buffer_t *buffer); /* @@ -549,7 +552,7 @@ hb_buffer_set_length (hb_buffer_t *buffer, unsigned int length); HB_EXTERN unsigned int -hb_buffer_get_length (hb_buffer_t *buffer); +hb_buffer_get_length (const hb_buffer_t *buffer); /* Getting glyphs out of the buffer */ @@ -583,6 +586,7 @@ hb_buffer_normalize_glyphs (hb_buffer_t *buffer); * @HB_BUFFER_SERIALIZE_FLAG_GLYPH_FLAGS: serialize glyph flags. Since: 1.5.0 * @HB_BUFFER_SERIALIZE_FLAG_NO_ADVANCES: do not serialize glyph advances, * glyph offsets will reflect absolute glyph positions. Since: 1.8.0 + * @HB_BUFFER_SERIALIZE_FLAG_DEFINED: All currently defined flags. Since: 4.4.0 * * Flags that control what glyph information are serialized in hb_buffer_serialize_glyphs(). 
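The new `HB_BUFFER_FLAG_DEFINED` above (and `HB_BUFFER_SERIALIZE_FLAG_DEFINED` in the next hunk) expose the union of all flags the library defines as of 4.4.0. One use — a hypothetical helper, not part of the API — is stripping unknown bits from externally supplied flags:

```cpp
static hb_buffer_flags_t
sanitize_buffer_flags (unsigned int raw_flags)
{
  /* Keep only bits HarfBuzz 4.4 actually defines. */
  return (hb_buffer_flags_t) (raw_flags & HB_BUFFER_FLAG_DEFINED);
}
```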
* @@ -595,7 +599,9 @@ typedef enum { /*< flags >*/ HB_BUFFER_SERIALIZE_FLAG_NO_GLYPH_NAMES = 0x00000004u, HB_BUFFER_SERIALIZE_FLAG_GLYPH_EXTENTS = 0x00000008u, HB_BUFFER_SERIALIZE_FLAG_GLYPH_FLAGS = 0x00000010u, - HB_BUFFER_SERIALIZE_FLAG_NO_ADVANCES = 0x00000020u + HB_BUFFER_SERIALIZE_FLAG_NO_ADVANCES = 0x00000020u, + + HB_BUFFER_SERIALIZE_FLAG_DEFINED = 0x0000003Fu } hb_buffer_serialize_flags_t; /** diff --git a/thirdparty/harfbuzz/src/hb-buffer.hh b/thirdparty/harfbuzz/src/hb-buffer.hh index bc6992905e..6ca78f28d4 100644 --- a/thirdparty/harfbuzz/src/hb-buffer.hh +++ b/thirdparty/harfbuzz/src/hb-buffer.hh @@ -57,6 +57,7 @@ static_assert ((sizeof (hb_glyph_info_t) == 20), ""); static_assert ((sizeof (hb_glyph_info_t) == sizeof (hb_glyph_position_t)), ""); +HB_MARK_AS_FLAG_T (hb_glyph_flags_t); HB_MARK_AS_FLAG_T (hb_buffer_flags_t); HB_MARK_AS_FLAG_T (hb_buffer_serialize_flags_t); HB_MARK_AS_FLAG_T (hb_buffer_diff_flags_t); @@ -69,12 +70,13 @@ enum hb_buffer_scratch_flags_t { HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT = 0x00000008u, HB_BUFFER_SCRATCH_FLAG_HAS_CGJ = 0x00000010u, HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS = 0x00000020u, + HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE = 0x00000040u, - /* Reserved for complex shapers' internal use. */ - HB_BUFFER_SCRATCH_FLAG_COMPLEX0 = 0x01000000u, - HB_BUFFER_SCRATCH_FLAG_COMPLEX1 = 0x02000000u, - HB_BUFFER_SCRATCH_FLAG_COMPLEX2 = 0x04000000u, - HB_BUFFER_SCRATCH_FLAG_COMPLEX3 = 0x08000000u, + /* Reserved for shapers' internal use. */ + HB_BUFFER_SCRATCH_FLAG_SHAPER0 = 0x01000000u, + HB_BUFFER_SCRATCH_FLAG_SHAPER1 = 0x02000000u, + HB_BUFFER_SCRATCH_FLAG_SHAPER2 = 0x04000000u, + HB_BUFFER_SCRATCH_FLAG_SHAPER3 = 0x08000000u, }; HB_MARK_AS_FLAG_T (hb_buffer_scratch_flags_t); @@ -106,6 +108,7 @@ struct hb_buffer_t hb_segment_properties_t props; /* Script, language, direction */ bool successful; /* Allocations successful */ + bool shaping_failed; /* Shaping failure */ bool have_output; /* Whether we have an output buffer going on */ bool have_positions; /* Whether we have positions */ @@ -130,9 +133,7 @@ struct hb_buffer_t * Managed by enter / leave */ -#ifndef HB_NDEBUG uint8_t allocated_var_bits; -#endif uint8_t serial; hb_buffer_scratch_flags_t scratch_flags; /* Have space-fallback, etc. */ unsigned int max_len; /* Maximum allowed len. 
*/ @@ -161,38 +162,40 @@ struct hb_buffer_t void allocate_var (unsigned int start, unsigned int count) { -#ifndef HB_NDEBUG unsigned int end = start + count; assert (end <= 8); unsigned int bits = (1u<<end) - (1u<<start); assert (0 == (allocated_var_bits & bits)); allocated_var_bits |= bits; -#endif + } + bool try_allocate_var (unsigned int start, unsigned int count) + { + unsigned int end = start + count; + assert (end <= 8); + unsigned int bits = (1u<<end) - (1u<<start); + if (allocated_var_bits & bits) + return false; + allocated_var_bits |= bits; + return true; } void deallocate_var (unsigned int start, unsigned int count) { -#ifndef HB_NDEBUG unsigned int end = start + count; assert (end <= 8); unsigned int bits = (1u<<end) - (1u<<start); assert (bits == (allocated_var_bits & bits)); allocated_var_bits &= ~bits; -#endif } void assert_var (unsigned int start, unsigned int count) { -#ifndef HB_NDEBUG unsigned int end = start + count; assert (end <= 8); - unsigned int bits = (1u<<end) - (1u<<start); + HB_UNUSED unsigned int bits = (1u<<end) - (1u<<start); assert (bits == (allocated_var_bits & bits)); -#endif } void deallocate_var_all () { -#ifndef HB_NDEBUG allocated_var_bits = 0; -#endif } hb_glyph_info_t &cur (unsigned int i = 0) { return info[idx + i]; } @@ -549,7 +552,7 @@ struct hb_buffer_t #ifdef HB_NO_BUFFER_MESSAGE return true; #else - if (!messaging ()) + if (likely (!messaging ())) return true; message_depth++; @@ -619,9 +622,10 @@ DECLARE_NULL_INSTANCE (hb_buffer_t); #define HB_BUFFER_XALLOCATE_VAR(b, func, var) \ b->func (offsetof (hb_glyph_info_t, var) - offsetof(hb_glyph_info_t, var1), \ sizeof (b->info[0].var)) -#define HB_BUFFER_ALLOCATE_VAR(b, var) HB_BUFFER_XALLOCATE_VAR (b, allocate_var, var ()) -#define HB_BUFFER_DEALLOCATE_VAR(b, var) HB_BUFFER_XALLOCATE_VAR (b, deallocate_var, var ()) -#define HB_BUFFER_ASSERT_VAR(b, var) HB_BUFFER_XALLOCATE_VAR (b, assert_var, var ()) +#define HB_BUFFER_ALLOCATE_VAR(b, var) HB_BUFFER_XALLOCATE_VAR (b, allocate_var, var ()) +#define HB_BUFFER_TRY_ALLOCATE_VAR(b, var) HB_BUFFER_XALLOCATE_VAR (b, try_allocate_var, var ()) +#define HB_BUFFER_DEALLOCATE_VAR(b, var) HB_BUFFER_XALLOCATE_VAR (b, deallocate_var, var ()) +#define HB_BUFFER_ASSERT_VAR(b, var) HB_BUFFER_XALLOCATE_VAR (b, assert_var, var ()) #endif /* HB_BUFFER_HH */ diff --git a/thirdparty/harfbuzz/src/hb-cache.hh b/thirdparty/harfbuzz/src/hb-cache.hh index e617b75de9..d6b229ed65 100644 --- a/thirdparty/harfbuzz/src/hb-cache.hh +++ b/thirdparty/harfbuzz/src/hb-cache.hh @@ -32,7 +32,7 @@ /* Implements a lockfree cache for int->int functions. 
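The `allocate_var ()` family above is now compiled unconditionally (it used to vanish under `HB_NDEBUG`) and gains `try_allocate_var ()`, which reports failure instead of asserting when a byte range of the per-glyph `var` scratch space is already taken. The bookkeeping is one bit per byte, with `(1u << end) - (1u << start)` building the mask for `[start, end)`. A standalone sketch of that arithmetic:

```cpp
#include <cassert>
#include <cstdint>

struct var_allocator_t
{
  uint8_t allocated = 0;  /* one bit per byte of an 8-byte scratch area */

  bool try_allocate (unsigned start, unsigned count)
  {
    unsigned end = start + count;
    assert (end <= 8);
    uint8_t bits = (uint8_t) ((1u << end) - (1u << start));  /* mask for [start, end) */
    if (allocated & bits)
      return false;            /* overlaps an existing allocation */
    allocated |= bits;
    return true;
  }

  void deallocate (unsigned start, unsigned count)
  {
    uint8_t bits = (uint8_t) ((1u << (start + count)) - (1u << start));
    assert ((allocated & bits) == bits);  /* must have been allocated */
    allocated &= (uint8_t) ~bits;
  }
};
```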
*/ -template <unsigned int key_bits, unsigned int value_bits, unsigned int cache_bits> +template <unsigned int key_bits=16, unsigned int value_bits=8 + 32 - key_bits, unsigned int cache_bits=8> struct hb_cache_t { static_assert ((key_bits >= cache_bits), ""); diff --git a/thirdparty/harfbuzz/src/hb-common.cc b/thirdparty/harfbuzz/src/hb-common.cc index 41229b9183..7266d9b01f 100644 --- a/thirdparty/harfbuzz/src/hb-common.cc +++ b/thirdparty/harfbuzz/src/hb-common.cc @@ -160,7 +160,7 @@ hb_tag_to_string (hb_tag_t tag, char *buf) /* hb_direction_t */ -const char direction_strings[][4] = { +static const char direction_strings[][4] = { "ltr", "rtl", "ttb", diff --git a/thirdparty/harfbuzz/src/hb-config.hh b/thirdparty/harfbuzz/src/hb-config.hh index 4b46dea938..2578231d23 100644 --- a/thirdparty/harfbuzz/src/hb-config.hh +++ b/thirdparty/harfbuzz/src/hb-config.hh @@ -64,6 +64,7 @@ #define HB_NO_FACE_COLLECT_UNICODES #define HB_NO_GETENV #define HB_NO_HINTING +#define HB_NO_LANGUAGE_LONG #define HB_NO_LANGUAGE_PRIVATE_SUBTAG #define HB_NO_LAYOUT_FEATURE_PARAMS #define HB_NO_LAYOUT_COLLECT_GLYPHS @@ -145,10 +146,10 @@ #endif #ifdef HB_NO_OT_SHAPE_FALLBACK -#define HB_NO_OT_SHAPE_COMPLEX_ARABIC_FALLBACK -#define HB_NO_OT_SHAPE_COMPLEX_HEBREW_FALLBACK -#define HB_NO_OT_SHAPE_COMPLEX_THAI_FALLBACK -#define HB_NO_OT_SHAPE_COMPLEX_VOWEL_CONSTRAINTS +#define HB_NO_OT_SHAPER_ARABIC_FALLBACK +#define HB_NO_OT_SHAPER_HEBREW_FALLBACK +#define HB_NO_OT_SHAPER_THAI_FALLBACK +#define HB_NO_OT_SHAPER_VOWEL_CONSTRAINTS #endif #ifdef NDEBUG @@ -163,5 +164,9 @@ #endif #endif +#ifdef HB_OPTIMIZE_SIZE +#define HB_NO_OT_LAYOUT_LOOKUP_CACHE +#endif + #endif /* HB_CONFIG_HH */ diff --git a/thirdparty/harfbuzz/src/hb-coretext.cc b/thirdparty/harfbuzz/src/hb-coretext.cc index 6ccc1b0a2b..99b33c001e 100644 --- a/thirdparty/harfbuzz/src/hb-coretext.cc +++ b/thirdparty/harfbuzz/src/hb-coretext.cc @@ -332,7 +332,7 @@ _hb_coretext_shaper_font_data_create (hb_font_t *font) return nullptr; } - if (font->coords) + if (font->num_coords) { CFMutableDictionaryRef variations = CFDictionaryCreateMutable (kCFAllocatorDefault, @@ -379,37 +379,6 @@ _hb_coretext_shaper_font_data_destroy (hb_coretext_font_data_t *data) CFRelease ((CTFontRef) data); } -static const hb_coretext_font_data_t * -hb_coretext_font_data_sync (hb_font_t *font) -{ -retry: - const hb_coretext_font_data_t *data = font->data.coretext; - if (unlikely (!data)) return nullptr; - - if (fabs (CTFontGetSize ((CTFontRef) data) - (CGFloat) font->ptem) > (CGFloat) .5) - { - /* XXX-MT-bug - * Note that evaluating condition above can be dangerous if another thread - * got here first and destructed data. That's, as always, bad use pattern. - * If you modify the font (change font size), other threads must not be - * using it at the same time. However, since this check is delayed to - * when one actually tries to shape something, this is a XXX race condition - * (and the only one we have that I know of) right now. Ie. you modify the - * font size in one thread, then (supposedly safely) try to use it from two - * or more threads and BOOM! I'm not sure how to fix this. We want RCU. - */ - - /* Drop and recreate. */ - /* If someone dropped it in the mean time, throw it away and don't touch it. - * Otherwise, destruct it. 
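Back in hb-cache.hh, the template above gains defaults (`key_bits = 16`, `value_bits = 8 + 32 - key_bits`, `cache_bits = 8`), i.e. `hb_cache_t<>` is a 256-slot direct-mapped table whose 32-bit slots pack the upper key bits as a tag beside a 24-bit value. A sketch of that packing only; the real class additionally uses relaxed atomics to stay lock-free:

```cpp
#include <cstdint>

struct tiny_cache_t
{
  static constexpr unsigned cache_bits = 8, value_bits = 24;
  uint32_t slots[1u << cache_bits] = {};  /* tag | value packed per slot */

  bool get (unsigned key, unsigned *value) const
  {
    uint32_t v = slots[key & ((1u << cache_bits) - 1)];
    if ((v >> value_bits) != (key >> cache_bits))
      return false;  /* slot holds a different key */
    *value = v & ((1u << value_bits) - 1);
    return true;
  }

  void set (unsigned key, unsigned value)
  {
    slots[key & ((1u << cache_bits) - 1)] =
      ((uint32_t) (key >> cache_bits) << value_bits) | value;
  }
};
```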
*/ - if (likely (font->data.coretext.cmpexch (const_cast<hb_coretext_font_data_t *> (data), nullptr))) - _hb_coretext_shaper_font_data_destroy (const_cast<hb_coretext_font_data_t *> (data)); - else - goto retry; - } - return font->data.coretext; -} - /** * hb_coretext_font_create: * @ct_font: The CTFontRef to work upon @@ -455,8 +424,8 @@ hb_coretext_font_create (CTFontRef ct_font) CTFontRef hb_coretext_font_get_ct_font (hb_font_t *font) { - const hb_coretext_font_data_t *data = hb_coretext_font_data_sync (font); - return data ? (CTFontRef) data : nullptr; + CTFontRef ct_font = (CTFontRef) (const void *) font->data.coretext; + return ct_font ? (CTFontRef) ct_font : nullptr; } @@ -516,7 +485,7 @@ _hb_coretext_shape (hb_shape_plan_t *shape_plan, { hb_face_t *face = font->face; CGFontRef cg_font = (CGFontRef) (const void *) face->data.coretext; - CTFontRef ct_font = (CTFontRef) hb_coretext_font_data_sync (font); + CTFontRef ct_font = (CTFontRef) (const void *) font->data.coretext; CGFloat ct_font_size = CTFontGetSize (ct_font); CGFloat x_mult = (CGFloat) font->x_scale / ct_font_size; @@ -1106,7 +1075,8 @@ resize_and_retry: advance = positions[j + 1].x - positions[j].x; else /* last glyph */ advance = run_advance - (positions[j].x - positions[0].x); - info->mask = round (advance * x_mult); + /* int cast necessary to pass through negative values. */ + info->mask = (int) round (advance * x_mult); info->var1.i32 = x_offset; info->var2.i32 = round (positions[j].y * y_mult); info++; @@ -1122,7 +1092,8 @@ resize_and_retry: advance = positions[j + 1].y - positions[j].y; else /* last glyph */ advance = run_advance - (positions[j].y - positions[0].y); - info->mask = round (advance * y_mult); + /* int cast necessary to pass through negative values. */ + info->mask = (int) round (advance * y_mult); info->var1.i32 = round (positions[j].x * x_mult); info->var2.i32 = y_offset; info++; @@ -1151,7 +1122,7 @@ resize_and_retry: pos->x_offset = info->var1.i32; pos->y_offset = info->var2.i32; - info++, pos++; + info++; pos++; } else for (unsigned int i = 0; i < count; i++) @@ -1160,7 +1131,7 @@ resize_and_retry: pos->x_offset = info->var1.i32; pos->y_offset = info->var2.i32; - info++, pos++; + info++; pos++; } /* Fix up clusters so that we never return out-of-order indices; @@ -1173,7 +1144,8 @@ resize_and_retry: * This does *not* mean we'll form the same clusters as Uniscribe * or the native OT backend, only that the cluster indices will be * monotonic in the output buffer. */ - if (count > 1 && (status_or & kCTRunStatusNonMonotonic)) + if (count > 1 && (status_or & kCTRunStatusNonMonotonic) && + buffer->cluster_level != HB_BUFFER_CLUSTER_LEVEL_CHARACTERS) { hb_glyph_info_t *info = buffer->info; if (HB_DIRECTION_IS_FORWARD (buffer->props.direction)) @@ -1197,6 +1169,10 @@ resize_and_retry: } } + /* TODO: Sometimes the above positioning code generates negative + * advance values. Fix them up. Example, with NotoNastaliqUrdu + * font and sequence ابهد. */ + buffer->clear_glyph_flags (); buffer->unsafe_to_break (); diff --git a/thirdparty/harfbuzz/src/hb-cplusplus.hh b/thirdparty/harfbuzz/src/hb-cplusplus.hh new file mode 100644 index 0000000000..86d0452080 --- /dev/null +++ b/thirdparty/harfbuzz/src/hb-cplusplus.hh @@ -0,0 +1,192 @@ +/* + * Copyright © 2022 Behdad Esfahbod + * + * This is part of HarfBuzz, a text shaping library. 
+ * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. + * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + */ + +#ifndef HB_CPLUSPLUS_HH +#define HB_CPLUSPLUS_HH + +#include "hb.h" + +HB_BEGIN_DECLS +HB_END_DECLS + +#ifdef __cplusplus + +#include <functional> +#include <utility> + +#if 0 +#if !(__cplusplus >= 201103L) +#error "HarfBuzz C++ helpers require C++11" +#endif +#endif + +namespace hb { + + +template <typename T> +struct vtable; + +template <typename T> +struct shared_ptr +{ + using element_type = T; + + using v = vtable<T>; + + explicit shared_ptr (T *p = nullptr) : p (p) {} + shared_ptr (const shared_ptr &o) : p (v::reference (o.p)) {} + shared_ptr (shared_ptr &&o) : p (o.p) { o.p = nullptr; } + shared_ptr& operator = (const shared_ptr &o) { if (p != o.p) { destroy (); p = o.p; reference (); } return *this; } + shared_ptr& operator = (shared_ptr &&o) { v::destroy (p); p = o.p; o.p = nullptr; return *this; } + ~shared_ptr () { v::destroy (p); p = nullptr; } + + T* get() const { return p; } + + void swap (shared_ptr &o) { std::swap (p, o.p); } + friend void swap (shared_ptr &a, shared_ptr &b) { std::swap (a.p, b.p); } + + operator T * () const { return p; } + T& operator * () const { return *get (); } + T* operator -> () const { return get (); } + operator bool () { return p; } + bool operator == (const shared_ptr &o) { return p == o.p; } + bool operator != (const shared_ptr &o) { return p != o.p; } + + static T* get_empty() { return v::get_empty (); } + T* reference() { return v::reference (p); } + void destroy() { v::destroy (p); } + void set_user_data (hb_user_data_key_t *key, + void *value, + hb_destroy_func_t destroy, + hb_bool_t replace) { v::set_user_data (p, key, value, destroy, replace); } + void * get_user_data (hb_user_data_key_t *key) { return v::get_user_data (p, key); } + + private: + T *p; +}; + +template<typename T> struct is_shared_ptr : std::false_type {}; +template<typename T> struct is_shared_ptr<shared_ptr<T>> : std::true_type {}; + +template <typename T> +struct unique_ptr +{ + using element_type = T; + + using v = vtable<T>; + + explicit unique_ptr (T *p = nullptr) : p (p) {} + unique_ptr (const unique_ptr &o) = delete; + unique_ptr (unique_ptr &&o) : p (o.p) { o.p = nullptr; } + unique_ptr& operator = (const unique_ptr &o) = delete; + unique_ptr& operator = (unique_ptr &&o) { v::destroy (p); p = o.p; o.p = nullptr; return *this; } + ~unique_ptr () { v::destroy (p); p = nullptr; } + + T* get() const { return p; } + T* release () { T* v = p; p = nullptr; return v; } + + void swap (unique_ptr &o) { std::swap (p, o.p); } + friend void swap (unique_ptr &a, unique_ptr &b) { std::swap (a.p, b.p); } + 
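The hb-cplusplus.hh additions around this point give C++ users RAII wrappers over HarfBuzz's reference-counted objects: `hb::shared_ptr` and `hb::unique_ptr` dispatch through a per-type vtable to the matching `hb_*_reference ()` / `hb_*_destroy ()`. Typical use, assuming the header is on the include path:

```cpp
#include <hb.h>
#include <hb-cplusplus.hh>

static void
demo ()
{
  hb::unique_ptr<hb_buffer_t> buffer {hb_buffer_create ()};
  hb_buffer_add_utf8 (buffer, "hello", -1, 0, -1);  /* implicit T* conversion */

  hb::shared_ptr<hb_blob_t> blob {hb_blob_get_empty ()};
  hb::shared_ptr<hb_blob_t> copy = blob;            /* bumps the reference count */
}   /* scope exit runs hb_buffer_destroy () / hb_blob_destroy () */
```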
+ operator T * () const { return p; } + T& operator * () const { return *get (); } + T* operator -> () const { return get (); } + operator bool () { return p; } + + private: + T *p; +}; + +template<typename T> struct is_unique_ptr : std::false_type {}; +template<typename T> struct is_unique_ptr<unique_ptr<T>> : std::true_type {}; + +template <typename T, + T * (*_get_empty) (void), + T * (*_reference) (T *), + void (*_destroy) (T *), + hb_bool_t (*_set_user_data) (T *, + hb_user_data_key_t *, + void *, + hb_destroy_func_t, + hb_bool_t), + void * (*_get_user_data) (T *, + hb_user_data_key_t *)> +struct vtable_t +{ + static constexpr auto get_empty = _get_empty; + static constexpr auto reference = _reference; + static constexpr auto destroy = _destroy; + static constexpr auto set_user_data = _set_user_data; + static constexpr auto get_user_data = _get_user_data; +}; + +#define HB_DEFINE_VTABLE(name) \ + template<> \ + struct vtable<hb_##name##_t> \ + : vtable_t<hb_##name##_t, \ + &hb_##name##_get_empty, \ + &hb_##name##_reference, \ + &hb_##name##_destroy, \ + &hb_##name##_set_user_data, \ + &hb_##name##_get_user_data> {} + +HB_DEFINE_VTABLE (buffer); +HB_DEFINE_VTABLE (blob); +HB_DEFINE_VTABLE (face); +HB_DEFINE_VTABLE (font); +HB_DEFINE_VTABLE (font_funcs); +HB_DEFINE_VTABLE (map); +HB_DEFINE_VTABLE (set); +HB_DEFINE_VTABLE (shape_plan); +HB_DEFINE_VTABLE (unicode_funcs); + +#undef HB_DEFINE_VTABLE + + +} // namespace hb + +template<typename T> +struct std::hash<hb::shared_ptr<T>> +{ + std::size_t operator()(const hb::shared_ptr<T>& v) const noexcept + { + std::size_t h = std::hash<decltype (v.get ())>{}(v.get ()); + return h; + } +}; + +template<typename T> +struct std::hash<hb::unique_ptr<T>> +{ + std::size_t operator()(const hb::unique_ptr<T>& v) const noexcept + { + std::size_t h = std::hash<decltype (v.get ())>{}(v.get ()); + return h; + } +}; + + +#endif /* __cplusplus */ + +#endif /* HB_CPLUSPLUS_HH */ diff --git a/thirdparty/harfbuzz/src/hb-directwrite.cc b/thirdparty/harfbuzz/src/hb-directwrite.cc index f177ff31c0..ce04f5bab1 100644 --- a/thirdparty/harfbuzz/src/hb-directwrite.cc +++ b/thirdparty/harfbuzz/src/hb-directwrite.cc @@ -241,17 +241,12 @@ struct hb_directwrite_font_data_t {}; hb_directwrite_font_data_t * _hb_directwrite_shaper_font_data_create (hb_font_t *font) { - hb_directwrite_font_data_t *data = new hb_directwrite_font_data_t; - if (unlikely (!data)) - return nullptr; - - return data; + return (hb_directwrite_font_data_t *) HB_SHAPER_DATA_SUCCEEDED; } void _hb_directwrite_shaper_font_data_destroy (hb_directwrite_font_data_t *data) { - delete data; } diff --git a/thirdparty/harfbuzz/src/hb-draw.cc b/thirdparty/harfbuzz/src/hb-draw.cc index b31019b07e..fdc71102d6 100644 --- a/thirdparty/harfbuzz/src/hb-draw.cc +++ b/thirdparty/harfbuzz/src/hb-draw.cc @@ -56,12 +56,14 @@ hb_draw_quadratic_to_nil (hb_draw_funcs_t *dfuncs, void *draw_data, float to_x, float to_y, void *user_data HB_UNUSED) { +#define HB_ONE_THIRD 0.33333333f dfuncs->emit_cubic_to (draw_data, *st, - (st->current_x + 2.f * control_x) / 3.f, - (st->current_y + 2.f * control_y) / 3.f, - (to_x + 2.f * control_x) / 3.f, - (to_y + 2.f * control_y) / 3.f, + (st->current_x + 2.f * control_x) * HB_ONE_THIRD, + (st->current_y + 2.f * control_y) * HB_ONE_THIRD, + (to_x + 2.f * control_x) * HB_ONE_THIRD, + (to_y + 2.f * control_y) * HB_ONE_THIRD, to_x, to_y); +#undef HB_ONE_THIRD } static void @@ -89,25 +91,46 @@ hb_draw_funcs_set_##name##_func (hb_draw_funcs_t *dfuncs, \ if (hb_object_is_immutable (dfuncs)) \ return; 
\ \ - if (dfuncs->destroy.name) \ - dfuncs->destroy.name (dfuncs->user_data.name); \ - \ - if (func) { \ - dfuncs->func.name = func; \ - dfuncs->user_data.name = user_data; \ - dfuncs->destroy.name = destroy; \ - } else { \ - dfuncs->func.name = hb_draw_##name##_nil; \ - dfuncs->user_data.name = nullptr; \ - dfuncs->destroy.name = nullptr; \ - } \ + if (dfuncs->destroy && dfuncs->destroy->name) \ + dfuncs->destroy->name (!dfuncs->user_data ? nullptr : dfuncs->user_data->name); \ + \ + if (user_data && !dfuncs->user_data) \ + { \ + dfuncs->user_data = (decltype (dfuncs->user_data)) hb_calloc (1, sizeof (*dfuncs->user_data)); \ + if (unlikely (!dfuncs->user_data)) \ + goto fail; \ + } \ + if (destroy && !dfuncs->destroy) \ + { \ + dfuncs->destroy = (decltype (dfuncs->destroy)) hb_calloc (1, sizeof (*dfuncs->destroy)); \ + if (unlikely (!dfuncs->destroy)) \ + goto fail; \ + } \ + \ + if (func) { \ + dfuncs->func.name = func; \ + if (dfuncs->user_data) \ + dfuncs->user_data->name = user_data; \ + if (dfuncs->destroy) \ + dfuncs->destroy->name = destroy; \ + } else { \ + dfuncs->func.name = hb_draw_##name##_nil; \ + if (dfuncs->user_data) \ + dfuncs->user_data->name = nullptr; \ + if (dfuncs->destroy) \ + dfuncs->destroy->name = nullptr; \ + } \ + \ +fail: \ + if (destroy) \ + destroy (user_data); \ } HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS #undef HB_DRAW_FUNC_IMPLEMENT /** - * hb_draw_funcs_create: (Xconstructor) + * hb_draw_funcs_create: * * Creates a new draw callbacks object. * @@ -177,11 +200,13 @@ hb_draw_funcs_destroy (hb_draw_funcs_t *dfuncs) { if (!hb_object_destroy (dfuncs)) return; + if (dfuncs->destroy) + { #define HB_DRAW_FUNC_IMPLEMENT(name) \ - if (dfuncs->destroy.name) dfuncs->destroy.name (dfuncs->user_data.name); - HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS + if (dfuncs->destroy->name) dfuncs->destroy->name (!dfuncs->user_data ? nullptr : dfuncs->user_data->name); + HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS #undef HB_DRAW_FUNC_IMPLEMENT - + } hb_free (dfuncs); } diff --git a/thirdparty/harfbuzz/src/hb-draw.hh b/thirdparty/harfbuzz/src/hb-draw.hh index 28bc9218e1..768f51a875 100644 --- a/thirdparty/harfbuzz/src/hb-draw.hh +++ b/thirdparty/harfbuzz/src/hb-draw.hh @@ -54,31 +54,31 @@ struct hb_draw_funcs_t #define HB_DRAW_FUNC_IMPLEMENT(name) void *name; HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS #undef HB_DRAW_FUNC_IMPLEMENT - } user_data; + } *user_data; struct { #define HB_DRAW_FUNC_IMPLEMENT(name) hb_destroy_func_t name; HB_DRAW_FUNCS_IMPLEMENT_CALLBACKS #undef HB_DRAW_FUNC_IMPLEMENT - } destroy; + } *destroy; void emit_move_to (void *draw_data, hb_draw_state_t &st, float to_x, float to_y) { func.move_to (this, draw_data, &st, to_x, to_y, - user_data.move_to); } + !user_data ? nullptr : user_data->move_to); } void emit_line_to (void *draw_data, hb_draw_state_t &st, float to_x, float to_y) { func.line_to (this, draw_data, &st, to_x, to_y, - user_data.line_to); } + !user_data ? nullptr : user_data->line_to); } void emit_quadratic_to (void *draw_data, hb_draw_state_t &st, float control_x, float control_y, float to_x, float to_y) { func.quadratic_to (this, draw_data, &st, control_x, control_y, to_x, to_y, - user_data.quadratic_to); } + !user_data ? nullptr : user_data->quadratic_to); } void emit_cubic_to (void *draw_data, hb_draw_state_t &st, float control1_x, float control1_y, float control2_x, float control2_y, @@ -87,10 +87,10 @@ struct hb_draw_funcs_t control1_x, control1_y, control2_x, control2_y, to_x, to_y, - user_data.cubic_to); } + !user_data ? 
nullptr : user_data->cubic_to); } void emit_close_path (void *draw_data, hb_draw_state_t &st) { func.close_path (this, draw_data, &st, - user_data.close_path); } + !user_data ? nullptr : user_data->close_path); } void move_to (void *draw_data, hb_draw_state_t &st, diff --git a/thirdparty/harfbuzz/src/hb-face.cc b/thirdparty/harfbuzz/src/hb-face.cc index 5365598636..7cdd6cb49f 100644 --- a/thirdparty/harfbuzz/src/hb-face.cc +++ b/thirdparty/harfbuzz/src/hb-face.cc @@ -190,7 +190,7 @@ _hb_face_for_data_reference_table (hb_face_t *face HB_UNUSED, hb_tag_t tag, void } /** - * hb_face_create: (Xconstructor) + * hb_face_create: * @blob: #hb_blob_t to work upon * @index: The index of the face within @blob * @@ -342,7 +342,7 @@ hb_face_set_user_data (hb_face_t *face, * Since: 0.9.2 **/ void * -hb_face_get_user_data (const hb_face_t *face, +hb_face_get_user_data (hb_face_t *face, hb_user_data_key_t *key) { return hb_object_get_user_data (face, key); diff --git a/thirdparty/harfbuzz/src/hb-face.h b/thirdparty/harfbuzz/src/hb-face.h index 6ef2f8b886..3b7477a76c 100644 --- a/thirdparty/harfbuzz/src/hb-face.h +++ b/thirdparty/harfbuzz/src/hb-face.h @@ -96,7 +96,7 @@ hb_face_set_user_data (hb_face_t *face, hb_bool_t replace); HB_EXTERN void * -hb_face_get_user_data (const hb_face_t *face, +hb_face_get_user_data (hb_face_t *face, hb_user_data_key_t *key); HB_EXTERN void diff --git a/thirdparty/harfbuzz/src/hb-font.cc b/thirdparty/harfbuzz/src/hb-font.cc index 5700e06271..8c4fe863b6 100644 --- a/thirdparty/harfbuzz/src/hb-font.cc +++ b/thirdparty/harfbuzz/src/hb-font.cc @@ -636,16 +636,8 @@ DEFINE_NULL_INSTANCE (hb_font_funcs_t) = { HB_OBJECT_HEADER_STATIC, - { -#define HB_FONT_FUNC_IMPLEMENT(name) nullptr, - HB_FONT_FUNCS_IMPLEMENT_CALLBACKS -#undef HB_FONT_FUNC_IMPLEMENT - }, - { -#define HB_FONT_FUNC_IMPLEMENT(name) nullptr, - HB_FONT_FUNCS_IMPLEMENT_CALLBACKS -#undef HB_FONT_FUNC_IMPLEMENT - }, + nullptr, + nullptr, { { #define HB_FONT_FUNC_IMPLEMENT(name) hb_font_get_##name##_nil, @@ -658,16 +650,8 @@ DEFINE_NULL_INSTANCE (hb_font_funcs_t) = static const hb_font_funcs_t _hb_font_funcs_default = { HB_OBJECT_HEADER_STATIC, - { -#define HB_FONT_FUNC_IMPLEMENT(name) nullptr, - HB_FONT_FUNCS_IMPLEMENT_CALLBACKS -#undef HB_FONT_FUNC_IMPLEMENT - }, - { -#define HB_FONT_FUNC_IMPLEMENT(name) nullptr, - HB_FONT_FUNCS_IMPLEMENT_CALLBACKS -#undef HB_FONT_FUNC_IMPLEMENT - }, + nullptr, + nullptr, { { #define HB_FONT_FUNC_IMPLEMENT(name) hb_font_get_##name##_default, @@ -679,7 +663,7 @@ static const hb_font_funcs_t _hb_font_funcs_default = { /** - * hb_font_funcs_create: (Xconstructor) + * hb_font_funcs_create: * * Creates a new #hb_font_funcs_t structure of font functions. * @@ -746,10 +730,16 @@ hb_font_funcs_destroy (hb_font_funcs_t *ffuncs) { if (!hb_object_destroy (ffuncs)) return; -#define HB_FONT_FUNC_IMPLEMENT(name) if (ffuncs->destroy.name) \ - ffuncs->destroy.name (ffuncs->user_data.name); - HB_FONT_FUNCS_IMPLEMENT_CALLBACKS + if (ffuncs->destroy) + { +#define HB_FONT_FUNC_IMPLEMENT(name) if (ffuncs->destroy->name) \ + ffuncs->destroy->name (!ffuncs->user_data ? 
nullptr : ffuncs->user_data->name); + HB_FONT_FUNCS_IMPLEMENT_CALLBACKS #undef HB_FONT_FUNC_IMPLEMENT + } + + hb_free (ffuncs->destroy); + hb_free (ffuncs->user_data); hb_free (ffuncs); } @@ -841,24 +831,50 @@ hb_font_funcs_set_##name##_func (hb_font_funcs_t *ffuncs, \ hb_destroy_func_t destroy) \ { \ if (hb_object_is_immutable (ffuncs)) \ + goto fail; \ + \ + if (!func) \ { \ if (destroy) \ destroy (user_data); \ - return; \ + destroy = nullptr; \ + user_data = nullptr; \ } \ \ - if (ffuncs->destroy.name) \ - ffuncs->destroy.name (ffuncs->user_data.name); \ + if (ffuncs->destroy && ffuncs->destroy->name) \ + ffuncs->destroy->name (!ffuncs->user_data ? nullptr : ffuncs->user_data->name); \ + \ + if (user_data && !ffuncs->user_data) \ + { \ + ffuncs->user_data = (decltype (ffuncs->user_data)) hb_calloc (1, sizeof (*ffuncs->user_data)); \ + if (unlikely (!ffuncs->user_data)) \ + goto fail; \ + } \ + if (destroy && !ffuncs->destroy) \ + { \ + ffuncs->destroy = (decltype (ffuncs->destroy)) hb_calloc (1, sizeof (*ffuncs->destroy)); \ + if (unlikely (!ffuncs->destroy)) \ + goto fail; \ + } \ \ if (func) { \ ffuncs->get.f.name = func; \ - ffuncs->user_data.name = user_data; \ - ffuncs->destroy.name = destroy; \ + if (ffuncs->user_data) \ + ffuncs->user_data->name = user_data; \ + if (ffuncs->destroy) \ + ffuncs->destroy->name = destroy; \ } else { \ ffuncs->get.f.name = hb_font_get_##name##_default; \ - ffuncs->user_data.name = nullptr; \ - ffuncs->destroy.name = nullptr; \ + if (ffuncs->user_data) \ + ffuncs->user_data->name = nullptr; \ + if (ffuncs->destroy) \ + ffuncs->destroy->name = nullptr; \ } \ + return; \ + \ +fail: \ + if (destroy) \ + destroy (user_data); \ } HB_FONT_FUNCS_IMPLEMENT_CALLBACKS @@ -1623,6 +1639,9 @@ DEFINE_NULL_INSTANCE (hb_font_t) = { HB_OBJECT_HEADER_STATIC, + 0, /* serial */ + 0, /* serial_coords */ + nullptr, /* parent */ const_cast<hb_face_t *> (&_hb_Null_hb_face_t), @@ -1630,6 +1649,8 @@ DEFINE_NULL_INSTANCE (hb_font_t) = 1000, /* y_scale */ 0., /* slant */ 0., /* slant_xy; */ + 1.f, /* x_multf */ + 1.f, /* y_multf */ 1<<16, /* x_mult */ 1<<16, /* y_mult */ @@ -1662,14 +1683,15 @@ _hb_font_create (hb_face_t *face) font->face = hb_face_reference (face); font->klass = hb_font_funcs_get_empty (); font->data.init0 (font); - font->x_scale = font->y_scale = hb_face_get_upem (face); + font->x_scale = font->y_scale = face->get_upem (); + font->x_multf = font->y_multf = 1.f; font->x_mult = font->y_mult = 1 << 16; return font; } /** - * hb_font_create: (Xconstructor) + * hb_font_create: * @face: a face. * * Constructs a new font object from the specified face. @@ -1715,6 +1737,8 @@ _hb_font_adopt_var_coords (hb_font_t *font, font->coords = coords; font->design_coords = design_coords; font->num_coords = coords_length; + + font->mults_changed (); // Easiest to call this to drop cached data } /** @@ -1744,7 +1768,6 @@ hb_font_create_sub_font (hb_font_t *parent) font->x_scale = parent->x_scale; font->y_scale = parent->y_scale; font->slant = parent->slant; - font->mults_changed (); font->x_ppem = parent->x_ppem; font->y_ppem = parent->y_ppem; font->ptem = parent->ptem; @@ -1767,6 +1790,8 @@ hb_font_create_sub_font (hb_font_t *parent) } } + font->mults_changed (); + return font; } @@ -1852,6 +1877,9 @@ hb_font_set_user_data (hb_font_t *font, hb_destroy_func_t destroy /* May be NULL. 
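The `serial` / `serial_coords` counters added to `hb_font_t` above back the new `hb_font_get_serial ()` API documented below: every setter bumps the serial (and now returns early when the value is unchanged), so callers can key caches on a single integer instead of watching individual properties. A hypothetical client-side cache using it:

```cpp
#include <hb.h>
#include <map>

struct advance_cache_t
{
  unsigned int serial = (unsigned int) -1;
  std::map<hb_codepoint_t, hb_position_t> advances;

  hb_position_t get (hb_font_t *font, hb_codepoint_t glyph)
  {
    unsigned int s = hb_font_get_serial (font);
    if (s != serial)
    {
      advances.clear ();  /* scale, variations, funcs, ... changed */
      serial = s;
    }
    auto it = advances.find (glyph);
    if (it == advances.end ())
      it = advances.emplace (glyph, hb_font_get_glyph_h_advance (font, glyph)).first;
    return it->second;
  }
};
```

This is the same trick hb-ft.cc adopts further down, replacing its `cached_x_scale` check with a `cached_serial`.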
*/, hb_bool_t replace) { + if (!hb_object_is_immutable (font)) + font->serial++; + return hb_object_set_user_data (font, key, data, destroy, replace); } @@ -1911,6 +1939,45 @@ hb_font_is_immutable (hb_font_t *font) } /** + * hb_font_get_serial: + * @font: #hb_font_t to work upon + * + * Returns the internal serial number of the font. The serial + * number is increased every time a setting on the font is + * changed, using a setter function. + * + * Return value: serial number + * + * Since: 4.4.0. + **/ +unsigned int +hb_font_get_serial (hb_font_t *font) +{ + return font->serial; +} + +/** + * hb_font_changed: + * @font: #hb_font_t to work upon + * + * Notifies the @font that underlying font data has changed. + * This has the effect of increasing the serial as returned + * by hb_font_get_serial(), which invalidates internal caches. + * + * Since: 4.4.0. + **/ +void +hb_font_changed (hb_font_t *font) +{ + if (hb_object_is_immutable (font)) + return; + + font->serial++; + + font->mults_changed (); +} + +/** * hb_font_set_parent: * @font: #hb_font_t to work upon * @parent: The parent font object to assign @@ -1926,6 +1993,11 @@ hb_font_set_parent (hb_font_t *font, if (hb_object_is_immutable (font)) return; + if (parent == font->parent) + return; + + font->serial++; + if (!parent) parent = hb_font_get_empty (); @@ -1968,6 +2040,11 @@ hb_font_set_face (hb_font_t *font, if (hb_object_is_immutable (font)) return; + if (face == font->face) + return; + + font->serial++; + if (unlikely (!face)) face = hb_face_get_empty (); @@ -2022,6 +2099,8 @@ hb_font_set_funcs (hb_font_t *font, return; } + font->serial++; + if (font->destroy) font->destroy (font->user_data); @@ -2059,6 +2138,8 @@ hb_font_set_funcs_data (hb_font_t *font, return; } + font->serial++; + if (font->destroy) font->destroy (font->user_data); @@ -2085,6 +2166,11 @@ hb_font_set_scale (hb_font_t *font, if (hb_object_is_immutable (font)) return; + if (font->x_scale == x_scale && font->y_scale == y_scale) + return; + + font->serial++; + font->x_scale = x_scale; font->y_scale = y_scale; font->mults_changed (); @@ -2127,6 +2213,11 @@ hb_font_set_ppem (hb_font_t *font, if (hb_object_is_immutable (font)) return; + if (font->x_ppem == x_ppem && font->y_ppem == y_ppem) + return; + + font->serial++; + font->x_ppem = x_ppem; font->y_ppem = y_ppem; } @@ -2169,6 +2260,11 @@ hb_font_set_ptem (hb_font_t *font, if (hb_object_is_immutable (font)) return; + if (font->ptem == ptem) + return; + + font->serial++; + font->ptem = ptem; } @@ -2216,6 +2312,11 @@ hb_font_set_synthetic_slant (hb_font_t *font, float slant) if (hb_object_is_immutable (font)) return; + if (font->slant == slant) + return; + + font->serial++; + font->slant = slant; font->mults_changed (); } @@ -2263,6 +2364,8 @@ hb_font_set_variations (hb_font_t *font, if (hb_object_is_immutable (font)) return; + font->serial_coords = ++font->serial; + if (!variations_length) { hb_font_set_var_coords_normalized (font, nullptr, 0); @@ -2322,6 +2425,8 @@ hb_font_set_var_coords_design (hb_font_t *font, if (hb_object_is_immutable (font)) return; + font->serial_coords = ++font->serial; + int *normalized = coords_length ? (int *) hb_calloc (coords_length, sizeof (int)) : nullptr; float *design_coords = coords_length ? 
(float *) hb_calloc (coords_length, sizeof (float)) : nullptr; @@ -2355,6 +2460,8 @@ hb_font_set_var_named_instance (hb_font_t *font, if (hb_object_is_immutable (font)) return; + font->serial_coords = ++font->serial; + unsigned int coords_length = hb_ot_var_named_instance_get_design_coords (font->face, instance_index, nullptr, nullptr); float *coords = coords_length ? (float *) hb_calloc (coords_length, sizeof (float)) : nullptr; @@ -2391,6 +2498,8 @@ hb_font_set_var_coords_normalized (hb_font_t *font, if (hb_object_is_immutable (font)) return; + font->serial_coords = ++font->serial; + int *copy = coords_length ? (int *) hb_calloc (coords_length, sizeof (coords[0])) : nullptr; int *unmapped = coords_length ? (int *) hb_calloc (coords_length, sizeof (coords[0])) : nullptr; float *design_coords = coords_length ? (float *) hb_calloc (coords_length, sizeof (design_coords[0])) : nullptr; diff --git a/thirdparty/harfbuzz/src/hb-font.h b/thirdparty/harfbuzz/src/hb-font.h index 9548857535..ca6ecf7963 100644 --- a/thirdparty/harfbuzz/src/hb-font.h +++ b/thirdparty/harfbuzz/src/hb-font.h @@ -1002,6 +1002,12 @@ hb_font_make_immutable (hb_font_t *font); HB_EXTERN hb_bool_t hb_font_is_immutable (hb_font_t *font); +HB_EXTERN unsigned int +hb_font_get_serial (hb_font_t *font); + +HB_EXTERN void +hb_font_changed (hb_font_t *font); + HB_EXTERN void hb_font_set_parent (hb_font_t *font, hb_font_t *parent); diff --git a/thirdparty/harfbuzz/src/hb-font.hh b/thirdparty/harfbuzz/src/hb-font.hh index 70311b4a85..bb402e23eb 100644 --- a/thirdparty/harfbuzz/src/hb-font.hh +++ b/thirdparty/harfbuzz/src/hb-font.hh @@ -68,13 +68,13 @@ struct hb_font_funcs_t #define HB_FONT_FUNC_IMPLEMENT(name) void *name; HB_FONT_FUNCS_IMPLEMENT_CALLBACKS #undef HB_FONT_FUNC_IMPLEMENT - } user_data; + } *user_data; struct { #define HB_FONT_FUNC_IMPLEMENT(name) hb_destroy_func_t name; HB_FONT_FUNCS_IMPLEMENT_CALLBACKS #undef HB_FONT_FUNC_IMPLEMENT - } destroy; + } *destroy; /* Don't access these directly. Call font->get_*() instead. */ union get_t { @@ -104,6 +104,8 @@ DECLARE_NULL_INSTANCE (hb_font_funcs_t); struct hb_font_t { hb_object_header_t header; + unsigned int serial; + unsigned int serial_coords; hb_font_t *parent; hb_face_t *face; @@ -112,6 +114,8 @@ struct hb_font_t int32_t y_scale; float slant; float slant_xy; + float x_multf; + float y_multf; int64_t x_mult; int64_t y_mult; @@ -137,12 +141,12 @@ struct hb_font_t { return HB_DIRECTION_IS_VERTICAL(direction) ? 
y_mult : x_mult; } hb_position_t em_scale_x (int16_t v) { return em_mult (v, x_mult); } hb_position_t em_scale_y (int16_t v) { return em_mult (v, y_mult); } - hb_position_t em_scalef_x (float v) { return em_scalef (v, x_scale); } - hb_position_t em_scalef_y (float v) { return em_scalef (v, y_scale); } - float em_fscale_x (int16_t v) { return em_fscale (v, x_scale); } - float em_fscale_y (int16_t v) { return em_fscale (v, y_scale); } - float em_fscalef_x (float v) { return em_fscalef (v, x_scale); } - float em_fscalef_y (float v) { return em_fscalef (v, y_scale); } + hb_position_t em_scalef_x (float v) { return em_multf (v, x_multf); } + hb_position_t em_scalef_y (float v) { return em_multf (v, y_multf); } + float em_fscale_x (int16_t v) { return em_fmult (v, x_multf); } + float em_fscale_y (int16_t v) { return em_fmult (v, y_multf); } + float em_fscalef_x (float v) { return em_fmultf (v, x_multf); } + float em_fscalef_y (float v) { return em_fmultf (v, y_multf); } hb_position_t em_scale_dir (int16_t v, hb_direction_t direction) { return em_mult (v, dir_mult (direction)); } @@ -205,14 +209,14 @@ struct hb_font_t memset (extents, 0, sizeof (*extents)); return klass->get.f.font_h_extents (this, user_data, extents, - klass->user_data.font_h_extents); + !klass->user_data ? nullptr : klass->user_data->font_h_extents); } hb_bool_t get_font_v_extents (hb_font_extents_t *extents) { memset (extents, 0, sizeof (*extents)); return klass->get.f.font_v_extents (this, user_data, extents, - klass->user_data.font_v_extents); + !klass->user_data ? nullptr : klass->user_data->font_v_extents); } bool has_glyph (hb_codepoint_t unicode) @@ -228,7 +232,7 @@ struct hb_font_t *glyph = not_found; return klass->get.f.nominal_glyph (this, user_data, unicode, glyph, - klass->user_data.nominal_glyph); + !klass->user_data ? nullptr : klass->user_data->nominal_glyph); } unsigned int get_nominal_glyphs (unsigned int count, const hb_codepoint_t *first_unicode, @@ -240,7 +244,7 @@ struct hb_font_t count, first_unicode, unicode_stride, first_glyph, glyph_stride, - klass->user_data.nominal_glyphs); + !klass->user_data ? nullptr : klass->user_data->nominal_glyphs); } hb_bool_t get_variation_glyph (hb_codepoint_t unicode, hb_codepoint_t variation_selector, @@ -250,21 +254,21 @@ struct hb_font_t *glyph = not_found; return klass->get.f.variation_glyph (this, user_data, unicode, variation_selector, glyph, - klass->user_data.variation_glyph); + !klass->user_data ? nullptr : klass->user_data->variation_glyph); } hb_position_t get_glyph_h_advance (hb_codepoint_t glyph) { return klass->get.f.glyph_h_advance (this, user_data, glyph, - klass->user_data.glyph_h_advance); + !klass->user_data ? nullptr : klass->user_data->glyph_h_advance); } hb_position_t get_glyph_v_advance (hb_codepoint_t glyph) { return klass->get.f.glyph_v_advance (this, user_data, glyph, - klass->user_data.glyph_v_advance); + !klass->user_data ? nullptr : klass->user_data->glyph_v_advance); } void get_glyph_h_advances (unsigned int count, @@ -277,7 +281,7 @@ struct hb_font_t count, first_glyph, glyph_stride, first_advance, advance_stride, - klass->user_data.glyph_h_advances); + !klass->user_data ? nullptr : klass->user_data->glyph_h_advances); } void get_glyph_v_advances (unsigned int count, @@ -290,7 +294,7 @@ struct hb_font_t count, first_glyph, glyph_stride, first_advance, advance_stride, - klass->user_data.glyph_v_advances); + !klass->user_data ? 
nullptr : klass->user_data->glyph_v_advances); } hb_bool_t get_glyph_h_origin (hb_codepoint_t glyph, @@ -299,7 +303,7 @@ struct hb_font_t *x = *y = 0; return klass->get.f.glyph_h_origin (this, user_data, glyph, x, y, - klass->user_data.glyph_h_origin); + !klass->user_data ? nullptr : klass->user_data->glyph_h_origin); } hb_bool_t get_glyph_v_origin (hb_codepoint_t glyph, @@ -308,7 +312,7 @@ struct hb_font_t *x = *y = 0; return klass->get.f.glyph_v_origin (this, user_data, glyph, x, y, - klass->user_data.glyph_v_origin); + !klass->user_data ? nullptr : klass->user_data->glyph_v_origin); } hb_position_t get_glyph_h_kerning (hb_codepoint_t left_glyph, @@ -319,7 +323,7 @@ struct hb_font_t #else return klass->get.f.glyph_h_kerning (this, user_data, left_glyph, right_glyph, - klass->user_data.glyph_h_kerning); + !klass->user_data ? nullptr : klass->user_data->glyph_h_kerning); #endif } @@ -331,7 +335,7 @@ struct hb_font_t #else return klass->get.f.glyph_v_kerning (this, user_data, top_glyph, bottom_glyph, - klass->user_data.glyph_v_kerning); + !klass->user_data ? nullptr : klass->user_data->glyph_v_kerning); #endif } @@ -342,7 +346,7 @@ struct hb_font_t return klass->get.f.glyph_extents (this, user_data, glyph, extents, - klass->user_data.glyph_extents); + !klass->user_data ? nullptr : klass->user_data->glyph_extents); } hb_bool_t get_glyph_contour_point (hb_codepoint_t glyph, unsigned int point_index, @@ -352,7 +356,7 @@ struct hb_font_t return klass->get.f.glyph_contour_point (this, user_data, glyph, point_index, x, y, - klass->user_data.glyph_contour_point); + !klass->user_data ? nullptr : klass->user_data->glyph_contour_point); } hb_bool_t get_glyph_name (hb_codepoint_t glyph, @@ -362,7 +366,7 @@ struct hb_font_t return klass->get.f.glyph_name (this, user_data, glyph, name, size, - klass->user_data.glyph_name); + !klass->user_data ? nullptr : klass->user_data->glyph_name); } hb_bool_t get_glyph_from_name (const char *name, int len, /* -1 means nul-terminated */ @@ -373,7 +377,7 @@ struct hb_font_t return klass->get.f.glyph_from_name (this, user_data, name, len, glyph, - klass->user_data.glyph_from_name); + !klass->user_data ? nullptr : klass->user_data->glyph_from_name); } void get_glyph_shape (hb_codepoint_t glyph, @@ -382,7 +386,7 @@ struct hb_font_t klass->get.f.glyph_shape (this, user_data, glyph, draw_funcs, draw_data, - klass->user_data.glyph_shape); + !klass->user_data ? nullptr : klass->user_data->glyph_shape); } @@ -444,7 +448,6 @@ struct hb_font_t { *x = get_glyph_h_advance (glyph) / 2; - /* TODO cache this somehow?! */ hb_font_extents_t extents; get_h_extents_with_fallback (&extents); *y = extents.ascender; @@ -628,20 +631,26 @@ struct hb_font_t void mults_changed () { - signed upem = face->get_upem (); - x_mult = ((int64_t) x_scale << 16) / upem; - y_mult = ((int64_t) y_scale << 16) / upem; + float upem = face->get_upem (); + x_multf = x_scale / upem; + y_multf = y_scale / upem; + bool x_neg = x_scale < 0; + x_mult = (x_neg ? -((int64_t) -x_scale << 16) : ((int64_t) x_scale << 16)) / upem; + bool y_neg = y_scale < 0; + y_mult = (y_neg ? -((int64_t) -y_scale << 16) : ((int64_t) y_scale << 16)) / upem; slant_xy = y_scale ? 
slant * x_scale / y_scale : 0.f; + + data.fini (); } hb_position_t em_mult (int16_t v, int64_t mult) { return (hb_position_t) ((v * mult + 32768) >> 16); } - hb_position_t em_scalef (float v, int scale) - { return (hb_position_t) roundf (em_fscalef (v, scale)); } - float em_fscalef (float v, int scale) - { return v * scale / face->get_upem (); } - float em_fscale (int16_t v, int scale) - { return (float) v * scale / face->get_upem (); } + hb_position_t em_multf (float v, float mult) + { return (hb_position_t) roundf (em_fmultf (v, mult)); } + float em_fmultf (float v, float mult) + { return v * mult; } + float em_fmult (int16_t v, float mult) + { return (float) v * mult; } }; DECLARE_NULL_INSTANCE (hb_font_t); diff --git a/thirdparty/harfbuzz/src/hb-ft.cc b/thirdparty/harfbuzz/src/hb-ft.cc index d3c5d3db93..b526a82865 100644 --- a/thirdparty/harfbuzz/src/hb-ft.cc +++ b/thirdparty/harfbuzz/src/hb-ft.cc @@ -37,6 +37,8 @@ #include "hb-font.hh" #include "hb-machinery.hh" #include "hb-cache.hh" +#include "hb-ot-os2-table.hh" +#include "hb-ot-shaper-arabic-pua.hh" #include FT_ADVANCES_H #include FT_MULTIPLE_MASTERS_H @@ -86,7 +88,7 @@ struct hb_ft_font_t mutable hb_mutex_t lock; FT_Face ft_face; - mutable int cached_x_scale; + mutable unsigned cached_serial; mutable hb_advance_cache_t advance_cache; }; @@ -103,7 +105,7 @@ _hb_ft_font_create (FT_Face ft_face, bool symbol, bool unref) ft_font->load_flags = FT_LOAD_DEFAULT | FT_LOAD_NO_HINTING; - ft_font->cached_x_scale = 0; + ft_font->cached_serial = (unsigned) -1; ft_font->advance_cache.init (); return ft_font; @@ -130,6 +132,58 @@ _hb_ft_font_destroy (void *data) hb_free (ft_font); } + +/* hb_font changed, update FT_Face. */ +static void _hb_ft_hb_font_changed (hb_font_t *font, FT_Face ft_face) +{ + + FT_Set_Char_Size (ft_face, + abs (font->x_scale), abs (font->y_scale), + 0, 0); +#if 0 + font->x_ppem * 72 * 64 / font->x_scale, + font->y_ppem * 72 * 64 / font->y_scale); +#endif + if (font->x_scale < 0 || font->y_scale < 0) + { + FT_Matrix matrix = { font->x_scale < 0 ? -1 : +1, 0, + 0, font->y_scale < 0 ? -1 : +1}; + FT_Set_Transform (ft_face, &matrix, nullptr); + } + +#if defined(HAVE_FT_GET_VAR_BLEND_COORDINATES) && !defined(HB_NO_VAR) + unsigned int num_coords; + const int *coords = hb_font_get_var_coords_normalized (font, &num_coords); + if (num_coords) + { + FT_Fixed *ft_coords = (FT_Fixed *) hb_calloc (num_coords, sizeof (FT_Fixed)); + if (ft_coords) + { + for (unsigned int i = 0; i < num_coords; i++) + ft_coords[i] = coords[i] * 4; + FT_Set_Var_Blend_Coordinates (ft_face, num_coords, ft_coords); + hb_free (ft_coords); + } + } +#endif +} + +/* Check if hb_font changed, update FT_Face. 
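The rewritten `mults_changed ()` above precomputes float multipliers (`x_multf = x_scale / upem`) alongside the 16.16 fixed-point ones, and negates negative scales before the `<< 16` since left-shifting a negative value is undefined behaviour in C++. A worked example of the fixed-point path (illustrative numbers; integer division as in the non-negative case):

```cpp
#include <cstdint>
#include <cstdio>

int main ()
{
  /* upem = 1000, x_scale = 2048: */
  int64_t x_mult = ((int64_t) 2048 << 16) / 1000;  /* 134217728 / 1000 = 134217 */

  /* em_mult () then scales a font unit with one multiply, add, and shift: */
  int32_t pos = (int32_t) ((500 * x_mult + 32768) >> 16);  /* 500 units -> 1024 */

  printf ("x_mult = %lld, pos = %ld\n", (long long) x_mult, (long) pos);
  return 0;
}
```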
*/ +static inline bool +_hb_ft_hb_font_check_changed (hb_font_t *font, + const hb_ft_font_t *ft_font) +{ + if (font->serial != ft_font->cached_serial) + { + _hb_ft_hb_font_changed (font, ft_font->ft_face); + ft_font->advance_cache.clear (); + ft_font->cached_serial = font->serial; + return true; + } + return false; +} + + /** * hb_ft_font_set_load_flags: * @font: #hb_font_t to work upon @@ -181,7 +235,7 @@ hb_ft_font_get_load_flags (hb_font_t *font) } /** - * hb_ft_font_get_face: + * hb_ft_font_get_face: (skip) * @font: #hb_font_t to work upon * * Fetches the FT_Face associated with the specified #hb_font_t @@ -203,7 +257,7 @@ hb_ft_font_get_face (hb_font_t *font) } /** - * hb_ft_font_lock_face: + * hb_ft_font_lock_face: (skip) * @font: #hb_font_t to work upon * * Gets the FT_Face associated with @font, This face will be kept around until @@ -246,7 +300,7 @@ hb_ft_font_unlock_face (hb_font_t *font) static hb_bool_t -hb_ft_get_nominal_glyph (hb_font_t *font HB_UNUSED, +hb_ft_get_nominal_glyph (hb_font_t *font, void *font_data, hb_codepoint_t unicode, hb_codepoint_t *glyph, @@ -258,14 +312,29 @@ hb_ft_get_nominal_glyph (hb_font_t *font HB_UNUSED, if (unlikely (!g)) { - if (unlikely (ft_font->symbol) && unicode <= 0x00FFu) + if (unlikely (ft_font->symbol)) { - /* For symbol-encoded OpenType fonts, we duplicate the - * U+F000..F0FF range at U+0000..U+00FF. That's what - * Windows seems to do, and that's hinted about at: - * https://docs.microsoft.com/en-us/typography/opentype/spec/recom - * under "Non-Standard (Symbol) Fonts". */ - g = FT_Get_Char_Index (ft_font->ft_face, 0xF000u + unicode); + switch ((unsigned) font->face->table.OS2->get_font_page ()) { + case OT::OS2::font_page_t::FONT_PAGE_NONE: + if (unicode <= 0x00FFu) + /* For symbol-encoded OpenType fonts, we duplicate the + * U+F000..F0FF range at U+0000..U+00FF. That's what + * Windows seems to do, and that's hinted about at: + * https://docs.microsoft.com/en-us/typography/opentype/spec/recom + * under "Non-Standard (Symbol) Fonts". */ + g = FT_Get_Char_Index (ft_font->ft_face, 0xF000u + unicode); + break; +#ifndef HB_NO_OT_SHAPER_ARABIC_FALLBACK + case OT::OS2::font_page_t::FONT_PAGE_SIMP_ARABIC: + g = FT_Get_Char_Index (ft_font->ft_face, _hb_arabic_pua_simp_map (unicode)); + break; + case OT::OS2::font_page_t::FONT_PAGE_TRAD_ARABIC: + g = FT_Get_Char_Index (ft_font->ft_face, _hb_arabic_pua_trad_map (unicode)); + break; +#endif + default: + break; + } if (!g) return false; } @@ -337,12 +406,6 @@ hb_ft_get_glyph_h_advances (hb_font_t* font, void* font_data, int load_flags = ft_font->load_flags; int mult = font->x_scale < 0 ? -1 : +1; - if (font->x_scale != ft_font->cached_x_scale) - { - ft_font->advance_cache.clear (); - ft_font->cached_x_scale = font->x_scale; - } - for (unsigned int i = 0; i < count; i++) { FT_Fixed v = 0; @@ -426,6 +489,7 @@ hb_ft_get_glyph_h_kerning (hb_font_t *font, void *user_data HB_UNUSED) { const hb_ft_font_t *ft_font = (const hb_ft_font_t *) font_data; + hb_lock_t lock (ft_font->lock); FT_Vector kerningv; FT_Kerning_Mode mode = font->x_ppem ? 
FT_KERNING_DEFAULT : FT_KERNING_UNFITTED; @@ -556,6 +620,7 @@ hb_ft_get_font_h_extents (hb_font_t *font HB_UNUSED, const hb_ft_font_t *ft_font = (const hb_ft_font_t *) font_data; hb_lock_t lock (ft_font->lock); FT_Face ft_face = ft_font->ft_face; + metrics->ascender = FT_MulFix(ft_face->ascender, ft_face->size->metrics.y_scale); metrics->descender = FT_MulFix(ft_face->descender, ft_face->size->metrics.y_scale); metrics->line_gap = FT_MulFix( ft_face->height, ft_face->size->metrics.y_scale ) - (metrics->ascender - metrics->descender); @@ -619,6 +684,8 @@ hb_ft_get_glyph_shape (hb_font_t *font HB_UNUSED, hb_lock_t lock (ft_font->lock); FT_Face ft_face = ft_font->ft_face; + _hb_ft_hb_font_check_changed (font, ft_font); + if (unlikely (FT_Load_Glyph (ft_face, glyph, FT_LOAD_NO_BITMAP | ft_font->load_flags))) return; @@ -964,6 +1031,31 @@ hb_ft_font_changed (hb_font_t *font) } /** + * hb_ft_hb_font_changed: + * @font: #hb_font_t to work upon + * + * Refreshes the state of the underlying FT_Face of @font when the hb_font_t + * @font has changed. + * This function should be called after changing the size or + * variation-axis settings on the @font. + * This call is fast if nothing has changed on @font. + * + * Return value: true if changed, false otherwise + * + * Since: 4.4.0 + **/ +hb_bool_t +hb_ft_hb_font_changed (hb_font_t *font) +{ + if (font->destroy != (hb_destroy_func_t) _hb_ft_font_destroy) + return false; + + hb_ft_font_t *ft_font = (hb_ft_font_t *) font->user_data; + + return _hb_ft_hb_font_check_changed (font, ft_font); +} + +/** * hb_ft_font_create_referenced: * @ft_face: FT_Face to work upon * @@ -1081,35 +1173,7 @@ hb_ft_font_set_funcs (hb_font_t *font) if (FT_Select_Charmap (ft_face, FT_ENCODING_MS_SYMBOL)) FT_Select_Charmap (ft_face, FT_ENCODING_UNICODE); - FT_Set_Char_Size (ft_face, - abs (font->x_scale), abs (font->y_scale), - 0, 0); -#if 0 - font->x_ppem * 72 * 64 / font->x_scale, - font->y_ppem * 72 * 64 / font->y_scale); -#endif - if (font->x_scale < 0 || font->y_scale < 0) - { - FT_Matrix matrix = { font->x_scale < 0 ? -1 : +1, 0, - 0, font->y_scale < 0 ? -1 : +1}; - FT_Set_Transform (ft_face, &matrix, nullptr); - } - -#if defined(HAVE_FT_GET_VAR_BLEND_COORDINATES) && !defined(HB_NO_VAR) - unsigned int num_coords; - const int *coords = hb_font_get_var_coords_normalized (font, &num_coords); - if (num_coords) - { - FT_Fixed *ft_coords = (FT_Fixed *) hb_calloc (num_coords, sizeof (FT_Fixed)); - if (ft_coords) - { - for (unsigned int i = 0; i < num_coords; i++) - ft_coords[i] = coords[i] * 4; - FT_Set_Var_Blend_Coordinates (ft_face, num_coords, ft_coords); - hb_free (ft_coords); - } - } -#endif + _hb_ft_hb_font_changed (font, ft_face); ft_face->generic.data = blob; ft_face->generic.finalizer = (FT_Generic_Finalizer) _release_blob; diff --git a/thirdparty/harfbuzz/src/hb-ft.h b/thirdparty/harfbuzz/src/hb-ft.h index bf07115ab9..6a8a7abe8c 100644 --- a/thirdparty/harfbuzz/src/hb-ft.h +++ b/thirdparty/harfbuzz/src/hb-ft.h @@ -122,10 +122,17 @@ hb_ft_font_set_load_flags (hb_font_t *font, int load_flags); HB_EXTERN int hb_ft_font_get_load_flags (hb_font_t *font); -/* Call when size or variations settings on underlying FT_Face change. */ +/* Call when size or variations settings on underlying FT_Face changed, + * and you want to update the hb_font_t from it. */ HB_EXTERN void hb_ft_font_changed (hb_font_t *font); +/* Call when size or variations settings on underlying hb_font_t may have + * changed, and you want to update the FT_Face from it. 
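Taken together with hb_ft_font_changed above, the two calls give round-trip synchronization between an hb_font_t and its FT_Face. A minimal usage sketch of the new direction (assumes an already-opened FT_Face and omits error handling; per the FT_Set_Char_Size call in hb-ft.cc, the hb_font_t scale is consumed as 26.6 character size):

#include <hb.h>
#include <hb-ft.h>

void resize_and_resync (FT_Face ft_face)
{
  hb_font_t *font = hb_ft_font_create_referenced (ft_face);

  // Change the size on the HarfBuzz side...
  hb_font_set_scale (font, 24 * 64, 24 * 64);

  // ...then push the change down to the FT_Face. Cheap if nothing changed,
  // thanks to the serial check in _hb_ft_hb_font_check_changed().
  hb_ft_hb_font_changed (font);

  hb_font_destroy (font);
}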
This call is fast + * if nothing changed on hb_font_t. Returns true if changed. */ +HB_EXTERN hb_bool_t +hb_ft_hb_font_changed (hb_font_t *font); + /* Makes an hb_font_t use FreeType internally to implement font functions. * Note: this internally creates an FT_Face. Use it when you create your * hb_face_t using hb_face_create(). */ diff --git a/thirdparty/harfbuzz/src/hb-iter.hh b/thirdparty/harfbuzz/src/hb-iter.hh index 43a3098f65..d4461f166c 100644 --- a/thirdparty/harfbuzz/src/hb-iter.hh +++ b/thirdparty/harfbuzz/src/hb-iter.hh @@ -43,17 +43,12 @@ * is writable, then the iterator returns lvalues, otherwise it * returns rvalues. * - * TODO Document more. - * - * If iterator implementation implements operator!=, then can be + * If iterator implementation implements operator!=, then it can be * used in range-based for loop. That already happens if the iterator * is random-access. Otherwise, the range-based for loop incurs * one traversal to find end(), which can be avoided if written * as a while-style for loop, or if iterator implements a faster - * __end__() method. - * TODO When opting in for C++17, address this by changing return - * type of .end()? - */ + * __end__() method. */ /* * Base classes for iterators. @@ -75,10 +70,6 @@ struct hb_iter_t iter_t* thiz () { return static_cast< iter_t *> (this); } public: - /* TODO: - * Port operators below to use hb_enable_if to sniff which method implements - * an operator and use it, and remove hb_iter_fallback_mixin_t completely. */ - /* Operators. */ iter_t iter () const { return *thiz(); } iter_t operator + () const { return *thiz(); } @@ -87,8 +78,7 @@ struct hb_iter_t explicit operator bool () const { return thiz()->__more__ (); } unsigned len () const { return thiz()->__len__ (); } /* The following can only be enabled if item_t is reference type. Otherwise - * it will be returning pointer to temporary rvalue. - * TODO Use a wrapper return type to fix for non-reference type. */ + * it will be returning pointer to temporary rvalue. */ template <typename T = item_t, hb_enable_if (std::is_reference<T>::value)> hb_remove_reference<item_t>* operator -> () const { return std::addressof (**thiz()); } diff --git a/thirdparty/harfbuzz/src/hb-machinery.hh b/thirdparty/harfbuzz/src/hb-machinery.hh index e52a6a4124..ff2a99f5ed 100644 --- a/thirdparty/harfbuzz/src/hb-machinery.hh +++ b/thirdparty/harfbuzz/src/hb-machinery.hh @@ -176,7 +176,7 @@ struct hb_lazy_loader_t : hb_data_wrapper_t<Data, WheresData> void init0 () {} /* Init, when memory is already set to 0. No-op for us. */ void init () { instance.set_relaxed (nullptr); } - void fini () { do_destroy (instance.get ()); } + void fini () { do_destroy (instance.get ()); init (); } void free_instance () { diff --git a/thirdparty/harfbuzz/src/hb-map.cc b/thirdparty/harfbuzz/src/hb-map.cc index 6c83c670c9..089615c72a 100644 --- a/thirdparty/harfbuzz/src/hb-map.cc +++ b/thirdparty/harfbuzz/src/hb-map.cc @@ -40,7 +40,7 @@ /** - * hb_map_create: (Xconstructor) + * hb_map_create: * * Creates a new, initially empty map. * @@ -172,6 +172,25 @@ hb_map_allocation_successful (const hb_map_t *map) return map->successful; } +/** + * hb_map_copy: + * @map: A map + * + * Allocate a copy of @map. + * + * Return value: Newly-allocated map. 
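A short usage sketch of the new map entry points (hb_map_hash is added a few hunks below); everything else is the pre-existing HarfBuzz C API:

#include <hb.h>
#include <cassert>

int main ()
{
  hb_map_t *map = hb_map_create ();
  hb_map_set (map, 1, 100);
  hb_map_set (map, 2, 200);

  hb_map_t *copy = hb_map_copy (map);               // new in 4.4.0
  assert (hb_map_is_equal (map, copy));
  assert (hb_map_hash (map) == hb_map_hash (copy)); // new in 4.4.0

  hb_map_destroy (copy);
  hb_map_destroy (map);
}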
+ * + * Since: 4.4.0 + **/ +hb_map_t * +hb_map_copy (const hb_map_t *map) +{ + hb_map_t *copy = hb_map_create (); + if (unlikely (!copy)) return nullptr; + copy->resize (map->population); + hb_copy (*map, *copy); + return copy; +} /** * hb_map_set: @@ -309,3 +328,20 @@ hb_map_is_equal (const hb_map_t *map, return map->is_equal (*other); } +/** + * hb_map_hash: + * @map: A map + * + * Creates a hash representing @map. + * + * Return value: + * A hash of @map. + * + * Since: 4.4.0 + **/ +HB_EXTERN unsigned int +hb_map_hash (const hb_map_t *map) +{ + return map->hash (); +} + diff --git a/thirdparty/harfbuzz/src/hb-map.h b/thirdparty/harfbuzz/src/hb-map.h index 3f67c50b92..41d877d6a1 100644 --- a/thirdparty/harfbuzz/src/hb-map.h +++ b/thirdparty/harfbuzz/src/hb-map.h @@ -82,6 +82,9 @@ hb_map_get_user_data (hb_map_t *map, HB_EXTERN hb_bool_t hb_map_allocation_successful (const hb_map_t *map); +HB_EXTERN hb_map_t * +hb_map_copy (const hb_map_t *map); + HB_EXTERN void hb_map_clear (hb_map_t *map); @@ -95,6 +98,9 @@ HB_EXTERN hb_bool_t hb_map_is_equal (const hb_map_t *map, const hb_map_t *other); +HB_EXTERN unsigned int +hb_map_hash (const hb_map_t *map); + HB_EXTERN void hb_map_set (hb_map_t *map, hb_codepoint_t key, diff --git a/thirdparty/harfbuzz/src/hb-map.hh b/thirdparty/harfbuzz/src/hb-map.hh index aec7d87f42..5efad8d20c 100644 --- a/thirdparty/harfbuzz/src/hb-map.hh +++ b/thirdparty/harfbuzz/src/hb-map.hh @@ -34,15 +34,13 @@ * hb_hashmap_t */ +extern HB_INTERNAL const hb_codepoint_t minus_1; + template <typename K, typename V, - typename k_invalid_t = K, - typename v_invalid_t = V, - k_invalid_t kINVALID = std::is_pointer<K>::value ? 0 : std::is_signed<K>::value ? hb_int_min (K) : (K) -1, - v_invalid_t vINVALID = std::is_pointer<V>::value ? 0 : std::is_signed<V>::value ? 
hb_int_min (V) : (V) -1> + bool minus_one = false> struct hb_hashmap_t { hb_hashmap_t () { init (); } - hb_hashmap_t (std::nullptr_t) : hb_hashmap_t () {} ~hb_hashmap_t () { fini (); } hb_hashmap_t (const hb_hashmap_t& o) : hb_hashmap_t () { resize (population); hb_copy (o, *this); } @@ -68,38 +66,44 @@ struct hb_hashmap_t struct item_t { K key; + uint32_t hash : 30; + uint32_t is_used_ : 1; + uint32_t is_tombstone_ : 1; V value; - uint32_t hash; + + bool is_used () const { return is_used_; } + void set_used (bool is_used) { is_used_ = is_used; } + bool is_tombstone () const { return is_tombstone_; } + void set_tombstone (bool is_tombstone) { is_tombstone_ = is_tombstone; } + bool is_real () const { return is_used_ && !is_tombstone_; } + + template <bool v = minus_one, + hb_enable_if (v == false)> + static inline const V& default_value () { return Null(V); }; + template <bool v = minus_one, + hb_enable_if (v == true)> + static inline const V& default_value () + { + static_assert (hb_is_same (V, hb_codepoint_t), ""); + return minus_1; + }; void clear () { new (std::addressof (key)) K (); - key = hb_coerce<K> (kINVALID); new (std::addressof (value)) V (); - value = hb_coerce<V> (vINVALID); hash = 0; + is_used_ = false; + is_tombstone_ = false; } bool operator == (const K &o) { return hb_deref (key) == hb_deref (o); } bool operator == (const item_t &o) { return *this == o.key; } - bool is_unused () const - { - const K inv = hb_coerce<K> (kINVALID); - return key == inv; - } - bool is_tombstone () const - { - const K kinv = hb_coerce<K> (kINVALID); - const V vinv = hb_coerce<V> (vINVALID); - return key != kinv && value == vinv; - } - bool is_real () const - { - const K kinv = hb_coerce<K> (kINVALID); - const V vinv = hb_coerce<V> (vINVALID); - return key != kinv && value != vinv; - } hb_pair_t<K, V> get_pair() const { return hb_pair_t<K, V> (key, value); } + hb_pair_t<const K &, const V &> get_pair_ref() const { return hb_pair_t<const K &, const V &> (key, value); } + + uint32_t total_hash () const + { return (hash * 31) + hb_hash (value); } }; hb_object_header_t header; @@ -200,27 +204,39 @@ struct hb_hashmap_t return true; } - bool set (K key, const V& value) { return set_with_hash (key, hb_hash (key), value); } - bool set (K key, V&& value) { return set_with_hash (key, hb_hash (key), std::move (value)); } + template <typename VV> + bool set (K key, VV&& value) { return set_with_hash (key, hb_hash (key), std::forward<VV> (value)); } - V get (K key) const + const V& get (K key) const { - if (unlikely (!items)) return hb_coerce<V> (vINVALID); + if (unlikely (!items)) return item_t::default_value (); unsigned int i = bucket_for (key); - return items[i].is_real () && items[i] == key ? items[i].value : hb_coerce<V> (vINVALID); + return items[i].is_real () && items[i] == key ? items[i].value : item_t::default_value (); } - void del (K key) { set (key, hb_coerce<V> (vINVALID)); } + void del (K key) { set_with_hash (key, hb_hash (key), item_t::default_value (), true); } /* Has interface. 
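Because get() now returns a const reference, a lookup miss has to refer to something with static storage duration; that is what the external minus_1 above (and Null(V) in the general case) provide. A much-simplified sketch of the pattern:

#include <cassert>
#include <cstdint>

// Sentinel with static storage duration, so a reference to it outlives any
// particular map (mirrors the role of the external `minus_1`).
const uint32_t map_miss_value = (uint32_t) -1;

struct TinyCodepointMap
{
  const uint32_t *find (uint32_t) const { return nullptr; }  // stub lookup

  const uint32_t &get (uint32_t key) const
  {
    const uint32_t *slot = find (key);  // hypothetical storage elided
    return slot ? *slot : map_miss_value;
  }
};

int main ()
{
  TinyCodepointMap m;
  assert (m.get (5) == map_miss_value);  // miss yields the shared sentinel
}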
*/ - typedef V value_t; + typedef const V& value_t; value_t operator [] (K k) const { return get (k); } - bool has (K k, V *vp = nullptr) const + bool has (K key, const V **vp = nullptr) const { - V v = (*this)[k]; - if (vp) *vp = v; - const V vinv = hb_coerce<V> (vINVALID); - return v != vinv; + if (unlikely (!items)) + { + if (vp) *vp = &item_t::default_value (); + return false; + } + unsigned int i = bucket_for (key); + if (items[i].is_real () && items[i] == key) + { + if (vp) *vp = &items[i].value; + return true; + } + else + { + if (vp) *vp = &item_t::default_value (); + return false; + } } /* Projection. */ V operator () (K k) const { return get (k); } @@ -242,8 +258,9 @@ struct hb_hashmap_t uint32_t hash () const { uint32_t h = 0; - for (auto pair : iter ()) - h ^= (hb_hash (pair.first) * 31) + hb_hash (pair.second); + for (const auto &item : + hb_array (items, mask ? mask + 1 : 0) + | hb_filter (&item_t::is_real)) + h ^= item.total_hash (); return h; } @@ -271,6 +288,12 @@ struct hb_hashmap_t | hb_filter (&item_t::is_real) | hb_map (&item_t::get_pair) ) + auto iter_ref () const HB_AUTO_RETURN + ( + + hb_array (items, mask ? mask + 1 : 0) + | hb_filter (&item_t::is_real) + | hb_map (&item_t::get_pair_ref) + ) auto keys () const HB_AUTO_RETURN ( + hb_array (items, mask ? mask + 1 : 0) @@ -293,19 +316,16 @@ struct hb_hashmap_t protected: template <typename VV> - bool set_with_hash (K key, uint32_t hash, VV&& value) + bool set_with_hash (K key, uint32_t hash, VV&& value, bool is_delete=false) { if (unlikely (!successful)) return false; - const K kinv = hb_coerce<K> (kINVALID); - if (unlikely (key == kinv)) return true; if (unlikely ((occupancy + occupancy / 2) >= mask && !resize ())) return false; unsigned int i = bucket_for_hash (key, hash); - const V vinv = hb_coerce<V> (vINVALID); - if (value == vinv && items[i].key != key) + if (is_delete && items[i].key != key) return true; /* Trying to delete non-existent key. 
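The is_delete path above is the classic tombstone technique for open addressing: a deleted slot is marked rather than emptied, so probe chains that ran through it stay intact. A self-contained sketch of the idea (linear probing, no resizing, far simpler than the real hb_hashmap_t):

#include <cassert>
#include <cstdint>
#include <vector>

struct Slot
{
  uint32_t key = 0, value = 0;
  bool used = false;       // slot has ever held a key
  bool tombstone = false;  // held a key that was since deleted
  bool real () const { return used && !tombstone; }
};

struct TinyMap
{
  std::vector<Slot> slots = std::vector<Slot> (16);

  unsigned probe (uint32_t key) const
  {
    unsigned i = key % slots.size ();
    while (slots[i].used)  // stop only at never-used slots
    {
      if (slots[i].real () && slots[i].key == key) return i;
      i = (i + 1) % slots.size ();
    }
    return i;
  }
  void set (uint32_t key, uint32_t value)
  {
    Slot &s = slots[probe (key)];
    s.key = key; s.value = value; s.used = true; s.tombstone = false;
  }
  bool del (uint32_t key)
  {
    Slot &s = slots[probe (key)];
    if (!s.real ()) return false;  // trying to delete a non-existent key
    s.tombstone = true;  // keep the probe chain for colliding keys intact
    return true;
  }
};

int main ()
{
  TinyMap m;
  m.set (1, 10); m.set (17, 20);  // 1 and 17 collide (mod 16)
  m.del (1);                      // 17 must stay reachable
  assert (m.slots[m.probe (17)].value == 20);
}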
*/ - if (!items[i].is_unused ()) + if (items[i].is_used ()) { occupancy--; if (!items[i].is_tombstone ()) @@ -313,27 +333,30 @@ struct hb_hashmap_t } items[i].key = key; - items[i].value = value; + items[i].value = std::forward<VV> (value); items[i].hash = hash; + items[i].set_used (true); + items[i].set_tombstone (is_delete); occupancy++; - if (!items[i].is_tombstone ()) + if (!is_delete) population++; return true; } - unsigned int bucket_for (K key) const + unsigned int bucket_for (const K &key) const { return bucket_for_hash (key, hb_hash (key)); } - unsigned int bucket_for_hash (K key, uint32_t hash) const + unsigned int bucket_for_hash (const K &key, uint32_t hash) const { + hash &= 0x3FFFFFFF; // We only store lower 30bit of hash unsigned int i = hash % prime; unsigned int step = 0; unsigned int tombstone = (unsigned) -1; - while (!items[i].is_unused ()) + while (items[i].is_used ()) { if (items[i].hash == hash && items[i] == key) return i; @@ -402,21 +425,14 @@ struct hb_hashmap_t struct hb_map_t : hb_hashmap_t<hb_codepoint_t, hb_codepoint_t, - hb_codepoint_t, - hb_codepoint_t, - HB_MAP_VALUE_INVALID, - HB_MAP_VALUE_INVALID> + true> { using hashmap = hb_hashmap_t<hb_codepoint_t, hb_codepoint_t, - hb_codepoint_t, - hb_codepoint_t, - HB_MAP_VALUE_INVALID, - HB_MAP_VALUE_INVALID>; + true>; ~hb_map_t () = default; hb_map_t () : hashmap () {} - hb_map_t (std::nullptr_t) : hb_map_t () {} hb_map_t (const hb_map_t &o) : hashmap ((hashmap &) o) {} hb_map_t (hb_map_t &&o) : hashmap (std::move ((hashmap &) o)) {} hb_map_t& operator= (const hb_map_t&) = default; diff --git a/thirdparty/harfbuzz/src/hb-meta.hh b/thirdparty/harfbuzz/src/hb-meta.hh index 90757d38ac..e97d790fc3 100644 --- a/thirdparty/harfbuzz/src/hb-meta.hh +++ b/thirdparty/harfbuzz/src/hb-meta.hh @@ -188,7 +188,7 @@ template <> struct hb_int_max<signed long long> : hb_integral_constant<signed l template <> struct hb_int_max<unsigned long long> : hb_integral_constant<unsigned long long, ULLONG_MAX> {}; #define hb_int_max(T) hb_int_max<T>::value -#if __GNUG__ && __GNUC__ < 5 +#if defined(__GNUC__) && __GNUC__ < 5 #define hb_is_trivially_copyable(T) __has_trivial_copy(T) #define hb_is_trivially_copy_assignable(T) __has_trivial_assign(T) #define hb_is_trivially_constructible(T) __has_trivial_constructor(T) diff --git a/thirdparty/harfbuzz/src/hb-null.hh b/thirdparty/harfbuzz/src/hb-null.hh index db38a4dfd2..78eb6474d5 100644 --- a/thirdparty/harfbuzz/src/hb-null.hh +++ b/thirdparty/harfbuzz/src/hb-null.hh @@ -37,7 +37,7 @@ /* Global nul-content Null pool. Enlarge as necessary. */ -#define HB_NULL_POOL_SIZE 384 +#define HB_NULL_POOL_SIZE 448 /* Use SFINAE to sniff whether T has min_size; in which case return the larger * of sizeof(T) and T::null_size, otherwise return sizeof(T). @@ -108,7 +108,7 @@ struct NullHelper /* Specializations for arbitrary-content Null objects expressed in bytes. */ #define DECLARE_NULL_NAMESPACE_BYTES(Namespace, Type) \ } /* Close namespace. */ \ - extern HB_INTERNAL const unsigned char _hb_Null_##Namespace##_##Type[Namespace::Type::null_size]; \ + extern HB_INTERNAL const unsigned char _hb_Null_##Namespace##_##Type[hb_null_size (Namespace::Type)]; \ template <> \ struct Null<Namespace::Type> { \ static Namespace::Type const & get_null () { \ @@ -118,7 +118,7 @@ struct NullHelper namespace Namespace { \ static_assert (true, "") /* Require semicolon after. 
*/ #define DEFINE_NULL_NAMESPACE_BYTES(Namespace, Type) \ - const unsigned char _hb_Null_##Namespace##_##Type[Namespace::Type::null_size] + const unsigned char _hb_Null_##Namespace##_##Type[hb_null_size (Namespace::Type)] /* Specializations for arbitrary-content Null objects expressed as struct initializer. */ #define DECLARE_NULL_INSTANCE(Type) \ diff --git a/thirdparty/harfbuzz/src/hb-ot-cff1-table.hh b/thirdparty/harfbuzz/src/hb-ot-cff1-table.hh index b5047002ac..4aa337f78b 100644 --- a/thirdparty/harfbuzz/src/hb-ot-cff1-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-cff1-table.hh @@ -406,6 +406,8 @@ struct Charset1_2 { void collect_glyph_to_sid_map (hb_map_t *mapping, unsigned int num_glyphs) const { hb_codepoint_t gid = 1; + if (gid >= num_glyphs) + return; for (unsigned i = 0;; i++) { hb_codepoint_t sid = ranges[i].first; @@ -1138,7 +1140,8 @@ struct cff1 cff1_top_dict_interp_env_t env (fontDictStr); cff1_font_dict_interpreter_t font_interp (env); font = fontDicts.push (); - if (unlikely (font == &Crap (cff1_font_dict_values_t))) { fini (); return; } + if (unlikely (fontDicts.in_error ())) { fini (); return; } + font->init (); if (unlikely (!font_interp.interpret (*font))) { fini (); return; } PRIVDICTVAL *priv = &privateDicts[i]; @@ -1333,7 +1336,7 @@ struct cff1 if (names) { names->fini (); - free (names); + hb_free (names); } SUPER::fini (); @@ -1379,7 +1382,7 @@ struct cff1 hb_sorted_vector_t<gname_t> *names = glyph_names.get (); if (unlikely (!names)) { - names = (hb_sorted_vector_t<gname_t> *) calloc (sizeof (hb_sorted_vector_t<gname_t>), 1); + names = (hb_sorted_vector_t<gname_t> *) hb_calloc (sizeof (hb_sorted_vector_t<gname_t>), 1); if (likely (names)) { names->init (); @@ -1409,14 +1412,14 @@ struct cff1 if (names) { names->fini (); - free (names); + hb_free (names); } goto retry; } - } + } gname_t key = { hb_bytes_t (name, len), 0 }; - const gname_t *gname = glyph_names->bsearch (key); + const gname_t *gname = names ? 
names->bsearch (key) : nullptr; if (!gname) return false; hb_codepoint_t gid = sid_to_glyph (gname->sid); if (!gid && gname->sid) return false; diff --git a/thirdparty/harfbuzz/src/hb-ot-cmap-table.hh b/thirdparty/harfbuzz/src/hb-ot-cmap-table.hh index 7e96d9c8b3..09c9fe93f3 100644 --- a/thirdparty/harfbuzz/src/hb-ot-cmap-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-cmap-table.hh @@ -27,6 +27,8 @@ #ifndef HB_OT_CMAP_TABLE_HH #define HB_OT_CMAP_TABLE_HH +#include "hb-ot-os2-table.hh" +#include "hb-ot-shaper-arabic-pua.hh" #include "hb-open-type.hh" #include "hb-set.hh" @@ -1476,33 +1478,47 @@ struct SubtableUnicodesCache { private: const void* base; - hb_hashmap_t<intptr_t, hb_set_t*> cached_unicodes; + hb_hashmap_t<intptr_t, hb::unique_ptr<hb_set_t>> cached_unicodes; public: SubtableUnicodesCache(const void* cmap_base) : base(cmap_base), cached_unicodes() {} - ~SubtableUnicodesCache() - { - for (hb_set_t* s : cached_unicodes.values()) { - hb_set_destroy (s); - } - } - hb_set_t* set_for(const EncodingRecord* record) + hb_set_t* set_for (const EncodingRecord* record) { - if (!cached_unicodes.has ((intptr_t) record)) { - hb_set_t* new_set = hb_set_create (); - if (!cached_unicodes.set ((intptr_t) record, new_set)) { - hb_set_destroy (new_set); + if (!cached_unicodes.has ((intptr_t) record)) + { + hb_set_t *s = hb_set_create (); + if (unlikely (s->in_error ())) + return hb_set_get_empty (); + + (base+record->subtable).collect_unicodes (s); + + if (unlikely (!cached_unicodes.set ((intptr_t) record, hb::unique_ptr<hb_set_t> {s}))) return hb_set_get_empty (); - } - (base+record->subtable).collect_unicodes (cached_unicodes.get ((intptr_t) record)); + + return s; } return cached_unicodes.get ((intptr_t) record); } }; +static inline uint_fast16_t +_hb_symbol_pua_map (unsigned codepoint) +{ + if (codepoint <= 0x00FFu) + { + /* For symbol-encoded OpenType fonts, we duplicate the + * U+F000..F0FF range at U+0000..U+00FF. That's what + * Windows seems to do, and that's hinted about at: + * https://docs.microsoft.com/en-us/typography/opentype/spec/recom + * under "Non-Standard (Symbol) Fonts". 
*/ + return 0xF000u + codepoint; + } + return 0; +} + struct cmap { static constexpr hb_tag_t tableTag = HB_OT_TAG_cmap; @@ -1726,7 +1742,24 @@ struct cmap this->get_glyph_data = subtable; if (unlikely (symbol)) - this->get_glyph_funcZ = get_glyph_from_symbol<CmapSubtable>; + { + switch ((unsigned) face->table.OS2->get_font_page ()) { + case OS2::font_page_t::FONT_PAGE_NONE: + this->get_glyph_funcZ = get_glyph_from_symbol<CmapSubtable, _hb_symbol_pua_map>; + break; +#ifndef HB_NO_OT_SHAPER_ARABIC_FALLBACK + case OS2::font_page_t::FONT_PAGE_SIMP_ARABIC: + this->get_glyph_funcZ = get_glyph_from_symbol<CmapSubtable, _hb_arabic_pua_simp_map>; + break; + case OS2::font_page_t::FONT_PAGE_TRAD_ARABIC: + this->get_glyph_funcZ = get_glyph_from_symbol<CmapSubtable, _hb_arabic_pua_trad_map>; + break; +#endif + default: + this->get_glyph_funcZ = get_glyph_from<CmapSubtable>; + break; + } + } else { switch (subtable->u.format) { @@ -1808,6 +1841,7 @@ struct cmap typedef bool (*hb_cmap_get_glyph_func_t) (const void *obj, hb_codepoint_t codepoint, hb_codepoint_t *glyph); + typedef uint_fast16_t (*hb_pua_remap_func_t) (unsigned); template <typename Type> HB_INTERNAL static bool get_glyph_from (const void *obj, @@ -1818,7 +1852,7 @@ struct cmap return typed_obj->get_glyph (codepoint, glyph); } - template <typename Type> + template <typename Type, hb_pua_remap_func_t remap> HB_INTERNAL static bool get_glyph_from_symbol (const void *obj, hb_codepoint_t codepoint, hb_codepoint_t *glyph) @@ -1827,15 +1861,8 @@ struct cmap if (likely (typed_obj->get_glyph (codepoint, glyph))) return true; - if (codepoint <= 0x00FFu) - { - /* For symbol-encoded OpenType fonts, we duplicate the - * U+F000..F0FF range at U+0000..U+00FF. That's what - * Windows seems to do, and that's hinted about at: - * https://docs.microsoft.com/en-us/typography/opentype/spec/recom - * under "Non-Standard (Symbol) Fonts". 
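The dispatch above bakes the PUA remap in as a non-type template parameter, so each OS/2 font page gets its own specialized lookup and the accelerator stores one function pointer instead of switching per codepoint. A compilable sketch of the pattern (primary_lookup is a hypothetical stand-in for the real cmap subtable lookup):

#include <cstdint>

typedef uint_fast16_t (*pua_remap_func_t) (unsigned);

bool primary_lookup (unsigned cp, uint32_t *glyph)  // stand-in
{ *glyph = cp; return cp < 0x80; }

uint_fast16_t symbol_pua_map (unsigned cp)
{ return cp <= 0x00FFu ? 0xF000u + cp : 0; }

template <pua_remap_func_t remap>
bool get_glyph_with_fallback (unsigned cp, uint32_t *glyph)
{
  if (primary_lookup (cp, glyph)) return true;
  if (unsigned c = remap (cp))  // 0 means "no remap for this codepoint"
    return primary_lookup (c, glyph);
  return false;
}

// Selected once per face, e.g. while setting up the accelerator:
bool (*lookup_func) (unsigned, uint32_t *) =
    get_glyph_with_fallback<symbol_pua_map>;

int main ()
{
  uint32_t g = 0;
  return lookup_func (0x41, &g) ? 0 : 1;  // 'A' resolves via the primary path
}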
*/ - return typed_obj->get_glyph (0xF000u + codepoint, glyph); - } + if (hb_codepoint_t c = remap (codepoint)) + return typed_obj->get_glyph (c, glyph); return false; } diff --git a/thirdparty/harfbuzz/src/hb-ot-color-cpal-table.hh b/thirdparty/harfbuzz/src/hb-ot-color-cpal-table.hh index 24476eda17..bcab77f79d 100644 --- a/thirdparty/harfbuzz/src/hb-ot-color-cpal-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-color-cpal-table.hh @@ -97,9 +97,10 @@ struct CPALV1Tail c->push (); for (const auto _ : colorLabels) { - if (!color_index_map->has (_)) continue; + const hb_codepoint_t *v; + if (!color_index_map->has (_, &v)) continue; NameID new_color_idx; - new_color_idx = color_index_map->get (_); + new_color_idx = *v; if (!c->copy<NameID> (new_color_idx)) { c->pop_discard (); diff --git a/thirdparty/harfbuzz/src/hb-ot-color-sbix-table.hh b/thirdparty/harfbuzz/src/hb-ot-color-sbix-table.hh index 9741ebd450..d0e2235fb2 100644 --- a/thirdparty/harfbuzz/src/hb-ot-color-sbix-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-color-sbix-table.hh @@ -298,6 +298,12 @@ struct sbix const PNGHeader &png = *blob->as<PNGHeader>(); + if (png.IHDR.height >= 65536 || png.IHDR.width >= 65536) + { + hb_blob_destroy (blob); + return false; + } + extents->x_bearing = x_offset; extents->y_bearing = png.IHDR.height + y_offset; extents->width = png.IHDR.width; diff --git a/thirdparty/harfbuzz/src/hb-ot-font.cc b/thirdparty/harfbuzz/src/hb-ot-font.cc index 0f44ee4d5f..af1bc86d48 100644 --- a/thirdparty/harfbuzz/src/hb-ot-font.cc +++ b/thirdparty/harfbuzz/src/hb-ot-font.cc @@ -30,6 +30,7 @@ #include "hb-ot.h" +#include "hb-cache.hh" #include "hb-font.hh" #include "hb-machinery.hh" #include "hb-ot-face.hh" @@ -58,6 +59,41 @@ * never need to call these functions directly. **/ +struct hb_ot_font_t +{ + const hb_ot_face_t *ot_face; + + /* h_advance caching */ + mutable hb_atomic_int_t cached_coords_serial; + mutable hb_atomic_ptr_t<hb_advance_cache_t> advance_cache; +}; + +static hb_ot_font_t * +_hb_ot_font_create (hb_font_t *font) +{ + hb_ot_font_t *ot_font = (hb_ot_font_t *) hb_calloc (1, sizeof (hb_ot_font_t)); + if (unlikely (!ot_font)) + return nullptr; + + ot_font->ot_face = &font->face->table; + + return ot_font; +} + +static void +_hb_ot_font_destroy (void *font_data) +{ + hb_ot_font_t *ot_font = (hb_ot_font_t *) font_data; + + auto *cache = ot_font->advance_cache.get_relaxed (); + if (cache) + { + cache->fini (); + hb_free (cache); + } + + hb_free (ot_font); +} static hb_bool_t hb_ot_get_nominal_glyph (hb_font_t *font HB_UNUSED, @@ -66,7 +102,8 @@ hb_ot_get_nominal_glyph (hb_font_t *font HB_UNUSED, hb_codepoint_t *glyph, void *user_data HB_UNUSED) { - const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data; + const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data; + const hb_ot_face_t *ot_face = ot_font->ot_face; return ot_face->cmap->get_nominal_glyph (unicode, glyph); } @@ -80,7 +117,8 @@ hb_ot_get_nominal_glyphs (hb_font_t *font HB_UNUSED, unsigned int glyph_stride, void *user_data HB_UNUSED) { - const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data; + const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data; + const hb_ot_face_t *ot_face = ot_font->ot_face; return ot_face->cmap->get_nominal_glyphs (count, first_unicode, unicode_stride, first_glyph, glyph_stride); @@ -94,7 +132,8 @@ hb_ot_get_variation_glyph (hb_font_t *font HB_UNUSED, hb_codepoint_t *glyph, void *user_data HB_UNUSED) { - const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data; + const hb_ot_font_t *ot_font = (const 
hb_ot_font_t *) font_data; + const hb_ot_face_t *ot_face = ot_font->ot_face; return ot_face->cmap->get_variation_glyph (unicode, variation_selector, glyph); } @@ -107,15 +146,83 @@ hb_ot_get_glyph_h_advances (hb_font_t* font, void* font_data, unsigned advance_stride, void *user_data HB_UNUSED) { - const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data; + const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data; + const hb_ot_face_t *ot_face = ot_font->ot_face; const OT::hmtx_accelerator_t &hmtx = *ot_face->hmtx; - for (unsigned int i = 0; i < count; i++) +#ifndef HB_NO_VAR + const OT::HVARVVAR &HVAR = *hmtx.var_table; + const OT::VariationStore &varStore = &HVAR + HVAR.varStore; + OT::VariationStore::cache_t *varStore_cache = font->num_coords * count >= 128 ? varStore.create_cache () : nullptr; + + bool use_cache = font->num_coords; +#else + OT::VariationStore::cache_t *varStore_cache = nullptr; + bool use_cache = false; +#endif + + hb_advance_cache_t *cache = nullptr; + if (use_cache) { - *first_advance = font->em_scale_x (hmtx.get_advance (*first_glyph, font)); - first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride); - first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride); + retry: + cache = ot_font->advance_cache.get (); + if (unlikely (!cache)) + { + cache = (hb_advance_cache_t *) hb_malloc (sizeof (hb_advance_cache_t)); + if (unlikely (!cache)) + { + use_cache = false; + goto out; + } + + cache->init (); + if (unlikely (!ot_font->advance_cache.cmpexch (nullptr, cache))) + { + hb_free (cache); + goto retry; + } + ot_font->cached_coords_serial.set (font->serial_coords); + } + } + out: + + if (!use_cache) + { + for (unsigned int i = 0; i < count; i++) + { + *first_advance = font->em_scale_x (hmtx.get_advance (*first_glyph, font, varStore_cache)); + first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride); + first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride); + } } + else + { /* Use cache. */ + if (ot_font->cached_coords_serial.get () != (int) font->serial_coords) + { + ot_font->advance_cache->init (); + ot_font->cached_coords_serial.set (font->serial_coords); + } + + for (unsigned int i = 0; i < count; i++) + { + hb_position_t v; + unsigned cv; + if (ot_font->advance_cache->get (*first_glyph, &cv)) + v = cv; + else + { + v = hmtx.get_advance (*first_glyph, font, varStore_cache); + ot_font->advance_cache->set (*first_glyph, v); + } + *first_advance = font->em_scale_x (v); + first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride); + first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride); + } + } + +#ifndef HB_NO_VAR + OT::VariationStore::destroy_cache (varStore_cache); +#endif } #ifndef HB_NO_VERTICAL @@ -128,16 +235,31 @@ hb_ot_get_glyph_v_advances (hb_font_t* font, void* font_data, unsigned advance_stride, void *user_data HB_UNUSED) { - const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data; + const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data; + const hb_ot_face_t *ot_face = ot_font->ot_face; const OT::vmtx_accelerator_t &vmtx = *ot_face->vmtx; if (vmtx.has_data ()) + { +#ifndef HB_NO_VAR + const OT::HVARVVAR &VVAR = *vmtx.var_table; + const OT::VariationStore &varStore = &VVAR + VVAR.varStore; + OT::VariationStore::cache_t *varStore_cache = font->num_coords ? 
varStore.create_cache () : nullptr; +#else + OT::VariationStore::cache_t *varStore_cache = nullptr; +#endif + for (unsigned int i = 0; i < count; i++) { - *first_advance = font->em_scale_y (-(int) vmtx.get_advance (*first_glyph, font)); + *first_advance = font->em_scale_y (-(int) vmtx.get_advance (*first_glyph, font, varStore_cache)); first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride); first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride); } + +#ifndef HB_NO_VAR + OT::VariationStore::destroy_cache (varStore_cache); +#endif + } else { hb_font_extents_t font_extents; @@ -163,7 +285,8 @@ hb_ot_get_glyph_v_origin (hb_font_t *font, hb_position_t *y, void *user_data HB_UNUSED) { - const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data; + const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data; + const hb_ot_face_t *ot_face = ot_font->ot_face; *x = font->get_glyph_h_advance (glyph) / 2; @@ -208,7 +331,8 @@ hb_ot_get_glyph_extents (hb_font_t *font, hb_glyph_extents_t *extents, void *user_data HB_UNUSED) { - const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data; + const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data; + const hb_ot_face_t *ot_face = ot_font->ot_face; #if !defined(HB_NO_OT_FONT_BITMAP) && !defined(HB_NO_COLOR) if (ot_face->sbix->get_extents (font, glyph, extents)) return true; @@ -234,7 +358,9 @@ hb_ot_get_glyph_name (hb_font_t *font HB_UNUSED, char *name, unsigned int size, void *user_data HB_UNUSED) { - const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data; + const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data; + const hb_ot_face_t *ot_face = ot_font->ot_face; + if (ot_face->post->get_glyph_name (glyph, name, size)) return true; #ifndef HB_NO_OT_FONT_CFF if (ot_face->cff1->get_glyph_name (glyph, name, size)) return true; @@ -248,7 +374,9 @@ hb_ot_get_glyph_from_name (hb_font_t *font HB_UNUSED, hb_codepoint_t *glyph, void *user_data HB_UNUSED) { - const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data; + const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data; + const hb_ot_face_t *ot_face = ot_font->ot_face; + if (ot_face->post->get_glyph_from_name (name, len, glyph)) return true; #ifndef HB_NO_OT_FONT_CFF if (ot_face->cff1->get_glyph_from_name (name, len, glyph)) return true; @@ -364,10 +492,14 @@ _hb_ot_get_font_funcs () void hb_ot_font_set_funcs (hb_font_t *font) { + hb_ot_font_t *ot_font = _hb_ot_font_create (font); + if (unlikely (!ot_font)) + return; + hb_font_set_funcs (font, _hb_ot_get_font_funcs (), - &font->face->table, - nullptr); + ot_font, + _hb_ot_font_destroy); } #ifndef HB_NO_VAR diff --git a/thirdparty/harfbuzz/src/hb-ot-glyf-table.hh b/thirdparty/harfbuzz/src/hb-ot-glyf-table.hh index 866bb7e04c..c32ff7636d 100644 --- a/thirdparty/harfbuzz/src/hb-ot-glyf-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-glyf-table.hh @@ -30,1334 +30,6 @@ #ifndef HB_OT_GLYF_TABLE_HH #define HB_OT_GLYF_TABLE_HH -#include "hb-open-type.hh" -#include "hb-ot-head-table.hh" -#include "hb-ot-hmtx-table.hh" -#include "hb-ot-var-gvar-table.hh" -#include "hb-draw.hh" - -namespace OT { - - -/* - * loca -- Index to Location - * https://docs.microsoft.com/en-us/typography/opentype/spec/loca - */ -#define HB_OT_TAG_loca HB_TAG('l','o','c','a') - -#ifndef HB_MAX_COMPOSITE_OPERATIONS -#define HB_MAX_COMPOSITE_OPERATIONS 100000 -#endif - - -struct loca -{ - friend struct glyf; - - static constexpr hb_tag_t tableTag = HB_OT_TAG_loca; - - bool sanitize 
(hb_sanitize_context_t *c HB_UNUSED) const - { - TRACE_SANITIZE (this); - return_trace (true); - } - - protected: - UnsizedArrayOf<HBUINT8> - dataZ; /* Location data. */ - public: - DEFINE_SIZE_MIN (0); /* In reality, this is UNBOUNDED() type; but since we always - * check the size externally, allow Null() object of it by - * defining it _MIN instead. */ -}; - - -/* - * glyf -- TrueType Glyph Data - * https://docs.microsoft.com/en-us/typography/opentype/spec/glyf - */ -#define HB_OT_TAG_glyf HB_TAG('g','l','y','f') - - -struct glyf -{ - static constexpr hb_tag_t tableTag = HB_OT_TAG_glyf; - - bool sanitize (hb_sanitize_context_t *c HB_UNUSED) const - { - TRACE_SANITIZE (this); - /* Runtime checks as eager sanitizing each glyph is costy */ - return_trace (true); - } - - template<typename Iterator, - hb_requires (hb_is_source_of (Iterator, unsigned int))> - static bool - _add_loca_and_head (hb_subset_plan_t * plan, Iterator padded_offsets, bool use_short_loca) - { - unsigned num_offsets = padded_offsets.len () + 1; - unsigned entry_size = use_short_loca ? 2 : 4; - char *loca_prime_data = (char *) hb_calloc (entry_size, num_offsets); - - if (unlikely (!loca_prime_data)) return false; - - DEBUG_MSG (SUBSET, nullptr, "loca entry_size %d num_offsets %d size %d", - entry_size, num_offsets, entry_size * num_offsets); - - if (use_short_loca) - _write_loca (padded_offsets, 1, hb_array ((HBUINT16 *) loca_prime_data, num_offsets)); - else - _write_loca (padded_offsets, 0, hb_array ((HBUINT32 *) loca_prime_data, num_offsets)); - - hb_blob_t *loca_blob = hb_blob_create (loca_prime_data, - entry_size * num_offsets, - HB_MEMORY_MODE_WRITABLE, - loca_prime_data, - hb_free); - - bool result = plan->add_table (HB_OT_TAG_loca, loca_blob) - && _add_head_and_set_loca_version (plan, use_short_loca); - - hb_blob_destroy (loca_blob); - return result; - } - - template<typename IteratorIn, typename IteratorOut, - hb_requires (hb_is_source_of (IteratorIn, unsigned int)), - hb_requires (hb_is_sink_of (IteratorOut, unsigned))> - static void - _write_loca (IteratorIn it, unsigned right_shift, IteratorOut dest) - { - unsigned int offset = 0; - dest << 0; - + it - | hb_map ([=, &offset] (unsigned int padded_size) - { - offset += padded_size; - DEBUG_MSG (SUBSET, nullptr, "loca entry offset %d", offset); - return offset >> right_shift; - }) - | hb_sink (dest) - ; - } - - /* requires source of SubsetGlyph complains the identifier isn't declared */ - template <typename Iterator> - bool serialize (hb_serialize_context_t *c, - Iterator it, - bool use_short_loca, - const hb_subset_plan_t *plan) - { - TRACE_SERIALIZE (this); - unsigned init_len = c->length (); - for (const auto &_ : it) _.serialize (c, use_short_loca, plan); - - /* As a special case when all glyph in the font are empty, add a zero byte - * to the table, so that OTS doesn’t reject it, and to make the table work - * on Windows as well. 
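The _write_loca right-shift in the removed code above is the short/long loca distinction: short-format entries store offset/2 as 16-bit words (hence the padding to even sizes, and the max_offset < 0x1FFFF guard before choosing it). A standalone sketch of that serialization (big-endian, as in the font file):

#include <cassert>
#include <cstdint>
#include <vector>

void write_loca (const std::vector<uint32_t> &padded_sizes,
                 bool use_short,
                 std::vector<uint8_t> &out)
{
  uint32_t offset = 0;
  auto emit = [&] (uint32_t o)
  {
    if (use_short)
    {
      uint16_t v = (uint16_t) (o >> 1);  // stored as a word offset
      out.push_back (v >> 8); out.push_back (v & 0xFF);
    }
    else
    {
      out.push_back (o >> 24); out.push_back ((o >> 16) & 0xFF);
      out.push_back ((o >> 8) & 0xFF); out.push_back (o & 0xFF);
    }
  };
  emit (0);  // loca[0] is always 0
  for (uint32_t size : padded_sizes) { offset += size; emit (offset); }
}

int main ()
{
  std::vector<uint8_t> out;
  write_loca ({100, 36}, true, out);
  assert (out.size () == 6 && out[3] == 50);  // 100 >> 1 == 50
}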
- * See https://github.com/khaledhosny/ots/issues/52 */ - if (init_len == c->length ()) - { - HBUINT8 empty_byte; - empty_byte = 0; - c->copy (empty_byte); - } - return_trace (true); - } - - /* Byte region(s) per glyph to output - unpadded, hints removed if so requested - If we fail to process a glyph we produce an empty (0-length) glyph */ - bool subset (hb_subset_context_t *c) const - { - TRACE_SUBSET (this); - - glyf *glyf_prime = c->serializer->start_embed <glyf> (); - if (unlikely (!c->serializer->check_success (glyf_prime))) return_trace (false); - - hb_vector_t<SubsetGlyph> glyphs; - _populate_subset_glyphs (c->plan, &glyphs); - - auto padded_offsets = - + hb_iter (glyphs) - | hb_map (&SubsetGlyph::padded_size) - ; - - unsigned max_offset = + padded_offsets | hb_reduce (hb_add, 0); - bool use_short_loca = max_offset < 0x1FFFF; - - - glyf_prime->serialize (c->serializer, hb_iter (glyphs), use_short_loca, c->plan); - if (!use_short_loca) { - padded_offsets = - + hb_iter (glyphs) - | hb_map (&SubsetGlyph::length) - ; - } - - - if (unlikely (c->serializer->in_error ())) return_trace (false); - return_trace (c->serializer->check_success (_add_loca_and_head (c->plan, - padded_offsets, - use_short_loca))); - } - - template <typename SubsetGlyph> - void - _populate_subset_glyphs (const hb_subset_plan_t *plan, - hb_vector_t<SubsetGlyph> *glyphs /* OUT */) const - { - OT::glyf::accelerator_t glyf (plan->source); - - + hb_range (plan->num_output_glyphs ()) - | hb_map ([&] (hb_codepoint_t new_gid) - { - SubsetGlyph subset_glyph = {0}; - subset_glyph.new_gid = new_gid; - - /* should never fail: all old gids should be mapped */ - if (!plan->old_gid_for_new_gid (new_gid, &subset_glyph.old_gid)) - return subset_glyph; - - if (new_gid == 0 && - !(plan->flags & HB_SUBSET_FLAGS_NOTDEF_OUTLINE)) - subset_glyph.source_glyph = Glyph (); - else - subset_glyph.source_glyph = glyf.glyph_for_gid (subset_glyph.old_gid, true); - if (plan->flags & HB_SUBSET_FLAGS_NO_HINTING) - subset_glyph.drop_hints_bytes (); - else - subset_glyph.dest_start = subset_glyph.source_glyph.get_bytes (); - return subset_glyph; - }) - | hb_sink (glyphs) - ; - } - - static bool - _add_head_and_set_loca_version (hb_subset_plan_t *plan, bool use_short_loca) - { - hb_blob_t *head_blob = hb_sanitize_context_t ().reference_table<head> (plan->source); - hb_blob_t *head_prime_blob = hb_blob_copy_writable_or_fail (head_blob); - hb_blob_destroy (head_blob); - - if (unlikely (!head_prime_blob)) - return false; - - head *head_prime = (head *) hb_blob_get_data_writable (head_prime_blob, nullptr); - head_prime->indexToLocFormat = use_short_loca ? 
0 : 1; - bool success = plan->add_table (HB_OT_TAG_head, head_prime_blob); - - hb_blob_destroy (head_prime_blob); - return success; - } - - struct CompositeGlyphChain - { - protected: - enum composite_glyph_flag_t - { - ARG_1_AND_2_ARE_WORDS = 0x0001, - ARGS_ARE_XY_VALUES = 0x0002, - ROUND_XY_TO_GRID = 0x0004, - WE_HAVE_A_SCALE = 0x0008, - MORE_COMPONENTS = 0x0020, - WE_HAVE_AN_X_AND_Y_SCALE = 0x0040, - WE_HAVE_A_TWO_BY_TWO = 0x0080, - WE_HAVE_INSTRUCTIONS = 0x0100, - USE_MY_METRICS = 0x0200, - OVERLAP_COMPOUND = 0x0400, - SCALED_COMPONENT_OFFSET = 0x0800, - UNSCALED_COMPONENT_OFFSET = 0x1000 - }; - - public: - unsigned int get_size () const - { - unsigned int size = min_size; - /* arg1 and 2 are int16 */ - if (flags & ARG_1_AND_2_ARE_WORDS) size += 4; - /* arg1 and 2 are int8 */ - else size += 2; - - /* One x 16 bit (scale) */ - if (flags & WE_HAVE_A_SCALE) size += 2; - /* Two x 16 bit (xscale, yscale) */ - else if (flags & WE_HAVE_AN_X_AND_Y_SCALE) size += 4; - /* Four x 16 bit (xscale, scale01, scale10, yscale) */ - else if (flags & WE_HAVE_A_TWO_BY_TWO) size += 8; - - return size; - } - - void set_glyph_index (hb_codepoint_t new_gid) { glyphIndex = new_gid; } - hb_codepoint_t get_glyph_index () const { return glyphIndex; } - - void drop_instructions_flag () { flags = (uint16_t) flags & ~WE_HAVE_INSTRUCTIONS; } - void set_overlaps_flag () - { - flags = (uint16_t) flags | OVERLAP_COMPOUND; - } - - bool has_instructions () const { return flags & WE_HAVE_INSTRUCTIONS; } - - bool has_more () const { return flags & MORE_COMPONENTS; } - bool is_use_my_metrics () const { return flags & USE_MY_METRICS; } - bool is_anchored () const { return !(flags & ARGS_ARE_XY_VALUES); } - void get_anchor_points (unsigned int &point1, unsigned int &point2) const - { - const HBUINT8 *p = &StructAfter<const HBUINT8> (glyphIndex); - if (flags & ARG_1_AND_2_ARE_WORDS) - { - point1 = ((const HBUINT16 *) p)[0]; - point2 = ((const HBUINT16 *) p)[1]; - } - else - { - point1 = p[0]; - point2 = p[1]; - } - } - - void transform_points (contour_point_vector_t &points) const - { - float matrix[4]; - contour_point_t trans; - if (get_transformation (matrix, trans)) - { - if (scaled_offsets ()) - { - points.translate (trans); - points.transform (matrix); - } - else - { - points.transform (matrix); - points.translate (trans); - } - } - } - - protected: - bool scaled_offsets () const - { return (flags & (SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET)) == SCALED_COMPONENT_OFFSET; } - - bool get_transformation (float (&matrix)[4], contour_point_t &trans) const - { - matrix[0] = matrix[3] = 1.f; - matrix[1] = matrix[2] = 0.f; - - int tx, ty; - const HBINT8 *p = &StructAfter<const HBINT8> (glyphIndex); - if (flags & ARG_1_AND_2_ARE_WORDS) - { - tx = *(const HBINT16 *) p; - p += HBINT16::static_size; - ty = *(const HBINT16 *) p; - p += HBINT16::static_size; - } - else - { - tx = *p++; - ty = *p++; - } - if (is_anchored ()) tx = ty = 0; - - trans.init ((float) tx, (float) ty); - - { - const F2DOT14 *points = (const F2DOT14 *) p; - if (flags & WE_HAVE_A_SCALE) - { - matrix[0] = matrix[3] = points[0].to_float (); - return true; - } - else if (flags & WE_HAVE_AN_X_AND_Y_SCALE) - { - matrix[0] = points[0].to_float (); - matrix[3] = points[1].to_float (); - return true; - } - else if (flags & WE_HAVE_A_TWO_BY_TWO) - { - matrix[0] = points[0].to_float (); - matrix[1] = points[1].to_float (); - matrix[2] = points[2].to_float (); - matrix[3] = points[3].to_float (); - return true; - } - } - return tx || ty; - } - - protected: - 
HBUINT16 flags; - HBGlyphID16 glyphIndex; - public: - DEFINE_SIZE_MIN (4); - }; - - struct composite_iter_t : hb_iter_with_fallback_t<composite_iter_t, const CompositeGlyphChain &> - { - typedef const CompositeGlyphChain *__item_t__; - composite_iter_t (hb_bytes_t glyph_, __item_t__ current_) : - glyph (glyph_), current (nullptr), current_size (0) - { - set_next (current_); - } - - composite_iter_t () : glyph (hb_bytes_t ()), current (nullptr), current_size (0) {} - - const CompositeGlyphChain &__item__ () const { return *current; } - bool __more__ () const { return current; } - void __next__ () - { - if (!current->has_more ()) { current = nullptr; return; } - - set_next (&StructAtOffset<CompositeGlyphChain> (current, current_size)); - } - bool operator != (const composite_iter_t& o) const - { return glyph != o.glyph || current != o.current; } - - - void set_next (const CompositeGlyphChain *composite) - { - if (!glyph.check_range (composite, CompositeGlyphChain::min_size)) - { - current = nullptr; - current_size = 0; - return; - } - unsigned size = composite->get_size (); - if (!glyph.check_range (composite, size)) - { - current = nullptr; - current_size = 0; - return; - } - - current = composite; - current_size = size; - } - - private: - hb_bytes_t glyph; - __item_t__ current; - unsigned current_size; - }; - - enum phantom_point_index_t - { - PHANTOM_LEFT = 0, - PHANTOM_RIGHT = 1, - PHANTOM_TOP = 2, - PHANTOM_BOTTOM = 3, - PHANTOM_COUNT = 4 - }; - - struct accelerator_t; - - struct Glyph - { - enum simple_glyph_flag_t - { - FLAG_ON_CURVE = 0x01, - FLAG_X_SHORT = 0x02, - FLAG_Y_SHORT = 0x04, - FLAG_REPEAT = 0x08, - FLAG_X_SAME = 0x10, - FLAG_Y_SAME = 0x20, - FLAG_OVERLAP_SIMPLE = 0x40, - FLAG_RESERVED2 = 0x80 - }; - - private: - struct GlyphHeader - { - bool has_data () const { return numberOfContours; } - - bool get_extents (hb_font_t *font, const accelerator_t &glyf_accelerator, - hb_codepoint_t gid, hb_glyph_extents_t *extents) const - { - /* Undocumented rasterizer behavior: shift glyph to the left by (lsb - xMin), i.e., xMin = lsb */ - /* extents->x_bearing = hb_min (glyph_header.xMin, glyph_header.xMax); */ - extents->x_bearing = font->em_scale_x (glyf_accelerator.hmtx->get_side_bearing (gid)); - extents->y_bearing = font->em_scale_y (hb_max (yMin, yMax)); - extents->width = font->em_scale_x (hb_max (xMin, xMax) - hb_min (xMin, xMax)); - extents->height = font->em_scale_y (hb_min (yMin, yMax) - hb_max (yMin, yMax)); - - return true; - } - - HBINT16 numberOfContours; - /* If the number of contours is - * greater than or equal to zero, - * this is a simple glyph; if negative, - * this is a composite glyph. */ - FWORD xMin; /* Minimum x for coordinate data. */ - FWORD yMin; /* Minimum y for coordinate data. */ - FWORD xMax; /* Maximum x for coordinate data. */ - FWORD yMax; /* Maximum y for coordinate data. 
*/ - public: - DEFINE_SIZE_STATIC (10); - }; - - struct SimpleGlyph - { - const GlyphHeader &header; - hb_bytes_t bytes; - SimpleGlyph (const GlyphHeader &header_, hb_bytes_t bytes_) : - header (header_), bytes (bytes_) {} - - unsigned int instruction_len_offset () const - { return GlyphHeader::static_size + 2 * header.numberOfContours; } - - unsigned int length (unsigned int instruction_len) const - { return instruction_len_offset () + 2 + instruction_len; } - - unsigned int instructions_length () const - { - unsigned int instruction_length_offset = instruction_len_offset (); - if (unlikely (instruction_length_offset + 2 > bytes.length)) return 0; - - const HBUINT16 &instructionLength = StructAtOffset<HBUINT16> (&bytes, instruction_length_offset); - /* Out of bounds of the current glyph */ - if (unlikely (length (instructionLength) > bytes.length)) return 0; - return instructionLength; - } - - const Glyph trim_padding () const - { - /* based on FontTools _g_l_y_f.py::trim */ - const uint8_t *glyph = (uint8_t*) bytes.arrayZ; - const uint8_t *glyph_end = glyph + bytes.length; - /* simple glyph w/contours, possibly trimmable */ - glyph += instruction_len_offset (); - - if (unlikely (glyph + 2 >= glyph_end)) return Glyph (); - unsigned int num_coordinates = StructAtOffset<HBUINT16> (glyph - 2, 0) + 1; - unsigned int num_instructions = StructAtOffset<HBUINT16> (glyph, 0); - - glyph += 2 + num_instructions; - - unsigned int coord_bytes = 0; - unsigned int coords_with_flags = 0; - while (glyph < glyph_end) - { - uint8_t flag = *glyph; - glyph++; - - unsigned int repeat = 1; - if (flag & FLAG_REPEAT) - { - if (unlikely (glyph >= glyph_end)) return Glyph (); - repeat = *glyph + 1; - glyph++; - } - - unsigned int xBytes, yBytes; - xBytes = yBytes = 0; - if (flag & FLAG_X_SHORT) xBytes = 1; - else if ((flag & FLAG_X_SAME) == 0) xBytes = 2; - - if (flag & FLAG_Y_SHORT) yBytes = 1; - else if ((flag & FLAG_Y_SAME) == 0) yBytes = 2; - - coord_bytes += (xBytes + yBytes) * repeat; - coords_with_flags += repeat; - if (coords_with_flags >= num_coordinates) break; - } - - if (unlikely (coords_with_flags != num_coordinates)) return Glyph (); - return Glyph (bytes.sub_array (0, bytes.length + coord_bytes - (glyph_end - glyph))); - } - - /* zero instruction length */ - void drop_hints () - { - GlyphHeader &glyph_header = const_cast<GlyphHeader &> (header); - (HBUINT16 &) StructAtOffset<HBUINT16> (&glyph_header, instruction_len_offset ()) = 0; - } - - void drop_hints_bytes (hb_bytes_t &dest_start, hb_bytes_t &dest_end) const - { - unsigned int instructions_len = instructions_length (); - unsigned int glyph_length = length (instructions_len); - dest_start = bytes.sub_array (0, glyph_length - instructions_len); - dest_end = bytes.sub_array (glyph_length, bytes.length - glyph_length); - } - - void set_overlaps_flag () - { - if (unlikely (!header.numberOfContours)) return; - - unsigned flags_offset = length (instructions_length ()); - if (unlikely (flags_offset + 1 > bytes.length)) return; - - HBUINT8 &first_flag = (HBUINT8 &) StructAtOffset<HBUINT16> (&bytes, flags_offset); - first_flag = (uint8_t) first_flag | FLAG_OVERLAP_SIMPLE; - } - - static bool read_points (const HBUINT8 *&p /* IN/OUT */, - contour_point_vector_t &points_ /* IN/OUT */, - const hb_bytes_t &bytes, - void (* setter) (contour_point_t &_, float v), - const simple_glyph_flag_t short_flag, - const simple_glyph_flag_t same_flag) - { - float v = 0; - for (unsigned i = 0; i < points_.length; i++) - { - uint8_t flag = points_[i].flag; - if (flag & 
short_flag) - { - if (unlikely (!bytes.check_range (p))) return false; - if (flag & same_flag) - v += *p++; - else - v -= *p++; - } - else - { - if (!(flag & same_flag)) - { - if (unlikely (!bytes.check_range ((const HBUINT16 *) p))) return false; - v += *(const HBINT16 *) p; - p += HBINT16::static_size; - } - } - setter (points_[i], v); - } - return true; - } - - bool get_contour_points (contour_point_vector_t &points_ /* OUT */, - bool phantom_only = false) const - { - const HBUINT16 *endPtsOfContours = &StructAfter<HBUINT16> (header); - int num_contours = header.numberOfContours; - if (unlikely (!bytes.check_range (&endPtsOfContours[num_contours + 1]))) return false; - unsigned int num_points = endPtsOfContours[num_contours - 1] + 1; - - points_.resize (num_points); - for (unsigned int i = 0; i < points_.length; i++) points_[i].init (); - if (phantom_only) return true; - - for (int i = 0; i < num_contours; i++) - points_[endPtsOfContours[i]].is_end_point = true; - - /* Skip instructions */ - const HBUINT8 *p = &StructAtOffset<HBUINT8> (&endPtsOfContours[num_contours + 1], - endPtsOfContours[num_contours]); - - /* Read flags */ - for (unsigned int i = 0; i < num_points; i++) - { - if (unlikely (!bytes.check_range (p))) return false; - uint8_t flag = *p++; - points_[i].flag = flag; - if (flag & FLAG_REPEAT) - { - if (unlikely (!bytes.check_range (p))) return false; - unsigned int repeat_count = *p++; - while ((repeat_count-- > 0) && (++i < num_points)) - points_[i].flag = flag; - } - } - - /* Read x & y coordinates */ - return read_points (p, points_, bytes, [] (contour_point_t &p, float v) { p.x = v; }, - FLAG_X_SHORT, FLAG_X_SAME) - && read_points (p, points_, bytes, [] (contour_point_t &p, float v) { p.y = v; }, - FLAG_Y_SHORT, FLAG_Y_SAME); - } - }; - - struct CompositeGlyph - { - const GlyphHeader &header; - hb_bytes_t bytes; - CompositeGlyph (const GlyphHeader &header_, hb_bytes_t bytes_) : - header (header_), bytes (bytes_) {} - - composite_iter_t get_iterator () const - { return composite_iter_t (bytes, &StructAfter<CompositeGlyphChain, GlyphHeader> (header)); } - - unsigned int instructions_length (hb_bytes_t bytes) const - { - unsigned int start = bytes.length; - unsigned int end = bytes.length; - const CompositeGlyphChain *last = nullptr; - for (auto &item : get_iterator ()) - last = &item; - if (unlikely (!last)) return 0; - - if (last->has_instructions ()) - start = (char *) last - &bytes + last->get_size (); - if (unlikely (start > end)) return 0; - return end - start; - } - - /* Trimming for composites not implemented. - * If removing hints it falls out of that. 
*/ - const Glyph trim_padding () const { return Glyph (bytes); } - - void drop_hints () - { - for (const auto &_ : get_iterator ()) - const_cast<CompositeGlyphChain &> (_).drop_instructions_flag (); - } - - /* Chop instructions off the end */ - void drop_hints_bytes (hb_bytes_t &dest_start) const - { dest_start = bytes.sub_array (0, bytes.length - instructions_length (bytes)); } - - void set_overlaps_flag () - { - const_cast<CompositeGlyphChain &> (StructAfter<CompositeGlyphChain, GlyphHeader> (header)) - .set_overlaps_flag (); - } - }; - - enum glyph_type_t { EMPTY, SIMPLE, COMPOSITE }; - - public: - composite_iter_t get_composite_iterator () const - { - if (type != COMPOSITE) return composite_iter_t (); - return CompositeGlyph (*header, bytes).get_iterator (); - } - - const Glyph trim_padding () const - { - switch (type) { - case COMPOSITE: return CompositeGlyph (*header, bytes).trim_padding (); - case SIMPLE: return SimpleGlyph (*header, bytes).trim_padding (); - default: return bytes; - } - } - - void drop_hints () - { - switch (type) { - case COMPOSITE: CompositeGlyph (*header, bytes).drop_hints (); return; - case SIMPLE: SimpleGlyph (*header, bytes).drop_hints (); return; - default: return; - } - } - - void set_overlaps_flag () - { - switch (type) { - case COMPOSITE: CompositeGlyph (*header, bytes).set_overlaps_flag (); return; - case SIMPLE: SimpleGlyph (*header, bytes).set_overlaps_flag (); return; - default: return; - } - } - - void drop_hints_bytes (hb_bytes_t &dest_start, hb_bytes_t &dest_end) const - { - switch (type) { - case COMPOSITE: CompositeGlyph (*header, bytes).drop_hints_bytes (dest_start); return; - case SIMPLE: SimpleGlyph (*header, bytes).drop_hints_bytes (dest_start, dest_end); return; - default: return; - } - } - - /* Note: Recursively calls itself. 
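The phantom-point setup in the removed get_points() below can be summarized in isolation: four synthetic points carrying the horizontal and vertical metrics are appended after the real outline points, so variation deltas can move the metrics along with the outline. A sketch using the same PHANTOM_* order (names illustrative; tsb is the vmtx top side bearing used on the vertical path):

#include <cassert>

struct Point { float x = 0.f, y = 0.f; };

void init_phantoms (Point (&ph)[4],
                    int x_min, int y_max, int lsb, int tsb,
                    unsigned h_adv, unsigned v_adv)
{
  int h_delta = x_min - lsb;  // left-side-bearing correction
  int v_orig  = y_max + tsb;  // vertical origin
  ph[0].x = (float) h_delta;                 // PHANTOM_LEFT
  ph[1].x = (float) ((int) h_adv + h_delta); // PHANTOM_RIGHT
  ph[2].y = (float) v_orig;                  // PHANTOM_TOP
  ph[3].y = (float) (v_orig - (int) v_adv);  // PHANTOM_BOTTOM
}

int main ()
{
  Point ph[4];
  init_phantoms (ph, 10, 700, 8, 50, 500, 1000);
  assert (ph[1].x == 502.f && ph[3].y == -250.f);
}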
- * all_points includes phantom points - */ - bool get_points (hb_font_t *font, const accelerator_t &glyf_accelerator, - contour_point_vector_t &all_points /* OUT */, - bool phantom_only = false, - unsigned int depth = 0) const - { - if (unlikely (depth > HB_MAX_NESTING_LEVEL)) return false; - contour_point_vector_t points; - - switch (type) { - case COMPOSITE: - { - /* pseudo component points for each component in composite glyph */ - unsigned num_points = hb_len (CompositeGlyph (*header, bytes).get_iterator ()); - if (unlikely (!points.resize (num_points))) return false; - for (unsigned i = 0; i < points.length; i++) - points[i].init (); - break; - } - case SIMPLE: - if (unlikely (!SimpleGlyph (*header, bytes).get_contour_points (points, phantom_only))) - return false; - break; - } - - /* Init phantom points */ - if (unlikely (!points.resize (points.length + PHANTOM_COUNT))) return false; - hb_array_t<contour_point_t> phantoms = points.sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT); - { - for (unsigned i = 0; i < PHANTOM_COUNT; ++i) phantoms[i].init (); - int h_delta = (int) header->xMin - - glyf_accelerator.hmtx->get_side_bearing (gid); - int v_orig = (int) header->yMax + -#ifndef HB_NO_VERTICAL - glyf_accelerator.vmtx->get_side_bearing (gid) -#else - 0 -#endif - ; - unsigned h_adv = glyf_accelerator.hmtx->get_advance (gid); - unsigned v_adv = -#ifndef HB_NO_VERTICAL - glyf_accelerator.vmtx->get_advance (gid) -#else - - font->face->get_upem () -#endif - ; - phantoms[PHANTOM_LEFT].x = h_delta; - phantoms[PHANTOM_RIGHT].x = h_adv + h_delta; - phantoms[PHANTOM_TOP].y = v_orig; - phantoms[PHANTOM_BOTTOM].y = v_orig - (int) v_adv; - } - -#ifndef HB_NO_VAR - glyf_accelerator.gvar->apply_deltas_to_points (gid, font, points.as_array ()); -#endif - - switch (type) { - case SIMPLE: - all_points.extend (points.as_array ()); - break; - case COMPOSITE: - { - unsigned int comp_index = 0; - for (auto &item : get_composite_iterator ()) - { - contour_point_vector_t comp_points; - if (unlikely (!glyf_accelerator.glyph_for_gid (item.get_glyph_index ()) - .get_points (font, glyf_accelerator, comp_points, - phantom_only, depth + 1) - || comp_points.length < PHANTOM_COUNT)) - return false; - - /* Copy phantom points from component if USE_MY_METRICS flag set */ - if (item.is_use_my_metrics ()) - for (unsigned int i = 0; i < PHANTOM_COUNT; i++) - phantoms[i] = comp_points[comp_points.length - PHANTOM_COUNT + i]; - - /* Apply component transformation & translation */ - item.transform_points (comp_points); - - /* Apply translation from gvar */ - comp_points.translate (points[comp_index]); - - if (item.is_anchored ()) - { - unsigned int p1, p2; - item.get_anchor_points (p1, p2); - if (likely (p1 < all_points.length && p2 < comp_points.length)) - { - contour_point_t delta; - delta.init (all_points[p1].x - comp_points[p2].x, - all_points[p1].y - comp_points[p2].y); - - comp_points.translate (delta); - } - } - - all_points.extend (comp_points.sub_array (0, comp_points.length - PHANTOM_COUNT)); - - comp_index++; - } - - all_points.extend (phantoms); - } break; - default: - all_points.extend (phantoms); - } - - if (depth == 0) /* Apply at top level */ - { - /* Undocumented rasterizer behavior: - * Shift points horizontally by the updated left side bearing - */ - contour_point_t delta; - delta.init (-phantoms[PHANTOM_LEFT].x, 0.f); - if (delta.x) all_points.translate (delta); - } - - return true; - } - - bool get_extents (hb_font_t *font, const accelerator_t &glyf_accelerator, - hb_glyph_extents_t *extents) 
const - { - if (type == EMPTY) return true; /* Empty glyph; zero extents. */ - return header->get_extents (font, glyf_accelerator, gid, extents); - } - - hb_bytes_t get_bytes () const { return bytes; } - - Glyph (hb_bytes_t bytes_ = hb_bytes_t (), - hb_codepoint_t gid_ = (hb_codepoint_t) -1) : bytes (bytes_), gid (gid_), - header (bytes.as<GlyphHeader> ()) - { - int num_contours = header->numberOfContours; - if (unlikely (num_contours == 0)) type = EMPTY; - else if (num_contours > 0) type = SIMPLE; - else type = COMPOSITE; /* negative numbers */ - } - - protected: - hb_bytes_t bytes; - hb_codepoint_t gid; - const GlyphHeader *header; - unsigned type; - }; - - struct accelerator_t - { - accelerator_t (hb_face_t *face) - { - short_offset = false; - num_glyphs = 0; - loca_table = nullptr; - glyf_table = nullptr; -#ifndef HB_NO_VAR - gvar = nullptr; -#endif - hmtx = nullptr; -#ifndef HB_NO_VERTICAL - vmtx = nullptr; -#endif - const OT::head &head = *face->table.head; - if (head.indexToLocFormat > 1 || head.glyphDataFormat > 0) - /* Unknown format. Leave num_glyphs=0, that takes care of disabling us. */ - return; - short_offset = 0 == head.indexToLocFormat; - - loca_table = face->table.loca.get_blob (); // Needs no destruct! - glyf_table = hb_sanitize_context_t ().reference_table<glyf> (face); -#ifndef HB_NO_VAR - gvar = face->table.gvar; -#endif - hmtx = face->table.hmtx; -#ifndef HB_NO_VERTICAL - vmtx = face->table.vmtx; -#endif - - num_glyphs = hb_max (1u, loca_table.get_length () / (short_offset ? 2 : 4)) - 1; - num_glyphs = hb_min (num_glyphs, face->get_num_glyphs ()); - } - ~accelerator_t () - { - glyf_table.destroy (); - } - - bool has_data () const { return num_glyphs; } - - protected: - template<typename T> - bool get_points (hb_font_t *font, hb_codepoint_t gid, T consumer) const - { - if (gid >= num_glyphs) return false; - - /* Making this allocfree is not that easy - https://github.com/harfbuzz/harfbuzz/issues/2095 - mostly because of gvar handling in VF fonts, - perhaps a separate path for non-VF fonts can be considered */ - contour_point_vector_t all_points; - - bool phantom_only = !consumer.is_consuming_contour_points (); - if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, phantom_only))) - return false; - - if (consumer.is_consuming_contour_points ()) - { - for (unsigned point_index = 0; point_index + 4 < all_points.length; ++point_index) - consumer.consume_point (all_points[point_index]); - consumer.points_end (); - } - - /* Where to write phantoms, nullptr if not requested */ - contour_point_t *phantoms = consumer.get_phantoms_sink (); - if (phantoms) - for (unsigned i = 0; i < PHANTOM_COUNT; ++i) - phantoms[i] = all_points[all_points.length - PHANTOM_COUNT + i]; - - return true; - } - -#ifndef HB_NO_VAR - struct points_aggregator_t - { - hb_font_t *font; - hb_glyph_extents_t *extents; - contour_point_t *phantoms; - - struct contour_bounds_t - { - contour_bounds_t () { min_x = min_y = FLT_MAX; max_x = max_y = -FLT_MAX; } - - void add (const contour_point_t &p) - { - min_x = hb_min (min_x, p.x); - min_y = hb_min (min_y, p.y); - max_x = hb_max (max_x, p.x); - max_y = hb_max (max_y, p.y); - } - - bool empty () const { return (min_x >= max_x) || (min_y >= max_y); } - - void get_extents (hb_font_t *font, hb_glyph_extents_t *extents) - { - if (unlikely (empty ())) - { - extents->width = 0; - extents->x_bearing = 0; - extents->height = 0; - extents->y_bearing = 0; - return; - } - extents->x_bearing = font->em_scalef_x (min_x); - extents->width = font->em_scalef_x 
(max_x) - extents->x_bearing; - extents->y_bearing = font->em_scalef_y (max_y); - extents->height = font->em_scalef_y (min_y) - extents->y_bearing; - } - - protected: - float min_x, min_y, max_x, max_y; - } bounds; - - points_aggregator_t (hb_font_t *font_, hb_glyph_extents_t *extents_, contour_point_t *phantoms_) - { - font = font_; - extents = extents_; - phantoms = phantoms_; - if (extents) bounds = contour_bounds_t (); - } - - void consume_point (const contour_point_t &point) { bounds.add (point); } - void points_end () { bounds.get_extents (font, extents); } - - bool is_consuming_contour_points () { return extents; } - contour_point_t *get_phantoms_sink () { return phantoms; } - }; - - public: - unsigned - get_advance_var (hb_font_t *font, hb_codepoint_t gid, bool is_vertical) const - { - if (unlikely (gid >= num_glyphs)) return 0; - - bool success = false; - - contour_point_t phantoms[PHANTOM_COUNT]; - if (likely (font->num_coords == gvar->get_axis_count ())) - success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms)); - - if (unlikely (!success)) - return -#ifndef HB_NO_VERTICAL - is_vertical ? vmtx->get_advance (gid) : -#endif - hmtx->get_advance (gid); - - float result = is_vertical - ? phantoms[PHANTOM_TOP].y - phantoms[PHANTOM_BOTTOM].y - : phantoms[PHANTOM_RIGHT].x - phantoms[PHANTOM_LEFT].x; - return hb_clamp (roundf (result), 0.f, (float) UINT_MAX / 2); - } - - int get_side_bearing_var (hb_font_t *font, hb_codepoint_t gid, bool is_vertical) const - { - if (unlikely (gid >= num_glyphs)) return 0; - - hb_glyph_extents_t extents; - - contour_point_t phantoms[PHANTOM_COUNT]; - if (unlikely (!get_points (font, gid, points_aggregator_t (font, &extents, phantoms)))) - return -#ifndef HB_NO_VERTICAL - is_vertical ? vmtx->get_side_bearing (gid) : -#endif - hmtx->get_side_bearing (gid); - - return is_vertical - ? ceilf (phantoms[PHANTOM_TOP].y) - extents.y_bearing - : floorf (phantoms[PHANTOM_LEFT].x); - } -#endif - - public: - bool get_extents (hb_font_t *font, hb_codepoint_t gid, hb_glyph_extents_t *extents) const - { - if (unlikely (gid >= num_glyphs)) return false; - -#ifndef HB_NO_VAR - if (font->num_coords && font->num_coords == gvar->get_axis_count ()) - return get_points (font, gid, points_aggregator_t (font, extents, nullptr)); -#endif - return glyph_for_gid (gid).get_extents (font, *this, extents); - } - - const Glyph - glyph_for_gid (hb_codepoint_t gid, bool needs_padding_removal = false) const - { - if (unlikely (gid >= num_glyphs)) return Glyph (); - - unsigned int start_offset, end_offset; - - if (short_offset) - { - const HBUINT16 *offsets = (const HBUINT16 *) loca_table->dataZ.arrayZ; - start_offset = 2 * offsets[gid]; - end_offset = 2 * offsets[gid + 1]; - } - else - { - const HBUINT32 *offsets = (const HBUINT32 *) loca_table->dataZ.arrayZ; - start_offset = offsets[gid]; - end_offset = offsets[gid + 1]; - } - - if (unlikely (start_offset > end_offset || end_offset > glyf_table.get_length ())) - return Glyph (); - - Glyph glyph (hb_bytes_t ((const char *) this->glyf_table + start_offset, - end_offset - start_offset), gid); - return needs_padding_removal ? 
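/* Editorial note, not part of the diff: loca offset resolution in
 * glyph_for_gid () above, with assumed entries. The short format stores
 * each byte offset divided by two in a HBUINT16: offsets[gid] = 100 and
 * offsets[gid + 1] = 150 yield start_offset = 200 and end_offset = 300,
 * i.e. the glyph spans bytes [200, 300) of the glyf table. The long
 * format stores 32-bit byte offsets directly; a decreasing pair or an
 * end offset past the table length yields the null Glyph. */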
glyph.trim_padding () : glyph; - } - - unsigned - add_gid_and_children (hb_codepoint_t gid, - hb_set_t *gids_to_retain, - unsigned depth = 0, - unsigned operation_count = 0) const - { - if (unlikely (depth++ > HB_MAX_NESTING_LEVEL)) return operation_count; - if (unlikely (operation_count++ > HB_MAX_COMPOSITE_OPERATIONS)) return operation_count; - /* Check if is already visited */ - if (gids_to_retain->has (gid)) return operation_count; - - gids_to_retain->add (gid); - - auto it = glyph_for_gid (gid).get_composite_iterator (); - while (it) - { - auto item = *(it++); - operation_count = - add_gid_and_children (item.get_glyph_index (), gids_to_retain, depth, operation_count); - } - - return operation_count; - } - - struct path_builder_t - { - hb_font_t *font; - hb_draw_session_t *draw_session; - - struct optional_point_t - { - optional_point_t () { has_data = false; } - optional_point_t (float x_, float y_) { x = x_; y = y_; has_data = true; } - - bool has_data; - float x; - float y; - - optional_point_t lerp (optional_point_t p, float t) - { return optional_point_t (x + t * (p.x - x), y + t * (p.y - y)); } - } first_oncurve, first_offcurve, last_offcurve; - - path_builder_t (hb_font_t *font_, hb_draw_session_t &draw_session_) - { - font = font_; - draw_session = &draw_session_; - first_oncurve = first_offcurve = last_offcurve = optional_point_t (); - } - - /* based on https://github.com/RazrFalcon/ttf-parser/blob/4f32821/src/glyf.rs#L287 - See also: - * https://developer.apple.com/fonts/TrueType-Reference-Manual/RM01/Chap1.html - * https://stackoverflow.com/a/20772557 */ - void consume_point (const contour_point_t &point) - { - bool is_on_curve = point.flag & Glyph::FLAG_ON_CURVE; - optional_point_t p (point.x, point.y); - if (!first_oncurve.has_data) - { - if (is_on_curve) - { - first_oncurve = p; - draw_session->move_to (font->em_fscalef_x (p.x), font->em_fscalef_y (p.y)); - } - else - { - if (first_offcurve.has_data) - { - optional_point_t mid = first_offcurve.lerp (p, .5f); - first_oncurve = mid; - last_offcurve = p; - draw_session->move_to (font->em_fscalef_x (mid.x), font->em_fscalef_y (mid.y)); - } - else - first_offcurve = p; - } - } - else - { - if (last_offcurve.has_data) - { - if (is_on_curve) - { - draw_session->quadratic_to (font->em_fscalef_x (last_offcurve.x), font->em_fscalef_y (last_offcurve.y), - font->em_fscalef_x (p.x), font->em_fscalef_y (p.y)); - last_offcurve = optional_point_t (); - } - else - { - optional_point_t mid = last_offcurve.lerp (p, .5f); - draw_session->quadratic_to (font->em_fscalef_x (last_offcurve.x), font->em_fscalef_y (last_offcurve.y), - font->em_fscalef_x (mid.x), font->em_fscalef_y (mid.y)); - last_offcurve = p; - } - } - else - { - if (is_on_curve) - draw_session->line_to (font->em_fscalef_x (p.x), font->em_fscalef_y (p.y)); - else - last_offcurve = p; - } - } - - if (point.is_end_point) - { - if (first_offcurve.has_data && last_offcurve.has_data) - { - optional_point_t mid = last_offcurve.lerp (first_offcurve, .5f); - draw_session->quadratic_to (font->em_fscalef_x (last_offcurve.x), font->em_fscalef_y (last_offcurve.y), - font->em_fscalef_x (mid.x), font->em_fscalef_y (mid.y)); - last_offcurve = optional_point_t (); - /* now check the rest */ - } - - if (first_offcurve.has_data && first_oncurve.has_data) - draw_session->quadratic_to (font->em_fscalef_x (first_offcurve.x), font->em_fscalef_y (first_offcurve.y), - font->em_fscalef_x (first_oncurve.x), font->em_fscalef_y (first_oncurve.y)); - else if (last_offcurve.has_data && first_oncurve.has_data) 
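/* Editorial note, not part of the diff: consume_point () above encodes the
 * TrueType rule that two consecutive off-curve points imply an on-curve
 * point at their midpoint, which is what the .5f lerp computes. Assumed
 * example: off-curve (0,0) followed by off-curve (10,4) implies on-curve
 * (5,2), emitted as quadratic_to with control (0,0) and end (5,2), while
 * (10,4) is kept as the control point of the next segment. */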
- draw_session->quadratic_to (font->em_fscalef_x (last_offcurve.x), font->em_fscalef_y (last_offcurve.y), - font->em_fscalef_x (first_oncurve.x), font->em_fscalef_y (first_oncurve.y)); - else if (first_oncurve.has_data) - draw_session->line_to (font->em_fscalef_x (first_oncurve.x), font->em_fscalef_y (first_oncurve.y)); - else if (first_offcurve.has_data) - { - float x = font->em_fscalef_x (first_offcurve.x), y = font->em_fscalef_x (first_offcurve.y); - draw_session->move_to (x, y); - draw_session->quadratic_to (x, y, x, y); - } - - /* Getting ready for the next contour */ - first_oncurve = first_offcurve = last_offcurve = optional_point_t (); - draw_session->close_path (); - } - } - void points_end () {} - - bool is_consuming_contour_points () { return true; } - contour_point_t *get_phantoms_sink () { return nullptr; } - }; - - bool - get_path (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session) const - { return get_points (font, gid, path_builder_t (font, draw_session)); } - -#ifndef HB_NO_VAR - const gvar_accelerator_t *gvar; -#endif - const hmtx_accelerator_t *hmtx; -#ifndef HB_NO_VERTICAL - const vmtx_accelerator_t *vmtx; -#endif - - private: - bool short_offset; - unsigned int num_glyphs; - hb_blob_ptr_t<loca> loca_table; - hb_blob_ptr_t<glyf> glyf_table; - }; - - struct SubsetGlyph - { - hb_codepoint_t new_gid; - hb_codepoint_t old_gid; - Glyph source_glyph; - hb_bytes_t dest_start; /* region of source_glyph to copy first */ - hb_bytes_t dest_end; /* region of source_glyph to copy second */ - - bool serialize (hb_serialize_context_t *c, - bool use_short_loca, - const hb_subset_plan_t *plan) const - { - TRACE_SERIALIZE (this); - - hb_bytes_t dest_glyph = dest_start.copy (c); - dest_glyph = hb_bytes_t (&dest_glyph, dest_glyph.length + dest_end.copy (c).length); - unsigned int pad_length = use_short_loca ? padding () : 0; - DEBUG_MSG (SUBSET, nullptr, "serialize %d byte glyph, width %d pad %d", dest_glyph.length, dest_glyph.length + pad_length, pad_length); - - HBUINT8 pad; - pad = 0; - while (pad_length > 0) - { - c->embed (pad); - pad_length--; - } - - if (unlikely (!dest_glyph.length)) return_trace (true); - - /* update components gids */ - for (auto &_ : Glyph (dest_glyph).get_composite_iterator ()) - { - hb_codepoint_t new_gid; - if (plan->new_gid_for_old_gid (_.get_glyph_index (), &new_gid)) - const_cast<CompositeGlyphChain &> (_).set_glyph_index (new_gid); - } - - if (plan->flags & HB_SUBSET_FLAGS_NO_HINTING) - Glyph (dest_glyph).drop_hints (); - - if (plan->flags & HB_SUBSET_FLAGS_SET_OVERLAPS_FLAG) - Glyph (dest_glyph).set_overlaps_flag (); - - return_trace (true); - } - - void drop_hints_bytes () - { source_glyph.drop_hints_bytes (dest_start, dest_end); } - - unsigned int length () const { return dest_start.length + dest_end.length; } - /* pad to 2 to ensure 2-byte loca will be ok */ - unsigned int padding () const { return length () % 2; } - unsigned int padded_size () const { return length () + padding (); } - }; - - protected: - UnsizedArrayOf<HBUINT8> - dataZ; /* Glyphs data. */ - public: - DEFINE_SIZE_MIN (0); /* In reality, this is UNBOUNDED() type; but since we always - * check the size externally, allow Null() object of it by - * defining it _MIN instead. 
*/ -}; - -struct glyf_accelerator_t : glyf::accelerator_t { - glyf_accelerator_t (hb_face_t *face) : glyf::accelerator_t (face) {} -}; - - -} /* namespace OT */ - +#include "OT/glyf/glyf.hh" #endif /* HB_OT_GLYF_TABLE_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-hmtx-table.hh b/thirdparty/harfbuzz/src/hb-ot-hmtx-table.hh index d5e1fc91d2..d0e46e0b0f 100644 --- a/thirdparty/harfbuzz/src/hb-ot-hmtx-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-hmtx-table.hh @@ -242,7 +242,7 @@ struct hmtxvmtx return side_bearing; if (var_table.get_length ()) - return side_bearing + var_table->get_side_bearing_var (glyph, font->coords, font->num_coords); // TODO Optimize?! + return side_bearing + var_table->get_side_bearing_var (glyph, font->coords, font->num_coords); return _glyf_get_side_bearing_var (font, glyph, T::tableTag == HB_OT_TAG_vmtx); #else @@ -284,7 +284,8 @@ struct hmtxvmtx } unsigned int get_advance (hb_codepoint_t glyph, - hb_font_t *font) const + hb_font_t *font, + VariationStore::cache_t *store_cache = nullptr) const { unsigned int advance = get_advance (glyph); @@ -293,7 +294,7 @@ struct hmtxvmtx return advance; if (var_table.get_length ()) - return advance + roundf (var_table->get_advance_var (glyph, font)); // TODO Optimize?! + return advance + roundf (var_table->get_advance_var (glyph, font, store_cache)); // TODO Optimize?! return _glyf_get_advance_var (font, glyph, T::tableTag == HB_OT_TAG_vmtx); #else @@ -310,7 +311,7 @@ struct hmtxvmtx unsigned int default_advance; - private: + public: hb_blob_ptr_t<hmtxvmtx> table; hb_blob_ptr_t<HVARVVAR> var_table; }; diff --git a/thirdparty/harfbuzz/src/hb-ot-layout-common.hh b/thirdparty/harfbuzz/src/hb-ot-layout-common.hh index b644df708d..d343805346 100644 --- a/thirdparty/harfbuzz/src/hb-ot-layout-common.hh +++ b/thirdparty/harfbuzz/src/hb-ot-layout-common.hh @@ -102,7 +102,7 @@ static void ClassDef_remap_and_serialize ( struct hb_prune_langsys_context_t { hb_prune_langsys_context_t (const void *table_, - hb_hashmap_t<unsigned, hb_set_t *> *script_langsys_map_, + hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> *script_langsys_map_, const hb_map_t *duplicate_feature_map_, hb_set_t *new_collected_feature_indexes_) :table (table_), @@ -122,7 +122,7 @@ struct hb_prune_langsys_context_t public: const void *table; - hb_hashmap_t<unsigned, hb_set_t *> *script_langsys_map; + hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> *script_langsys_map; const hb_map_t *duplicate_feature_map; hb_set_t *new_feature_indexes; @@ -162,14 +162,14 @@ struct hb_subset_layout_context_t : hb_subset_context_t *subset_context; const hb_tag_t table_tag; const hb_map_t *lookup_index_map; - const hb_hashmap_t<unsigned, hb_set_t *> *script_langsys_map; + const hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> *script_langsys_map; const hb_map_t *feature_index_map; unsigned cur_script_index; hb_subset_layout_context_t (hb_subset_context_t *c_, hb_tag_t tag_, hb_map_t *lookup_map_, - hb_hashmap_t<unsigned, hb_set_t *> *script_langsys_map_, + hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> *script_langsys_map_, hb_map_t *feature_index_map_) : subset_context (c_), table_tag (tag_), @@ -659,7 +659,8 @@ struct LangSys auto *out = c->serializer->start_embed (*this); if (unlikely (!out || !c->serializer->extend_min (out))) return_trace (false); - out->reqFeatureIndex = l->feature_index_map->has (reqFeatureIndex) ? l->feature_index_map->get (reqFeatureIndex) : 0xFFFFu; + const unsigned *v; + out->reqFeatureIndex = l->feature_index_map->has (reqFeatureIndex, &v) ? 
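/* Editorial note, not part of the diff: the LangSys hunk above swaps the
 * has () + get () pair for hb_hashmap_t::has (key, &value), which reports
 * membership and fetches the mapped value in a single hash lookup. */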
*v : 0xFFFFu; if (!l->visitFeatureIndex (featureIndex.len)) return_trace (false); @@ -722,12 +723,8 @@ struct Script if (!c->script_langsys_map->has (script_index)) { - hb_set_t* empty_set = hb_set_create (); - if (unlikely (!c->script_langsys_map->set (script_index, empty_set))) - { - hb_set_destroy (empty_set); + if (unlikely (!c->script_langsys_map->set (script_index, hb::unique_ptr<hb_set_t> {hb_set_create ()}))) return; - } } unsigned langsys_count = get_lang_sys_count (); @@ -2004,6 +2001,8 @@ struct ClassDefFormat1 return_trace (c->check_struct (this) && classValue.sanitize (c)); } + unsigned cost () const { return 1; } + template <typename set_t> bool collect_coverage (set_t *glyphs) const { @@ -2240,6 +2239,8 @@ struct ClassDefFormat2 return_trace (rangeRecord.sanitize (c)); } + unsigned cost () const { return hb_bit_storage ((unsigned) rangeRecord.len); /* bsearch cost */ } + template <typename set_t> bool collect_coverage (set_t *glyphs) const { @@ -2480,6 +2481,15 @@ struct ClassDef } } + unsigned cost () const + { + switch (u.format) { + case 1: return u.format1.cost (); + case 2: return u.format2.cost (); + default:return 0u; + } + } + /* Might return false if array looks unsorted. * Used for faster rejection of corrupt data. */ template <typename set_t> @@ -2601,14 +2611,27 @@ struct VarRegionAxis DEFINE_SIZE_STATIC (6); }; +#define REGION_CACHE_ITEM_CACHE_INVALID 2.f + struct VarRegionList { + using cache_t = float; + float evaluate (unsigned int region_index, - const int *coords, unsigned int coord_len) const + const int *coords, unsigned int coord_len, + cache_t *cache = nullptr) const { if (unlikely (region_index >= regionCount)) return 0.; + float *cached_value = nullptr; + if (cache) + { + cached_value = &(cache[region_index]); + if (likely (*cached_value != REGION_CACHE_ITEM_CACHE_INVALID)) + return *cached_value; + } + const VarRegionAxis *axes = axesZ.arrayZ + (region_index * axisCount); float v = 1.; @@ -2618,9 +2641,16 @@ struct VarRegionList int coord = i < coord_len ? coords[i] : 0; float factor = axes[i].evaluate (coord); if (factor == 0.f) + { + if (cache) + *cached_value = 0.; return 0.; + } v *= factor; } + + if (cache) + *cached_value = v; return v; } @@ -2668,7 +2698,7 @@ struct VarData { return regionIndices.len; } unsigned int get_row_size () const - { return shortCount + regionIndices.len; } + { return (wordCount () + regionIndices.len) * (longWords () ? 2 : 1); } unsigned int get_size () const { return min_size @@ -2678,13 +2708,17 @@ struct VarData float get_delta (unsigned int inner, const int *coords, unsigned int coord_count, - const VarRegionList ®ions) const + const VarRegionList ®ions, + VarRegionList::cache_t *cache = nullptr) const { if (unlikely (inner >= itemCount)) return 0.; unsigned int count = regionIndices.len; - unsigned int scount = shortCount; + bool is_long = longWords (); + unsigned word_count = wordCount (); + unsigned int scount = is_long ? count - word_count : word_count; + unsigned int lcount = is_long ? 
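/* Editorial note, not part of the diff: a VarData row now stores
 * wordCount () "wide" deltas followed by narrow ones, and the LONG_WORDS
 * flag doubles both widths (32/16-bit instead of 16/8-bit). Worked
 * example of get_row_size () with assumed counts: regionIndices.len = 10
 * and wordCount () = 3 give (3 + 10) * 1 = 13 bytes in short mode
 * (3 int16 + 7 int8) and (3 + 10) * 2 = 26 bytes in long mode
 * (3 int32 + 7 int16). */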
word_count : 0; const HBUINT8 *bytes = get_delta_bytes (); const HBUINT8 *row = bytes + inner * (scount + count); @@ -2692,16 +2726,22 @@ struct VarData float delta = 0.; unsigned int i = 0; - const HBINT16 *scursor = reinterpret_cast<const HBINT16 *> (row); + const HBINT16 *lcursor = reinterpret_cast<const HBINT16 *> (row); + for (; i < lcount; i++) + { + float scalar = regions.evaluate (regionIndices.arrayZ[i], coords, coord_count, cache); + delta += scalar * *lcursor++; + } + const HBINT16 *scursor = reinterpret_cast<const HBINT16 *> (lcursor); for (; i < scount; i++) { - float scalar = regions.evaluate (regionIndices.arrayZ[i], coords, coord_count); + float scalar = regions.evaluate (regionIndices.arrayZ[i], coords, coord_count, cache); delta += scalar * *scursor++; } const HBINT8 *bcursor = reinterpret_cast<const HBINT8 *> (scursor); for (; i < count; i++) { - float scalar = regions.evaluate (regionIndices.arrayZ[i], coords, coord_count); + float scalar = regions.evaluate (regionIndices.arrayZ[i], coords, coord_count, cache); delta += scalar * *bcursor++; } @@ -2725,7 +2765,7 @@ struct VarData TRACE_SANITIZE (this); return_trace (c->check_struct (this) && regionIndices.sanitize (c) && - shortCount <= regionIndices.len && + wordCount () <= regionIndices.len && c->check_range (get_delta_bytes (), itemCount, get_row_size ())); @@ -2740,43 +2780,66 @@ struct VarData if (unlikely (!c->extend_min (this))) return_trace (false); itemCount = inner_map.get_next_value (); - /* Optimize short count */ - unsigned short ri_count = src->regionIndices.len; - enum delta_size_t { kZero=0, kByte, kShort }; + /* Optimize word count */ + unsigned ri_count = src->regionIndices.len; + enum delta_size_t { kZero=0, kNonWord, kWord }; hb_vector_t<delta_size_t> delta_sz; hb_vector_t<unsigned int> ri_map; /* maps old index to new index */ delta_sz.resize (ri_count); ri_map.resize (ri_count); - unsigned int new_short_count = 0; + unsigned int new_word_count = 0; unsigned int r; + + bool has_long = false; + if (src->longWords ()) + { + for (r = 0; r < ri_count; r++) + { + for (unsigned int i = 0; i < inner_map.get_next_value (); i++) + { + unsigned int old = inner_map.backward (i); + int32_t delta = src->get_item_delta (old, r); + if (delta < -65536 || 65535 < delta) + { + has_long = true; + break; + } + } + } + } + + signed min_threshold = has_long ? -65536 : -128; + signed max_threshold = has_long ? +65535 : +127; for (r = 0; r < ri_count; r++) { delta_sz[r] = kZero; for (unsigned int i = 0; i < inner_map.get_next_value (); i++) { unsigned int old = inner_map.backward (i); - int16_t delta = src->get_item_delta (old, r); - if (delta < -128 || 127 < delta) + int32_t delta = src->get_item_delta (old, r); + if (delta < min_threshold || max_threshold < delta) { - delta_sz[r] = kShort; - new_short_count++; + delta_sz[r] = kWord; + new_word_count++; break; } else if (delta != 0) - delta_sz[r] = kByte; + delta_sz[r] = kNonWord; } } - unsigned int short_index = 0; - unsigned int byte_index = new_short_count; + + unsigned int word_index = 0; + unsigned int non_word_index = new_word_count; unsigned int new_ri_count = 0; for (r = 0; r < ri_count; r++) if (delta_sz[r]) { - ri_map[r] = (delta_sz[r] == kShort)? short_index++ : byte_index++; + ri_map[r] = (delta_sz[r] == kWord)? word_index++ : non_word_index++; new_ri_count++; } - shortCount = new_short_count; + wordSizeCount = new_word_count | (has_long ? 
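/* Editorial note, not part of the diff: the serializer above sizes each
 * region column by the largest delta it must hold. With assumed column
 * deltas {0, 90, -300, 70000}: 70000 exceeds +65535, so has_long is set;
 * under the widened thresholds the 0 column is dropped (kZero), 90 and
 * -300 fit 16-bit kNonWord slots, and 70000 takes a 32-bit kWord slot.
 * Without the 70000 column the thresholds are -128..127, making 90 a
 * byte and -300 a 16-bit word. Word columns are re-indexed first so each
 * row keeps the [wide deltas][narrow deltas] layout. */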
0x8000u /* LONG_WORDS */ : 0); + regionIndices.len = new_ri_count; if (unlikely (!c->extend (this))) return_trace (false); @@ -2816,28 +2879,55 @@ struct VarData HBUINT8 *get_delta_bytes () { return &StructAfter<HBUINT8> (regionIndices); } - int16_t get_item_delta (unsigned int item, unsigned int region) const + int32_t get_item_delta (unsigned int item, unsigned int region) const { if ( item >= itemCount || unlikely (region >= regionIndices.len)) return 0; - const HBINT8 *p = (const HBINT8 *)get_delta_bytes () + item * get_row_size (); - if (region < shortCount) - return ((const HBINT16 *)p)[region]; + const HBINT8 *p = (const HBINT8 *) get_delta_bytes () + item * get_row_size (); + unsigned word_count = wordCount (); + bool is_long = longWords (); + if (is_long) + { + if (region < word_count) + return ((const HBINT32 *) p)[region]; + else + return ((const HBINT16 *)(p + HBINT32::static_size * word_count))[region - word_count]; + } else - return (p + HBINT16::static_size * shortCount)[region - shortCount]; + { + if (region < word_count) + return ((const HBINT16 *) p)[region]; + else + return (p + HBINT16::static_size * word_count)[region - word_count]; + } } - void set_item_delta (unsigned int item, unsigned int region, int16_t delta) + void set_item_delta (unsigned int item, unsigned int region, int32_t delta) { HBINT8 *p = (HBINT8 *)get_delta_bytes () + item * get_row_size (); - if (region < shortCount) - ((HBINT16 *)p)[region] = delta; + unsigned word_count = wordCount (); + bool is_long = longWords (); + if (is_long) + { + if (region < word_count) + ((HBINT32 *) p)[region] = delta; + else + ((HBINT16 *)(p + HBINT32::static_size * word_count))[region - word_count] = delta; + } else - (p + HBINT16::static_size * shortCount)[region - shortCount] = delta; + { + if (region < word_count) + ((HBINT16 *) p)[region] = delta; + else + (p + HBINT16::static_size * word_count)[region - word_count] = delta; + } } + bool longWords () const { return wordSizeCount & 0x8000u /* LONG_WORDS */; } + unsigned wordCount () const { return wordSizeCount & 0x7FFFu /* WORD_DELTA_COUNT_MASK */; } + protected: HBUINT16 itemCount; - HBUINT16 shortCount; + HBUINT16 wordSizeCount; Array16Of<HBUINT16> regionIndices; /*UnsizedArrayOf<HBUINT8>bytesX;*/ public: @@ -2846,9 +2936,28 @@ struct VarData struct VariationStore { + using cache_t = VarRegionList::cache_t; + + cache_t *create_cache () const + { + auto &r = this+regions; + unsigned count = r.regionCount; + + float *cache = (float *) hb_malloc (sizeof (float) * count); + if (unlikely (!cache)) return nullptr; + + for (unsigned i = 0; i < count; i++) + cache[i] = REGION_CACHE_ITEM_CACHE_INVALID; + + return cache; + } + + static void destroy_cache (cache_t *cache) { hb_free (cache); } + private: float get_delta (unsigned int outer, unsigned int inner, - const int *coords, unsigned int coord_count) const + const int *coords, unsigned int coord_count, + VarRegionList::cache_t *cache = nullptr) const { #ifdef HB_NO_VAR return 0.f; @@ -2859,16 +2968,18 @@ struct VariationStore return (this+dataSets[outer]).get_delta (inner, coords, coord_count, - this+regions); + this+regions, + cache); } public: float get_delta (unsigned int index, - const int *coords, unsigned int coord_count) const + const int *coords, unsigned int coord_count, + VarRegionList::cache_t *cache = nullptr) const { unsigned int outer = index >> 16; unsigned int inner = index & 0xFFFF; - return get_delta (outer, inner, coords, coord_count); + return get_delta (outer, inner, coords, coord_count, cache); } 
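Editorial aside, not part of the diff: the hunks above add an optional
region-scalar cache to VariationStore. A minimal usage sketch; `store`,
`font`, `idx1` and `idx2` are hypothetical stand-ins, while the
create_cache / get_delta / destroy_cache signatures are the ones
introduced above:

    // Resolve several deltas against one store, reusing region scalars.
    VariationStore::cache_t *cache = store.create_cache (); // nullptr on OOM; get_delta accepts nullptr
    float d1 = store.get_delta (idx1, font->coords, font->num_coords, cache);
    float d2 = store.get_delta (idx2, font->coords, font->num_coords, cache);
    // Each region's scalar is evaluated at most once, then reused.
    VariationStore::destroy_cache (cache); // hb_free; safe on nullptr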
bool sanitize (hb_sanitize_context_t *c) const @@ -2995,6 +3106,8 @@ struct VariationStore DEFINE_SIZE_ARRAY_SIZED (8, dataSets); }; +#undef REGION_CACHE_ITEM_CACHE_INVALID + /* * Feature Variations */ @@ -3462,11 +3575,15 @@ struct VariationDevice private: - hb_position_t get_x_delta (hb_font_t *font, const VariationStore &store) const - { return font->em_scalef_x (get_delta (font, store)); } + hb_position_t get_x_delta (hb_font_t *font, + const VariationStore &store, + VariationStore::cache_t *store_cache = nullptr) const + { return font->em_scalef_x (get_delta (font, store, store_cache)); } - hb_position_t get_y_delta (hb_font_t *font, const VariationStore &store) const - { return font->em_scalef_y (get_delta (font, store)); } + hb_position_t get_y_delta (hb_font_t *font, + const VariationStore &store, + VariationStore::cache_t *store_cache = nullptr) const + { return font->em_scalef_y (get_delta (font, store, store_cache)); } VariationDevice* copy (hb_serialize_context_t *c, const hb_map_t *layout_variation_idx_map) const { @@ -3500,9 +3617,11 @@ struct VariationDevice private: - float get_delta (hb_font_t *font, const VariationStore &store) const + float get_delta (hb_font_t *font, + const VariationStore &store, + VariationStore::cache_t *store_cache = nullptr) const { - return store.get_delta (varIdx, font->coords, font->num_coords); + return store.get_delta (varIdx, font->coords, font->num_coords, (VariationStore::cache_t *) store_cache); } protected: @@ -3525,7 +3644,9 @@ struct DeviceHeader struct Device { - hb_position_t get_x_delta (hb_font_t *font, const VariationStore &store=Null (VariationStore)) const + hb_position_t get_x_delta (hb_font_t *font, + const VariationStore &store=Null (VariationStore), + VariationStore::cache_t *store_cache = nullptr) const { switch (u.b.format) { @@ -3535,13 +3656,15 @@ struct Device #endif #ifndef HB_NO_VAR case 0x8000: - return u.variation.get_x_delta (font, store); + return u.variation.get_x_delta (font, store, store_cache); #endif default: return 0; } } - hb_position_t get_y_delta (hb_font_t *font, const VariationStore &store=Null (VariationStore)) const + hb_position_t get_y_delta (hb_font_t *font, + const VariationStore &store=Null (VariationStore), + VariationStore::cache_t *store_cache = nullptr) const { switch (u.b.format) { @@ -3551,7 +3674,7 @@ struct Device #endif #ifndef HB_NO_VAR case 0x8000: - return u.variation.get_y_delta (font, store); + return u.variation.get_y_delta (font, store, store_cache); #endif default: return 0; diff --git a/thirdparty/harfbuzz/src/hb-ot-layout-gdef-table.hh b/thirdparty/harfbuzz/src/hb-ot-layout-gdef-table.hh index a76d644c4b..9d47bac2b9 100644 --- a/thirdparty/harfbuzz/src/hb-ot-layout-gdef-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-layout-gdef-table.hh @@ -571,7 +571,7 @@ struct GDEF static_assert (((unsigned int) HB_OT_LAYOUT_GLYPH_PROPS_MARK == (unsigned int) LookupFlag::IgnoreMarks), ""); switch (klass) { - default: return 0; + default: return HB_OT_LAYOUT_GLYPH_CLASS_UNCLASSIFIED; case BaseGlyph: return HB_OT_LAYOUT_GLYPH_PROPS_BASE_GLYPH; case LigatureGlyph: return HB_OT_LAYOUT_GLYPH_PROPS_LIGATURE; case MarkGlyph: diff --git a/thirdparty/harfbuzz/src/hb-ot-layout-gpos-table.hh b/thirdparty/harfbuzz/src/hb-ot-layout-gpos-table.hh index 2f9186a2a7..064d31cdff 100644 --- a/thirdparty/harfbuzz/src/hb-ot-layout-gpos-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-layout-gpos-table.hh @@ -29,3090 +29,14 @@ #ifndef HB_OT_LAYOUT_GPOS_TABLE_HH #define HB_OT_LAYOUT_GPOS_TABLE_HH -#include 
"hb-ot-layout-gsubgpos.hh" - +#include "OT/Layout/GPOS.hh" namespace OT { -struct MarkArray; -static void Markclass_closure_and_remap_indexes (const Coverage &mark_coverage, - const MarkArray &mark_array, - const hb_set_t &glyphset, - hb_map_t* klass_mapping /* INOUT */); - -/* buffer **position** var allocations */ -#define attach_chain() var.i16[0] /* glyph to which this attaches to, relative to current glyphs; negative for going back, positive for forward. */ -#define attach_type() var.u8[2] /* attachment type */ -/* Note! if attach_chain() is zero, the value of attach_type() is irrelevant. */ - -enum attach_type_t { - ATTACH_TYPE_NONE = 0X00, - - /* Each attachment should be either a mark or a cursive; can't be both. */ - ATTACH_TYPE_MARK = 0X01, - ATTACH_TYPE_CURSIVE = 0X02, -}; - - -/* Shared Tables: ValueRecord, Anchor Table, and MarkArray */ - -typedef HBUINT16 Value; - -typedef UnsizedArrayOf<Value> ValueRecord; - -struct ValueFormat : HBUINT16 -{ - enum Flags { - xPlacement = 0x0001u, /* Includes horizontal adjustment for placement */ - yPlacement = 0x0002u, /* Includes vertical adjustment for placement */ - xAdvance = 0x0004u, /* Includes horizontal adjustment for advance */ - yAdvance = 0x0008u, /* Includes vertical adjustment for advance */ - xPlaDevice = 0x0010u, /* Includes horizontal Device table for placement */ - yPlaDevice = 0x0020u, /* Includes vertical Device table for placement */ - xAdvDevice = 0x0040u, /* Includes horizontal Device table for advance */ - yAdvDevice = 0x0080u, /* Includes vertical Device table for advance */ - ignored = 0x0F00u, /* Was used in TrueType Open for MM fonts */ - reserved = 0xF000u, /* For future use */ - - devices = 0x00F0u /* Mask for having any Device table */ - }; - -/* All fields are options. Only those available advance the value pointer. 
*/ -#if 0 - HBINT16 xPlacement; /* Horizontal adjustment for - * placement--in design units */ - HBINT16 yPlacement; /* Vertical adjustment for - * placement--in design units */ - HBINT16 xAdvance; /* Horizontal adjustment for - * advance--in design units (only used - * for horizontal writing) */ - HBINT16 yAdvance; /* Vertical adjustment for advance--in - * design units (only used for vertical - * writing) */ - Offset16To<Device> xPlaDevice; /* Offset to Device table for - * horizontal placement--measured from - * beginning of PosTable (may be NULL) */ - Offset16To<Device> yPlaDevice; /* Offset to Device table for vertical - * placement--measured from beginning - * of PosTable (may be NULL) */ - Offset16To<Device> xAdvDevice; /* Offset to Device table for - * horizontal advance--measured from - * beginning of PosTable (may be NULL) */ - Offset16To<Device> yAdvDevice; /* Offset to Device table for vertical - * advance--measured from beginning of - * PosTable (may be NULL) */ -#endif - - IntType& operator = (uint16_t i) { v = i; return *this; } - - unsigned int get_len () const { return hb_popcount ((unsigned int) *this); } - unsigned int get_size () const { return get_len () * Value::static_size; } - - bool apply_value (hb_ot_apply_context_t *c, - const void *base, - const Value *values, - hb_glyph_position_t &glyph_pos) const - { - bool ret = false; - unsigned int format = *this; - if (!format) return ret; - - hb_font_t *font = c->font; - bool horizontal = -#ifndef HB_NO_VERTICAL - HB_DIRECTION_IS_HORIZONTAL (c->direction) -#else - true -#endif - ; - - if (format & xPlacement) glyph_pos.x_offset += font->em_scale_x (get_short (values++, &ret)); - if (format & yPlacement) glyph_pos.y_offset += font->em_scale_y (get_short (values++, &ret)); - if (format & xAdvance) { - if (likely (horizontal)) glyph_pos.x_advance += font->em_scale_x (get_short (values, &ret)); - values++; - } - /* y_advance values grow downward but font-space grows upward, hence negation */ - if (format & yAdvance) { - if (unlikely (!horizontal)) glyph_pos.y_advance -= font->em_scale_y (get_short (values, &ret)); - values++; - } - - if (!has_device ()) return ret; - - bool use_x_device = font->x_ppem || font->num_coords; - bool use_y_device = font->y_ppem || font->num_coords; - - if (!use_x_device && !use_y_device) return ret; - - const VariationStore &store = c->var_store; - - /* pixel -> fractional pixel */ - if (format & xPlaDevice) { - if (use_x_device) glyph_pos.x_offset += (base + get_device (values, &ret)).get_x_delta (font, store); - values++; - } - if (format & yPlaDevice) { - if (use_y_device) glyph_pos.y_offset += (base + get_device (values, &ret)).get_y_delta (font, store); - values++; - } - if (format & xAdvDevice) { - if (horizontal && use_x_device) glyph_pos.x_advance += (base + get_device (values, &ret)).get_x_delta (font, store); - values++; - } - if (format & yAdvDevice) { - /* y_advance values grow downward but font-space grows upward, hence negation */ - if (!horizontal && use_y_device) glyph_pos.y_advance -= (base + get_device (values, &ret)).get_y_delta (font, store); - values++; - } - return ret; - } - - unsigned int get_effective_format (const Value *values) const - { - unsigned int format = *this; - for (unsigned flag = xPlacement; flag <= yAdvDevice; flag = flag << 1) { - if (format & flag) should_drop (*values++, (Flags) flag, &format); - } - - return format; - } - - template<typename Iterator, - hb_requires (hb_is_iterator (Iterator))> - unsigned int get_effective_format (Iterator it) const { - 
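/* Editorial note, not part of the diff: ValueFormat is a bitmask and a
 * ValueRecord holds one 16-bit slot per set bit, in flag order. An
 * assumed format of xPlacement | xAdvance = 0x0005 gives
 * get_len () == 2 and a 4-byte record laid out as [xPlacement, xAdvance].
 * This overload ORs the per-record effective formats together, so a flag
 * survives subsetting if any record carries a non-zero value for it. */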
unsigned int new_format = 0; - - for (const hb_array_t<const Value>& values : it) - new_format = new_format | get_effective_format (&values); - - return new_format; - } - - void copy_values (hb_serialize_context_t *c, - unsigned int new_format, - const void *base, - const Value *values, - const hb_map_t *layout_variation_idx_map) const - { - unsigned int format = *this; - if (!format) return; - - if (format & xPlacement) copy_value (c, new_format, xPlacement, *values++); - if (format & yPlacement) copy_value (c, new_format, yPlacement, *values++); - if (format & xAdvance) copy_value (c, new_format, xAdvance, *values++); - if (format & yAdvance) copy_value (c, new_format, yAdvance, *values++); - - if (format & xPlaDevice) copy_device (c, base, values++, layout_variation_idx_map); - if (format & yPlaDevice) copy_device (c, base, values++, layout_variation_idx_map); - if (format & xAdvDevice) copy_device (c, base, values++, layout_variation_idx_map); - if (format & yAdvDevice) copy_device (c, base, values++, layout_variation_idx_map); - } - - void copy_value (hb_serialize_context_t *c, - unsigned int new_format, - Flags flag, - Value value) const - { - // Filter by new format. - if (!(new_format & flag)) return; - c->copy (value); - } - - void collect_variation_indices (hb_collect_variation_indices_context_t *c, - const void *base, - const hb_array_t<const Value>& values) const - { - unsigned format = *this; - unsigned i = 0; - if (format & xPlacement) i++; - if (format & yPlacement) i++; - if (format & xAdvance) i++; - if (format & yAdvance) i++; - if (format & xPlaDevice) - { - (base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices); - i++; - } - - if (format & ValueFormat::yPlaDevice) - { - (base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices); - i++; - } - - if (format & ValueFormat::xAdvDevice) - { - - (base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices); - i++; - } - - if (format & ValueFormat::yAdvDevice) - { - - (base + get_device (&(values[i]))).collect_variation_indices (c->layout_variation_indices); - i++; - } - } - - private: - bool sanitize_value_devices (hb_sanitize_context_t *c, const void *base, const Value *values) const - { - unsigned int format = *this; - - if (format & xPlacement) values++; - if (format & yPlacement) values++; - if (format & xAdvance) values++; - if (format & yAdvance) values++; - - if ((format & xPlaDevice) && !get_device (values++).sanitize (c, base)) return false; - if ((format & yPlaDevice) && !get_device (values++).sanitize (c, base)) return false; - if ((format & xAdvDevice) && !get_device (values++).sanitize (c, base)) return false; - if ((format & yAdvDevice) && !get_device (values++).sanitize (c, base)) return false; - - return true; - } - - static inline Offset16To<Device>& get_device (Value* value) - { - return *static_cast<Offset16To<Device> *> (value); - } - static inline const Offset16To<Device>& get_device (const Value* value, bool *worked=nullptr) - { - if (worked) *worked |= bool (*value); - return *static_cast<const Offset16To<Device> *> (value); - } - - bool copy_device (hb_serialize_context_t *c, const void *base, - const Value *src_value, const hb_map_t *layout_variation_idx_map) const - { - Value *dst_value = c->copy (*src_value); - - if (!dst_value) return false; - if (*dst_value == 0) return true; - - *dst_value = 0; - c->push (); - if ((base + get_device (src_value)).copy (c, layout_variation_idx_map)) - { - c->add_link 
(*dst_value, c->pop_pack ()); - return true; - } - else - { - c->pop_discard (); - return false; - } - } - - static inline const HBINT16& get_short (const Value* value, bool *worked=nullptr) - { - if (worked) *worked |= bool (*value); - return *reinterpret_cast<const HBINT16 *> (value); - } - - public: - - bool has_device () const - { - unsigned int format = *this; - return (format & devices) != 0; - } - - bool sanitize_value (hb_sanitize_context_t *c, const void *base, const Value *values) const - { - TRACE_SANITIZE (this); - return_trace (c->check_range (values, get_size ()) && (!has_device () || sanitize_value_devices (c, base, values))); - } - - bool sanitize_values (hb_sanitize_context_t *c, const void *base, const Value *values, unsigned int count) const - { - TRACE_SANITIZE (this); - unsigned int len = get_len (); - - if (!c->check_range (values, count, get_size ())) return_trace (false); - - if (!has_device ()) return_trace (true); - - for (unsigned int i = 0; i < count; i++) { - if (!sanitize_value_devices (c, base, values)) - return_trace (false); - values += len; - } - - return_trace (true); - } - - /* Just sanitize referenced Device tables. Doesn't check the values themselves. */ - bool sanitize_values_stride_unsafe (hb_sanitize_context_t *c, const void *base, const Value *values, unsigned int count, unsigned int stride) const - { - TRACE_SANITIZE (this); - - if (!has_device ()) return_trace (true); - - for (unsigned int i = 0; i < count; i++) { - if (!sanitize_value_devices (c, base, values)) - return_trace (false); - values += stride; - } - - return_trace (true); - } - - private: - - void should_drop (Value value, Flags flag, unsigned int* format) const - { - if (value) return; - *format = *format & ~flag; - } - -}; - -template<typename Iterator, typename SrcLookup> -static void SinglePos_serialize (hb_serialize_context_t *c, - const SrcLookup *src, - Iterator it, - const hb_map_t *layout_variation_idx_map); - - -struct AnchorFormat1 -{ - void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id HB_UNUSED, - float *x, float *y) const - { - hb_font_t *font = c->font; - *x = font->em_fscale_x (xCoordinate); - *y = font->em_fscale_y (yCoordinate); - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - return_trace (c->check_struct (this)); - } - - AnchorFormat1* copy (hb_serialize_context_t *c) const - { - TRACE_SERIALIZE (this); - AnchorFormat1* out = c->embed<AnchorFormat1> (this); - if (!out) return_trace (out); - out->format = 1; - return_trace (out); - } - - protected: - HBUINT16 format; /* Format identifier--format = 1 */ - FWORD xCoordinate; /* Horizontal value--in design units */ - FWORD yCoordinate; /* Vertical value--in design units */ - public: - DEFINE_SIZE_STATIC (6); -}; - -struct AnchorFormat2 -{ - void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id, - float *x, float *y) const - { - hb_font_t *font = c->font; - -#ifdef HB_NO_HINTING - *x = font->em_fscale_x (xCoordinate); - *y = font->em_fscale_y (yCoordinate); - return; -#endif - - unsigned int x_ppem = font->x_ppem; - unsigned int y_ppem = font->y_ppem; - hb_position_t cx = 0, cy = 0; - bool ret; - - ret = (x_ppem || y_ppem) && - font->get_glyph_contour_point_for_origin (glyph_id, anchorPoint, HB_DIRECTION_LTR, &cx, &cy); - *x = ret && x_ppem ? cx : font->em_fscale_x (xCoordinate); - *y = ret && y_ppem ? 
cy : font->em_fscale_y (yCoordinate); - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - return_trace (c->check_struct (this)); - } - - AnchorFormat2* copy (hb_serialize_context_t *c) const - { - TRACE_SERIALIZE (this); - return_trace (c->embed<AnchorFormat2> (this)); - } - - protected: - HBUINT16 format; /* Format identifier--format = 2 */ - FWORD xCoordinate; /* Horizontal value--in design units */ - FWORD yCoordinate; /* Vertical value--in design units */ - HBUINT16 anchorPoint; /* Index to glyph contour point */ - public: - DEFINE_SIZE_STATIC (8); -}; - -struct AnchorFormat3 -{ - void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id HB_UNUSED, - float *x, float *y) const - { - hb_font_t *font = c->font; - *x = font->em_fscale_x (xCoordinate); - *y = font->em_fscale_y (yCoordinate); - - if (font->x_ppem || font->num_coords) - *x += (this+xDeviceTable).get_x_delta (font, c->var_store); - if (font->y_ppem || font->num_coords) - *y += (this+yDeviceTable).get_y_delta (font, c->var_store); - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - return_trace (c->check_struct (this) && xDeviceTable.sanitize (c, this) && yDeviceTable.sanitize (c, this)); - } - - AnchorFormat3* copy (hb_serialize_context_t *c, - const hb_map_t *layout_variation_idx_map) const - { - TRACE_SERIALIZE (this); - if (!layout_variation_idx_map) return_trace (nullptr); - - auto *out = c->embed<AnchorFormat3> (this); - if (unlikely (!out)) return_trace (nullptr); - - out->xDeviceTable.serialize_copy (c, xDeviceTable, this, 0, hb_serialize_context_t::Head, layout_variation_idx_map); - out->yDeviceTable.serialize_copy (c, yDeviceTable, this, 0, hb_serialize_context_t::Head, layout_variation_idx_map); - return_trace (out); - } - - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - (this+xDeviceTable).collect_variation_indices (c->layout_variation_indices); - (this+yDeviceTable).collect_variation_indices (c->layout_variation_indices); - } - - protected: - HBUINT16 format; /* Format identifier--format = 3 */ - FWORD xCoordinate; /* Horizontal value--in design units */ - FWORD yCoordinate; /* Vertical value--in design units */ - Offset16To<Device> - xDeviceTable; /* Offset to Device table for X - * coordinate-- from beginning of - * Anchor table (may be NULL) */ - Offset16To<Device> - yDeviceTable; /* Offset to Device table for Y - * coordinate-- from beginning of - * Anchor table (may be NULL) */ - public: - DEFINE_SIZE_STATIC (10); -}; - -struct Anchor -{ - void get_anchor (hb_ot_apply_context_t *c, hb_codepoint_t glyph_id, - float *x, float *y) const - { - *x = *y = 0; - switch (u.format) { - case 1: u.format1.get_anchor (c, glyph_id, x, y); return; - case 2: u.format2.get_anchor (c, glyph_id, x, y); return; - case 3: u.format3.get_anchor (c, glyph_id, x, y); return; - default: return; - } - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - if (!u.format.sanitize (c)) return_trace (false); - switch (u.format) { - case 1: return_trace (u.format1.sanitize (c)); - case 2: return_trace (u.format2.sanitize (c)); - case 3: return_trace (u.format3.sanitize (c)); - default:return_trace (true); - } - } - - bool subset (hb_subset_context_t *c) const - { - TRACE_SUBSET (this); - switch (u.format) { - case 1: return_trace (bool (reinterpret_cast<Anchor *> (u.format1.copy (c->serializer)))); - case 2: - if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING) - { - // AnchorFormat 2 just containins 
extra hinting information, so - // if hints are being dropped convert to format 1. - return_trace (bool (reinterpret_cast<Anchor *> (u.format1.copy (c->serializer)))); - } - return_trace (bool (reinterpret_cast<Anchor *> (u.format2.copy (c->serializer)))); - case 3: return_trace (bool (reinterpret_cast<Anchor *> (u.format3.copy (c->serializer, - c->plan->layout_variation_idx_map)))); - default:return_trace (false); - } - } - - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - switch (u.format) { - case 1: case 2: - return; - case 3: - u.format3.collect_variation_indices (c); - return; - default: return; - } - } - - protected: - union { - HBUINT16 format; /* Format identifier */ - AnchorFormat1 format1; - AnchorFormat2 format2; - AnchorFormat3 format3; - } u; - public: - DEFINE_SIZE_UNION (2, format); -}; - - -struct AnchorMatrix -{ - const Anchor& get_anchor (unsigned int row, unsigned int col, - unsigned int cols, bool *found) const - { - *found = false; - if (unlikely (row >= rows || col >= cols)) return Null (Anchor); - *found = !matrixZ[row * cols + col].is_null (); - return this+matrixZ[row * cols + col]; - } - - template <typename Iterator, - hb_requires (hb_is_iterator (Iterator))> - void collect_variation_indices (hb_collect_variation_indices_context_t *c, - Iterator index_iter) const - { - for (unsigned i : index_iter) - (this+matrixZ[i]).collect_variation_indices (c); - } - - template <typename Iterator, - hb_requires (hb_is_iterator (Iterator))> - bool subset (hb_subset_context_t *c, - unsigned num_rows, - Iterator index_iter) const - { - TRACE_SUBSET (this); - - auto *out = c->serializer->start_embed (this); - - if (!index_iter) return_trace (false); - if (unlikely (!c->serializer->extend_min (out))) return_trace (false); - - out->rows = num_rows; - for (const unsigned i : index_iter) - { - auto *offset = c->serializer->embed (matrixZ[i]); - if (!offset) return_trace (false); - offset->serialize_subset (c, matrixZ[i], this); - } - - return_trace (true); - } - - bool sanitize (hb_sanitize_context_t *c, unsigned int cols) const - { - TRACE_SANITIZE (this); - if (!c->check_struct (this)) return_trace (false); - if (unlikely (hb_unsigned_mul_overflows (rows, cols))) return_trace (false); - unsigned int count = rows * cols; - if (!c->check_array (matrixZ.arrayZ, count)) return_trace (false); - for (unsigned int i = 0; i < count; i++) - if (!matrixZ[i].sanitize (c, this)) return_trace (false); - return_trace (true); - } - - HBUINT16 rows; /* Number of rows */ - UnsizedArrayOf<Offset16To<Anchor>> - matrixZ; /* Matrix of offsets to Anchor tables-- - * from beginning of AnchorMatrix table */ - public: - DEFINE_SIZE_ARRAY (2, matrixZ); -}; - - -struct MarkRecord -{ - friend struct MarkArray; - - unsigned get_class () const { return (unsigned) klass; } - bool sanitize (hb_sanitize_context_t *c, const void *base) const - { - TRACE_SANITIZE (this); - return_trace (c->check_struct (this) && markAnchor.sanitize (c, base)); - } - - MarkRecord *subset (hb_subset_context_t *c, - const void *src_base, - const hb_map_t *klass_mapping) const - { - TRACE_SUBSET (this); - auto *out = c->serializer->embed (this); - if (unlikely (!out)) return_trace (nullptr); - - out->klass = klass_mapping->get (klass); - out->markAnchor.serialize_subset (c, markAnchor, src_base); - return_trace (out); - } - - void collect_variation_indices (hb_collect_variation_indices_context_t *c, - const void *src_base) const - { - (src_base+markAnchor).collect_variation_indices (c); - } - - 
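/* Editorial note, not part of the diff: MarkArray::apply below positions a
 * mark by translating it so its anchor coincides with the base anchor.
 * Assumed example: base anchor (520, 640) and mark anchor (60, -20) give
 * x_offset = 520 - 60 = 460 and y_offset = 640 - (-20) = 660.
 * attach_chain () stores the base's buffer position relative to the mark
 * (negative means earlier in the buffer), so later adjustments can walk
 * the attachment chain. */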
protected: - HBUINT16 klass; /* Class defined for this mark */ - Offset16To<Anchor> - markAnchor; /* Offset to Anchor table--from - * beginning of MarkArray table */ - public: - DEFINE_SIZE_STATIC (4); -}; - -struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Coverage order */ -{ - bool apply (hb_ot_apply_context_t *c, - unsigned int mark_index, unsigned int glyph_index, - const AnchorMatrix &anchors, unsigned int class_count, - unsigned int glyph_pos) const - { - TRACE_APPLY (this); - hb_buffer_t *buffer = c->buffer; - const MarkRecord &record = Array16Of<MarkRecord>::operator[](mark_index); - unsigned int mark_class = record.klass; - - const Anchor& mark_anchor = this + record.markAnchor; - bool found; - const Anchor& glyph_anchor = anchors.get_anchor (glyph_index, mark_class, class_count, &found); - /* If this subtable doesn't have an anchor for this base and this class, - * return false such that the subsequent subtables have a chance at it. */ - if (unlikely (!found)) return_trace (false); - - float mark_x, mark_y, base_x, base_y; - - buffer->unsafe_to_break (glyph_pos, buffer->idx + 1); - mark_anchor.get_anchor (c, buffer->cur().codepoint, &mark_x, &mark_y); - glyph_anchor.get_anchor (c, buffer->info[glyph_pos].codepoint, &base_x, &base_y); - - hb_glyph_position_t &o = buffer->cur_pos(); - o.x_offset = roundf (base_x - mark_x); - o.y_offset = roundf (base_y - mark_y); - o.attach_type() = ATTACH_TYPE_MARK; - o.attach_chain() = (int) glyph_pos - (int) buffer->idx; - buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT; - - buffer->idx++; - return_trace (true); - } - - template <typename Iterator, - hb_requires (hb_is_iterator (Iterator))> - bool subset (hb_subset_context_t *c, - Iterator coverage, - const hb_map_t *klass_mapping) const - { - TRACE_SUBSET (this); - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - - auto* out = c->serializer->start_embed (this); - if (unlikely (!c->serializer->extend_min (out))) return_trace (false); - - auto mark_iter = - + hb_zip (coverage, this->iter ()) - | hb_filter (glyphset, hb_first) - | hb_map (hb_second) - ; - - unsigned new_length = 0; - for (const auto& mark_record : mark_iter) { - if (unlikely (!mark_record.subset (c, this, klass_mapping))) - return_trace (false); - new_length++; - } - - if (unlikely (!c->serializer->check_assign (out->len, new_length, - HB_SERIALIZE_ERROR_ARRAY_OVERFLOW))) - return_trace (false); - - return_trace (true); - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - return_trace (Array16Of<MarkRecord>::sanitize (c, this)); - } -}; - - -/* Lookups */ - -struct SinglePosFormat1 -{ - bool intersects (const hb_set_t *glyphs) const - { return (this+coverage).intersects (glyphs); } - - void closure_lookups (hb_closure_lookups_context_t *c) const {} - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - if (!valueFormat.has_device ()) return; - - auto it = - + hb_iter (this+coverage) - | hb_filter (c->glyph_set) - ; - - if (!it) return; - valueFormat.collect_variation_indices (c, this, values.as_array (valueFormat.get_len ())); - } - - void collect_glyphs (hb_collect_glyphs_context_t *c) const - { if (unlikely (!(this+coverage).collect_coverage (c->input))) return; } - - const Coverage &get_coverage () const { return this+coverage; } - - ValueFormat get_value_format () const { return valueFormat; } - - bool apply (hb_ot_apply_context_t *c) const - { - TRACE_APPLY (this); - hb_buffer_t *buffer = c->buffer; - unsigned 
int index = (this+coverage).get_coverage (buffer->cur().codepoint); - if (likely (index == NOT_COVERED)) return_trace (false); - - valueFormat.apply_value (c, this, values, buffer->cur_pos()); - - buffer->idx++; - return_trace (true); - } - - template<typename Iterator, - typename SrcLookup, - hb_requires (hb_is_iterator (Iterator))> - void serialize (hb_serialize_context_t *c, - const SrcLookup *src, - Iterator it, - ValueFormat newFormat, - const hb_map_t *layout_variation_idx_map) - { - if (unlikely (!c->extend_min (this))) return; - if (unlikely (!c->check_assign (valueFormat, - newFormat, - HB_SERIALIZE_ERROR_INT_OVERFLOW))) return; - - for (const hb_array_t<const Value>& _ : + it | hb_map (hb_second)) - { - src->get_value_format ().copy_values (c, newFormat, src, &_, layout_variation_idx_map); - // Only serialize the first entry in the iterator, the rest are assumed to - // be the same. - break; - } - - auto glyphs = - + it - | hb_map_retains_sorting (hb_first) - ; - - coverage.serialize_serialize (c, glyphs); - } - - bool subset (hb_subset_context_t *c) const - { - TRACE_SUBSET (this); - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - const hb_map_t &glyph_map = *c->plan->glyph_map; - - auto it = - + hb_iter (this+coverage) - | hb_filter (glyphset) - | hb_map_retains_sorting (glyph_map) - | hb_zip (hb_repeat (values.as_array (valueFormat.get_len ()))) - ; - - bool ret = bool (it); - SinglePos_serialize (c->serializer, this, it, c->plan->layout_variation_idx_map); - return_trace (ret); - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - return_trace (c->check_struct (this) && - coverage.sanitize (c, this) && - valueFormat.sanitize_value (c, this, values)); - } - - protected: - HBUINT16 format; /* Format identifier--format = 1 */ - Offset16To<Coverage> - coverage; /* Offset to Coverage table--from - * beginning of subtable */ - ValueFormat valueFormat; /* Defines the types of data in the - * ValueRecord */ - ValueRecord values; /* Defines positioning - * value(s)--applied to all glyphs in - * the Coverage table */ - public: - DEFINE_SIZE_ARRAY (6, values); -}; - -struct SinglePosFormat2 -{ - bool intersects (const hb_set_t *glyphs) const - { return (this+coverage).intersects (glyphs); } - - void closure_lookups (hb_closure_lookups_context_t *c) const {} - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - if (!valueFormat.has_device ()) return; - - auto it = - + hb_zip (this+coverage, hb_range ((unsigned) valueCount)) - | hb_filter (c->glyph_set, hb_first) - ; - - if (!it) return; - - unsigned sub_length = valueFormat.get_len (); - const hb_array_t<const Value> values_array = values.as_array (valueCount * sub_length); - - for (unsigned i : + it - | hb_map (hb_second)) - valueFormat.collect_variation_indices (c, this, values_array.sub_array (i * sub_length, sub_length)); - - } - - void collect_glyphs (hb_collect_glyphs_context_t *c) const - { if (unlikely (!(this+coverage).collect_coverage (c->input))) return; } - - const Coverage &get_coverage () const { return this+coverage; } - - ValueFormat get_value_format () const { return valueFormat; } - - bool apply (hb_ot_apply_context_t *c) const - { - TRACE_APPLY (this); - hb_buffer_t *buffer = c->buffer; - unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint); - if (likely (index == NOT_COVERED)) return_trace (false); - - if (likely (index >= valueCount)) return_trace (false); - - valueFormat.apply_value (c, this, - &values[index * 
valueFormat.get_len ()], - buffer->cur_pos()); - - buffer->idx++; - return_trace (true); - } - - template<typename Iterator, - typename SrcLookup, - hb_requires (hb_is_iterator (Iterator))> - void serialize (hb_serialize_context_t *c, - const SrcLookup *src, - Iterator it, - ValueFormat newFormat, - const hb_map_t *layout_variation_idx_map) - { - auto out = c->extend_min (this); - if (unlikely (!out)) return; - if (unlikely (!c->check_assign (valueFormat, newFormat, HB_SERIALIZE_ERROR_INT_OVERFLOW))) return; - if (unlikely (!c->check_assign (valueCount, it.len (), HB_SERIALIZE_ERROR_ARRAY_OVERFLOW))) return; - - + it - | hb_map (hb_second) - | hb_apply ([&] (hb_array_t<const Value> _) - { src->get_value_format ().copy_values (c, newFormat, src, &_, layout_variation_idx_map); }) - ; - - auto glyphs = - + it - | hb_map_retains_sorting (hb_first) - ; - - coverage.serialize_serialize (c, glyphs); - } - - bool subset (hb_subset_context_t *c) const - { - TRACE_SUBSET (this); - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - const hb_map_t &glyph_map = *c->plan->glyph_map; - - unsigned sub_length = valueFormat.get_len (); - auto values_array = values.as_array (valueCount * sub_length); - - auto it = - + hb_zip (this+coverage, hb_range ((unsigned) valueCount)) - | hb_filter (glyphset, hb_first) - | hb_map_retains_sorting ([&] (const hb_pair_t<hb_codepoint_t, unsigned>& _) - { - return hb_pair (glyph_map[_.first], - values_array.sub_array (_.second * sub_length, - sub_length)); - }) - ; - - bool ret = bool (it); - SinglePos_serialize (c->serializer, this, it, c->plan->layout_variation_idx_map); - return_trace (ret); - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - return_trace (c->check_struct (this) && - coverage.sanitize (c, this) && - valueFormat.sanitize_values (c, this, values, valueCount)); - } - - protected: - HBUINT16 format; /* Format identifier--format = 2 */ - Offset16To<Coverage> - coverage; /* Offset to Coverage table--from - * beginning of subtable */ - ValueFormat valueFormat; /* Defines the types of data in the - * ValueRecord */ - HBUINT16 valueCount; /* Number of ValueRecords */ - ValueRecord values; /* Array of ValueRecords--positioning - * values applied to glyphs */ - public: - DEFINE_SIZE_ARRAY (8, values); -}; - -struct SinglePos -{ - template<typename Iterator, - hb_requires (hb_is_iterator (Iterator))> - unsigned get_format (Iterator glyph_val_iter_pairs) - { - hb_array_t<const Value> first_val_iter = hb_second (*glyph_val_iter_pairs); - - for (const auto iter : glyph_val_iter_pairs) - for (const auto _ : hb_zip (iter.second, first_val_iter)) - if (_.first != _.second) - return 2; - - return 1; - } - - - template<typename Iterator, - typename SrcLookup, - hb_requires (hb_is_iterator (Iterator))> - void serialize (hb_serialize_context_t *c, - const SrcLookup* src, - Iterator glyph_val_iter_pairs, - const hb_map_t *layout_variation_idx_map) - { - if (unlikely (!c->extend_min (u.format))) return; - unsigned format = 2; - ValueFormat new_format = src->get_value_format (); - - if (glyph_val_iter_pairs) - { - format = get_format (glyph_val_iter_pairs); - new_format = src->get_value_format ().get_effective_format (+ glyph_val_iter_pairs | hb_map (hb_second)); - } - - u.format = format; - switch (u.format) { - case 1: u.format1.serialize (c, - src, - glyph_val_iter_pairs, - new_format, - layout_variation_idx_map); - return; - case 2: u.format2.serialize (c, - src, - glyph_val_iter_pairs, - new_format, - layout_variation_idx_map); - 
return; - default:return; - } - } - - template <typename context_t, typename ...Ts> - typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const - { - TRACE_DISPATCH (this, u.format); - if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); - switch (u.format) { - case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); - case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...)); - default:return_trace (c->default_return_value ()); - } - } - - protected: - union { - HBUINT16 format; /* Format identifier */ - SinglePosFormat1 format1; - SinglePosFormat2 format2; - } u; -}; - -template<typename Iterator, typename SrcLookup> -static void -SinglePos_serialize (hb_serialize_context_t *c, - const SrcLookup *src, - Iterator it, - const hb_map_t *layout_variation_idx_map) -{ c->start_embed<SinglePos> ()->serialize (c, src, it, layout_variation_idx_map); } - - -struct PairValueRecord -{ - friend struct PairSet; - - int cmp (hb_codepoint_t k) const - { return secondGlyph.cmp (k); } - - struct context_t - { - const void *base; - const ValueFormat *valueFormats; - const ValueFormat *newFormats; - unsigned len1; /* valueFormats[0].get_len() */ - const hb_map_t *glyph_map; - const hb_map_t *layout_variation_idx_map; - }; - - bool subset (hb_subset_context_t *c, - context_t *closure) const - { - TRACE_SERIALIZE (this); - auto *s = c->serializer; - auto *out = s->start_embed (*this); - if (unlikely (!s->extend_min (out))) return_trace (false); - - out->secondGlyph = (*closure->glyph_map)[secondGlyph]; - - closure->valueFormats[0].copy_values (s, - closure->newFormats[0], - closure->base, &values[0], - closure->layout_variation_idx_map); - closure->valueFormats[1].copy_values (s, - closure->newFormats[1], - closure->base, - &values[closure->len1], - closure->layout_variation_idx_map); - - return_trace (true); - } - - void collect_variation_indices (hb_collect_variation_indices_context_t *c, - const ValueFormat *valueFormats, - const void *base) const - { - unsigned record1_len = valueFormats[0].get_len (); - unsigned record2_len = valueFormats[1].get_len (); - const hb_array_t<const Value> values_array = values.as_array (record1_len + record2_len); - - if (valueFormats[0].has_device ()) - valueFormats[0].collect_variation_indices (c, base, values_array.sub_array (0, record1_len)); - - if (valueFormats[1].has_device ()) - valueFormats[1].collect_variation_indices (c, base, values_array.sub_array (record1_len, record2_len)); - } - - bool intersects (const hb_set_t& glyphset) const - { - return glyphset.has(secondGlyph); - } - - const Value* get_values_1 () const - { - return &values[0]; - } - - const Value* get_values_2 (ValueFormat format1) const - { - return &values[format1.get_len ()]; - } - - protected: - HBGlyphID16 secondGlyph; /* GlyphID of second glyph in the - * pair--first glyph is listed in the - * Coverage table */ - ValueRecord values; /* Positioning data for the first glyph - * followed by for second glyph */ - public: - DEFINE_SIZE_ARRAY (2, values); -}; - -struct PairSet -{ - friend struct PairPosFormat1; - - bool intersects (const hb_set_t *glyphs, - const ValueFormat *valueFormats) const - { - unsigned int len1 = valueFormats[0].get_len (); - unsigned int len2 = valueFormats[1].get_len (); - unsigned int record_size = HBUINT16::static_size * (1 + len1 + len2); - - const PairValueRecord *record = &firstPairValueRecord; - unsigned int count = len; - for (unsigned int i = 0; i < count; i++) - { - if 
(glyphs->has (record->secondGlyph)) - return true; - record = &StructAtOffset<const PairValueRecord> (record, record_size); - } - return false; - } - - void collect_glyphs (hb_collect_glyphs_context_t *c, - const ValueFormat *valueFormats) const - { - unsigned int len1 = valueFormats[0].get_len (); - unsigned int len2 = valueFormats[1].get_len (); - unsigned int record_size = HBUINT16::static_size * (1 + len1 + len2); - - const PairValueRecord *record = &firstPairValueRecord; - c->input->add_array (&record->secondGlyph, len, record_size); - } - - void collect_variation_indices (hb_collect_variation_indices_context_t *c, - const ValueFormat *valueFormats) const - { - unsigned len1 = valueFormats[0].get_len (); - unsigned len2 = valueFormats[1].get_len (); - unsigned record_size = HBUINT16::static_size * (1 + len1 + len2); - - const PairValueRecord *record = &firstPairValueRecord; - unsigned count = len; - for (unsigned i = 0; i < count; i++) - { - if (c->glyph_set->has (record->secondGlyph)) - { record->collect_variation_indices (c, valueFormats, this); } - - record = &StructAtOffset<const PairValueRecord> (record, record_size); - } - } - - bool apply (hb_ot_apply_context_t *c, - const ValueFormat *valueFormats, - unsigned int pos) const - { - TRACE_APPLY (this); - hb_buffer_t *buffer = c->buffer; - unsigned int len1 = valueFormats[0].get_len (); - unsigned int len2 = valueFormats[1].get_len (); - unsigned int record_size = HBUINT16::static_size * (1 + len1 + len2); - - const PairValueRecord *record = hb_bsearch (buffer->info[pos].codepoint, - &firstPairValueRecord, - len, - record_size); - if (record) - { - bool applied_first = valueFormats[0].apply_value (c, this, &record->values[0], buffer->cur_pos()); - bool applied_second = valueFormats[1].apply_value (c, this, &record->values[len1], buffer->pos[pos]); - if (applied_first || applied_second) - buffer->unsafe_to_break (buffer->idx, pos + 1); - if (len2) - pos++; - buffer->idx = pos; - return_trace (true); - } - buffer->unsafe_to_concat (buffer->idx, pos + 1); - return_trace (false); - } - - bool subset (hb_subset_context_t *c, - const ValueFormat valueFormats[2], - const ValueFormat newFormats[2]) const - { - TRACE_SUBSET (this); - auto snap = c->serializer->snapshot (); - - auto *out = c->serializer->start_embed (*this); - if (unlikely (!c->serializer->extend_min (out))) return_trace (false); - out->len = 0; - - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - const hb_map_t &glyph_map = *c->plan->glyph_map; - - unsigned len1 = valueFormats[0].get_len (); - unsigned len2 = valueFormats[1].get_len (); - unsigned record_size = HBUINT16::static_size + Value::static_size * (len1 + len2); - - PairValueRecord::context_t context = - { - this, - valueFormats, - newFormats, - len1, - &glyph_map, - c->plan->layout_variation_idx_map - }; - - const PairValueRecord *record = &firstPairValueRecord; - unsigned count = len, num = 0; - for (unsigned i = 0; i < count; i++) - { - if (glyphset.has (record->secondGlyph) - && record->subset (c, &context)) num++; - record = &StructAtOffset<const PairValueRecord> (record, record_size); - } - - out->len = num; - if (!num) c->serializer->revert (snap); - return_trace (num); - } - - struct sanitize_closure_t - { - const ValueFormat *valueFormats; - unsigned int len1; /* valueFormats[0].get_len() */ - unsigned int stride; /* 1 + len1 + len2 */ - }; - - bool sanitize (hb_sanitize_context_t *c, const sanitize_closure_t *closure) const - { - TRACE_SANITIZE (this); - if (!(c->check_struct (this) - && 
c->check_range (&firstPairValueRecord, - len, - HBUINT16::static_size, - closure->stride))) return_trace (false); - - unsigned int count = len; - const PairValueRecord *record = &firstPairValueRecord; - return_trace (closure->valueFormats[0].sanitize_values_stride_unsafe (c, this, &record->values[0], count, closure->stride) && - closure->valueFormats[1].sanitize_values_stride_unsafe (c, this, &record->values[closure->len1], count, closure->stride)); - } - - protected: - HBUINT16 len; /* Number of PairValueRecords */ - PairValueRecord firstPairValueRecord; - /* Array of PairValueRecords--ordered - * by GlyphID of the second glyph */ - public: - DEFINE_SIZE_MIN (2); -}; - -struct PairPosFormat1 -{ - bool intersects (const hb_set_t *glyphs) const - { - return - + hb_zip (this+coverage, pairSet) - | hb_filter (*glyphs, hb_first) - | hb_map (hb_second) - | hb_map ([glyphs, this] (const Offset16To<PairSet> &_) - { return (this+_).intersects (glyphs, valueFormat); }) - | hb_any - ; - } - - void closure_lookups (hb_closure_lookups_context_t *c) const {} - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - if ((!valueFormat[0].has_device ()) && (!valueFormat[1].has_device ())) return; - - auto it = - + hb_zip (this+coverage, pairSet) - | hb_filter (c->glyph_set, hb_first) - | hb_map (hb_second) - ; - - if (!it) return; - + it - | hb_map (hb_add (this)) - | hb_apply ([&] (const PairSet& _) { _.collect_variation_indices (c, valueFormat); }) - ; - } - - void collect_glyphs (hb_collect_glyphs_context_t *c) const - { - if (unlikely (!(this+coverage).collect_coverage (c->input))) return; - unsigned int count = pairSet.len; - for (unsigned int i = 0; i < count; i++) - (this+pairSet[i]).collect_glyphs (c, valueFormat); - } - - const Coverage &get_coverage () const { return this+coverage; } - - bool apply (hb_ot_apply_context_t *c) const - { - TRACE_APPLY (this); - hb_buffer_t *buffer = c->buffer; - unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint); - if (likely (index == NOT_COVERED)) return_trace (false); - - hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; - skippy_iter.reset (buffer->idx, 1); - unsigned unsafe_to; - if (!skippy_iter.next (&unsafe_to)) - { - buffer->unsafe_to_concat (buffer->idx, unsafe_to); - return_trace (false); - } - - return_trace ((this+pairSet[index]).apply (c, valueFormat, skippy_iter.idx)); - } - - bool subset (hb_subset_context_t *c) const - { - TRACE_SUBSET (this); - - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - const hb_map_t &glyph_map = *c->plan->glyph_map; - - auto *out = c->serializer->start_embed (*this); - if (unlikely (!c->serializer->extend_min (out))) return_trace (false); - out->format = format; - out->valueFormat[0] = valueFormat[0]; - out->valueFormat[1] = valueFormat[1]; - if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING) - { - hb_pair_t<unsigned, unsigned> newFormats = compute_effective_value_formats (glyphset); - out->valueFormat[0] = newFormats.first; - out->valueFormat[1] = newFormats.second; - } - - hb_sorted_vector_t<hb_codepoint_t> new_coverage; - - + hb_zip (this+coverage, pairSet) - | hb_filter (glyphset, hb_first) - | hb_filter ([this, c, out] (const Offset16To<PairSet>& _) - { - auto snap = c->serializer->snapshot (); - auto *o = out->pairSet.serialize_append (c->serializer); - if (unlikely (!o)) return false; - bool ret = o->serialize_subset (c, _, this, valueFormat, out->valueFormat); - if (!ret) - { - out->pairSet.pop (); - c->serializer->revert (snap); 
- } - return ret; - }, - hb_second) - | hb_map (hb_first) - | hb_map (glyph_map) - | hb_sink (new_coverage) - ; - - out->coverage.serialize_serialize (c->serializer, new_coverage.iter ()); - - return_trace (bool (new_coverage)); - } - - - hb_pair_t<unsigned, unsigned> compute_effective_value_formats (const hb_set_t& glyphset) const - { - unsigned len1 = valueFormat[0].get_len (); - unsigned len2 = valueFormat[1].get_len (); - unsigned record_size = HBUINT16::static_size + Value::static_size * (len1 + len2); - - unsigned format1 = 0; - unsigned format2 = 0; - for (const Offset16To<PairSet>& _ : - + hb_zip (this+coverage, pairSet) | hb_filter (glyphset, hb_first) | hb_map (hb_second)) - { - const PairSet& set = (this + _); - const PairValueRecord *record = &set.firstPairValueRecord; - - for (unsigned i = 0; i < set.len; i++) - { - if (record->intersects (glyphset)) - { - format1 = format1 | valueFormat[0].get_effective_format (record->get_values_1 ()); - format2 = format2 | valueFormat[1].get_effective_format (record->get_values_2 (valueFormat[0])); - } - record = &StructAtOffset<const PairValueRecord> (record, record_size); - } - } - - return hb_pair (format1, format2); - } - - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - - if (!c->check_struct (this)) return_trace (false); - - unsigned int len1 = valueFormat[0].get_len (); - unsigned int len2 = valueFormat[1].get_len (); - PairSet::sanitize_closure_t closure = - { - valueFormat, - len1, - 1 + len1 + len2 - }; - - return_trace (coverage.sanitize (c, this) && pairSet.sanitize (c, this, &closure)); - } - - protected: - HBUINT16 format; /* Format identifier--format = 1 */ - Offset16To<Coverage> - coverage; /* Offset to Coverage table--from - * beginning of subtable */ - ValueFormat valueFormat[2]; /* [0] Defines the types of data in - * ValueRecord1--for the first glyph - * in the pair--may be zero (0) */ - /* [1] Defines the types of data in - * ValueRecord2--for the second glyph - * in the pair--may be zero (0) */ - Array16OfOffset16To<PairSet> - pairSet; /* Array of PairSet tables - * ordered by Coverage Index */ - public: - DEFINE_SIZE_ARRAY (10, pairSet); -}; - -struct PairPosFormat2 -{ - bool intersects (const hb_set_t *glyphs) const - { - return (this+coverage).intersects (glyphs) && - (this+classDef2).intersects (glyphs); - } - - void closure_lookups (hb_closure_lookups_context_t *c) const {} - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - if (!intersects (c->glyph_set)) return; - if ((!valueFormat1.has_device ()) && (!valueFormat2.has_device ())) return; - - hb_set_t klass1_glyphs, klass2_glyphs; - if (!(this+classDef1).collect_coverage (&klass1_glyphs)) return; - if (!(this+classDef2).collect_coverage (&klass2_glyphs)) return; - - hb_set_t class1_set, class2_set; - for (const unsigned cp : + c->glyph_set->iter () | hb_filter (this + coverage)) - { - if (!klass1_glyphs.has (cp)) class1_set.add (0); - else - { - unsigned klass1 = (this+classDef1).get (cp); - class1_set.add (klass1); - } - } - - class2_set.add (0); - for (const unsigned cp : + c->glyph_set->iter () | hb_filter (klass2_glyphs)) - { - unsigned klass2 = (this+classDef2).get (cp); - class2_set.add (klass2); - } - - if (class1_set.is_empty () - || class2_set.is_empty () - || (class2_set.get_population() == 1 && class2_set.has(0))) - return; - - unsigned len1 = valueFormat1.get_len (); - unsigned len2 = valueFormat2.get_len (); - const hb_array_t<const Value> values_array = values.as_array 
((unsigned)class1Count * (unsigned) class2Count * (len1 + len2)); - for (const unsigned class1_idx : class1_set.iter ()) - { - for (const unsigned class2_idx : class2_set.iter ()) - { - unsigned start_offset = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2); - if (valueFormat1.has_device ()) - valueFormat1.collect_variation_indices (c, this, values_array.sub_array (start_offset, len1)); - - if (valueFormat2.has_device ()) - valueFormat2.collect_variation_indices (c, this, values_array.sub_array (start_offset+len1, len2)); - } - } - } - - void collect_glyphs (hb_collect_glyphs_context_t *c) const - { - if (unlikely (!(this+coverage).collect_coverage (c->input))) return; - if (unlikely (!(this+classDef2).collect_coverage (c->input))) return; - } - - const Coverage &get_coverage () const { return this+coverage; } - - bool apply (hb_ot_apply_context_t *c) const - { - TRACE_APPLY (this); - hb_buffer_t *buffer = c->buffer; - unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint); - if (likely (index == NOT_COVERED)) return_trace (false); - - hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; - skippy_iter.reset (buffer->idx, 1); - unsigned unsafe_to; - if (!skippy_iter.next (&unsafe_to)) - { - buffer->unsafe_to_concat (buffer->idx, unsafe_to); - return_trace (false); - } - - unsigned int len1 = valueFormat1.get_len (); - unsigned int len2 = valueFormat2.get_len (); - unsigned int record_len = len1 + len2; - - unsigned int klass1 = (this+classDef1).get_class (buffer->cur().codepoint); - unsigned int klass2 = (this+classDef2).get_class (buffer->info[skippy_iter.idx].codepoint); - if (unlikely (klass1 >= class1Count || klass2 >= class2Count)) - { - buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1); - return_trace (false); - } - - const Value *v = &values[record_len * (klass1 * class2Count + klass2)]; - - bool applied_first = false, applied_second = false; - - - /* Isolate simple kerning and apply it half to each side. - * Results in better cursor positinoing / underline drawing. - * - * Disabled, because causes issues... :-( - * https://github.com/harfbuzz/harfbuzz/issues/3408 - * https://github.com/harfbuzz/harfbuzz/pull/3235#issuecomment-1029814978 - */ -#ifndef HB_SPLIT_KERN - if (0) -#endif - { - if (!len2) - { - const hb_direction_t dir = buffer->props.direction; - const bool horizontal = HB_DIRECTION_IS_HORIZONTAL (dir); - const bool backward = HB_DIRECTION_IS_BACKWARD (dir); - unsigned mask = horizontal ? ValueFormat::xAdvance : ValueFormat::yAdvance; - if (backward) - mask |= mask >> 2; /* Add eg. xPlacement in RTL. */ - /* Add Devices. */ - mask |= mask << 4; - - if (valueFormat1 & ~mask) - goto bail; - - /* Is simple kern. Apply value on an empty position slot, - * then split it between sides. */ - - hb_glyph_position_t pos{}; - if (valueFormat1.apply_value (c, this, v, pos)) - { - hb_position_t *src = &pos.x_advance; - hb_position_t *dst1 = &buffer->cur_pos().x_advance; - hb_position_t *dst2 = &buffer->pos[skippy_iter.idx].x_advance; - unsigned i = horizontal ? 
0 : 1; - - hb_position_t kern = src[i]; - hb_position_t kern1 = kern >> 1; - hb_position_t kern2 = kern - kern1; - - if (!backward) - { - dst1[i] += kern1; - dst2[i] += kern2; - dst2[i + 2] += kern2; - } - else - { - dst1[i] += kern1; - dst1[i + 2] += src[i + 2] - kern2; - dst2[i] += kern2; - } - - applied_first = applied_second = kern != 0; - goto success; - } - goto boring; - } - } - bail: - - - applied_first = valueFormat1.apply_value (c, this, v, buffer->cur_pos()); - applied_second = valueFormat2.apply_value (c, this, v + len1, buffer->pos[skippy_iter.idx]); - - success: - if (applied_first || applied_second) - buffer->unsafe_to_break (buffer->idx, skippy_iter.idx + 1); - else - boring: - buffer->unsafe_to_concat (buffer->idx, skippy_iter.idx + 1); - - - buffer->idx = skippy_iter.idx; - if (len2) - buffer->idx++; - - return_trace (true); - } - - bool subset (hb_subset_context_t *c) const - { - TRACE_SUBSET (this); - auto *out = c->serializer->start_embed (*this); - if (unlikely (!c->serializer->extend_min (out))) return_trace (false); - out->format = format; - - hb_map_t klass1_map; - out->classDef1.serialize_subset (c, classDef1, this, &klass1_map, true, true, &(this + coverage)); - out->class1Count = klass1_map.get_population (); - - hb_map_t klass2_map; - out->classDef2.serialize_subset (c, classDef2, this, &klass2_map, true, false); - out->class2Count = klass2_map.get_population (); - - unsigned len1 = valueFormat1.get_len (); - unsigned len2 = valueFormat2.get_len (); - - hb_pair_t<unsigned, unsigned> newFormats = hb_pair (valueFormat1, valueFormat2); - if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING) - newFormats = compute_effective_value_formats (klass1_map, klass2_map); - - out->valueFormat1 = newFormats.first; - out->valueFormat2 = newFormats.second; - - for (unsigned class1_idx : + hb_range ((unsigned) class1Count) | hb_filter (klass1_map)) - { - for (unsigned class2_idx : + hb_range ((unsigned) class2Count) | hb_filter (klass2_map)) - { - unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2); - valueFormat1.copy_values (c->serializer, newFormats.first, this, &values[idx], c->plan->layout_variation_idx_map); - valueFormat2.copy_values (c->serializer, newFormats.second, this, &values[idx + len1], c->plan->layout_variation_idx_map); - } - } - - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - const hb_map_t &glyph_map = *c->plan->glyph_map; - - auto it = - + hb_iter (this+coverage) - | hb_filter (glyphset) - | hb_map_retains_sorting (glyph_map) - ; - - out->coverage.serialize_serialize (c->serializer, it); - return_trace (out->class1Count && out->class2Count && bool (it)); - } - - - hb_pair_t<unsigned, unsigned> compute_effective_value_formats (const hb_map_t& klass1_map, - const hb_map_t& klass2_map) const - { - unsigned len1 = valueFormat1.get_len (); - unsigned len2 = valueFormat2.get_len (); - - unsigned format1 = 0; - unsigned format2 = 0; - - for (unsigned class1_idx : + hb_range ((unsigned) class1Count) | hb_filter (klass1_map)) - { - for (unsigned class2_idx : + hb_range ((unsigned) class2Count) | hb_filter (klass2_map)) - { - unsigned idx = (class1_idx * (unsigned) class2Count + class2_idx) * (len1 + len2); - format1 = format1 | valueFormat1.get_effective_format (&values[idx]); - format2 = format2 | valueFormat2.get_effective_format (&values[idx + len1]); - } - } - - return hb_pair (format1, format2); - } - - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - if (!(c->check_struct (this) - && 
coverage.sanitize (c, this) - && classDef1.sanitize (c, this) - && classDef2.sanitize (c, this))) return_trace (false); - - unsigned int len1 = valueFormat1.get_len (); - unsigned int len2 = valueFormat2.get_len (); - unsigned int stride = len1 + len2; - unsigned int record_size = valueFormat1.get_size () + valueFormat2.get_size (); - unsigned int count = (unsigned int) class1Count * (unsigned int) class2Count; - return_trace (c->check_range ((const void *) values, - count, - record_size) && - valueFormat1.sanitize_values_stride_unsafe (c, this, &values[0], count, stride) && - valueFormat2.sanitize_values_stride_unsafe (c, this, &values[len1], count, stride)); - } - - protected: - HBUINT16 format; /* Format identifier--format = 2 */ - Offset16To<Coverage> - coverage; /* Offset to Coverage table--from - * beginning of subtable */ - ValueFormat valueFormat1; /* ValueRecord definition--for the - * first glyph of the pair--may be zero - * (0) */ - ValueFormat valueFormat2; /* ValueRecord definition--for the - * second glyph of the pair--may be - * zero (0) */ - Offset16To<ClassDef> - classDef1; /* Offset to ClassDef table--from - * beginning of PairPos subtable--for - * the first glyph of the pair */ - Offset16To<ClassDef> - classDef2; /* Offset to ClassDef table--from - * beginning of PairPos subtable--for - * the second glyph of the pair */ - HBUINT16 class1Count; /* Number of classes in ClassDef1 - * table--includes Class0 */ - HBUINT16 class2Count; /* Number of classes in ClassDef2 - * table--includes Class0 */ - ValueRecord values; /* Matrix of value pairs: - * class1-major, class2-minor, - * Each entry has value1 and value2 */ - public: - DEFINE_SIZE_ARRAY (16, values); -}; - -struct PairPos -{ - template <typename context_t, typename ...Ts> - typename context_t::return_t dispatch (context_t *c, Ts&&... 
ds) const - { - TRACE_DISPATCH (this, u.format); - if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); - switch (u.format) { - case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); - case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...)); - default:return_trace (c->default_return_value ()); - } - } - - protected: - union { - HBUINT16 format; /* Format identifier */ - PairPosFormat1 format1; - PairPosFormat2 format2; - } u; -}; - - -struct EntryExitRecord -{ - friend struct CursivePosFormat1; - - bool sanitize (hb_sanitize_context_t *c, const void *base) const - { - TRACE_SANITIZE (this); - return_trace (entryAnchor.sanitize (c, base) && exitAnchor.sanitize (c, base)); - } - - void collect_variation_indices (hb_collect_variation_indices_context_t *c, - const void *src_base) const - { - (src_base+entryAnchor).collect_variation_indices (c); - (src_base+exitAnchor).collect_variation_indices (c); - } - - EntryExitRecord* subset (hb_subset_context_t *c, - const void *src_base) const - { - TRACE_SERIALIZE (this); - auto *out = c->serializer->embed (this); - if (unlikely (!out)) return_trace (nullptr); - - out->entryAnchor.serialize_subset (c, entryAnchor, src_base); - out->exitAnchor.serialize_subset (c, exitAnchor, src_base); - return_trace (out); - } - - protected: - Offset16To<Anchor> - entryAnchor; /* Offset to EntryAnchor table--from - * beginning of CursivePos - * subtable--may be NULL */ - Offset16To<Anchor> - exitAnchor; /* Offset to ExitAnchor table--from - * beginning of CursivePos - * subtable--may be NULL */ - public: - DEFINE_SIZE_STATIC (4); -}; - -static void -reverse_cursive_minor_offset (hb_glyph_position_t *pos, unsigned int i, hb_direction_t direction, unsigned int new_parent); - -struct CursivePosFormat1 -{ - bool intersects (const hb_set_t *glyphs) const - { return (this+coverage).intersects (glyphs); } - - void closure_lookups (hb_closure_lookups_context_t *c) const {} - - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - + hb_zip (this+coverage, entryExitRecord) - | hb_filter (c->glyph_set, hb_first) - | hb_map (hb_second) - | hb_apply ([&] (const EntryExitRecord& record) { record.collect_variation_indices (c, this); }) - ; - } - - void collect_glyphs (hb_collect_glyphs_context_t *c) const - { if (unlikely (!(this+coverage).collect_coverage (c->input))) return; } - - const Coverage &get_coverage () const { return this+coverage; } - - bool apply (hb_ot_apply_context_t *c) const - { - TRACE_APPLY (this); - hb_buffer_t *buffer = c->buffer; - - const EntryExitRecord &this_record = entryExitRecord[(this+coverage).get_coverage (buffer->cur().codepoint)]; - if (!this_record.entryAnchor) return_trace (false); - - hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; - skippy_iter.reset (buffer->idx, 1); - unsigned unsafe_from; - if (!skippy_iter.prev (&unsafe_from)) - { - buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1); - return_trace (false); - } - - const EntryExitRecord &prev_record = entryExitRecord[(this+coverage).get_coverage (buffer->info[skippy_iter.idx].codepoint)]; - if (!prev_record.exitAnchor) - { - buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); - return_trace (false); - } - - unsigned int i = skippy_iter.idx; - unsigned int j = buffer->idx; - - buffer->unsafe_to_break (i, j); - float entry_x, entry_y, exit_x, exit_y; - (this+prev_record.exitAnchor).get_anchor (c, 
buffer->info[i].codepoint, &exit_x, &exit_y); - (this+this_record.entryAnchor).get_anchor (c, buffer->info[j].codepoint, &entry_x, &entry_y); - - hb_glyph_position_t *pos = buffer->pos; - - hb_position_t d; - /* Main-direction adjustment */ - switch (c->direction) { - case HB_DIRECTION_LTR: - pos[i].x_advance = roundf (exit_x) + pos[i].x_offset; - - d = roundf (entry_x) + pos[j].x_offset; - pos[j].x_advance -= d; - pos[j].x_offset -= d; - break; - case HB_DIRECTION_RTL: - d = roundf (exit_x) + pos[i].x_offset; - pos[i].x_advance -= d; - pos[i].x_offset -= d; - - pos[j].x_advance = roundf (entry_x) + pos[j].x_offset; - break; - case HB_DIRECTION_TTB: - pos[i].y_advance = roundf (exit_y) + pos[i].y_offset; - - d = roundf (entry_y) + pos[j].y_offset; - pos[j].y_advance -= d; - pos[j].y_offset -= d; - break; - case HB_DIRECTION_BTT: - d = roundf (exit_y) + pos[i].y_offset; - pos[i].y_advance -= d; - pos[i].y_offset -= d; - - pos[j].y_advance = roundf (entry_y); - break; - case HB_DIRECTION_INVALID: - default: - break; - } - - /* Cross-direction adjustment */ - - /* We attach child to parent (think graph theory and rooted trees whereas - * the root stays on baseline and each node aligns itself against its - * parent. - * - * Optimize things for the case of RightToLeft, as that's most common in - * Arabic. */ - unsigned int child = i; - unsigned int parent = j; - hb_position_t x_offset = entry_x - exit_x; - hb_position_t y_offset = entry_y - exit_y; - if (!(c->lookup_props & LookupFlag::RightToLeft)) - { - unsigned int k = child; - child = parent; - parent = k; - x_offset = -x_offset; - y_offset = -y_offset; - } - - /* If child was already connected to someone else, walk through its old - * chain and reverse the link direction, such that the whole tree of its - * previous connection now attaches to new parent. Watch out for case - * where new parent is on the path from old chain... - */ - reverse_cursive_minor_offset (pos, child, c->direction, parent); - - pos[child].attach_type() = ATTACH_TYPE_CURSIVE; - pos[child].attach_chain() = (int) parent - (int) child; - buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT; - if (likely (HB_DIRECTION_IS_HORIZONTAL (c->direction))) - pos[child].y_offset = y_offset; - else - pos[child].x_offset = x_offset; - - /* If parent was attached to child, separate them. 
- * https://github.com/harfbuzz/harfbuzz/issues/2469 - */ - if (unlikely (pos[parent].attach_chain() == -pos[child].attach_chain())) - pos[parent].attach_chain() = 0; - - buffer->idx++; - return_trace (true); - } - - template <typename Iterator, - hb_requires (hb_is_iterator (Iterator))> - void serialize (hb_subset_context_t *c, - Iterator it, - const void *src_base) - { - if (unlikely (!c->serializer->extend_min ((*this)))) return; - this->format = 1; - this->entryExitRecord.len = it.len (); - - for (const EntryExitRecord& entry_record : + it - | hb_map (hb_second)) - entry_record.subset (c, src_base); - - auto glyphs = - + it - | hb_map_retains_sorting (hb_first) - ; - - coverage.serialize_serialize (c->serializer, glyphs); - } - - bool subset (hb_subset_context_t *c) const - { - TRACE_SUBSET (this); - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - const hb_map_t &glyph_map = *c->plan->glyph_map; - - auto *out = c->serializer->start_embed (*this); - if (unlikely (!out)) return_trace (false); - - auto it = - + hb_zip (this+coverage, entryExitRecord) - | hb_filter (glyphset, hb_first) - | hb_map_retains_sorting ([&] (hb_pair_t<hb_codepoint_t, const EntryExitRecord&> p) -> hb_pair_t<hb_codepoint_t, const EntryExitRecord&> - { return hb_pair (glyph_map[p.first], p.second);}) - ; - - bool ret = bool (it); - out->serialize (c, it, this); - return_trace (ret); - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - return_trace (coverage.sanitize (c, this) && entryExitRecord.sanitize (c, this)); - } - - protected: - HBUINT16 format; /* Format identifier--format = 1 */ - Offset16To<Coverage> - coverage; /* Offset to Coverage table--from - * beginning of subtable */ - Array16Of<EntryExitRecord> - entryExitRecord; /* Array of EntryExit records--in - * Coverage Index order */ - public: - DEFINE_SIZE_ARRAY (6, entryExitRecord); -}; - -struct CursivePos -{ - template <typename context_t, typename ...Ts> - typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const - { - TRACE_DISPATCH (this, u.format); - if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); - switch (u.format) { - case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); - default:return_trace (c->default_return_value ()); - } - } - - protected: - union { - HBUINT16 format; /* Format identifier */ - CursivePosFormat1 format1; - } u; -}; - - -typedef AnchorMatrix BaseArray; /* base-major-- - * in order of BaseCoverage Index--, - * mark-minor-- - * ordered by class--zero-based. 
*/ - -static void Markclass_closure_and_remap_indexes (const Coverage &mark_coverage, - const MarkArray &mark_array, - const hb_set_t &glyphset, - hb_map_t* klass_mapping /* INOUT */) -{ - hb_set_t orig_classes; - - + hb_zip (mark_coverage, mark_array) - | hb_filter (glyphset, hb_first) - | hb_map (hb_second) - | hb_map (&MarkRecord::get_class) - | hb_sink (orig_classes) - ; - - unsigned idx = 0; - for (auto klass : orig_classes.iter ()) - { - if (klass_mapping->has (klass)) continue; - klass_mapping->set (klass, idx); - idx++; - } -} - -struct MarkBasePosFormat1 -{ - bool intersects (const hb_set_t *glyphs) const - { - return (this+markCoverage).intersects (glyphs) && - (this+baseCoverage).intersects (glyphs); - } - - void closure_lookups (hb_closure_lookups_context_t *c) const {} - - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - + hb_zip (this+markCoverage, this+markArray) - | hb_filter (c->glyph_set, hb_first) - | hb_map (hb_second) - | hb_apply ([&] (const MarkRecord& record) { record.collect_variation_indices (c, &(this+markArray)); }) - ; - - hb_map_t klass_mapping; - Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, *c->glyph_set, &klass_mapping); - - unsigned basecount = (this+baseArray).rows; - auto base_iter = - + hb_zip (this+baseCoverage, hb_range (basecount)) - | hb_filter (c->glyph_set, hb_first) - | hb_map (hb_second) - ; - - hb_sorted_vector_t<unsigned> base_indexes; - for (const unsigned row : base_iter) - { - + hb_range ((unsigned) classCount) - | hb_filter (klass_mapping) - | hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; }) - | hb_sink (base_indexes) - ; - } - (this+baseArray).collect_variation_indices (c, base_indexes.iter ()); - } - - void collect_glyphs (hb_collect_glyphs_context_t *c) const - { - if (unlikely (!(this+markCoverage).collect_coverage (c->input))) return; - if (unlikely (!(this+baseCoverage).collect_coverage (c->input))) return; - } - - const Coverage &get_coverage () const { return this+markCoverage; } - - bool apply (hb_ot_apply_context_t *c) const - { - TRACE_APPLY (this); - hb_buffer_t *buffer = c->buffer; - unsigned int mark_index = (this+markCoverage).get_coverage (buffer->cur().codepoint); - if (likely (mark_index == NOT_COVERED)) return_trace (false); - - /* Now we search backwards for a non-mark glyph */ - hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; - skippy_iter.reset (buffer->idx, 1); - skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks); - do { - unsigned unsafe_from; - if (!skippy_iter.prev (&unsafe_from)) - { - buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1); - return_trace (false); - } - - /* We only want to attach to the first of a MultipleSubst sequence. - * https://github.com/harfbuzz/harfbuzz/issues/740 - * Reject others... 
- * ...but stop if we find a mark in the MultipleSubst sequence: - * https://github.com/harfbuzz/harfbuzz/issues/1020 */ - if (!_hb_glyph_info_multiplied (&buffer->info[skippy_iter.idx]) || - 0 == _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) || - (skippy_iter.idx == 0 || - _hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx - 1]) || - _hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx]) != - _hb_glyph_info_get_lig_id (&buffer->info[skippy_iter.idx - 1]) || - _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx]) != - _hb_glyph_info_get_lig_comp (&buffer->info[skippy_iter.idx - 1]) + 1 - )) - break; - skippy_iter.reject (); - } while (true); - - /* Checking that matched glyph is actually a base glyph by GDEF is too strong; disabled */ - //if (!_hb_glyph_info_is_base_glyph (&buffer->info[skippy_iter.idx])) { return_trace (false); } - - unsigned int base_index = (this+baseCoverage).get_coverage (buffer->info[skippy_iter.idx].codepoint); - if (base_index == NOT_COVERED) - { - buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); - return_trace (false); - } - - return_trace ((this+markArray).apply (c, mark_index, base_index, this+baseArray, classCount, skippy_iter.idx)); - } - - bool subset (hb_subset_context_t *c) const - { - TRACE_SUBSET (this); - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - const hb_map_t &glyph_map = *c->plan->glyph_map; - - auto *out = c->serializer->start_embed (*this); - if (unlikely (!c->serializer->extend_min (out))) return_trace (false); - out->format = format; - - hb_map_t klass_mapping; - Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, glyphset, &klass_mapping); - - if (!klass_mapping.get_population ()) return_trace (false); - out->classCount = klass_mapping.get_population (); - - auto mark_iter = - + hb_zip (this+markCoverage, this+markArray) - | hb_filter (glyphset, hb_first) - ; - - hb_sorted_vector_t<hb_codepoint_t> new_coverage; - + mark_iter - | hb_map (hb_first) - | hb_map (glyph_map) - | hb_sink (new_coverage) - ; - - if (!out->markCoverage.serialize_serialize (c->serializer, new_coverage.iter ())) - return_trace (false); - - out->markArray.serialize_subset (c, markArray, this, - (this+markCoverage).iter (), - &klass_mapping); - - unsigned basecount = (this+baseArray).rows; - auto base_iter = - + hb_zip (this+baseCoverage, hb_range (basecount)) - | hb_filter (glyphset, hb_first) - ; - - new_coverage.reset (); - + base_iter - | hb_map (hb_first) - | hb_map (glyph_map) - | hb_sink (new_coverage) - ; - - if (!out->baseCoverage.serialize_serialize (c->serializer, new_coverage.iter ())) - return_trace (false); - - hb_sorted_vector_t<unsigned> base_indexes; - for (const unsigned row : + base_iter - | hb_map (hb_second)) - { - + hb_range ((unsigned) classCount) - | hb_filter (klass_mapping) - | hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; }) - | hb_sink (base_indexes) - ; - } - - out->baseArray.serialize_subset (c, baseArray, this, - base_iter.len (), - base_indexes.iter ()); - - return_trace (true); - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - return_trace (c->check_struct (this) && - markCoverage.sanitize (c, this) && - baseCoverage.sanitize (c, this) && - markArray.sanitize (c, this) && - baseArray.sanitize (c, this, (unsigned int) classCount)); - } - - protected: - HBUINT16 format; /* Format identifier--format = 1 */ - Offset16To<Coverage> - markCoverage; /* Offset to MarkCoverage table--from - * 
beginning of MarkBasePos subtable */ - Offset16To<Coverage> - baseCoverage; /* Offset to BaseCoverage table--from - * beginning of MarkBasePos subtable */ - HBUINT16 classCount; /* Number of classes defined for marks */ - Offset16To<MarkArray> - markArray; /* Offset to MarkArray table--from - * beginning of MarkBasePos subtable */ - Offset16To<BaseArray> - baseArray; /* Offset to BaseArray table--from - * beginning of MarkBasePos subtable */ - public: - DEFINE_SIZE_STATIC (12); -}; - -struct MarkBasePos -{ - template <typename context_t, typename ...Ts> - typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const - { - TRACE_DISPATCH (this, u.format); - if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); - switch (u.format) { - case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); - default:return_trace (c->default_return_value ()); - } - } - - protected: - union { - HBUINT16 format; /* Format identifier */ - MarkBasePosFormat1 format1; - } u; -}; - - -typedef AnchorMatrix LigatureAttach; /* component-major-- - * in order of writing direction--, - * mark-minor-- - * ordered by class--zero-based. */ - -/* Array of LigatureAttach tables ordered by LigatureCoverage Index */ -struct LigatureArray : List16OfOffset16To<LigatureAttach> -{ - template <typename Iterator, - hb_requires (hb_is_iterator (Iterator))> - bool subset (hb_subset_context_t *c, - Iterator coverage, - unsigned class_count, - const hb_map_t *klass_mapping) const - { - TRACE_SUBSET (this); - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - - auto *out = c->serializer->start_embed (this); - if (unlikely (!c->serializer->extend_min (out))) return_trace (false); - - for (const auto _ : + hb_zip (coverage, *this) - | hb_filter (glyphset, hb_first)) - { - auto *matrix = out->serialize_append (c->serializer); - if (unlikely (!matrix)) return_trace (false); - - const LigatureAttach& src = (this + _.second); - auto indexes = - + hb_range (src.rows * class_count) - | hb_filter ([=] (unsigned index) { return klass_mapping->has (index % class_count); }) - ; - matrix->serialize_subset (c, - _.second, - this, - src.rows, - indexes); - } - return_trace (this->len); - } -}; - -struct MarkLigPosFormat1 -{ - bool intersects (const hb_set_t *glyphs) const - { - return (this+markCoverage).intersects (glyphs) && - (this+ligatureCoverage).intersects (glyphs); - } - - void closure_lookups (hb_closure_lookups_context_t *c) const {} - - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - + hb_zip (this+markCoverage, this+markArray) - | hb_filter (c->glyph_set, hb_first) - | hb_map (hb_second) - | hb_apply ([&] (const MarkRecord& record) { record.collect_variation_indices (c, &(this+markArray)); }) - ; - - hb_map_t klass_mapping; - Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, *c->glyph_set, &klass_mapping); - - unsigned ligcount = (this+ligatureArray).len; - auto lig_iter = - + hb_zip (this+ligatureCoverage, hb_range (ligcount)) - | hb_filter (c->glyph_set, hb_first) - | hb_map (hb_second) - ; - - const LigatureArray& lig_array = this+ligatureArray; - for (const unsigned i : lig_iter) - { - hb_sorted_vector_t<unsigned> lig_indexes; - unsigned row_count = lig_array[i].rows; - for (unsigned row : + hb_range (row_count)) - { - + hb_range ((unsigned) classCount) - | hb_filter (klass_mapping) - | hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; }) - | hb_sink (lig_indexes) - ; - } - - 
lig_array[i].collect_variation_indices (c, lig_indexes.iter ()); - } - } - - void collect_glyphs (hb_collect_glyphs_context_t *c) const - { - if (unlikely (!(this+markCoverage).collect_coverage (c->input))) return; - if (unlikely (!(this+ligatureCoverage).collect_coverage (c->input))) return; - } - - const Coverage &get_coverage () const { return this+markCoverage; } - - bool apply (hb_ot_apply_context_t *c) const - { - TRACE_APPLY (this); - hb_buffer_t *buffer = c->buffer; - unsigned int mark_index = (this+markCoverage).get_coverage (buffer->cur().codepoint); - if (likely (mark_index == NOT_COVERED)) return_trace (false); - - /* Now we search backwards for a non-mark glyph */ - hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; - skippy_iter.reset (buffer->idx, 1); - skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks); - unsigned unsafe_from; - if (!skippy_iter.prev (&unsafe_from)) - { - buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1); - return_trace (false); - } - - /* Checking that matched glyph is actually a ligature by GDEF is too strong; disabled */ - //if (!_hb_glyph_info_is_ligature (&buffer->info[skippy_iter.idx])) { return_trace (false); } - - unsigned int j = skippy_iter.idx; - unsigned int lig_index = (this+ligatureCoverage).get_coverage (buffer->info[j].codepoint); - if (lig_index == NOT_COVERED) - { - buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); - return_trace (false); - } - - const LigatureArray& lig_array = this+ligatureArray; - const LigatureAttach& lig_attach = lig_array[lig_index]; - - /* Find component to attach to */ - unsigned int comp_count = lig_attach.rows; - if (unlikely (!comp_count)) - { - buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); - return_trace (false); - } - - /* We must now check whether the ligature ID of the current mark glyph - * is identical to the ligature ID of the found ligature. If yes, we - * can directly use the component index. If not, we attach the mark - * glyph to the last component of the ligature. 
*/ - unsigned int comp_index; - unsigned int lig_id = _hb_glyph_info_get_lig_id (&buffer->info[j]); - unsigned int mark_id = _hb_glyph_info_get_lig_id (&buffer->cur()); - unsigned int mark_comp = _hb_glyph_info_get_lig_comp (&buffer->cur()); - if (lig_id && lig_id == mark_id && mark_comp > 0) - comp_index = hb_min (comp_count, _hb_glyph_info_get_lig_comp (&buffer->cur())) - 1; - else - comp_index = comp_count - 1; - - return_trace ((this+markArray).apply (c, mark_index, comp_index, lig_attach, classCount, j)); - } - - bool subset (hb_subset_context_t *c) const - { - TRACE_SUBSET (this); - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - const hb_map_t &glyph_map = *c->plan->glyph_map; - - auto *out = c->serializer->start_embed (*this); - if (unlikely (!c->serializer->extend_min (out))) return_trace (false); - out->format = format; - - hb_map_t klass_mapping; - Markclass_closure_and_remap_indexes (this+markCoverage, this+markArray, glyphset, &klass_mapping); - - if (!klass_mapping.get_population ()) return_trace (false); - out->classCount = klass_mapping.get_population (); - - auto mark_iter = - + hb_zip (this+markCoverage, this+markArray) - | hb_filter (glyphset, hb_first) - ; - - auto new_mark_coverage = - + mark_iter - | hb_map_retains_sorting (hb_first) - | hb_map_retains_sorting (glyph_map) - ; - - if (!out->markCoverage.serialize_serialize (c->serializer, new_mark_coverage)) - return_trace (false); - - out->markArray.serialize_subset (c, markArray, this, - (this+markCoverage).iter (), - &klass_mapping); - - auto new_ligature_coverage = - + hb_iter (this + ligatureCoverage) - | hb_filter (glyphset) - | hb_map_retains_sorting (glyph_map) - ; - - if (!out->ligatureCoverage.serialize_serialize (c->serializer, new_ligature_coverage)) - return_trace (false); - - out->ligatureArray.serialize_subset (c, ligatureArray, this, - hb_iter (this+ligatureCoverage), classCount, &klass_mapping); - - return_trace (true); - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - return_trace (c->check_struct (this) && - markCoverage.sanitize (c, this) && - ligatureCoverage.sanitize (c, this) && - markArray.sanitize (c, this) && - ligatureArray.sanitize (c, this, (unsigned int) classCount)); - } - - protected: - HBUINT16 format; /* Format identifier--format = 1 */ - Offset16To<Coverage> - markCoverage; /* Offset to Mark Coverage table--from - * beginning of MarkLigPos subtable */ - Offset16To<Coverage> - ligatureCoverage; /* Offset to Ligature Coverage - * table--from beginning of MarkLigPos - * subtable */ - HBUINT16 classCount; /* Number of defined mark classes */ - Offset16To<MarkArray> - markArray; /* Offset to MarkArray table--from - * beginning of MarkLigPos subtable */ - Offset16To<LigatureArray> - ligatureArray; /* Offset to LigatureArray table--from - * beginning of MarkLigPos subtable */ - public: - DEFINE_SIZE_STATIC (12); -}; - - -struct MarkLigPos -{ - template <typename context_t, typename ...Ts> - typename context_t::return_t dispatch (context_t *c, Ts&&... 
ds) const - { - TRACE_DISPATCH (this, u.format); - if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); - switch (u.format) { - case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); - default:return_trace (c->default_return_value ()); - } - } - - protected: - union { - HBUINT16 format; /* Format identifier */ - MarkLigPosFormat1 format1; - } u; -}; - - -typedef AnchorMatrix Mark2Array; /* mark2-major-- - * in order of Mark2Coverage Index--, - * mark1-minor-- - * ordered by class--zero-based. */ - -struct MarkMarkPosFormat1 -{ - bool intersects (const hb_set_t *glyphs) const - { - return (this+mark1Coverage).intersects (glyphs) && - (this+mark2Coverage).intersects (glyphs); - } - - void closure_lookups (hb_closure_lookups_context_t *c) const {} - - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - + hb_zip (this+mark1Coverage, this+mark1Array) - | hb_filter (c->glyph_set, hb_first) - | hb_map (hb_second) - | hb_apply ([&] (const MarkRecord& record) { record.collect_variation_indices (c, &(this+mark1Array)); }) - ; - - hb_map_t klass_mapping; - Markclass_closure_and_remap_indexes (this+mark1Coverage, this+mark1Array, *c->glyph_set, &klass_mapping); - - unsigned mark2_count = (this+mark2Array).rows; - auto mark2_iter = - + hb_zip (this+mark2Coverage, hb_range (mark2_count)) - | hb_filter (c->glyph_set, hb_first) - | hb_map (hb_second) - ; - - hb_sorted_vector_t<unsigned> mark2_indexes; - for (const unsigned row : mark2_iter) - { - + hb_range ((unsigned) classCount) - | hb_filter (klass_mapping) - | hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; }) - | hb_sink (mark2_indexes) - ; - } - (this+mark2Array).collect_variation_indices (c, mark2_indexes.iter ()); - } - - void collect_glyphs (hb_collect_glyphs_context_t *c) const - { - if (unlikely (!(this+mark1Coverage).collect_coverage (c->input))) return; - if (unlikely (!(this+mark2Coverage).collect_coverage (c->input))) return; - } - - const Coverage &get_coverage () const { return this+mark1Coverage; } - - bool apply (hb_ot_apply_context_t *c) const - { - TRACE_APPLY (this); - hb_buffer_t *buffer = c->buffer; - unsigned int mark1_index = (this+mark1Coverage).get_coverage (buffer->cur().codepoint); - if (likely (mark1_index == NOT_COVERED)) return_trace (false); - - /* now we search backwards for a suitable mark glyph until a non-mark glyph */ - hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input; - skippy_iter.reset (buffer->idx, 1); - skippy_iter.set_lookup_props (c->lookup_props & ~LookupFlag::IgnoreFlags); - unsigned unsafe_from; - if (!skippy_iter.prev (&unsafe_from)) - { - buffer->unsafe_to_concat_from_outbuffer (unsafe_from, buffer->idx + 1); - return_trace (false); - } - - if (!_hb_glyph_info_is_mark (&buffer->info[skippy_iter.idx])) - { - buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); - return_trace (false); - } - - unsigned int j = skippy_iter.idx; - - unsigned int id1 = _hb_glyph_info_get_lig_id (&buffer->cur()); - unsigned int id2 = _hb_glyph_info_get_lig_id (&buffer->info[j]); - unsigned int comp1 = _hb_glyph_info_get_lig_comp (&buffer->cur()); - unsigned int comp2 = _hb_glyph_info_get_lig_comp (&buffer->info[j]); - - if (likely (id1 == id2)) - { - if (id1 == 0) /* Marks belonging to the same base. */ - goto good; - else if (comp1 == comp2) /* Marks belonging to the same ligature component. 
*/ - goto good; - } - else - { - /* If ligature ids don't match, it may be the case that one of the marks - * itself is a ligature. In which case match. */ - if ((id1 > 0 && !comp1) || (id2 > 0 && !comp2)) - goto good; - } - - /* Didn't match. */ - buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); - return_trace (false); - - good: - unsigned int mark2_index = (this+mark2Coverage).get_coverage (buffer->info[j].codepoint); - if (mark2_index == NOT_COVERED) - { - buffer->unsafe_to_concat_from_outbuffer (skippy_iter.idx, buffer->idx + 1); - return_trace (false); - } - - return_trace ((this+mark1Array).apply (c, mark1_index, mark2_index, this+mark2Array, classCount, j)); - } - - bool subset (hb_subset_context_t *c) const - { - TRACE_SUBSET (this); - const hb_set_t &glyphset = *c->plan->glyphset_gsub (); - const hb_map_t &glyph_map = *c->plan->glyph_map; - - auto *out = c->serializer->start_embed (*this); - if (unlikely (!c->serializer->extend_min (out))) return_trace (false); - out->format = format; - - hb_map_t klass_mapping; - Markclass_closure_and_remap_indexes (this+mark1Coverage, this+mark1Array, glyphset, &klass_mapping); - - if (!klass_mapping.get_population ()) return_trace (false); - out->classCount = klass_mapping.get_population (); - - auto mark1_iter = - + hb_zip (this+mark1Coverage, this+mark1Array) - | hb_filter (glyphset, hb_first) - ; - - hb_sorted_vector_t<hb_codepoint_t> new_coverage; - + mark1_iter - | hb_map (hb_first) - | hb_map (glyph_map) - | hb_sink (new_coverage) - ; - - if (!out->mark1Coverage.serialize_serialize (c->serializer, new_coverage.iter ())) - return_trace (false); - - out->mark1Array.serialize_subset (c, mark1Array, this, - (this+mark1Coverage).iter (), - &klass_mapping); - - unsigned mark2count = (this+mark2Array).rows; - auto mark2_iter = - + hb_zip (this+mark2Coverage, hb_range (mark2count)) - | hb_filter (glyphset, hb_first) - ; - - new_coverage.reset (); - + mark2_iter - | hb_map (hb_first) - | hb_map (glyph_map) - | hb_sink (new_coverage) - ; - - if (!out->mark2Coverage.serialize_serialize (c->serializer, new_coverage.iter ())) - return_trace (false); - - hb_sorted_vector_t<unsigned> mark2_indexes; - for (const unsigned row : + mark2_iter - | hb_map (hb_second)) - { - + hb_range ((unsigned) classCount) - | hb_filter (klass_mapping) - | hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; }) - | hb_sink (mark2_indexes) - ; - } - - out->mark2Array.serialize_subset (c, mark2Array, this, mark2_iter.len (), mark2_indexes.iter ()); - - return_trace (true); - } - - bool sanitize (hb_sanitize_context_t *c) const - { - TRACE_SANITIZE (this); - return_trace (c->check_struct (this) && - mark1Coverage.sanitize (c, this) && - mark2Coverage.sanitize (c, this) && - mark1Array.sanitize (c, this) && - mark2Array.sanitize (c, this, (unsigned int) classCount)); - } - - protected: - HBUINT16 format; /* Format identifier--format = 1 */ - Offset16To<Coverage> - mark1Coverage; /* Offset to Combining Mark1 Coverage - * table--from beginning of MarkMarkPos - * subtable */ - Offset16To<Coverage> - mark2Coverage; /* Offset to Combining Mark2 Coverage - * table--from beginning of MarkMarkPos - * subtable */ - HBUINT16 classCount; /* Number of defined mark classes */ - Offset16To<MarkArray> - mark1Array; /* Offset to Mark1Array table--from - * beginning of MarkMarkPos subtable */ - Offset16To<Mark2Array> - mark2Array; /* Offset to Mark2Array table--from - * beginning of MarkMarkPos subtable */ - public: - DEFINE_SIZE_STATIC (12); 
-}; - -struct MarkMarkPos -{ - template <typename context_t, typename ...Ts> - typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const - { - TRACE_DISPATCH (this, u.format); - if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); - switch (u.format) { - case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...)); - default:return_trace (c->default_return_value ()); - } - } - - protected: - union { - HBUINT16 format; /* Format identifier */ - MarkMarkPosFormat1 format1; - } u; -}; - - -struct ContextPos : Context {}; - -struct ChainContextPos : ChainContext {}; - -struct ExtensionPos : Extension<ExtensionPos> -{ - typedef struct PosLookupSubTable SubTable; -}; - - - -/* - * PosLookup - */ - - -struct PosLookupSubTable -{ - friend struct Lookup; - friend struct PosLookup; - - enum Type { - Single = 1, - Pair = 2, - Cursive = 3, - MarkBase = 4, - MarkLig = 5, - MarkMark = 6, - Context = 7, - ChainContext = 8, - Extension = 9 - }; - - template <typename context_t, typename ...Ts> - typename context_t::return_t dispatch (context_t *c, unsigned int lookup_type, Ts&&... ds) const - { - TRACE_DISPATCH (this, lookup_type); - switch (lookup_type) { - case Single: return_trace (u.single.dispatch (c, std::forward<Ts> (ds)...)); - case Pair: return_trace (u.pair.dispatch (c, std::forward<Ts> (ds)...)); - case Cursive: return_trace (u.cursive.dispatch (c, std::forward<Ts> (ds)...)); - case MarkBase: return_trace (u.markBase.dispatch (c, std::forward<Ts> (ds)...)); - case MarkLig: return_trace (u.markLig.dispatch (c, std::forward<Ts> (ds)...)); - case MarkMark: return_trace (u.markMark.dispatch (c, std::forward<Ts> (ds)...)); - case Context: return_trace (u.context.dispatch (c, std::forward<Ts> (ds)...)); - case ChainContext: return_trace (u.chainContext.dispatch (c, std::forward<Ts> (ds)...)); - case Extension: return_trace (u.extension.dispatch (c, std::forward<Ts> (ds)...)); - default: return_trace (c->default_return_value ()); - } - } - - bool intersects (const hb_set_t *glyphs, unsigned int lookup_type) const - { - hb_intersects_context_t c (glyphs); - return dispatch (&c, lookup_type); - } - - protected: - union { - SinglePos single; - PairPos pair; - CursivePos cursive; - MarkBasePos markBase; - MarkLigPos markLig; - MarkMarkPos markMark; - ContextPos context; - ChainContextPos chainContext; - ExtensionPos extension; - } u; - public: - DEFINE_SIZE_MIN (0); -}; - - -struct PosLookup : Lookup -{ - typedef struct PosLookupSubTable SubTable; - - const SubTable& get_subtable (unsigned int i) const - { return Lookup::get_subtable<SubTable> (i); } - - bool is_reverse () const - { - return false; - } - - bool apply (hb_ot_apply_context_t *c) const - { - TRACE_APPLY (this); - return_trace (dispatch (c)); - } - - bool intersects (const hb_set_t *glyphs) const - { - hb_intersects_context_t c (glyphs); - return dispatch (&c); - } - - hb_collect_glyphs_context_t::return_t collect_glyphs (hb_collect_glyphs_context_t *c) const - { return dispatch (c); } - - hb_closure_lookups_context_t::return_t closure_lookups (hb_closure_lookups_context_t *c, unsigned this_index) const - { - if (c->is_lookup_visited (this_index)) - return hb_closure_lookups_context_t::default_return_value (); - - c->set_lookup_visited (this_index); - if (!intersects (c->glyphs)) - { - c->set_lookup_inactive (this_index); - return hb_closure_lookups_context_t::default_return_value (); - } - c->set_recurse_func (dispatch_closure_lookups_recurse_func); - - 
hb_closure_lookups_context_t::return_t ret = dispatch (c); - return ret; - } - - template <typename set_t> - void collect_coverage (set_t *glyphs) const - { - hb_collect_coverage_context_t<set_t> c (glyphs); - dispatch (&c); - } - - static inline bool apply_recurse_func (hb_ot_apply_context_t *c, unsigned int lookup_index); - - template <typename context_t> - static typename context_t::return_t dispatch_recurse_func (context_t *c, unsigned int lookup_index); - - HB_INTERNAL static hb_closure_lookups_context_t::return_t dispatch_closure_lookups_recurse_func (hb_closure_lookups_context_t *c, unsigned this_index); - - template <typename context_t, typename ...Ts> - typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const - { return Lookup::dispatch<SubTable> (c, std::forward<Ts> (ds)...); } - - bool subset (hb_subset_context_t *c) const - { return Lookup::subset<SubTable> (c); } - - bool sanitize (hb_sanitize_context_t *c) const - { return Lookup::sanitize<SubTable> (c); } -}; - -/* - * GPOS -- Glyph Positioning - * https://docs.microsoft.com/en-us/typography/opentype/spec/gpos - */ - -struct GPOS : GSUBGPOS -{ - static constexpr hb_tag_t tableTag = HB_OT_TAG_GPOS; - - const PosLookup& get_lookup (unsigned int i) const - { return static_cast<const PosLookup &> (GSUBGPOS::get_lookup (i)); } - - static inline void position_start (hb_font_t *font, hb_buffer_t *buffer); - static inline void position_finish_advances (hb_font_t *font, hb_buffer_t *buffer); - static inline void position_finish_offsets (hb_font_t *font, hb_buffer_t *buffer); - - bool subset (hb_subset_context_t *c) const - { - hb_subset_layout_context_t l (c, tableTag, c->plan->gpos_lookups, c->plan->gpos_langsys, c->plan->gpos_features); - return GSUBGPOS::subset<PosLookup> (&l); - } - - bool sanitize (hb_sanitize_context_t *c) const - { return GSUBGPOS::sanitize<PosLookup> (c); } - - HB_INTERNAL bool is_blocklisted (hb_blob_t *blob, - hb_face_t *face) const; - - void collect_variation_indices (hb_collect_variation_indices_context_t *c) const - { - for (unsigned i = 0; i < GSUBGPOS::get_lookup_count (); i++) - { - if (!c->gpos_lookups->has (i)) continue; - const PosLookup &l = get_lookup (i); - l.dispatch (c); - } - } - - void closure_lookups (hb_face_t *face, - const hb_set_t *glyphs, - hb_set_t *lookup_indexes /* IN/OUT */) const - { GSUBGPOS::closure_lookups<PosLookup> (face, glyphs, lookup_indexes); } - - typedef GSUBGPOS::accelerator_t<GPOS> accelerator_t; -}; - - -static void -reverse_cursive_minor_offset (hb_glyph_position_t *pos, unsigned int i, hb_direction_t direction, unsigned int new_parent) -{ - int chain = pos[i].attach_chain(), type = pos[i].attach_type(); - if (likely (!chain || 0 == (type & ATTACH_TYPE_CURSIVE))) - return; - - pos[i].attach_chain() = 0; - - unsigned int j = (int) i + chain; - - /* Stop if we see new parent in the chain. */ - if (j == new_parent) - return; - - reverse_cursive_minor_offset (pos, j, direction, new_parent); - - if (HB_DIRECTION_IS_HORIZONTAL (direction)) - pos[j].y_offset = -pos[i].y_offset; - else - pos[j].x_offset = -pos[i].x_offset; - - pos[j].attach_chain() = -chain; - pos[j].attach_type() = type; -} -static void -propagate_attachment_offsets (hb_glyph_position_t *pos, - unsigned int len, - unsigned int i, - hb_direction_t direction) -{ - /* Adjusts offsets of attached glyphs (both cursive and mark) to accumulate - * offset of glyph they are attached to. 
*/ - int chain = pos[i].attach_chain(), type = pos[i].attach_type(); - if (likely (!chain)) - return; - - pos[i].attach_chain() = 0; - - unsigned int j = (int) i + chain; - - if (unlikely (j >= len)) - return; - - propagate_attachment_offsets (pos, len, j, direction); - - assert (!!(type & ATTACH_TYPE_MARK) ^ !!(type & ATTACH_TYPE_CURSIVE)); - - if (type & ATTACH_TYPE_CURSIVE) - { - if (HB_DIRECTION_IS_HORIZONTAL (direction)) - pos[i].y_offset += pos[j].y_offset; - else - pos[i].x_offset += pos[j].x_offset; - } - else /*if (type & ATTACH_TYPE_MARK)*/ - { - pos[i].x_offset += pos[j].x_offset; - pos[i].y_offset += pos[j].y_offset; - - assert (j < i); - if (HB_DIRECTION_IS_FORWARD (direction)) - for (unsigned int k = j; k < i; k++) { - pos[i].x_offset -= pos[k].x_advance; - pos[i].y_offset -= pos[k].y_advance; - } - else - for (unsigned int k = j + 1; k < i + 1; k++) { - pos[i].x_offset += pos[k].x_advance; - pos[i].y_offset += pos[k].y_advance; - } - } -} - -void -GPOS::position_start (hb_font_t *font HB_UNUSED, hb_buffer_t *buffer) -{ - unsigned int count = buffer->len; - for (unsigned int i = 0; i < count; i++) - buffer->pos[i].attach_chain() = buffer->pos[i].attach_type() = 0; -} - -void -GPOS::position_finish_advances (hb_font_t *font HB_UNUSED, hb_buffer_t *buffer HB_UNUSED) -{ - //_hb_buffer_assert_gsubgpos_vars (buffer); -} - -void -GPOS::position_finish_offsets (hb_font_t *font, hb_buffer_t *buffer) -{ - _hb_buffer_assert_gsubgpos_vars (buffer); - - unsigned int len; - hb_glyph_position_t *pos = hb_buffer_get_glyph_positions (buffer, &len); - hb_direction_t direction = buffer->props.direction; - - /* Handle attachments */ - if (buffer->scratch_flags & HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT) - for (unsigned i = 0; i < len; i++) - propagate_attachment_offsets (pos, len, i, direction); - - if (unlikely (font->slant)) - { - for (unsigned i = 0; i < len; i++) - if (unlikely (pos[i].y_offset)) - pos[i].x_offset += _hb_roundf (font->slant_xy * pos[i].y_offset); - } -} - - -struct GPOS_accelerator_t : GPOS::accelerator_t { - GPOS_accelerator_t (hb_face_t *face) : GPOS::accelerator_t (face) {} -}; - +using Layout::GPOS_impl::PosLookup; +// TODO(garretrieger): Move into new layout directory. 
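The removed propagate_attachment_offsets above (relocated into Layout::GPOS_impl) resolves each glyph's attachment chain recursively: the parent's offset is made absolute first, then folded into the child's. A rough sketch of that idea under simplified assumptions — hypothetical glyph_pos type, no cursive/mark distinction, and without the advance compensation the real code performs:

```cpp
struct glyph_pos            // hypothetical stand-in for hb_glyph_position_t
{
  int x_offset = 0, y_offset = 0;
  int attach_chain = 0;     // relative index of the parent glyph; 0 = none
  bool resolved = false;    // stands in for the attach_chain() = 0 reset
};

// Make pos[i] absolute: resolve its parent first, then accumulate the
// parent's offset into it.
static void propagate (glyph_pos *pos, int len, int i)
{
  int chain = pos[i].attach_chain;
  if (!chain || pos[i].resolved)
    return;
  pos[i].resolved = true;

  int j = i + chain;        // position of the glyph we are attached to
  if (j < 0 || j >= len)
    return;

  propagate (pos, len, j);  // parent must be absolute before we add it

  pos[i].x_offset += pos[j].x_offset;
  pos[i].y_offset += pos[j].y_offset;
}
```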
/* Out-of-class implementation for methods recursing */ - #ifndef HB_NO_OT_LAYOUT template <typename context_t> /*static*/ typename context_t::return_t PosLookup::dispatch_recurse_func (context_t *c, unsigned int lookup_index) @@ -3121,13 +45,16 @@ template <typename context_t> return l.dispatch (c); } -/*static*/ inline hb_closure_lookups_context_t::return_t PosLookup::dispatch_closure_lookups_recurse_func (hb_closure_lookups_context_t *c, unsigned this_index) +template <> +inline hb_closure_lookups_context_t::return_t +PosLookup::dispatch_recurse_func<hb_closure_lookups_context_t> (hb_closure_lookups_context_t *c, unsigned this_index) { const PosLookup &l = c->face->table.GPOS.get_relaxed ()->table->get_lookup (this_index); return l.closure_lookups (c, this_index); } -/*static*/ bool PosLookup::apply_recurse_func (hb_ot_apply_context_t *c, unsigned int lookup_index) +template <> +inline bool PosLookup::dispatch_recurse_func<hb_ot_apply_context_t> (hb_ot_apply_context_t *c, unsigned int lookup_index) { const PosLookup &l = c->face->table.GPOS.get_relaxed ()->table->get_lookup (lookup_index); unsigned int saved_lookup_props = c->lookup_props; @@ -3141,7 +68,6 @@ template <typename context_t> } #endif - } /* namespace OT */ diff --git a/thirdparty/harfbuzz/src/hb-ot-layout-gsub-table.hh b/thirdparty/harfbuzz/src/hb-ot-layout-gsub-table.hh index bef381430b..462542025b 100644 --- a/thirdparty/harfbuzz/src/hb-ot-layout-gsub-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-layout-gsub-table.hh @@ -59,13 +59,16 @@ template <typename context_t> return l.dispatch (c); } -/*static*/ inline hb_closure_lookups_context_t::return_t SubstLookup::dispatch_closure_lookups_recurse_func (hb_closure_lookups_context_t *c, unsigned this_index) +template <> +inline hb_closure_lookups_context_t::return_t +SubstLookup::dispatch_recurse_func<hb_closure_lookups_context_t> (hb_closure_lookups_context_t *c, unsigned this_index) { const SubstLookup &l = c->face->table.GSUB.get_relaxed ()->table->get_lookup (this_index); return l.closure_lookups (c, this_index); } -/*static*/ bool SubstLookup::apply_recurse_func (hb_ot_apply_context_t *c, unsigned int lookup_index) +template <> +inline bool SubstLookup::dispatch_recurse_func<hb_ot_apply_context_t> (hb_ot_apply_context_t *c, unsigned int lookup_index) { const SubstLookup &l = c->face->table.GSUB.get_relaxed ()->table->get_lookup (lookup_index); unsigned int saved_lookup_props = c->lookup_props; diff --git a/thirdparty/harfbuzz/src/hb-ot-layout-gsubgpos.hh b/thirdparty/harfbuzz/src/hb-ot-layout-gsubgpos.hh index c9750ff63b..31da498652 100644 --- a/thirdparty/harfbuzz/src/hb-ot-layout-gsubgpos.hh +++ b/thirdparty/harfbuzz/src/hb-ot-layout-gsubgpos.hh @@ -109,14 +109,10 @@ struct hb_closure_context_t : { done_lookups_glyph_count->set (lookup_index, glyphs->get_population ()); - if (!done_lookups_glyph_set->get (lookup_index)) + if (!done_lookups_glyph_set->has (lookup_index)) { - hb_set_t* empty_set = hb_set_create (); - if (unlikely (!done_lookups_glyph_set->set (lookup_index, empty_set))) - { - hb_set_destroy (empty_set); + if (unlikely (!done_lookups_glyph_set->set (lookup_index, hb::unique_ptr<hb_set_t> {hb_set_create ()}))) return true; - } } hb_set_clear (done_lookups_glyph_set->get (lookup_index)); @@ -165,21 +161,19 @@ struct hb_closure_context_t : hb_set_t *glyphs; hb_set_t output[1]; hb_vector_t<hb_set_t> active_glyphs_stack; - recurse_func_t recurse_func; + recurse_func_t recurse_func = nullptr; unsigned int nesting_level_left; hb_closure_context_t (hb_face_t 
*face_, hb_set_t *glyphs_, hb_map_t *done_lookups_glyph_count_, - hb_hashmap_t<unsigned, hb_set_t *> *done_lookups_glyph_set_, + hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> *done_lookups_glyph_set_, unsigned int nesting_level_left_ = HB_MAX_NESTING_LEVEL) : face (face_), glyphs (glyphs_), - recurse_func (nullptr), nesting_level_left (nesting_level_left_), done_lookups_glyph_count (done_lookups_glyph_count_), - done_lookups_glyph_set (done_lookups_glyph_set_), - lookup_count (0) + done_lookups_glyph_set (done_lookups_glyph_set_) {} ~hb_closure_context_t () { flush (); } @@ -197,8 +191,8 @@ struct hb_closure_context_t : private: hb_map_t *done_lookups_glyph_count; - hb_hashmap_t<unsigned, hb_set_t *> *done_lookups_glyph_set; - unsigned int lookup_count; + hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> *done_lookups_glyph_set; + unsigned int lookup_count = 0; }; @@ -400,7 +394,6 @@ struct hb_collect_coverage_context_t : set_t *set; }; - struct hb_ot_apply_context_t : hb_dispatch_context_t<hb_ot_apply_context_t, bool, HB_DEBUG_APPLY> { @@ -416,7 +409,7 @@ struct hb_ot_apply_context_t : match_func (nullptr), match_data (nullptr) {} - typedef bool (*match_func_t) (hb_codepoint_t glyph_id, const HBUINT16 &value, const void *data); + typedef bool (*match_func_t) (hb_glyph_info_t &info, const HBUINT16 &value, const void *data); void set_ignore_zwnj (bool ignore_zwnj_) { ignore_zwnj = ignore_zwnj_; } void set_ignore_zwj (bool ignore_zwj_) { ignore_zwj = ignore_zwj_; } @@ -434,7 +427,7 @@ struct hb_ot_apply_context_t : MATCH_MAYBE }; - may_match_t may_match (const hb_glyph_info_t &info, + may_match_t may_match (hb_glyph_info_t &info, const HBUINT16 *glyph_data) const { if (!(info.mask & mask) || @@ -442,7 +435,7 @@ struct hb_ot_apply_context_t : return MATCH_NO; if (match_func) - return match_func (info.codepoint, *glyph_data, match_data) ? MATCH_YES : MATCH_NO; + return match_func (info, *glyph_data, match_data) ? 
MATCH_YES : MATCH_NO; return MATCH_MAYBE; } @@ -530,7 +523,7 @@ struct hb_ot_apply_context_t : while (idx + num_items < end) { idx++; - const hb_glyph_info_t &info = c->buffer->info[idx]; + hb_glyph_info_t &info = c->buffer->info[idx]; matcher_t::may_skip_t skip = matcher.may_skip (c, info); if (unlikely (skip == matcher_t::SKIP_YES)) @@ -563,7 +556,7 @@ struct hb_ot_apply_context_t : while (idx > num_items - 1) { idx--; - const hb_glyph_info_t &info = c->buffer->out_info[idx]; + hb_glyph_info_t &info = c->buffer->out_info[idx]; matcher_t::may_skip_t skip = matcher.may_skip (c, info); if (unlikely (skip == matcher_t::SKIP_YES)) @@ -611,7 +604,10 @@ struct hb_ot_apply_context_t : return_t recurse (unsigned int sub_lookup_index) { if (unlikely (nesting_level_left == 0 || !recurse_func || buffer->max_ops-- <= 0)) + { + buffer->shaping_failed = true; return default_return_value (); + } nesting_level_left--; bool ret = recurse_func (this, sub_lookup_index); @@ -621,35 +617,34 @@ struct hb_ot_apply_context_t : skipping_iterator_t iter_input, iter_context; + unsigned int table_index; /* GSUB/GPOS */ hb_font_t *font; hb_face_t *face; hb_buffer_t *buffer; - recurse_func_t recurse_func; + recurse_func_t recurse_func = nullptr; const GDEF &gdef; const VariationStore &var_store; + VariationStore::cache_t *var_store_cache; hb_direction_t direction; - hb_mask_t lookup_mask; - unsigned int table_index; /* GSUB/GPOS */ - unsigned int lookup_index; - unsigned int lookup_props; - unsigned int nesting_level_left; + hb_mask_t lookup_mask = 1; + unsigned int lookup_index = (unsigned) -1; + unsigned int lookup_props = 0; + unsigned int nesting_level_left = HB_MAX_NESTING_LEVEL; bool has_glyph_classes; - bool auto_zwnj; - bool auto_zwj; - bool per_syllable; - bool random; - - uint32_t random_state; - + bool auto_zwnj = true; + bool auto_zwj = true; + bool per_syllable = false; + bool random = false; + uint32_t random_state = 1; + unsigned new_syllables = (unsigned) -1; hb_ot_apply_context_t (unsigned int table_index_, hb_font_t *font_, hb_buffer_t *buffer_) : - iter_input (), iter_context (), + table_index (table_index_), font (font_), face (font->face), buffer (buffer_), - recurse_func (nullptr), gdef ( #ifndef HB_NO_OT_LAYOUT *face->table.GDEF->table @@ -658,18 +653,23 @@ struct hb_ot_apply_context_t : #endif ), var_store (gdef.get_var_store ()), + var_store_cache ( +#ifndef HB_NO_VAR + table_index == 1 && font->num_coords ? 
var_store.create_cache () : nullptr +#else + nullptr +#endif + ), direction (buffer_->props.direction), - lookup_mask (1), - table_index (table_index_), - lookup_index ((unsigned int) -1), - lookup_props (0), - nesting_level_left (HB_MAX_NESTING_LEVEL), - has_glyph_classes (gdef.has_glyph_classes ()), - auto_zwnj (true), - auto_zwj (true), - per_syllable (false), - random (false), - random_state (1) { init_iters (); } + has_glyph_classes (gdef.has_glyph_classes ()) + { init_iters (); } + + ~hb_ot_apply_context_t () + { +#ifndef HB_NO_VAR + VariationStore::destroy_cache (var_store_cache); +#endif + } void init_iters () { @@ -736,6 +736,9 @@ struct hb_ot_apply_context_t : bool ligature = false, bool component = false) const { + if (new_syllables != (unsigned) -1) + buffer->cur().syllable() = new_syllables; + unsigned int props = _hb_glyph_info_get_glyph_props (&buffer->cur()); props |= HB_OT_LAYOUT_GLYPH_PROPS_SUBSTITUTED; if (ligature) @@ -790,8 +793,8 @@ struct hb_ot_apply_context_t : }; -struct hb_get_subtables_context_t : - hb_dispatch_context_t<hb_get_subtables_context_t> +struct hb_accelerate_subtables_context_t : + hb_dispatch_context_t<hb_accelerate_subtables_context_t> { template <typename Type> static inline bool apply_to (const void *obj, OT::hb_ot_apply_context_t *c) @@ -800,15 +803,53 @@ struct hb_get_subtables_context_t : return typed_obj->apply (c); } +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + template <typename T> + static inline auto apply_cached_ (const T *obj, OT::hb_ot_apply_context_t *c, hb_priority<1>) HB_RETURN (bool, obj->apply (c, true) ) + template <typename T> + static inline auto apply_cached_ (const T *obj, OT::hb_ot_apply_context_t *c, hb_priority<0>) HB_RETURN (bool, obj->apply (c) ) + template <typename Type> + static inline bool apply_cached_to (const void *obj, OT::hb_ot_apply_context_t *c) + { + const Type *typed_obj = (const Type *) obj; + return apply_cached_ (typed_obj, c, hb_prioritize); + } + + template <typename T> + static inline auto cache_func_ (const T *obj, OT::hb_ot_apply_context_t *c, bool enter, hb_priority<1>) HB_RETURN (bool, obj->cache_func (c, enter) ) + template <typename T> + static inline bool cache_func_ (const T *obj, OT::hb_ot_apply_context_t *c, bool enter, hb_priority<0>) { return false; } + template <typename Type> + static inline bool cache_func_to (const void *obj, OT::hb_ot_apply_context_t *c, bool enter) + { + const Type *typed_obj = (const Type *) obj; + return cache_func_ (typed_obj, c, enter, hb_prioritize); + } +#endif + typedef bool (*hb_apply_func_t) (const void *obj, OT::hb_ot_apply_context_t *c); + typedef bool (*hb_cache_func_t) (const void *obj, OT::hb_ot_apply_context_t *c, bool enter); struct hb_applicable_t { + friend struct hb_accelerate_subtables_context_t; + friend struct hb_ot_layout_lookup_accelerator_t; + template <typename T> - void init (const T &obj_, hb_apply_func_t apply_func_) + void init (const T &obj_, + hb_apply_func_t apply_func_ +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + , hb_apply_func_t apply_cached_func_ + , hb_cache_func_t cache_func_ +#endif + ) { obj = &obj_; apply_func = apply_func_; +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + apply_cached_func = apply_cached_func_; + cache_func = cache_func_; +#endif digest.init (); obj_.get_coverage ().collect_coverage (&digest); } @@ -817,38 +858,93 @@ struct hb_get_subtables_context_t : { return digest.may_have (c->buffer->cur().codepoint) && apply_func (obj, c); } +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + bool apply_cached (OT::hb_ot_apply_context_t *c) const + { 
+ return digest.may_have (c->buffer->cur().codepoint) && apply_cached_func (obj, c); + } + bool cache_enter (OT::hb_ot_apply_context_t *c) const + { + return cache_func (obj, c, true); + } + void cache_leave (OT::hb_ot_apply_context_t *c) const + { + cache_func (obj, c, false); + } +#endif private: const void *obj; hb_apply_func_t apply_func; +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + hb_apply_func_t apply_cached_func; + hb_cache_func_t cache_func; +#endif hb_set_digest_t digest; }; typedef hb_vector_t<hb_applicable_t> array_t; +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + template <typename T> + auto cache_cost (const T &obj, hb_priority<1>) HB_AUTO_RETURN ( obj.cache_cost () ) + template <typename T> + auto cache_cost (const T &obj, hb_priority<0>) HB_AUTO_RETURN ( 0u ) +#endif + /* Dispatch interface. */ template <typename T> return_t dispatch (const T &obj) { - hb_applicable_t *entry = array.push(); - entry->init (obj, apply_to<T>); + hb_applicable_t entry; + + entry.init (obj, + apply_to<T> +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + , apply_cached_to<T> + , cache_func_to<T> +#endif + ); + + array.push (entry); + +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + /* Cache handling + * + * We allow one subtable from each lookup to use a cache. The assumption + * being that multiple subtables of the same lookup cannot use a cache + * because the resources they would use will collide. As such, we ask + * each subtable to tell us how much it costs (which a cache would avoid), + * and we allocate the cache opportunity to the costliest subtable. + */ + unsigned cost = cache_cost (obj, hb_prioritize); + if (cost > cache_user_cost && !array.in_error ()) + { + cache_user_idx = array.length - 1; + cache_user_cost = cost; + } +#endif + return hb_empty_t (); } static return_t default_return_value () { return hb_empty_t (); } - hb_get_subtables_context_t (array_t &array_) : - array (array_) {} + hb_accelerate_subtables_context_t (array_t &array_) : + array (array_) {} array_t &array; -}; - +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + unsigned cache_user_idx = (unsigned) -1; + unsigned cache_user_cost = 0; +#endif +}; typedef bool (*intersects_func_t) (const hb_set_t *glyphs, const HBUINT16 &value, const void *data); typedef void (*intersected_glyphs_func_t) (const hb_set_t *glyphs, const void *data, unsigned value, hb_set_t *intersected_glyphs); typedef void (*collect_glyphs_func_t) (hb_set_t *glyphs, const HBUINT16 &value, const void *data); -typedef bool (*match_func_t) (hb_codepoint_t glyph_id, const HBUINT16 &value, const void *data); +typedef bool (*match_func_t) (hb_glyph_info_t &info, const HBUINT16 &value, const void *data); struct ContextClosureFuncs { @@ -863,6 +959,10 @@ struct ContextApplyFuncs { match_func_t match; }; +struct ChainContextApplyFuncs +{ + match_func_t match[3]; +}; static inline bool intersects_glyph (const hb_set_t *glyphs, const HBUINT16 &value, const void *data HB_UNUSED) @@ -939,19 +1039,30 @@ static inline void collect_array (hb_collect_glyphs_context_t *c HB_UNUSED, } -static inline bool match_glyph (hb_codepoint_t glyph_id, const HBUINT16 &value, const void *data HB_UNUSED) +static inline bool match_glyph (hb_glyph_info_t &info, const HBUINT16 &value, const void *data HB_UNUSED) { - return glyph_id == value; + return info.codepoint == value; } -static inline bool match_class (hb_codepoint_t glyph_id, const HBUINT16 &value, const void *data) +static inline bool match_class (hb_glyph_info_t &info, const HBUINT16 &value, const void *data) { const ClassDef &class_def = *reinterpret_cast<const 
ClassDef *>(data); - return class_def.get_class (glyph_id) == value; + return class_def.get_class (info.codepoint) == value; } -static inline bool match_coverage (hb_codepoint_t glyph_id, const HBUINT16 &value, const void *data) +static inline bool match_class_cached (hb_glyph_info_t &info, const HBUINT16 &value, const void *data) +{ + unsigned klass = info.syllable(); + if (klass < 255) + return klass == value; + const ClassDef &class_def = *reinterpret_cast<const ClassDef *>(data); + klass = class_def.get_class (info.codepoint); + if (likely (klass < 255)) + info.syllable() = klass; + return klass == value; +} +static inline bool match_coverage (hb_glyph_info_t &info, const HBUINT16 &value, const void *data) { const Offset16To<Coverage> &coverage = (const Offset16To<Coverage>&)value; - return (data+coverage).get_coverage (glyph_id) != NOT_COVERED; + return (data+coverage).get_coverage (info.codepoint) != NOT_COVERED; } static inline bool would_match_input (hb_would_apply_context_t *c, @@ -964,8 +1075,12 @@ static inline bool would_match_input (hb_would_apply_context_t *c, return false; for (unsigned int i = 1; i < count; i++) - if (likely (!match_func (c->glyphs[i], input[i - 1], match_data))) + { + hb_glyph_info_t info; + info.codepoint = c->glyphs[i]; + if (likely (!match_func (info, input[i - 1], match_data))) return false; + } return true; } @@ -2125,19 +2240,54 @@ struct ContextFormat2 const Coverage &get_coverage () const { return this+coverage; } - bool apply (hb_ot_apply_context_t *c) const + unsigned cache_cost () const + { + unsigned c = (this+classDef).cost () * ruleSet.len; + return c >= 4 ? c : 0; + } + bool cache_func (hb_ot_apply_context_t *c, bool enter) const + { + if (enter) + { + if (!HB_BUFFER_TRY_ALLOCATE_VAR (c->buffer, syllable)) + return false; + auto &info = c->buffer->info; + unsigned count = c->buffer->len; + for (unsigned i = 0; i < count; i++) + info[i].syllable() = 255; + c->new_syllables = 255; + return true; + } + else + { + c->new_syllables = (unsigned) -1; + HB_BUFFER_DEALLOCATE_VAR (c->buffer, syllable); + return true; + } + } + + bool apply (hb_ot_apply_context_t *c, bool cached = false) const { TRACE_APPLY (this); unsigned int index = (this+coverage).get_coverage (c->buffer->cur().codepoint); if (likely (index == NOT_COVERED)) return_trace (false); const ClassDef &class_def = this+classDef; - index = class_def.get_class (c->buffer->cur().codepoint); - const RuleSet &rule_set = this+ruleSet[index]; + struct ContextApplyLookupContext lookup_context = { - {match_class}, + {cached ? match_class_cached : match_class}, &class_def }; + + if (cached && c->buffer->cur().syllable() < 255) + index = c->buffer->cur().syllable (); + else + { + index = class_def.get_class (c->buffer->cur().codepoint); + if (cached && index < 255) + c->buffer->cur().syllable() = index; + } + const RuleSet &rule_set = this+ruleSet[index]; return_trace (rule_set.apply (c, lookup_context)); } @@ -2411,7 +2561,7 @@ struct ChainContextCollectGlyphsLookupContext struct ChainContextApplyLookupContext { - ContextApplyFuncs funcs; + ChainContextApplyFuncs funcs; const void *match_data[3]; }; @@ -2499,7 +2649,7 @@ static inline bool chain_context_would_apply_lookup (hb_would_apply_context_t *c return (c->zero_context ? 
!backtrackCount && !lookaheadCount : true) && would_match_input (c, inputCount, input, - lookup_context.funcs.match, lookup_context.match_data[1]); + lookup_context.funcs.match[1], lookup_context.match_data[1]); } static inline bool chain_context_apply_lookup (hb_ot_apply_context_t *c, @@ -2518,11 +2668,11 @@ static inline bool chain_context_apply_lookup (hb_ot_apply_context_t *c, unsigned match_positions[HB_MAX_CONTEXT_LENGTH]; if (!(match_input (c, inputCount, input, - lookup_context.funcs.match, lookup_context.match_data[1], + lookup_context.funcs.match[1], lookup_context.match_data[1], &match_end, match_positions) && (end_index = match_end) && match_lookahead (c, lookaheadCount, lookahead, - lookup_context.funcs.match, lookup_context.match_data[2], + lookup_context.funcs.match[2], lookup_context.match_data[2], match_end, &end_index))) { c->buffer->unsafe_to_concat (c->buffer->idx, end_index); @@ -2532,7 +2682,7 @@ static inline bool chain_context_apply_lookup (hb_ot_apply_context_t *c, unsigned start_index = c->buffer->out_len; if (!match_backtrack (c, backtrackCount, backtrack, - lookup_context.funcs.match, lookup_context.match_data[0], + lookup_context.funcs.match[0], lookup_context.match_data[0], &start_index)) { c->buffer->unsafe_to_concat_from_outbuffer (start_index, end_index); @@ -2934,7 +3084,7 @@ struct ChainContextFormat1 { const ChainRuleSet &rule_set = this+ruleSet[(this+coverage).get_coverage (c->glyphs[0])]; struct ChainContextApplyLookupContext lookup_context = { - {match_glyph}, + {{match_glyph, match_glyph, match_glyph}}, {nullptr, nullptr, nullptr} }; return rule_set.would_apply (c, lookup_context); @@ -2950,7 +3100,7 @@ struct ChainContextFormat1 const ChainRuleSet &rule_set = this+ruleSet[index]; struct ChainContextApplyLookupContext lookup_context = { - {match_glyph}, + {{match_glyph, match_glyph, match_glyph}}, {nullptr, nullptr, nullptr} }; return_trace (rule_set.apply (c, lookup_context)); @@ -3134,7 +3284,7 @@ struct ChainContextFormat2 unsigned int index = input_class_def.get_class (c->glyphs[0]); const ChainRuleSet &rule_set = this+ruleSet[index]; struct ChainContextApplyLookupContext lookup_context = { - {match_class}, + {{match_class, match_class, match_class}}, {&backtrack_class_def, &input_class_def, &lookahead_class_def} @@ -3144,7 +3294,33 @@ struct ChainContextFormat2 const Coverage &get_coverage () const { return this+coverage; } - bool apply (hb_ot_apply_context_t *c) const + unsigned cache_cost () const + { + unsigned c = (this+lookaheadClassDef).cost () * ruleSet.len; + return c >= 4 ? 
c : 0; + } + bool cache_func (hb_ot_apply_context_t *c, bool enter) const + { + if (enter) + { + if (!HB_BUFFER_TRY_ALLOCATE_VAR (c->buffer, syllable)) + return false; + auto &info = c->buffer->info; + unsigned count = c->buffer->len; + for (unsigned i = 0; i < count; i++) + info[i].syllable() = 255; + c->new_syllables = 255; + return true; + } + else + { + c->new_syllables = (unsigned) -1; + HB_BUFFER_DEALLOCATE_VAR (c->buffer, syllable); + return true; + } + } + + bool apply (hb_ot_apply_context_t *c, bool cached = false) const { TRACE_APPLY (this); unsigned int index = (this+coverage).get_coverage (c->buffer->cur().codepoint); @@ -3154,14 +3330,27 @@ struct ChainContextFormat2 const ClassDef &input_class_def = this+inputClassDef; const ClassDef &lookahead_class_def = this+lookaheadClassDef; - index = input_class_def.get_class (c->buffer->cur().codepoint); - const ChainRuleSet &rule_set = this+ruleSet[index]; + /* For ChainContextFormat2 we cache the LookaheadClassDef instead of InputClassDef. + * The reason is that most heavy fonts want to identify a glyph in context and apply + * a lookup to it. In this scenario, the length of the input sequence is one, whereas + * the lookahead / backtrack are typically longer. The one glyph in input sequence is + * looked-up below and no input glyph is looked up in individual rules, whereas the + * lookahead and backtrack glyphs are tried. Since we match lookahead before backtrack, + * we should cache lookahead. This decision showed a 20% improvement in shaping of + * the Gulzar font. + */ + struct ChainContextApplyLookupContext lookup_context = { - {match_class}, + {{cached && &backtrack_class_def == &input_class_def ? match_class_cached : match_class, + cached && &input_class_def == &lookahead_class_def ? match_class_cached : match_class, + cached ?
match_class_cached : match_class}}, {&backtrack_class_def, &input_class_def, &lookahead_class_def} }; + + index = input_class_def.get_class (c->buffer->cur().codepoint); + const ChainRuleSet &rule_set = this+ruleSet[index]; return_trace (rule_set.apply (c, lookup_context)); } @@ -3359,7 +3548,7 @@ struct ChainContextFormat3 const Array16OfOffset16To<Coverage> &lookahead = StructAfter<Array16OfOffset16To<Coverage>> (input); const Array16Of<LookupRecord> &lookup = StructAfter<Array16Of<LookupRecord>> (lookahead); struct ChainContextApplyLookupContext lookup_context = { - {match_coverage}, + {{match_coverage, match_coverage, match_coverage}}, {this, this, this} }; return chain_context_would_apply_lookup (c, @@ -3386,7 +3575,7 @@ struct ChainContextFormat3 const Array16OfOffset16To<Coverage> &lookahead = StructAfter<Array16OfOffset16To<Coverage>> (input); const Array16Of<LookupRecord> &lookup = StructAfter<Array16Of<LookupRecord>> (lookahead); struct ChainContextApplyLookupContext lookup_context = { - {match_coverage}, + {{match_coverage, match_coverage, match_coverage}}, {this, this, this} }; return_trace (chain_context_apply_lookup (c, @@ -3623,25 +3812,63 @@ struct hb_ot_layout_lookup_accelerator_t lookup.collect_coverage (&digest); subtables.init (); - OT::hb_get_subtables_context_t c_get_subtables (subtables); - lookup.dispatch (&c_get_subtables); + OT::hb_accelerate_subtables_context_t c_accelerate_subtables (subtables); + lookup.dispatch (&c_accelerate_subtables); + +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + cache_user_idx = c_accelerate_subtables.cache_user_idx; + for (unsigned i = 0; i < subtables.length; i++) + if (i != cache_user_idx) + subtables[i].apply_cached_func = subtables[i].apply_func; +#endif } void fini () { subtables.fini (); } bool may_have (hb_codepoint_t g) const { return digest.may_have (g); } - bool apply (hb_ot_apply_context_t *c) const + bool apply (hb_ot_apply_context_t *c, bool use_cache) const { - for (unsigned int i = 0; i < subtables.length; i++) - if (subtables[i].apply (c)) - return true; +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + if (use_cache) + { + for (unsigned int i = 0; i < subtables.length; i++) + if (subtables[i].apply_cached (c)) + return true; + } + else +#endif + { + for (unsigned int i = 0; i < subtables.length; i++) + if (subtables[i].apply (c)) + return true; + } return false; } + bool cache_enter (OT::hb_ot_apply_context_t *c) const + { +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + return cache_user_idx != (unsigned) -1 && + subtables[cache_user_idx].cache_enter (c); +#else + return false; +#endif + } + void cache_leave (OT::hb_ot_apply_context_t *c) const + { +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + subtables[cache_user_idx].cache_leave (c); +#endif + } + + private: hb_set_digest_t digest; - hb_get_subtables_context_t::array_t subtables; + hb_accelerate_subtables_context_t::array_t subtables; +#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE + unsigned cache_user_idx = (unsigned) -1; +#endif }; struct GSUBGPOS @@ -3721,6 +3948,8 @@ struct GSUBGPOS hb_set_t visited_lookups, inactive_lookups; OT::hb_closure_lookups_context_t c (face, glyphs, &visited_lookups, &inactive_lookups); + c.set_recurse_func (TLookup::template dispatch_recurse_func<hb_closure_lookups_context_t>); + for (unsigned lookup_index : + hb_iter (lookup_indexes)) reinterpret_cast<const TLookup &> (get_lookup (lookup_index)).closure_lookups (&c, lookup_index); @@ -3729,7 +3958,7 @@ struct GSUBGPOS } void prune_langsys (const hb_map_t *duplicate_feature_map, - hb_hashmap_t<unsigned, hb_set_t *> 
*script_langsys_map, + hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> *script_langsys_map, hb_set_t *new_feature_indexes /* OUT */) const { hb_prune_langsys_context_t c (this, script_langsys_map, duplicate_feature_map, new_feature_indexes); @@ -3787,7 +4016,7 @@ struct GSUBGPOS hb_map_t *duplicate_feature_map /* OUT */) const { if (feature_indices->is_empty ()) return; - hb_hashmap_t<hb_tag_t, hb_set_t *> unique_features; + hb_hashmap_t<hb_tag_t, hb::unique_ptr<hb_set_t>> unique_features; //find out duplicate features after subset for (unsigned i : feature_indices->iter ()) { @@ -3795,16 +4024,9 @@ struct GSUBGPOS if (t == HB_MAP_VALUE_INVALID) continue; if (!unique_features.has (t)) { - hb_set_t* indices = hb_set_create (); - if (unlikely (indices == hb_set_get_empty () || - !unique_features.set (t, indices))) - { - hb_set_destroy (indices); - for (auto _ : unique_features.iter ()) - hb_set_destroy (_.second); + if (unlikely (!unique_features.set (t, hb::unique_ptr<hb_set_t> {hb_set_create ()}))) return; - } - if (unique_features.get (t)) + if (unique_features.has (t)) unique_features.get (t)->add (i); duplicate_feature_map->set (i, i); continue; @@ -3849,9 +4071,6 @@ struct GSUBGPOS duplicate_feature_map->set (i, i); } } - - for (auto _ : unique_features.iter ()) - hb_set_destroy (_.second); } void prune_features (const hb_map_t *lookup_indices, /* IN */ diff --git a/thirdparty/harfbuzz/src/hb-ot-layout.cc b/thirdparty/harfbuzz/src/hb-ot-layout.cc index f4ea21a4f9..142c843dad 100644 --- a/thirdparty/harfbuzz/src/hb-ot-layout.cc +++ b/thirdparty/harfbuzz/src/hb-ot-layout.cc @@ -46,7 +46,7 @@ #include "hb-ot-layout-gdef-table.hh" #include "hb-ot-layout-gsub-table.hh" #include "hb-ot-layout-gpos-table.hh" -#include "hb-ot-layout-base-table.hh" // Just so we compile it; unused otherwise. +#include "hb-ot-layout-base-table.hh" #include "hb-ot-layout-jstf-table.hh" // Just so we compile it; unused otherwise. #include "hb-ot-name-table.hh" #include "hb-ot-os2-table.hh" @@ -55,6 +55,7 @@ #include "hb-aat-layout-opbd-table.hh" // Just so we compile it; unused otherwise. 
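Several hunks in this diff swap hb_hashmap_t<unsigned, hb_set_t *> for hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>>, which is why the matching hb_set_destroy cleanup loops vanish further down. A rough standard-library analogue of that ownership change — std::unordered_map, std::set, and the function name here are stand-ins, not HarfBuzz API:

```cpp
#include <memory>
#include <set>
#include <unordered_map>

using glyph_set = std::set<unsigned>;

static void closure_sketch ()
{
  // Before: unordered_map<unsigned, glyph_set*> plus a manual destroy loop
  // on every exit path. After: the map owns its values, so early returns
  // and normal scope exit both free everything.
  std::unordered_map<unsigned, std::unique_ptr<glyph_set>> done_lookups;

  unsigned lookup_index = 0;
  if (!done_lookups.count (lookup_index))
    done_lookups.emplace (lookup_index, std::make_unique<glyph_set> ());

  done_lookups[lookup_index]->insert (42);
}                               // all sets destroyed here automatically
```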
using OT::Layout::GSUB::GSUB; +using OT::Layout::GPOS; /** * SECTION:hb-ot-layout @@ -260,7 +261,6 @@ _hb_ot_layout_set_glyph_props (hb_font_t *font, { _hb_glyph_info_set_glyph_props (&buffer->info[i], gdef.get_glyph_props (buffer->info[i].codepoint)); _hb_glyph_info_clear_lig_props (&buffer->info[i]); - buffer->info[i].syllable() = 0; } } @@ -401,7 +401,7 @@ GSUB::is_blocklisted (hb_blob_t *blob HB_UNUSED, } bool -OT::GPOS::is_blocklisted (hb_blob_t *blob HB_UNUSED, +GPOS::is_blocklisted (hb_blob_t *blob HB_UNUSED, hb_face_t *face HB_UNUSED) const { #ifdef HB_NO_OT_LAYOUT_BLOCKLIST @@ -1501,15 +1501,12 @@ hb_ot_layout_lookup_substitute_closure (hb_face_t *face, hb_set_t *glyphs /* OUT */) { hb_map_t done_lookups_glyph_count; - hb_hashmap_t<unsigned, hb_set_t *> done_lookups_glyph_set; + hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> done_lookups_glyph_set; OT::hb_closure_context_t c (face, glyphs, &done_lookups_glyph_count, &done_lookups_glyph_set); const OT::SubstLookup& l = face->table.GSUB->table->get_lookup (lookup_index); l.closure (&c, lookup_index); - - for (auto _ : done_lookups_glyph_set.iter ()) - hb_set_destroy (_.second); } /** @@ -1529,7 +1526,7 @@ hb_ot_layout_lookups_substitute_closure (hb_face_t *face, hb_set_t *glyphs /* OUT */) { hb_map_t done_lookups_glyph_count; - hb_hashmap_t<unsigned, hb_set_t *> done_lookups_glyph_set; + hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> done_lookups_glyph_set; OT::hb_closure_context_t c (face, glyphs, &done_lookups_glyph_count, &done_lookups_glyph_set); const GSUB& gsub = *face->table.GSUB->table; @@ -1551,13 +1548,10 @@ hb_ot_layout_lookups_substitute_closure (hb_face_t *face, } } while (iteration_count++ <= HB_CLOSURE_MAX_STAGES && glyphs_length != glyphs->get_population ()); - - for (auto _ : done_lookups_glyph_set.iter ()) - hb_set_destroy (_.second); } /* - * OT::GPOS + * GPOS */ @@ -1588,7 +1582,7 @@ hb_ot_layout_has_positioning (hb_face_t *face) void hb_ot_layout_position_start (hb_font_t *font, hb_buffer_t *buffer) { - OT::GPOS::position_start (font, buffer); + GPOS::position_start (font, buffer); } @@ -1603,7 +1597,7 @@ hb_ot_layout_position_start (hb_font_t *font, hb_buffer_t *buffer) void hb_ot_layout_position_finish_advances (hb_font_t *font, hb_buffer_t *buffer) { - OT::GPOS::position_finish_advances (font, buffer); + GPOS::position_finish_advances (font, buffer); } /** @@ -1617,7 +1611,7 @@ hb_ot_layout_position_finish_advances (hb_font_t *font, hb_buffer_t *buffer) void hb_ot_layout_position_finish_offsets (hb_font_t *font, hb_buffer_t *buffer) { - OT::GPOS::position_finish_offsets (font, buffer); + GPOS::position_finish_offsets (font, buffer); } @@ -1652,7 +1646,7 @@ hb_ot_layout_get_size_params (hb_face_t *face, unsigned int *range_start, /* OUT. May be NULL */ unsigned int *range_end /* OUT. 
May be NULL */) { - const OT::GPOS &gpos = *face->table.GPOS->table; + const GPOS &gpos = *face->table.GPOS->table; const hb_tag_t tag = HB_TAG ('s','i','z','e'); unsigned int num_features = gpos.get_feature_count (); @@ -1803,7 +1797,7 @@ hb_ot_layout_feature_get_characters (hb_face_t *face, struct GSUBProxy { static constexpr unsigned table_index = 0u; - static constexpr bool inplace = false; + static constexpr bool always_inplace = false; typedef OT::SubstLookup Lookup; GSUBProxy (hb_face_t *face) : @@ -1817,14 +1811,14 @@ struct GSUBProxy struct GPOSProxy { static constexpr unsigned table_index = 1u; - static constexpr bool inplace = true; + static constexpr bool always_inplace = true; typedef OT::PosLookup Lookup; GPOSProxy (hb_face_t *face) : table (*face->table.GPOS->table), accels (face->table.GPOS->accels) {} - const OT::GPOS &table; + const GPOS &table; const OT::hb_ot_layout_lookup_accelerator_t *accels; }; @@ -1833,6 +1827,8 @@ static inline bool apply_forward (OT::hb_ot_apply_context_t *c, const OT::hb_ot_layout_lookup_accelerator_t &accel) { + bool use_cache = accel.cache_enter (c); + bool ret = false; hb_buffer_t *buffer = c->buffer; while (buffer->idx < buffer->len && buffer->successful) @@ -1842,7 +1838,7 @@ apply_forward (OT::hb_ot_apply_context_t *c, (buffer->cur().mask & c->lookup_mask) && c->check_glyph_property (&buffer->cur(), c->lookup_props)) { - applied = accel.apply (c); + applied = accel.apply (c, use_cache); } if (applied) @@ -1850,6 +1846,10 @@ apply_forward (OT::hb_ot_apply_context_t *c, else (void) buffer->next_glyph (); } + + if (use_cache) + accel.cache_leave (c); + return ret; } @@ -1864,7 +1864,7 @@ apply_backward (OT::hb_ot_apply_context_t *c, if (accel.may_have (buffer->cur().codepoint) && (buffer->cur().mask & c->lookup_mask) && c->check_glyph_property (&buffer->cur(), c->lookup_props)) - ret |= accel.apply (c); + ret |= accel.apply (c, false); /* The reverse lookup doesn't "advance" cursor (for good reason). 
*/ buffer->idx--; @@ -1890,13 +1890,13 @@ apply_string (OT::hb_ot_apply_context_t *c, if (likely (!lookup.is_reverse ())) { /* in/out forward substitution/positioning */ - if (!Proxy::inplace) + if (!Proxy::always_inplace) buffer->clear_output (); buffer->idx = 0; apply_forward (c, accel); - if (!Proxy::inplace) + if (!Proxy::always_inplace) buffer->sync (); } else @@ -1917,7 +1917,7 @@ inline void hb_ot_map_t::apply (const Proxy &proxy, const unsigned int table_index = proxy.table_index; unsigned int i = 0; OT::hb_ot_apply_context_t c (table_index, font, buffer); - c.set_recurse_func (Proxy::Lookup::apply_recurse_func); + c.set_recurse_func (Proxy::Lookup::template dispatch_recurse_func<OT::hb_ot_apply_context_t>); for (unsigned int stage_index = 0; stage_index < stages[table_index].length; stage_index++) { diff --git a/thirdparty/harfbuzz/src/hb-ot-layout.hh b/thirdparty/harfbuzz/src/hb-ot-layout.hh index 75bba0bc50..6395f06670 100644 --- a/thirdparty/harfbuzz/src/hb-ot-layout.hh +++ b/thirdparty/harfbuzz/src/hb-ot-layout.hh @@ -589,13 +589,11 @@ _hb_buffer_allocate_gsubgpos_vars (hb_buffer_t *buffer) { HB_BUFFER_ALLOCATE_VAR (buffer, glyph_props); HB_BUFFER_ALLOCATE_VAR (buffer, lig_props); - HB_BUFFER_ALLOCATE_VAR (buffer, syllable); } static inline void _hb_buffer_deallocate_gsubgpos_vars (hb_buffer_t *buffer) { - HB_BUFFER_DEALLOCATE_VAR (buffer, syllable); HB_BUFFER_DEALLOCATE_VAR (buffer, lig_props); HB_BUFFER_DEALLOCATE_VAR (buffer, glyph_props); } @@ -605,7 +603,6 @@ _hb_buffer_assert_gsubgpos_vars (hb_buffer_t *buffer) { HB_BUFFER_ASSERT_VAR (buffer, glyph_props); HB_BUFFER_ASSERT_VAR (buffer, lig_props); - HB_BUFFER_ASSERT_VAR (buffer, syllable); } /* Make sure no one directly touches our props... */ diff --git a/thirdparty/harfbuzz/src/hb-ot-name-language-static.hh b/thirdparty/harfbuzz/src/hb-ot-name-language-static.hh index c496dc2981..0e0f2d632a 100644 --- a/thirdparty/harfbuzz/src/hb-ot-name-language-static.hh +++ b/thirdparty/harfbuzz/src/hb-ot-name-language-static.hh @@ -45,7 +45,7 @@ struct hb_ot_language_map_t }; static const hb_ot_language_map_t -hb_ms_language_map[] = +_hb_ms_language_map[] = { {0x0001, "ar"}, /* ??? */ {0x0004, "zh"}, /* ??? 
*/ @@ -298,7 +298,7 @@ hb_ms_language_map[] = }; static const hb_ot_language_map_t -hb_mac_language_map[] = +_hb_mac_language_map[] = { { 0, "en"}, /* English */ { 1, "fr"}, /* French */ @@ -441,16 +441,16 @@ hb_language_t _hb_ot_name_language_for_ms_code (unsigned int code) { return _hb_ot_name_language_for (code, - hb_ms_language_map, - ARRAY_LENGTH (hb_ms_language_map)); + _hb_ms_language_map, + ARRAY_LENGTH (_hb_ms_language_map)); } hb_language_t _hb_ot_name_language_for_mac_code (unsigned int code) { return _hb_ot_name_language_for (code, - hb_mac_language_map, - ARRAY_LENGTH (hb_mac_language_map)); + _hb_mac_language_map, + ARRAY_LENGTH (_hb_mac_language_map)); } #endif /* HB_OT_NAME_LANGUAGE_STATIC_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-name-table.hh b/thirdparty/harfbuzz/src/hb-ot-name-table.hh index d52367e9b1..01107aad67 100644 --- a/thirdparty/harfbuzz/src/hb-ot-name-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-name-table.hh @@ -156,7 +156,7 @@ struct NameRecord }; static int -_hb_ot_name_entry_cmp_key (const void *pa, const void *pb) +_hb_ot_name_entry_cmp_key (const void *pa, const void *pb, bool exact) { const hb_ot_name_entry_t *a = (const hb_ot_name_entry_t *) pa; const hb_ot_name_entry_t *b = (const hb_ot_name_entry_t *) pb; @@ -169,8 +169,23 @@ _hb_ot_name_entry_cmp_key (const void *pa, const void *pb) if (a->language == b->language) return 0; if (!a->language) return -1; if (!b->language) return +1; - return strcmp (hb_language_to_string (a->language), - hb_language_to_string (b->language)); + + const char *astr = hb_language_to_string (a->language); + const char *bstr = hb_language_to_string (b->language); + + signed c = strcmp (astr, bstr); + + if (!exact && c) + { + unsigned la = strlen (astr); + unsigned lb = strlen (bstr); + // 'a' is the user request, and 'b' is the string in the font. + // If e.g. the user asks for "en-us" and the font has "en", approve. + if (la > lb && astr[lb] == '-' && !strncmp (astr, bstr, lb)) + return 0; + } + + return c; } static int @@ -178,7 +193,7 @@ _hb_ot_name_entry_cmp (const void *pa, const void *pb) { /* Compare by name_id, then language, then score, then index.
*/ - int v = _hb_ot_name_entry_cmp_key (pa, pb); + int v = _hb_ot_name_entry_cmp_key (pa, pb, true); if (v) return v; @@ -330,7 +345,18 @@ struct name const hb_ot_name_entry_t *entry = hb_bsearch (key, (const hb_ot_name_entry_t *) this->names, this->names.length, sizeof (hb_ot_name_entry_t), - _hb_ot_name_entry_cmp_key); + _hb_ot_name_entry_cmp_key, + true); + + if (!entry) + { + entry = hb_bsearch (key, (const hb_ot_name_entry_t *) this->names, + this->names.length, + sizeof (hb_ot_name_entry_t), + _hb_ot_name_entry_cmp_key, + false); + } + if (!entry) return -1; diff --git a/thirdparty/harfbuzz/src/hb-ot-os2-table.hh b/thirdparty/harfbuzz/src/hb-ot-os2-table.hh index f0035e2f04..3473afef54 100644 --- a/thirdparty/harfbuzz/src/hb-ot-os2-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-os2-table.hh @@ -224,9 +224,11 @@ struct OS2 *max_cp = hb_min (0xFFFFu, codepoints->get_max ()); } - /* https://github.com/Microsoft/Font-Validator/blob/520aaae/OTFontFileVal/val_OS2.cs#L644-L681 */ + /* https://github.com/Microsoft/Font-Validator/blob/520aaae/OTFontFileVal/val_OS2.cs#L644-L681 + * https://docs.microsoft.com/en-us/typography/legacy/legacy_arabic_fonts */ enum font_page_t { + FONT_PAGE_NONE = 0, FONT_PAGE_HEBREW = 0xB100, /* Hebrew Windows 3.1 font page */ FONT_PAGE_SIMP_ARABIC = 0xB200, /* Simplified Arabic Windows 3.1 font page */ FONT_PAGE_TRAD_ARABIC = 0xB300, /* Traditional Arabic Windows 3.1 font page */ diff --git a/thirdparty/harfbuzz/src/hb-ot-post-table-v2subset.hh b/thirdparty/harfbuzz/src/hb-ot-post-table-v2subset.hh index 0f3cd8e24f..4d427e5431 100644 --- a/thirdparty/harfbuzz/src/hb-ot-post-table-v2subset.hh +++ b/thirdparty/harfbuzz/src/hb-ot-post-table-v2subset.hh @@ -52,16 +52,16 @@ HB_INTERNAL bool postV2Tail::serialize (hb_serialize_context_t *c, { unsigned glyph_id = _.first; unsigned new_index = _.second; - + if (new_index < 258) continue; if (copied_indices.has (new_index)) continue; copied_indices.add (new_index); - + hb_bytes_t s = reinterpret_cast<const post::accelerator_t*> (_post)->find_glyph_name (glyph_id); HBUINT8 *o = c->allocate_size<HBUINT8> (HBUINT8::static_size * (s.length + 1)); if (unlikely (!o)) return_trace (false); if (!c->check_assign (o[0], s.length, HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false); - memcpy (o+1, s.arrayZ, HBUINT8::static_size * s.length); + hb_memcpy (o+1, s.arrayZ, HBUINT8::static_size * s.length); } return_trace (true); @@ -78,17 +78,19 @@ HB_INTERNAL bool postV2Tail::subset (hb_subset_context_t *c) const post::accelerator_t _post (c->plan->source); - hb_hashmap_t<hb_bytes_t, unsigned, std::nullptr_t, unsigned, nullptr, (unsigned)-1> glyph_name_to_new_index; + hb_hashmap_t<hb_bytes_t, unsigned, true> glyph_name_to_new_index; for (hb_codepoint_t new_gid = 0; new_gid < num_glyphs; new_gid++) { hb_codepoint_t old_gid = reverse_glyph_map.get (new_gid); unsigned old_index = glyphNameIndex[old_gid]; unsigned new_index; + const unsigned *new_index2; if (old_index <= 257) new_index = old_index; - else if (old_new_index_map.has (old_index)) new_index = old_new_index_map.get (old_index); - else + else if (old_new_index_map.has (old_index, &new_index2)) { + new_index = *new_index2; + } else { hb_bytes_t s = _post.find_glyph_name (old_gid); new_index = glyph_name_to_new_index.get (s); if (new_index == (unsigned)-1) diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic-machine.hh b/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic-machine.hh deleted file mode 100644 index 74bf3ca0fa..0000000000 --- 
a/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic-machine.hh +++ /dev/null @@ -1,603 +0,0 @@ - -#line 1 "hb-ot-shape-complex-indic-machine.rl" -/* - * Copyright © 2011,2012 Google, Inc. - * - * This is part of HarfBuzz, a text shaping library. - * - * Permission is hereby granted, without written agreement and without - * license or royalty fees, to use, copy, modify, and distribute this - * software and its documentation for any purpose, provided that the - * above copyright notice and the following two paragraphs appear in - * all copies of this software. - * - * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR - * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES - * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN - * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - * - * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, - * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS - * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO - * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. - * - * Google Author(s): Behdad Esfahbod - */ - -#ifndef HB_OT_SHAPE_COMPLEX_INDIC_MACHINE_HH -#define HB_OT_SHAPE_COMPLEX_INDIC_MACHINE_HH - -#include "hb.hh" - -enum indic_syllable_type_t { - indic_consonant_syllable, - indic_vowel_syllable, - indic_standalone_cluster, - indic_symbol_cluster, - indic_broken_cluster, - indic_non_indic_cluster, -}; - - -#line 45 "hb-ot-shape-complex-indic-machine.hh" -#define indic_syllable_machine_ex_A 10u -#define indic_syllable_machine_ex_C 1u -#define indic_syllable_machine_ex_CM 17u -#define indic_syllable_machine_ex_CS 19u -#define indic_syllable_machine_ex_DOTTEDCIRCLE 12u -#define indic_syllable_machine_ex_H 4u -#define indic_syllable_machine_ex_M 7u -#define indic_syllable_machine_ex_N 3u -#define indic_syllable_machine_ex_PLACEHOLDER 11u -#define indic_syllable_machine_ex_RS 13u -#define indic_syllable_machine_ex_Ra 16u -#define indic_syllable_machine_ex_Repha 15u -#define indic_syllable_machine_ex_SM 8u -#define indic_syllable_machine_ex_Symbol 18u -#define indic_syllable_machine_ex_V 2u -#define indic_syllable_machine_ex_ZWJ 6u -#define indic_syllable_machine_ex_ZWNJ 5u - - -#line 65 "hb-ot-shape-complex-indic-machine.hh" -static const unsigned char _indic_syllable_machine_trans_keys[] = { - 8u, 8u, 4u, 8u, 5u, 7u, 5u, 8u, 4u, 8u, 6u, 6u, 16u, 16u, 4u, 8u, - 4u, 13u, 4u, 8u, 8u, 8u, 5u, 7u, 5u, 8u, 4u, 8u, 6u, 6u, 16u, 16u, - 4u, 8u, 4u, 13u, 4u, 13u, 4u, 13u, 8u, 8u, 5u, 7u, 5u, 8u, 4u, 8u, - 6u, 6u, 16u, 16u, 4u, 8u, 4u, 8u, 4u, 13u, 8u, 8u, 5u, 7u, 5u, 8u, - 4u, 8u, 6u, 6u, 16u, 16u, 4u, 8u, 4u, 8u, 5u, 8u, 8u, 8u, 1u, 19u, - 3u, 17u, 3u, 17u, 4u, 17u, 1u, 16u, 5u, 10u, 5u, 10u, 10u, 10u, 5u, 10u, - 1u, 16u, 1u, 16u, 1u, 16u, 3u, 10u, 4u, 10u, 5u, 10u, 4u, 10u, 5u, 10u, - 3u, 10u, 5u, 10u, 3u, 17u, 3u, 17u, 3u, 17u, 3u, 17u, 4u, 17u, 1u, 16u, - 3u, 17u, 3u, 17u, 4u, 17u, 1u, 16u, 5u, 10u, 10u, 10u, 5u, 10u, 1u, 16u, - 1u, 16u, 3u, 10u, 4u, 10u, 5u, 10u, 4u, 10u, 5u, 10u, 5u, 10u, 3u, 10u, - 5u, 10u, 3u, 17u, 3u, 17u, 4u, 8u, 3u, 17u, 3u, 17u, 4u, 17u, 1u, 16u, - 3u, 17u, 1u, 16u, 5u, 10u, 10u, 10u, 5u, 10u, 1u, 16u, 1u, 16u, 3u, 10u, - 4u, 10u, 5u, 10u, 3u, 17u, 4u, 10u, 5u, 10u, 5u, 10u, 3u, 10u, 5u, 10u, - 3u, 17u, 4u, 13u, 4u, 8u, 3u, 17u, 3u, 17u, 4u, 17u, 1u, 16u, 3u, 17u, - 1u, 16u, 5u, 10u, 10u, 10u, 5u, 10u, 1u, 16u, 1u, 
16u, 3u, 10u, 4u, 10u, - 5u, 10u, 3u, 17u, 4u, 10u, 5u, 10u, 5u, 10u, 3u, 10u, 5u, 10u, 1u, 17u, - 3u, 17u, 1u, 17u, 4u, 13u, 5u, 10u, 10u, 10u, 5u, 10u, 1u, 16u, 3u, 10u, - 5u, 10u, 5u, 10u, 10u, 10u, 5u, 10u, 1u, 16u, 0 -}; - -static const char _indic_syllable_machine_key_spans[] = { - 1, 5, 3, 4, 5, 1, 1, 5, - 10, 5, 1, 3, 4, 5, 1, 1, - 5, 10, 10, 10, 1, 3, 4, 5, - 1, 1, 5, 5, 10, 1, 3, 4, - 5, 1, 1, 5, 5, 4, 1, 19, - 15, 15, 14, 16, 6, 6, 1, 6, - 16, 16, 16, 8, 7, 6, 7, 6, - 8, 6, 15, 15, 15, 15, 14, 16, - 15, 15, 14, 16, 6, 1, 6, 16, - 16, 8, 7, 6, 7, 6, 6, 8, - 6, 15, 15, 5, 15, 15, 14, 16, - 15, 16, 6, 1, 6, 16, 16, 8, - 7, 6, 15, 7, 6, 6, 8, 6, - 15, 10, 5, 15, 15, 14, 16, 15, - 16, 6, 1, 6, 16, 16, 8, 7, - 6, 15, 7, 6, 6, 8, 6, 17, - 15, 17, 10, 6, 1, 6, 16, 8, - 6, 6, 1, 6, 16 -}; - -static const short _indic_syllable_machine_index_offsets[] = { - 0, 2, 8, 12, 17, 23, 25, 27, - 33, 44, 50, 52, 56, 61, 67, 69, - 71, 77, 88, 99, 110, 112, 116, 121, - 127, 129, 131, 137, 143, 154, 156, 160, - 165, 171, 173, 175, 181, 187, 192, 194, - 214, 230, 246, 261, 278, 285, 292, 294, - 301, 318, 335, 352, 361, 369, 376, 384, - 391, 400, 407, 423, 439, 455, 471, 486, - 503, 519, 535, 550, 567, 574, 576, 583, - 600, 617, 626, 634, 641, 649, 656, 663, - 672, 679, 695, 711, 717, 733, 749, 764, - 781, 797, 814, 821, 823, 830, 847, 864, - 873, 881, 888, 904, 912, 919, 926, 935, - 942, 958, 969, 975, 991, 1007, 1022, 1039, - 1055, 1072, 1079, 1081, 1088, 1105, 1122, 1131, - 1139, 1146, 1162, 1170, 1177, 1184, 1193, 1200, - 1218, 1234, 1252, 1263, 1270, 1272, 1279, 1296, - 1305, 1312, 1319, 1321, 1328 -}; - -static const unsigned char _indic_syllable_machine_indicies[] = { - 1, 0, 2, 3, 3, 4, 1, 0, - 3, 3, 4, 0, 3, 3, 4, 1, - 0, 5, 3, 3, 4, 1, 0, 6, - 0, 7, 0, 8, 3, 3, 4, 1, - 0, 2, 3, 3, 4, 1, 0, 0, - 0, 0, 9, 0, 11, 12, 12, 13, - 14, 10, 14, 10, 12, 12, 13, 10, - 12, 12, 13, 14, 10, 15, 12, 12, - 13, 14, 10, 16, 10, 17, 10, 18, - 12, 12, 13, 14, 10, 11, 12, 12, - 13, 14, 10, 10, 10, 10, 19, 10, - 11, 12, 12, 13, 14, 10, 10, 10, - 10, 20, 10, 22, 23, 23, 24, 25, - 21, 21, 21, 21, 26, 21, 25, 21, - 23, 23, 24, 27, 23, 23, 24, 25, - 21, 28, 23, 23, 24, 25, 21, 29, - 21, 30, 21, 22, 23, 23, 24, 25, - 21, 31, 23, 23, 24, 25, 21, 33, - 34, 34, 35, 36, 32, 32, 32, 32, - 37, 32, 36, 32, 34, 34, 35, 32, - 34, 34, 35, 36, 32, 38, 34, 34, - 35, 36, 32, 39, 32, 40, 32, 33, - 34, 34, 35, 36, 32, 41, 34, 34, - 35, 36, 32, 23, 23, 24, 1, 0, - 43, 42, 45, 46, 47, 48, 49, 50, - 24, 25, 44, 51, 52, 52, 26, 44, - 53, 54, 55, 56, 57, 44, 59, 60, - 61, 62, 4, 1, 58, 63, 58, 58, - 9, 58, 58, 58, 64, 58, 65, 60, - 66, 66, 4, 1, 58, 63, 58, 58, - 58, 58, 58, 58, 64, 58, 60, 66, - 66, 4, 1, 58, 63, 58, 58, 58, - 58, 58, 58, 64, 58, 45, 58, 58, - 58, 67, 68, 58, 1, 58, 63, 58, - 58, 58, 58, 58, 45, 58, 69, 69, - 58, 1, 58, 63, 58, 63, 58, 58, - 70, 58, 63, 58, 63, 58, 63, 58, - 58, 58, 58, 63, 58, 45, 58, 71, - 58, 69, 69, 58, 1, 58, 63, 58, - 58, 58, 58, 58, 45, 58, 45, 58, - 58, 58, 69, 69, 58, 1, 58, 63, - 58, 58, 58, 58, 58, 45, 58, 45, - 58, 58, 58, 69, 68, 58, 1, 58, - 63, 58, 58, 58, 58, 58, 45, 58, - 72, 7, 73, 74, 4, 1, 58, 63, - 58, 7, 73, 74, 4, 1, 58, 63, - 58, 73, 73, 4, 1, 58, 63, 58, - 75, 76, 76, 4, 1, 58, 63, 58, - 67, 77, 58, 1, 58, 63, 58, 67, - 58, 69, 69, 58, 1, 58, 63, 58, - 69, 77, 58, 1, 58, 63, 58, 59, - 60, 66, 66, 4, 1, 58, 63, 58, - 58, 58, 58, 58, 58, 64, 58, 59, - 60, 61, 66, 4, 1, 58, 63, 58, - 58, 9, 58, 58, 58, 64, 58, 79, - 80, 81, 82, 13, 14, 78, 83, 78, - 78, 20, 78, 78, 78, 84, 78, 85, - 
80, 86, 82, 13, 14, 78, 83, 78, - 78, 78, 78, 78, 78, 84, 78, 80, - 86, 82, 13, 14, 78, 83, 78, 78, - 78, 78, 78, 78, 84, 78, 87, 78, - 78, 78, 88, 89, 78, 14, 78, 83, - 78, 78, 78, 78, 78, 87, 78, 90, - 80, 91, 92, 13, 14, 78, 83, 78, - 78, 19, 78, 78, 78, 84, 78, 93, - 80, 86, 86, 13, 14, 78, 83, 78, - 78, 78, 78, 78, 78, 84, 78, 80, - 86, 86, 13, 14, 78, 83, 78, 78, - 78, 78, 78, 78, 84, 78, 87, 78, - 78, 78, 94, 89, 78, 14, 78, 83, - 78, 78, 78, 78, 78, 87, 78, 83, - 78, 78, 95, 78, 83, 78, 83, 78, - 83, 78, 78, 78, 78, 83, 78, 87, - 78, 96, 78, 94, 94, 78, 14, 78, - 83, 78, 78, 78, 78, 78, 87, 78, - 87, 78, 78, 78, 94, 94, 78, 14, - 78, 83, 78, 78, 78, 78, 78, 87, - 78, 97, 17, 98, 99, 13, 14, 78, - 83, 78, 17, 98, 99, 13, 14, 78, - 83, 78, 98, 98, 13, 14, 78, 83, - 78, 100, 101, 101, 13, 14, 78, 83, - 78, 88, 102, 78, 14, 78, 83, 78, - 94, 94, 78, 14, 78, 83, 78, 88, - 78, 94, 94, 78, 14, 78, 83, 78, - 94, 102, 78, 14, 78, 83, 78, 90, - 80, 86, 86, 13, 14, 78, 83, 78, - 78, 78, 78, 78, 78, 84, 78, 90, - 80, 91, 86, 13, 14, 78, 83, 78, - 78, 19, 78, 78, 78, 84, 78, 11, - 12, 12, 13, 14, 78, 79, 80, 86, - 82, 13, 14, 78, 83, 78, 78, 78, - 78, 78, 78, 84, 78, 104, 48, 105, - 105, 24, 25, 103, 51, 103, 103, 103, - 103, 103, 103, 55, 103, 48, 105, 105, - 24, 25, 103, 51, 103, 103, 103, 103, - 103, 103, 55, 103, 106, 103, 103, 103, - 107, 108, 103, 25, 103, 51, 103, 103, - 103, 103, 103, 106, 103, 47, 48, 109, - 110, 24, 25, 103, 51, 103, 103, 26, - 103, 103, 103, 55, 103, 106, 103, 103, - 103, 111, 108, 103, 25, 103, 51, 103, - 103, 103, 103, 103, 106, 103, 51, 103, - 103, 112, 103, 51, 103, 51, 103, 51, - 103, 103, 103, 103, 51, 103, 106, 103, - 113, 103, 111, 111, 103, 25, 103, 51, - 103, 103, 103, 103, 103, 106, 103, 106, - 103, 103, 103, 111, 111, 103, 25, 103, - 51, 103, 103, 103, 103, 103, 106, 103, - 114, 30, 115, 116, 24, 25, 103, 51, - 103, 30, 115, 116, 24, 25, 103, 51, - 103, 115, 115, 24, 25, 103, 51, 103, - 47, 48, 105, 105, 24, 25, 103, 51, - 103, 103, 103, 103, 103, 103, 55, 103, - 117, 118, 118, 24, 25, 103, 51, 103, - 107, 119, 103, 25, 103, 51, 103, 111, - 111, 103, 25, 103, 51, 103, 107, 103, - 111, 111, 103, 25, 103, 51, 103, 111, - 119, 103, 25, 103, 51, 103, 47, 48, - 109, 105, 24, 25, 103, 51, 103, 103, - 26, 103, 103, 103, 55, 103, 22, 23, - 23, 24, 25, 120, 120, 120, 120, 26, - 120, 22, 23, 23, 24, 25, 120, 122, - 123, 124, 125, 35, 36, 121, 126, 121, - 121, 37, 121, 121, 121, 127, 121, 128, - 123, 125, 125, 35, 36, 121, 126, 121, - 121, 121, 121, 121, 121, 127, 121, 123, - 125, 125, 35, 36, 121, 126, 121, 121, - 121, 121, 121, 121, 127, 121, 129, 121, - 121, 121, 130, 131, 121, 36, 121, 126, - 121, 121, 121, 121, 121, 129, 121, 122, - 123, 124, 52, 35, 36, 121, 126, 121, - 121, 37, 121, 121, 121, 127, 121, 129, - 121, 121, 121, 132, 131, 121, 36, 121, - 126, 121, 121, 121, 121, 121, 129, 121, - 126, 121, 121, 133, 121, 126, 121, 126, - 121, 126, 121, 121, 121, 121, 126, 121, - 129, 121, 134, 121, 132, 132, 121, 36, - 121, 126, 121, 121, 121, 121, 121, 129, - 121, 129, 121, 121, 121, 132, 132, 121, - 36, 121, 126, 121, 121, 121, 121, 121, - 129, 121, 135, 40, 136, 137, 35, 36, - 121, 126, 121, 40, 136, 137, 35, 36, - 121, 126, 121, 136, 136, 35, 36, 121, - 126, 121, 122, 123, 125, 125, 35, 36, - 121, 126, 121, 121, 121, 121, 121, 121, - 127, 121, 138, 139, 139, 35, 36, 121, - 126, 121, 130, 140, 121, 36, 121, 126, - 121, 132, 132, 121, 36, 121, 126, 121, - 130, 121, 132, 132, 121, 36, 121, 126, - 121, 132, 140, 121, 36, 121, 126, 121, - 45, 46, 47, 48, 109, 105, 
24, 25, - 103, 51, 52, 52, 26, 103, 103, 45, - 55, 103, 59, 141, 61, 62, 4, 1, - 58, 63, 58, 58, 9, 58, 58, 58, - 64, 58, 45, 46, 47, 48, 142, 143, - 24, 144, 58, 145, 58, 52, 26, 58, - 58, 45, 55, 58, 22, 146, 146, 24, - 144, 58, 63, 58, 58, 26, 58, 145, - 58, 58, 147, 58, 145, 58, 145, 58, - 145, 58, 58, 58, 58, 145, 58, 45, - 58, 71, 22, 146, 146, 24, 144, 58, - 63, 58, 58, 58, 58, 58, 45, 58, - 149, 148, 150, 150, 148, 43, 148, 151, - 148, 150, 150, 148, 43, 148, 151, 148, - 151, 148, 148, 152, 148, 151, 148, 151, - 148, 151, 148, 148, 148, 148, 151, 148, - 45, 120, 120, 120, 120, 120, 120, 120, - 120, 120, 52, 120, 120, 120, 120, 45, - 120, 0 -}; - -static const unsigned char _indic_syllable_machine_trans_targs[] = { - 39, 45, 50, 2, 51, 5, 6, 53, - 57, 58, 39, 67, 11, 73, 68, 14, - 15, 75, 80, 81, 84, 39, 89, 21, - 95, 90, 98, 39, 24, 25, 97, 103, - 39, 112, 30, 118, 113, 121, 33, 34, - 120, 126, 39, 137, 39, 40, 60, 85, - 87, 105, 106, 91, 107, 127, 128, 99, - 135, 140, 39, 41, 43, 8, 59, 46, - 54, 42, 1, 44, 48, 0, 47, 49, - 52, 3, 4, 55, 7, 56, 39, 61, - 63, 18, 83, 69, 76, 62, 9, 64, - 78, 71, 65, 17, 82, 66, 10, 70, - 72, 74, 12, 13, 77, 16, 79, 39, - 86, 26, 88, 101, 93, 19, 104, 20, - 92, 94, 96, 22, 23, 100, 27, 102, - 39, 39, 108, 110, 28, 35, 114, 122, - 109, 111, 124, 116, 29, 115, 117, 119, - 31, 32, 123, 36, 125, 129, 130, 134, - 131, 132, 37, 133, 39, 136, 38, 138, - 139 -}; - -static const char _indic_syllable_machine_trans_actions[] = { - 1, 0, 2, 0, 2, 0, 0, 2, - 2, 2, 3, 2, 0, 2, 0, 0, - 0, 2, 2, 2, 2, 4, 2, 0, - 5, 0, 5, 6, 0, 0, 5, 2, - 7, 2, 0, 2, 0, 2, 0, 0, - 2, 2, 8, 0, 11, 2, 2, 5, - 0, 12, 12, 0, 2, 5, 2, 5, - 2, 0, 13, 2, 0, 0, 2, 0, - 2, 2, 0, 2, 2, 0, 0, 2, - 2, 0, 0, 0, 0, 2, 14, 2, - 0, 0, 2, 0, 2, 2, 0, 2, - 2, 2, 2, 0, 2, 2, 0, 0, - 2, 2, 0, 0, 0, 0, 2, 15, - 5, 0, 5, 2, 2, 0, 5, 0, - 0, 2, 5, 0, 0, 0, 0, 2, - 16, 17, 2, 0, 0, 0, 0, 2, - 2, 2, 2, 2, 0, 0, 2, 2, - 0, 0, 0, 0, 2, 0, 18, 18, - 0, 0, 0, 0, 19, 2, 0, 0, - 0 -}; - -static const char _indic_syllable_machine_to_state_actions[] = { - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 9, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0 -}; - -static const char _indic_syllable_machine_from_state_actions[] = { - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 10, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0 -}; - -static const short _indic_syllable_machine_eof_trans[] = { - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 11, 11, 11, 11, 11, 11, 11, - 11, 11, 11, 22, 22, 28, 22, 22, - 22, 22, 22, 22, 33, 33, 33, 33, - 33, 33, 33, 33, 33, 1, 43, 0, - 59, 59, 59, 59, 59, 59, 59, 59, - 59, 59, 59, 59, 59, 59, 59, 59, - 59, 59, 59, 59, 79, 79, 79, 79, - 79, 79, 79, 79, 79, 79, 79, 79, - 79, 79, 79, 79, 79, 79, 79, 79, - 79, 79, 79, 79, 79, 104, 104, 104, - 104, 104, 104, 104, 104, 104, 104, 104, - 104, 104, 104, 104, 
104, 104, 104, 104, - 104, 121, 121, 122, 122, 122, 122, 122, - 122, 122, 122, 122, 122, 122, 122, 122, - 122, 122, 122, 122, 122, 122, 122, 104, - 59, 59, 59, 59, 59, 59, 59, 149, - 149, 149, 149, 149, 121 -}; - -static const int indic_syllable_machine_start = 39; -static const int indic_syllable_machine_first_final = 39; -static const int indic_syllable_machine_error = -1; - -static const int indic_syllable_machine_en_main = 39; - - -#line 46 "hb-ot-shape-complex-indic-machine.rl" - - - -#line 102 "hb-ot-shape-complex-indic-machine.rl" - - -#define found_syllable(syllable_type) \ - HB_STMT_START { \ - if (0) fprintf (stderr, "syllable %d..%d %s\n", ts, te, #syllable_type); \ - for (unsigned int i = ts; i < te; i++) \ - info[i].syllable() = (syllable_serial << 4) | syllable_type; \ - syllable_serial++; \ - if (unlikely (syllable_serial == 16)) syllable_serial = 1; \ - } HB_STMT_END - -static void -find_syllables_indic (hb_buffer_t *buffer) -{ - unsigned int p, pe, eof, ts, te, act; - int cs; - hb_glyph_info_t *info = buffer->info; - -#line 440 "hb-ot-shape-complex-indic-machine.hh" - { - cs = indic_syllable_machine_start; - ts = 0; - te = 0; - act = 0; - } - -#line 122 "hb-ot-shape-complex-indic-machine.rl" - - - p = 0; - pe = eof = buffer->len; - - unsigned int syllable_serial = 1; - -#line 456 "hb-ot-shape-complex-indic-machine.hh" - { - int _slen; - int _trans; - const unsigned char *_keys; - const unsigned char *_inds; - if ( p == pe ) - goto _test_eof; -_resume: - switch ( _indic_syllable_machine_from_state_actions[cs] ) { - case 10: -#line 1 "NONE" - {ts = p;} - break; -#line 470 "hb-ot-shape-complex-indic-machine.hh" - } - - _keys = _indic_syllable_machine_trans_keys + (cs<<1); - _inds = _indic_syllable_machine_indicies + _indic_syllable_machine_index_offsets[cs]; - - _slen = _indic_syllable_machine_key_spans[cs]; - _trans = _inds[ _slen > 0 && _keys[0] <=( info[p].indic_category()) && - ( info[p].indic_category()) <= _keys[1] ? 
- ( info[p].indic_category()) - _keys[0] : _slen ]; - -_eof_trans: - cs = _indic_syllable_machine_trans_targs[_trans]; - - if ( _indic_syllable_machine_trans_actions[_trans] == 0 ) - goto _again; - - switch ( _indic_syllable_machine_trans_actions[_trans] ) { - case 2: -#line 1 "NONE" - {te = p+1;} - break; - case 11: -#line 98 "hb-ot-shape-complex-indic-machine.rl" - {te = p+1;{ found_syllable (indic_non_indic_cluster); }} - break; - case 13: -#line 93 "hb-ot-shape-complex-indic-machine.rl" - {te = p;p--;{ found_syllable (indic_consonant_syllable); }} - break; - case 14: -#line 94 "hb-ot-shape-complex-indic-machine.rl" - {te = p;p--;{ found_syllable (indic_vowel_syllable); }} - break; - case 17: -#line 95 "hb-ot-shape-complex-indic-machine.rl" - {te = p;p--;{ found_syllable (indic_standalone_cluster); }} - break; - case 19: -#line 96 "hb-ot-shape-complex-indic-machine.rl" - {te = p;p--;{ found_syllable (indic_symbol_cluster); }} - break; - case 15: -#line 97 "hb-ot-shape-complex-indic-machine.rl" - {te = p;p--;{ found_syllable (indic_broken_cluster); }} - break; - case 16: -#line 98 "hb-ot-shape-complex-indic-machine.rl" - {te = p;p--;{ found_syllable (indic_non_indic_cluster); }} - break; - case 1: -#line 93 "hb-ot-shape-complex-indic-machine.rl" - {{p = ((te))-1;}{ found_syllable (indic_consonant_syllable); }} - break; - case 3: -#line 94 "hb-ot-shape-complex-indic-machine.rl" - {{p = ((te))-1;}{ found_syllable (indic_vowel_syllable); }} - break; - case 7: -#line 95 "hb-ot-shape-complex-indic-machine.rl" - {{p = ((te))-1;}{ found_syllable (indic_standalone_cluster); }} - break; - case 8: -#line 96 "hb-ot-shape-complex-indic-machine.rl" - {{p = ((te))-1;}{ found_syllable (indic_symbol_cluster); }} - break; - case 4: -#line 97 "hb-ot-shape-complex-indic-machine.rl" - {{p = ((te))-1;}{ found_syllable (indic_broken_cluster); }} - break; - case 6: -#line 1 "NONE" - { switch( act ) { - case 1: - {{p = ((te))-1;} found_syllable (indic_consonant_syllable); } - break; - case 5: - {{p = ((te))-1;} found_syllable (indic_broken_cluster); } - break; - case 6: - {{p = ((te))-1;} found_syllable (indic_non_indic_cluster); } - break; - } - } - break; - case 18: -#line 1 "NONE" - {te = p+1;} -#line 93 "hb-ot-shape-complex-indic-machine.rl" - {act = 1;} - break; - case 5: -#line 1 "NONE" - {te = p+1;} -#line 97 "hb-ot-shape-complex-indic-machine.rl" - {act = 5;} - break; - case 12: -#line 1 "NONE" - {te = p+1;} -#line 98 "hb-ot-shape-complex-indic-machine.rl" - {act = 6;} - break; -#line 573 "hb-ot-shape-complex-indic-machine.hh" - } - -_again: - switch ( _indic_syllable_machine_to_state_actions[cs] ) { - case 9: -#line 1 "NONE" - {ts = 0;} - break; -#line 582 "hb-ot-shape-complex-indic-machine.hh" - } - - if ( ++p != pe ) - goto _resume; - _test_eof: {} - if ( p == eof ) - { - if ( _indic_syllable_machine_eof_trans[cs] > 0 ) { - _trans = _indic_syllable_machine_eof_trans[cs] - 1; - goto _eof_trans; - } - } - - } - -#line 130 "hb-ot-shape-complex-indic-machine.rl" - -} - -#undef found_syllable - -#endif /* HB_OT_SHAPE_COMPLEX_INDIC_MACHINE_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic-table.cc b/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic-table.cc deleted file mode 100644 index 326aa9f96e..0000000000 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic-table.cc +++ /dev/null @@ -1,501 +0,0 @@ -/* == Start of generated table == */ -/* - * The following table is generated by running: - * - * ./gen-indic-table.py IndicSyllabicCategory.txt IndicPositionalCategory.txt Blocks.txt 
- * - * on files with these headers: - * - * # IndicSyllabicCategory-14.0.0.txt - * # Date: 2021-05-22, 01:01:00 GMT [KW, RP] - * # IndicPositionalCategory-14.0.0.txt - * # Date: 2021-05-22, 01:01:00 GMT [KW, RP] - * # Blocks-14.0.0.txt - * # Date: 2021-01-22, 23:29:00 GMT [KW] - */ - -#include "hb.hh" - -#ifndef HB_NO_OT_SHAPE - -#include "hb-ot-shape-complex-indic.hh" - -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wunused-macros" - -#define ISC_A INDIC_SYLLABIC_CATEGORY_AVAGRAHA /* 17 chars; Avagraha */ -#define ISC_Bi INDIC_SYLLABIC_CATEGORY_BINDU /* 91 chars; Bindu */ -#define ISC_BJN INDIC_SYLLABIC_CATEGORY_BRAHMI_JOINING_NUMBER /* 20 chars; Brahmi_Joining_Number */ -#define ISC_Ca INDIC_SYLLABIC_CATEGORY_CANTILLATION_MARK /* 59 chars; Cantillation_Mark */ -#define ISC_C INDIC_SYLLABIC_CATEGORY_CONSONANT /* 2206 chars; Consonant */ -#define ISC_CD INDIC_SYLLABIC_CATEGORY_CONSONANT_DEAD /* 14 chars; Consonant_Dead */ -#define ISC_CF INDIC_SYLLABIC_CATEGORY_CONSONANT_FINAL /* 70 chars; Consonant_Final */ -#define ISC_CHL INDIC_SYLLABIC_CATEGORY_CONSONANT_HEAD_LETTER /* 5 chars; Consonant_Head_Letter */ -#define ISC_CIP INDIC_SYLLABIC_CATEGORY_CONSONANT_INITIAL_POSTFIXED /* 1 chars; Consonant_Initial_Postfixed */ -#define ISC_CK INDIC_SYLLABIC_CATEGORY_CONSONANT_KILLER /* 2 chars; Consonant_Killer */ -#define ISC_CM INDIC_SYLLABIC_CATEGORY_CONSONANT_MEDIAL /* 31 chars; Consonant_Medial */ -#define ISC_CP INDIC_SYLLABIC_CATEGORY_CONSONANT_PLACEHOLDER /* 22 chars; Consonant_Placeholder */ -#define ISC_CPR INDIC_SYLLABIC_CATEGORY_CONSONANT_PRECEDING_REPHA /* 3 chars; Consonant_Preceding_Repha */ -#define ISC_CPrf INDIC_SYLLABIC_CATEGORY_CONSONANT_PREFIXED /* 10 chars; Consonant_Prefixed */ -#define ISC_CS INDIC_SYLLABIC_CATEGORY_CONSONANT_SUBJOINED /* 94 chars; Consonant_Subjoined */ -#define ISC_CSR INDIC_SYLLABIC_CATEGORY_CONSONANT_SUCCEEDING_REPHA /* 1 chars; Consonant_Succeeding_Repha */ -#define ISC_CWS INDIC_SYLLABIC_CATEGORY_CONSONANT_WITH_STACKER /* 8 chars; Consonant_With_Stacker */ -#define ISC_GM INDIC_SYLLABIC_CATEGORY_GEMINATION_MARK /* 3 chars; Gemination_Mark */ -#define ISC_IS INDIC_SYLLABIC_CATEGORY_INVISIBLE_STACKER /* 12 chars; Invisible_Stacker */ -#define ISC_ZWJ INDIC_SYLLABIC_CATEGORY_JOINER /* 1 chars; Joiner */ -#define ISC_ML INDIC_SYLLABIC_CATEGORY_MODIFYING_LETTER /* 1 chars; Modifying_Letter */ -#define ISC_ZWNJ INDIC_SYLLABIC_CATEGORY_NON_JOINER /* 1 chars; Non_Joiner */ -#define ISC_N INDIC_SYLLABIC_CATEGORY_NUKTA /* 32 chars; Nukta */ -#define ISC_Nd INDIC_SYLLABIC_CATEGORY_NUMBER /* 491 chars; Number */ -#define ISC_NJ INDIC_SYLLABIC_CATEGORY_NUMBER_JOINER /* 1 chars; Number_Joiner */ -#define ISC_x INDIC_SYLLABIC_CATEGORY_OTHER /* 1 chars; Other */ -#define ISC_PK INDIC_SYLLABIC_CATEGORY_PURE_KILLER /* 25 chars; Pure_Killer */ -#define ISC_RS INDIC_SYLLABIC_CATEGORY_REGISTER_SHIFTER /* 2 chars; Register_Shifter */ -#define ISC_SM INDIC_SYLLABIC_CATEGORY_SYLLABLE_MODIFIER /* 25 chars; Syllable_Modifier */ -#define ISC_TL INDIC_SYLLABIC_CATEGORY_TONE_LETTER /* 7 chars; Tone_Letter */ -#define ISC_TM INDIC_SYLLABIC_CATEGORY_TONE_MARK /* 42 chars; Tone_Mark */ -#define ISC_V INDIC_SYLLABIC_CATEGORY_VIRAMA /* 27 chars; Virama */ -#define ISC_Vs INDIC_SYLLABIC_CATEGORY_VISARGA /* 35 chars; Visarga */ -#define ISC_Vo INDIC_SYLLABIC_CATEGORY_VOWEL /* 30 chars; Vowel */ -#define ISC_M INDIC_SYLLABIC_CATEGORY_VOWEL_DEPENDENT /* 686 chars; Vowel_Dependent */ -#define ISC_VI INDIC_SYLLABIC_CATEGORY_VOWEL_INDEPENDENT /* 486 chars; Vowel_Independent */ 
- -#define IMC_B INDIC_MATRA_CATEGORY_BOTTOM /* 352 chars; Bottom */ -#define IMC_BL INDIC_MATRA_CATEGORY_BOTTOM_AND_LEFT /* 1 chars; Bottom_And_Left */ -#define IMC_BR INDIC_MATRA_CATEGORY_BOTTOM_AND_RIGHT /* 4 chars; Bottom_And_Right */ -#define IMC_L INDIC_MATRA_CATEGORY_LEFT /* 64 chars; Left */ -#define IMC_LR INDIC_MATRA_CATEGORY_LEFT_AND_RIGHT /* 22 chars; Left_And_Right */ -#define IMC_x INDIC_MATRA_CATEGORY_NOT_APPLICABLE /* 1 chars; Not_Applicable */ -#define IMC_O INDIC_MATRA_CATEGORY_OVERSTRUCK /* 10 chars; Overstruck */ -#define IMC_R INDIC_MATRA_CATEGORY_RIGHT /* 290 chars; Right */ -#define IMC_T INDIC_MATRA_CATEGORY_TOP /* 418 chars; Top */ -#define IMC_TB INDIC_MATRA_CATEGORY_TOP_AND_BOTTOM /* 10 chars; Top_And_Bottom */ -#define IMC_TBL INDIC_MATRA_CATEGORY_TOP_AND_BOTTOM_AND_LEFT /* 2 chars; Top_And_Bottom_And_Left */ -#define IMC_TBR INDIC_MATRA_CATEGORY_TOP_AND_BOTTOM_AND_RIGHT /* 1 chars; Top_And_Bottom_And_Right */ -#define IMC_TL INDIC_MATRA_CATEGORY_TOP_AND_LEFT /* 6 chars; Top_And_Left */ -#define IMC_TLR INDIC_MATRA_CATEGORY_TOP_AND_LEFT_AND_RIGHT /* 4 chars; Top_And_Left_And_Right */ -#define IMC_TR INDIC_MATRA_CATEGORY_TOP_AND_RIGHT /* 13 chars; Top_And_Right */ -#define IMC_VOL INDIC_MATRA_CATEGORY_VISUAL_ORDER_LEFT /* 19 chars; Visual_Order_Left */ - -#pragma GCC diagnostic pop - -#define _(S,M) INDIC_COMBINE_CATEGORIES (ISC_##S, IMC_##M) - - -static const uint16_t indic_table[] = { - - -#define indic_offset_0x0028u 0 - - - /* Basic Latin */ - - /* 0028 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(CP,x), _(x,x), _(x,x), - /* 0030 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 0038 */ _(Nd,x), _(Nd,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - -#define indic_offset_0x00b0u 24 - - - /* Latin-1 Supplement */ - - /* 00B0 */ _(x,x), _(x,x), _(SM,x), _(SM,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 00B8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 00C0 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 00C8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 00D0 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(CP,x), - -#define indic_offset_0x0900u 64 - - - /* Devanagari */ - - /* 0900 */ _(Bi,T), _(Bi,T), _(Bi,T), _(Vs,R), _(VI,x), _(VI,x), _(VI,x), _(VI,x), - /* 0908 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), - /* 0910 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(C,x), _(C,x), _(C,x), - /* 0918 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0920 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0928 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0930 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0938 */ _(C,x), _(C,x), _(M,T), _(M,R), _(N,B), _(A,x), _(M,R), _(M,L), - /* 0940 */ _(M,R), _(M,B), _(M,B), _(M,B), _(M,B), _(M,T), _(M,T), _(M,T), - /* 0948 */ _(M,T), _(M,R), _(M,R), _(M,R), _(M,R), _(V,B), _(M,L), _(M,R), - /* 0950 */ _(x,x), _(Ca,T), _(Ca,B), _(x,T), _(x,T), _(M,T), _(M,B), _(M,B), - /* 0958 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0960 */ _(VI,x), _(VI,x), _(M,B), _(M,B), _(x,x), _(x,x), _(Nd,x), _(Nd,x), - /* 0968 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 0970 */ _(x,x), _(x,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), - /* 0978 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - - /* Bengali */ - - /* 
0980 */ _(CP,x), _(Bi,T), _(Bi,R), _(Vs,R), _(x,x), _(VI,x), _(VI,x), _(VI,x), - /* 0988 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(x,x), _(x,x), _(VI,x), - /* 0990 */ _(VI,x), _(x,x), _(x,x), _(VI,x), _(VI,x), _(C,x), _(C,x), _(C,x), - /* 0998 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 09A0 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 09A8 */ _(C,x), _(x,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 09B0 */ _(C,x), _(x,x), _(C,x), _(x,x), _(x,x), _(x,x), _(C,x), _(C,x), - /* 09B8 */ _(C,x), _(C,x), _(x,x), _(x,x), _(N,B), _(A,x), _(M,R), _(M,L), - /* 09C0 */ _(M,R), _(M,B), _(M,B), _(M,B), _(M,B), _(x,x), _(x,x), _(M,L), - /* 09C8 */ _(M,L), _(x,x), _(x,x), _(M,LR), _(M,LR), _(V,B), _(CD,x), _(x,x), - /* 09D0 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(M,R), - /* 09D8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(C,x), _(C,x), _(x,x), _(C,x), - /* 09E0 */ _(VI,x), _(VI,x), _(M,B), _(M,B), _(x,x), _(x,x), _(Nd,x), _(Nd,x), - /* 09E8 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 09F0 */ _(C,x), _(C,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 09F8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(Bi,x), _(x,x), _(SM,T), _(x,x), - - /* Gurmukhi */ - - /* 0A00 */ _(x,x), _(Bi,T), _(Bi,T), _(Vs,R), _(x,x), _(VI,x), _(VI,x), _(VI,x), - /* 0A08 */ _(VI,x), _(VI,x), _(VI,x), _(x,x), _(x,x), _(x,x), _(x,x), _(VI,x), - /* 0A10 */ _(VI,x), _(x,x), _(x,x), _(VI,x), _(VI,x), _(C,x), _(C,x), _(C,x), - /* 0A18 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0A20 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0A28 */ _(C,x), _(x,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0A30 */ _(C,x), _(x,x), _(C,x), _(C,x), _(x,x), _(C,x), _(C,x), _(x,x), - /* 0A38 */ _(C,x), _(C,x), _(x,x), _(x,x), _(N,B), _(x,x), _(M,R), _(M,L), - /* 0A40 */ _(M,R), _(M,B), _(M,B), _(x,x), _(x,x), _(x,x), _(x,x), _(M,T), - /* 0A48 */ _(M,T), _(x,x), _(x,x), _(M,T), _(M,T), _(V,B), _(x,x), _(x,x), - /* 0A50 */ _(x,x), _(Ca,B), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 0A58 */ _(x,x), _(C,x), _(C,x), _(C,x), _(C,x), _(x,x), _(C,x), _(x,x), - /* 0A60 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(Nd,x), _(Nd,x), - /* 0A68 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 0A70 */ _(Bi,T), _(GM,T), _(CP,x), _(CP,x), _(x,x), _(CM,B), _(x,x), _(x,x), - /* 0A78 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - - /* Gujarati */ - - /* 0A80 */ _(x,x), _(Bi,T), _(Bi,T), _(Vs,R), _(x,x), _(VI,x), _(VI,x), _(VI,x), - /* 0A88 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(x,x), _(VI,x), - /* 0A90 */ _(VI,x), _(VI,x), _(x,x), _(VI,x), _(VI,x), _(C,x), _(C,x), _(C,x), - /* 0A98 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0AA0 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0AA8 */ _(C,x), _(x,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0AB0 */ _(C,x), _(x,x), _(C,x), _(C,x), _(x,x), _(C,x), _(C,x), _(C,x), - /* 0AB8 */ _(C,x), _(C,x), _(x,x), _(x,x), _(N,B), _(A,x), _(M,R), _(M,L), - /* 0AC0 */ _(M,R), _(M,B), _(M,B), _(M,B), _(M,B), _(M,T), _(x,x), _(M,T), - /* 0AC8 */ _(M,T), _(M,TR), _(x,x), _(M,R), _(M,R), _(V,B), _(x,x), _(x,x), - /* 0AD0 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 0AD8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 0AE0 */ _(VI,x), _(VI,x), _(M,B), _(M,B), 
_(x,x), _(x,x), _(Nd,x), _(Nd,x), - /* 0AE8 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 0AF0 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 0AF8 */ _(x,x), _(C,x), _(Ca,T), _(Ca,T), _(Ca,T), _(N,T), _(N,T), _(N,T), - - /* Oriya */ - - /* 0B00 */ _(x,x), _(Bi,T), _(Bi,R), _(Vs,R), _(x,x), _(VI,x), _(VI,x), _(VI,x), - /* 0B08 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(x,x), _(x,x), _(VI,x), - /* 0B10 */ _(VI,x), _(x,x), _(x,x), _(VI,x), _(VI,x), _(C,x), _(C,x), _(C,x), - /* 0B18 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0B20 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0B28 */ _(C,x), _(x,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0B30 */ _(C,x), _(x,x), _(C,x), _(C,x), _(x,x), _(C,x), _(C,x), _(C,x), - /* 0B38 */ _(C,x), _(C,x), _(x,x), _(x,x), _(N,B), _(A,x), _(M,R), _(M,T), - /* 0B40 */ _(M,R), _(M,B), _(M,B), _(M,B), _(M,B), _(x,x), _(x,x), _(M,L), - /* 0B48 */ _(M,TL), _(x,x), _(x,x), _(M,LR),_(M,TLR), _(V,B), _(x,x), _(x,x), - /* 0B50 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(M,T), _(M,T), _(M,TR), - /* 0B58 */ _(x,x), _(x,x), _(x,x), _(x,x), _(C,x), _(C,x), _(x,x), _(C,x), - /* 0B60 */ _(VI,x), _(VI,x), _(M,B), _(M,B), _(x,x), _(x,x), _(Nd,x), _(Nd,x), - /* 0B68 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 0B70 */ _(x,x), _(C,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 0B78 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - - /* Tamil */ - - /* 0B80 */ _(x,x), _(x,x), _(Bi,T), _(ML,x), _(x,x), _(VI,x), _(VI,x), _(VI,x), - /* 0B88 */ _(VI,x), _(VI,x), _(VI,x), _(x,x), _(x,x), _(x,x), _(VI,x), _(VI,x), - /* 0B90 */ _(VI,x), _(x,x), _(VI,x), _(VI,x), _(VI,x), _(C,x), _(x,x), _(x,x), - /* 0B98 */ _(x,x), _(C,x), _(C,x), _(x,x), _(C,x), _(x,x), _(C,x), _(C,x), - /* 0BA0 */ _(x,x), _(x,x), _(x,x), _(C,x), _(C,x), _(x,x), _(x,x), _(x,x), - /* 0BA8 */ _(C,x), _(C,x), _(C,x), _(x,x), _(x,x), _(x,x), _(C,x), _(C,x), - /* 0BB0 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0BB8 */ _(C,x), _(C,x), _(x,x), _(x,x), _(x,x), _(x,x), _(M,R), _(M,R), - /* 0BC0 */ _(M,T), _(M,R), _(M,R), _(x,x), _(x,x), _(x,x), _(M,L), _(M,L), - /* 0BC8 */ _(M,L), _(x,x), _(M,LR), _(M,LR), _(M,LR), _(V,T), _(x,x), _(x,x), - /* 0BD0 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(M,R), - /* 0BD8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 0BE0 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(Nd,x), _(Nd,x), - /* 0BE8 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 0BF0 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 0BF8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - - /* Telugu */ - - /* 0C00 */ _(Bi,T), _(Bi,R), _(Bi,R), _(Vs,R), _(Bi,T), _(VI,x), _(VI,x), _(VI,x), - /* 0C08 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(x,x), _(VI,x), _(VI,x), - /* 0C10 */ _(VI,x), _(x,x), _(VI,x), _(VI,x), _(VI,x), _(C,x), _(C,x), _(C,x), - /* 0C18 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0C20 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0C28 */ _(C,x), _(x,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0C30 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0C38 */ _(C,x), _(C,x), _(x,x), _(x,x), _(N,B), _(A,x), _(M,T), _(M,T), - /* 0C40 */ _(M,T), _(M,R), _(M,R), _(M,R), _(M,R), _(x,x), 
_(M,T), _(M,T), - /* 0C48 */ _(M,TB), _(x,x), _(M,T), _(M,T), _(M,T), _(V,T), _(x,x), _(x,x), - /* 0C50 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(M,T), _(M,B), _(x,x), - /* 0C58 */ _(C,x), _(C,x), _(C,x), _(x,x), _(x,x), _(CD,x), _(x,x), _(x,x), - /* 0C60 */ _(VI,x), _(VI,x), _(M,B), _(M,B), _(x,x), _(x,x), _(Nd,x), _(Nd,x), - /* 0C68 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 0C70 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 0C78 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - - /* Kannada */ - - /* 0C80 */ _(Bi,x), _(Bi,T), _(Bi,R), _(Vs,R), _(x,x), _(VI,x), _(VI,x), _(VI,x), - /* 0C88 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(x,x), _(VI,x), _(VI,x), - /* 0C90 */ _(VI,x), _(x,x), _(VI,x), _(VI,x), _(VI,x), _(C,x), _(C,x), _(C,x), - /* 0C98 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0CA0 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0CA8 */ _(C,x), _(x,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0CB0 */ _(C,x), _(C,x), _(C,x), _(C,x), _(x,x), _(C,x), _(C,x), _(C,x), - /* 0CB8 */ _(C,x), _(C,x), _(x,x), _(x,x), _(N,B), _(A,x), _(M,R), _(M,T), - /* 0CC0 */ _(M,TR), _(M,R), _(M,R), _(M,R), _(M,R), _(x,x), _(M,T), _(M,TR), - /* 0CC8 */ _(M,TR), _(x,x), _(M,TR), _(M,TR), _(M,T), _(V,T), _(x,x), _(x,x), - /* 0CD0 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(M,R), _(M,R), _(x,x), - /* 0CD8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(CD,x), _(C,x), _(x,x), - /* 0CE0 */ _(VI,x), _(VI,x), _(M,B), _(M,B), _(x,x), _(x,x), _(Nd,x), _(Nd,x), - /* 0CE8 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 0CF0 */ _(x,x),_(CWS,x),_(CWS,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 0CF8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - - /* Malayalam */ - - /* 0D00 */ _(Bi,T), _(Bi,T), _(Bi,R), _(Vs,R), _(Bi,x), _(VI,x), _(VI,x), _(VI,x), - /* 0D08 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(x,x), _(VI,x), _(VI,x), - /* 0D10 */ _(VI,x), _(x,x), _(VI,x), _(VI,x), _(VI,x), _(C,x), _(C,x), _(C,x), - /* 0D18 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0D20 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0D28 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0D30 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0D38 */ _(C,x), _(C,x), _(C,x), _(PK,T), _(PK,T), _(A,x), _(M,R), _(M,R), - /* 0D40 */ _(M,R), _(M,R), _(M,R), _(M,B), _(M,B), _(x,x), _(M,L), _(M,L), - /* 0D48 */ _(M,L), _(x,x), _(M,LR), _(M,LR), _(M,LR), _(V,T),_(CPR,T), _(x,x), - /* 0D50 */ _(x,x), _(x,x), _(x,x), _(x,x), _(CD,x), _(CD,x), _(CD,x), _(M,R), - /* 0D58 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(VI,x), - /* 0D60 */ _(VI,x), _(VI,x), _(M,B), _(M,B), _(x,x), _(x,x), _(Nd,x), _(Nd,x), - /* 0D68 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 0D70 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 0D78 */ _(x,x), _(x,x), _(CD,x), _(CD,x), _(CD,x), _(CD,x), _(CD,x), _(CD,x), - - /* Sinhala */ - - /* 0D80 */ _(x,x), _(Bi,T), _(Bi,R), _(Vs,R), _(x,x), _(VI,x), _(VI,x), _(VI,x), - /* 0D88 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), - /* 0D90 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(x,x), - /* 0D98 */ _(x,x), _(x,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0DA0 */ _(C,x), _(C,x), _(C,x), _(C,x), 
_(C,x), _(C,x), _(C,x), _(C,x), - /* 0DA8 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0DB0 */ _(C,x), _(C,x), _(x,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 0DB8 */ _(C,x), _(C,x), _(C,x), _(C,x), _(x,x), _(C,x), _(x,x), _(x,x), - /* 0DC0 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(x,x), - /* 0DC8 */ _(x,x), _(x,x), _(V,T), _(x,x), _(x,x), _(x,x), _(x,x), _(M,R), - /* 0DD0 */ _(M,R), _(M,R), _(M,T), _(M,T), _(M,B), _(x,x), _(M,B), _(x,x), - /* 0DD8 */ _(M,R), _(M,L), _(M,TL), _(M,L), _(M,LR),_(M,TLR), _(M,LR), _(M,R), - /* 0DE0 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(Nd,x), _(Nd,x), - /* 0DE8 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 0DF0 */ _(x,x), _(x,x), _(M,R), _(M,R), _(x,x), _(x,x), _(x,x), _(x,x), - -#define indic_offset_0x1000u 1336 - - - /* Myanmar */ - - /* 1000 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 1008 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 1010 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 1018 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 1020 */ _(C,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), - /* 1028 */ _(VI,x), _(VI,x), _(VI,x), _(M,R), _(M,R), _(M,T), _(M,T), _(M,B), - /* 1030 */ _(M,B), _(M,L), _(M,T), _(M,T), _(M,T), _(M,T), _(Bi,T), _(TM,B), - /* 1038 */ _(Vs,R), _(IS,x), _(PK,T), _(CM,R),_(CM,TBL), _(CM,B), _(CM,B), _(C,x), - /* 1040 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 1048 */ _(Nd,x), _(Nd,x), _(x,x), _(CP,x), _(x,x), _(x,x), _(CP,x), _(x,x), - /* 1050 */ _(C,x), _(C,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(M,R), _(M,R), - /* 1058 */ _(M,B), _(M,B), _(C,x), _(C,x), _(C,x), _(C,x), _(CM,B), _(CM,B), - /* 1060 */ _(CM,B), _(C,x), _(M,R), _(TM,R), _(TM,R), _(C,x), _(C,x), _(M,R), - /* 1068 */ _(M,R), _(TM,R), _(TM,R), _(TM,R), _(TM,R), _(TM,R), _(C,x), _(C,x), - /* 1070 */ _(C,x), _(M,T), _(M,T), _(M,T), _(M,T), _(C,x), _(C,x), _(C,x), - /* 1078 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 1080 */ _(C,x), _(C,x), _(CM,B), _(M,R), _(M,L), _(M,T), _(M,T), _(TM,R), - /* 1088 */ _(TM,R), _(TM,R), _(TM,R), _(TM,R), _(TM,R), _(TM,B), _(C,x), _(TM,R), - /* 1090 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 1098 */ _(Nd,x), _(Nd,x), _(TM,R), _(TM,R), _(M,R), _(M,T), _(x,x), _(x,x), - -#define indic_offset_0x1780u 1496 - - - /* Khmer */ - - /* 1780 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 1788 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 1790 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 1798 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* 17A0 */ _(C,x), _(C,x), _(C,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), - /* 17A8 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(VI,x), - /* 17B0 */ _(VI,x), _(VI,x), _(VI,x), _(VI,x), _(x,x), _(x,x), _(M,R), _(M,T), - /* 17B8 */ _(M,T), _(M,T), _(M,T), _(M,B), _(M,B), _(M,B), _(M,TL),_(M,TLR), - /* 17C0 */ _(M,LR), _(M,L), _(M,L), _(M,L), _(M,LR), _(M,LR), _(Bi,T), _(Vs,R), - /* 17C8 */ _(M,R), _(RS,T), _(RS,T), _(SM,T),_(CSR,T), _(CK,T), _(SM,T), _(SM,T), - /* 17D0 */ _(SM,T), _(PK,T), _(IS,x), _(SM,T), _(x,x), _(x,x), _(x,x), _(x,x), - /* 17D8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(A,x), _(SM,T), _(x,x), _(x,x), - /* 17E0 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), 
_(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* 17E8 */ _(Nd,x), _(Nd,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - -#define indic_offset_0x1cd0u 1608 - - - /* Vedic Extensions */ - - /* 1CD0 */ _(Ca,T), _(Ca,T), _(Ca,T), _(x,x), _(Ca,O), _(Ca,B), _(Ca,B), _(Ca,B), - /* 1CD8 */ _(Ca,B), _(Ca,B), _(Ca,T), _(Ca,T), _(Ca,B), _(Ca,B), _(Ca,B), _(Ca,B), - /* 1CE0 */ _(Ca,T), _(Ca,R), _(x,O), _(x,O), _(x,O), _(x,O), _(x,O), _(x,O), - /* 1CE8 */ _(x,O), _(x,x), _(x,x), _(x,x), _(x,x), _(x,B), _(x,x), _(x,x), - /* 1CF0 */ _(x,x), _(x,x), _(CD,x), _(CD,x), _(Ca,T),_(CWS,x),_(CWS,x), _(Ca,R), - /* 1CF8 */ _(Ca,x), _(Ca,x), _(CP,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - -#define indic_offset_0x2008u 1656 - - - /* General Punctuation */ - - /* 2008 */ _(x,x), _(x,x), _(x,x), _(x,x),_(ZWNJ,x),_(ZWJ,x), _(x,x), _(x,x), - /* 2010 */ _(CP,x), _(CP,x), _(CP,x), _(CP,x), _(CP,x), _(x,x), _(x,x), _(x,x), - -#define indic_offset_0x2070u 1672 - - - /* Superscripts and Subscripts */ - - /* 2070 */ _(x,x), _(x,x), _(x,x), _(x,x), _(SM,x), _(x,x), _(x,x), _(x,x), - /* 2078 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* 2080 */ _(x,x), _(x,x), _(SM,x), _(SM,x), _(SM,x), _(x,x), _(x,x), _(x,x), - -#define indic_offset_0xa8e0u 1696 - - - /* Devanagari Extended */ - - /* A8E0 */ _(Ca,T), _(Ca,T), _(Ca,T), _(Ca,T), _(Ca,T), _(Ca,T), _(Ca,T), _(Ca,T), - /* A8E8 */ _(Ca,T), _(Ca,T), _(Ca,T), _(Ca,T), _(Ca,T), _(Ca,T), _(Ca,T), _(Ca,T), - /* A8F0 */ _(Ca,T), _(Ca,T), _(Bi,x), _(Bi,x), _(x,x), _(x,x), _(x,x), _(x,x), - /* A8F8 */ _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(x,x), _(VI,x), _(M,T), - -#define indic_offset_0xa9e0u 1728 - - - /* Myanmar Extended-B */ - - /* A9E0 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(M,T), _(x,x), _(C,x), - /* A9E8 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* A9F0 */ _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), _(Nd,x), - /* A9F8 */ _(Nd,x), _(Nd,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(x,x), - -#define indic_offset_0xaa60u 1760 - - - /* Myanmar Extended-A */ - - /* AA60 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* AA68 */ _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), _(C,x), - /* AA70 */ _(x,x), _(C,x), _(C,x), _(C,x), _(CP,x), _(CP,x), _(CP,x), _(x,x), - /* AA78 */ _(x,x), _(x,x), _(C,x), _(TM,R), _(TM,T), _(TM,R), _(C,x), _(C,x), - -}; /* Table items: 1792; occupancy: 71% */ - -uint16_t -hb_indic_get_categories (hb_codepoint_t u) -{ - switch (u >> 12) - { - case 0x0u: - if (unlikely (u == 0x00A0u)) return _(CP,x); - if (hb_in_range<hb_codepoint_t> (u, 0x0028u, 0x003Fu)) return indic_table[u - 0x0028u + indic_offset_0x0028u]; - if (hb_in_range<hb_codepoint_t> (u, 0x00B0u, 0x00D7u)) return indic_table[u - 0x00B0u + indic_offset_0x00b0u]; - if (hb_in_range<hb_codepoint_t> (u, 0x0900u, 0x0DF7u)) return indic_table[u - 0x0900u + indic_offset_0x0900u]; - break; - - case 0x1u: - if (hb_in_range<hb_codepoint_t> (u, 0x1000u, 0x109Fu)) return indic_table[u - 0x1000u + indic_offset_0x1000u]; - if (hb_in_range<hb_codepoint_t> (u, 0x1780u, 0x17EFu)) return indic_table[u - 0x1780u + indic_offset_0x1780u]; - if (hb_in_range<hb_codepoint_t> (u, 0x1CD0u, 0x1CFFu)) return indic_table[u - 0x1CD0u + indic_offset_0x1cd0u]; - break; - - case 0x2u: - if (unlikely (u == 0x25CCu)) return _(CP,x); - if (hb_in_range<hb_codepoint_t> (u, 0x2008u, 0x2017u)) return indic_table[u - 0x2008u + indic_offset_0x2008u]; - if (hb_in_range<hb_codepoint_t> (u, 0x2070u, 0x2087u)) return indic_table[u - 0x2070u + 
indic_offset_0x2070u]; - break; - - case 0xAu: - if (hb_in_range<hb_codepoint_t> (u, 0xA8E0u, 0xA8FFu)) return indic_table[u - 0xA8E0u + indic_offset_0xa8e0u]; - if (hb_in_range<hb_codepoint_t> (u, 0xA9E0u, 0xA9FFu)) return indic_table[u - 0xA9E0u + indic_offset_0xa9e0u]; - if (hb_in_range<hb_codepoint_t> (u, 0xAA60u, 0xAA7Fu)) return indic_table[u - 0xAA60u + indic_offset_0xaa60u]; - break; - - default: - break; - } - return _(x,x); -} - -#undef _ - -#undef ISC_A -#undef ISC_Bi -#undef ISC_BJN -#undef ISC_Ca -#undef ISC_C -#undef ISC_CD -#undef ISC_CF -#undef ISC_CHL -#undef ISC_CIP -#undef ISC_CK -#undef ISC_CM -#undef ISC_CP -#undef ISC_CPR -#undef ISC_CPrf -#undef ISC_CS -#undef ISC_CSR -#undef ISC_CWS -#undef ISC_GM -#undef ISC_IS -#undef ISC_ZWJ -#undef ISC_ML -#undef ISC_ZWNJ -#undef ISC_N -#undef ISC_Nd -#undef ISC_NJ -#undef ISC_x -#undef ISC_PK -#undef ISC_RS -#undef ISC_SM -#undef ISC_TL -#undef ISC_TM -#undef ISC_V -#undef ISC_Vs -#undef ISC_Vo -#undef ISC_M -#undef ISC_VI - -#undef IMC_B -#undef IMC_BL -#undef IMC_BR -#undef IMC_L -#undef IMC_LR -#undef IMC_x -#undef IMC_O -#undef IMC_R -#undef IMC_T -#undef IMC_TB -#undef IMC_TBL -#undef IMC_TBR -#undef IMC_TL -#undef IMC_TLR -#undef IMC_TR -#undef IMC_VOL - -#endif - -/* == End of generated table == */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic.hh b/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic.hh deleted file mode 100644 index da77a2887c..0000000000 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic.hh +++ /dev/null @@ -1,431 +0,0 @@ -/* - * Copyright © 2012 Google, Inc. - * - * This is part of HarfBuzz, a text shaping library. - * - * Permission is hereby granted, without written agreement and without - * license or royalty fees, to use, copy, modify, and distribute this - * software and its documentation for any purpose, provided that the - * above copyright notice and the following two paragraphs appear in - * all copies of this software. - * - * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR - * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES - * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN - * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - * - * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, - * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS - * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO - * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. - * - * Google Author(s): Behdad Esfahbod - */ - -#ifndef HB_OT_SHAPE_COMPLEX_INDIC_HH -#define HB_OT_SHAPE_COMPLEX_INDIC_HH - -#include "hb.hh" - -#include "hb-ot-shape-complex-syllabic.hh" - - -/* buffer var allocations */ -#define indic_category() complex_var_u8_category() /* indic_category_t */ -#define indic_position() complex_var_u8_auxiliary() /* indic_position_t */ - - -/* Categories used in the OpenType spec: - * https://docs.microsoft.com/en-us/typography/script-development/devanagari - */ -/* Note: This enum is duplicated in the -machine.rl source file. - * Not sure how to avoid duplication. */ -enum indic_category_t { - OT_X = 0, - OT_C = 1, - OT_V = 2, - OT_N = 3, - OT_H = 4, - OT_ZWNJ = 5, - OT_ZWJ = 6, - OT_M = 7, - OT_SM = 8, - /* OT_VD = 9, UNUSED; we use OT_A instead. */ - OT_A = 10, - OT_PLACEHOLDER = 11, - OT_DOTTEDCIRCLE = 12, - OT_RS = 13, /* Register Shifter, used in Khmer OT spec.
*/ - OT_Coeng = 14, /* Khmer-style Virama. */ - OT_Repha = 15, /* Atomically-encoded logical or visual repha. */ - OT_Ra = 16, - OT_CM = 17, /* Consonant-Medial. */ - OT_Symbol = 18, /* Avagraha, etc. that take marks (SM,A,VD). */ - OT_CS = 19, - - /* The following are used by Khmer & Myanmar shapers. Defined - * here for them to share. */ - OT_VAbv = 26, - OT_VBlw = 27, - OT_VPre = 28, - OT_VPst = 29, -}; - -#define MEDIAL_FLAGS (FLAG (OT_CM)) - -/* Note: - * - * We treat Vowels and placeholders as if they were consonants. This is safe because Vowels - * cannot happen in a consonant syllable. The plus side, however, is that we can call the - * consonant syllable logic from the vowel syllable function and get it all right! */ -#define CONSONANT_FLAGS (FLAG (OT_C) | FLAG (OT_CS) | FLAG (OT_Ra) | MEDIAL_FLAGS | FLAG (OT_V) | FLAG (OT_PLACEHOLDER) | FLAG (OT_DOTTEDCIRCLE)) -#define JOINER_FLAGS (FLAG (OT_ZWJ) | FLAG (OT_ZWNJ)) - - -/* Visual positions in a syllable from left to right. */ -enum indic_position_t { - POS_START = 0, - - POS_RA_TO_BECOME_REPH = 1, - POS_PRE_M = 2, - POS_PRE_C = 3, - - POS_BASE_C = 4, - POS_AFTER_MAIN = 5, - - POS_ABOVE_C = 6, - - POS_BEFORE_SUB = 7, - POS_BELOW_C = 8, - POS_AFTER_SUB = 9, - - POS_BEFORE_POST = 10, - POS_POST_C = 11, - POS_AFTER_POST = 12, - - POS_FINAL_C = 13, - POS_SMVD = 14, - - POS_END = 15 -}; - -/* Categories used in IndicSyllabicCategory.txt from UCD. */ -enum indic_syllabic_category_t { - INDIC_SYLLABIC_CATEGORY_OTHER = OT_X, - - INDIC_SYLLABIC_CATEGORY_AVAGRAHA = OT_Symbol, - INDIC_SYLLABIC_CATEGORY_BINDU = OT_SM, - INDIC_SYLLABIC_CATEGORY_BRAHMI_JOINING_NUMBER = OT_PLACEHOLDER, /* Don't care. */ - INDIC_SYLLABIC_CATEGORY_CANTILLATION_MARK = OT_A, - INDIC_SYLLABIC_CATEGORY_CONSONANT = OT_C, - INDIC_SYLLABIC_CATEGORY_CONSONANT_DEAD = OT_C, - INDIC_SYLLABIC_CATEGORY_CONSONANT_FINAL = OT_CM, - INDIC_SYLLABIC_CATEGORY_CONSONANT_HEAD_LETTER = OT_C, - INDIC_SYLLABIC_CATEGORY_CONSONANT_KILLER = OT_M, /* U+17CD only. */ - INDIC_SYLLABIC_CATEGORY_CONSONANT_MEDIAL = OT_CM, - INDIC_SYLLABIC_CATEGORY_CONSONANT_PLACEHOLDER = OT_PLACEHOLDER, - INDIC_SYLLABIC_CATEGORY_CONSONANT_PRECEDING_REPHA = OT_Repha, - INDIC_SYLLABIC_CATEGORY_CONSONANT_PREFIXED = OT_X, /* Don't care. */ - INDIC_SYLLABIC_CATEGORY_CONSONANT_SUBJOINED = OT_CM, - INDIC_SYLLABIC_CATEGORY_CONSONANT_SUCCEEDING_REPHA = OT_CM, - INDIC_SYLLABIC_CATEGORY_CONSONANT_WITH_STACKER = OT_CS, - INDIC_SYLLABIC_CATEGORY_GEMINATION_MARK = OT_SM, /* https://github.com/harfbuzz/harfbuzz/issues/552 */ - INDIC_SYLLABIC_CATEGORY_INVISIBLE_STACKER = OT_Coeng, - INDIC_SYLLABIC_CATEGORY_JOINER = OT_ZWJ, - INDIC_SYLLABIC_CATEGORY_MODIFYING_LETTER = OT_X, - INDIC_SYLLABIC_CATEGORY_NON_JOINER = OT_ZWNJ, - INDIC_SYLLABIC_CATEGORY_NUKTA = OT_N, - INDIC_SYLLABIC_CATEGORY_NUMBER = OT_PLACEHOLDER, - INDIC_SYLLABIC_CATEGORY_NUMBER_JOINER = OT_PLACEHOLDER, /* Don't care. */ - INDIC_SYLLABIC_CATEGORY_PURE_KILLER = OT_M, /* Is like a vowel matra.
*/ - INDIC_SYLLABIC_CATEGORY_REGISTER_SHIFTER = OT_RS, - INDIC_SYLLABIC_CATEGORY_SYLLABLE_MODIFIER = OT_SM, - INDIC_SYLLABIC_CATEGORY_TONE_LETTER = OT_X, - INDIC_SYLLABIC_CATEGORY_TONE_MARK = OT_N, - INDIC_SYLLABIC_CATEGORY_VIRAMA = OT_H, - INDIC_SYLLABIC_CATEGORY_VISARGA = OT_SM, - INDIC_SYLLABIC_CATEGORY_VOWEL = OT_V, - INDIC_SYLLABIC_CATEGORY_VOWEL_DEPENDENT = OT_M, - INDIC_SYLLABIC_CATEGORY_VOWEL_INDEPENDENT = OT_V -}; - -/* Categories used in IndicPositionalCategory.txt from UCD. */ -enum indic_matra_category_t { - INDIC_MATRA_CATEGORY_NOT_APPLICABLE = POS_END, - - INDIC_MATRA_CATEGORY_LEFT = POS_PRE_C, - INDIC_MATRA_CATEGORY_TOP = POS_ABOVE_C, - INDIC_MATRA_CATEGORY_BOTTOM = POS_BELOW_C, - INDIC_MATRA_CATEGORY_RIGHT = POS_POST_C, - - /* These should resolve to the position of the last part of the split sequence. */ - INDIC_MATRA_CATEGORY_BOTTOM_AND_RIGHT = INDIC_MATRA_CATEGORY_RIGHT, - INDIC_MATRA_CATEGORY_LEFT_AND_RIGHT = INDIC_MATRA_CATEGORY_RIGHT, - INDIC_MATRA_CATEGORY_TOP_AND_BOTTOM = INDIC_MATRA_CATEGORY_BOTTOM, - INDIC_MATRA_CATEGORY_TOP_AND_BOTTOM_AND_LEFT = INDIC_MATRA_CATEGORY_BOTTOM, - INDIC_MATRA_CATEGORY_TOP_AND_BOTTOM_AND_RIGHT = INDIC_MATRA_CATEGORY_RIGHT, - INDIC_MATRA_CATEGORY_TOP_AND_LEFT = INDIC_MATRA_CATEGORY_TOP, - INDIC_MATRA_CATEGORY_TOP_AND_LEFT_AND_RIGHT = INDIC_MATRA_CATEGORY_RIGHT, - INDIC_MATRA_CATEGORY_TOP_AND_RIGHT = INDIC_MATRA_CATEGORY_RIGHT, - - INDIC_MATRA_CATEGORY_OVERSTRUCK = POS_AFTER_MAIN, - INDIC_MATRA_CATEGORY_VISUAL_ORDER_LEFT = POS_PRE_M -}; - -#define INDIC_COMBINE_CATEGORIES(S,M) \ - ( \ - static_assert_expr (S < 255 && M < 255) + \ - ( S | \ - ( \ - ( \ - S == INDIC_SYLLABIC_CATEGORY_CONSONANT_MEDIAL || \ - S == INDIC_SYLLABIC_CATEGORY_GEMINATION_MARK || \ - S == INDIC_SYLLABIC_CATEGORY_REGISTER_SHIFTER || \ - S == INDIC_SYLLABIC_CATEGORY_CONSONANT_SUCCEEDING_REPHA || \ - S == INDIC_SYLLABIC_CATEGORY_VIRAMA || \ - S == INDIC_SYLLABIC_CATEGORY_VOWEL_DEPENDENT || \ - false \ - ? M : INDIC_MATRA_CATEGORY_NOT_APPLICABLE \ - ) << 8 \ - ) \ - ) \ - ) - -HB_INTERNAL uint16_t -hb_indic_get_categories (hb_codepoint_t u); - - -static inline bool -is_one_of (const hb_glyph_info_t &info, unsigned int flags) -{ - /* If it ligated, all bets are off. */ - if (_hb_glyph_info_ligated (&info)) return false; - return !!(FLAG_UNSAFE (info.indic_category()) & flags); -} - -static inline bool -is_joiner (const hb_glyph_info_t &info) -{ - return is_one_of (info, JOINER_FLAGS); -} - -static inline bool -is_consonant (const hb_glyph_info_t &info) -{ - return is_one_of (info, CONSONANT_FLAGS); -} - -static inline bool -is_halant (const hb_glyph_info_t &info) -{ - return is_one_of (info, FLAG (OT_H)); -} - -#define IN_HALF_BLOCK(u, Base) (((u) & ~0x7Fu) == (Base)) - -#define IS_DEVA(u) (IN_HALF_BLOCK (u, 0x0900u)) -#define IS_BENG(u) (IN_HALF_BLOCK (u, 0x0980u)) -#define IS_GURU(u) (IN_HALF_BLOCK (u, 0x0A00u)) -#define IS_GUJR(u) (IN_HALF_BLOCK (u, 0x0A80u)) -#define IS_ORYA(u) (IN_HALF_BLOCK (u, 0x0B00u)) -#define IS_TAML(u) (IN_HALF_BLOCK (u, 0x0B80u)) -#define IS_TELU(u) (IN_HALF_BLOCK (u, 0x0C00u)) -#define IS_KNDA(u) (IN_HALF_BLOCK (u, 0x0C80u)) -#define IS_MLYM(u) (IN_HALF_BLOCK (u, 0x0D00u)) -#define IS_SINH(u) (IN_HALF_BLOCK (u, 0x0D80u)) - - -#define MATRA_POS_LEFT(u) POS_PRE_M -#define MATRA_POS_RIGHT(u) ( \ - IS_DEVA(u) ? POS_AFTER_SUB : \ - IS_BENG(u) ? POS_AFTER_POST : \ - IS_GURU(u) ? POS_AFTER_POST : \ - IS_GUJR(u) ? POS_AFTER_POST : \ - IS_ORYA(u) ? POS_AFTER_POST : \ - IS_TAML(u) ? POS_AFTER_POST : \ - IS_TELU(u) ? (u <= 0x0C42u ?
POS_BEFORE_SUB : POS_AFTER_SUB) : \ - IS_KNDA(u) ? (u < 0x0CC3u || u > 0xCD6u ? POS_BEFORE_SUB : POS_AFTER_SUB) : \ - IS_MLYM(u) ? POS_AFTER_POST : \ - IS_SINH(u) ? POS_AFTER_SUB : \ - /*default*/ POS_AFTER_SUB \ - ) -#define MATRA_POS_TOP(u) ( /* BENG and MLYM don't have top matras. */ \ - IS_DEVA(u) ? POS_AFTER_SUB : \ - IS_GURU(u) ? POS_AFTER_POST : /* Deviate from spec */ \ - IS_GUJR(u) ? POS_AFTER_SUB : \ - IS_ORYA(u) ? POS_AFTER_MAIN : \ - IS_TAML(u) ? POS_AFTER_SUB : \ - IS_TELU(u) ? POS_BEFORE_SUB : \ - IS_KNDA(u) ? POS_BEFORE_SUB : \ - IS_SINH(u) ? POS_AFTER_SUB : \ - /*default*/ POS_AFTER_SUB \ - ) -#define MATRA_POS_BOTTOM(u) ( \ - IS_DEVA(u) ? POS_AFTER_SUB : \ - IS_BENG(u) ? POS_AFTER_SUB : \ - IS_GURU(u) ? POS_AFTER_POST : \ - IS_GUJR(u) ? POS_AFTER_POST : \ - IS_ORYA(u) ? POS_AFTER_SUB : \ - IS_TAML(u) ? POS_AFTER_POST : \ - IS_TELU(u) ? POS_BEFORE_SUB : \ - IS_KNDA(u) ? POS_BEFORE_SUB : \ - IS_MLYM(u) ? POS_AFTER_POST : \ - IS_SINH(u) ? POS_AFTER_SUB : \ - /*default*/ POS_AFTER_SUB \ - ) - -static inline indic_position_t -matra_position_indic (hb_codepoint_t u, indic_position_t side) -{ - switch ((int) side) - { - case POS_PRE_C: return MATRA_POS_LEFT (u); - case POS_POST_C: return MATRA_POS_RIGHT (u); - case POS_ABOVE_C: return MATRA_POS_TOP (u); - case POS_BELOW_C: return MATRA_POS_BOTTOM (u); - } - return side; -} - -/* XXX - * This is a hack for now. We should move this data into the main Indic table. - * Or completely remove it and just check in the tables. - */ -static const hb_codepoint_t ra_chars[] = { - 0x0930u, /* Devanagari */ - 0x09B0u, /* Bengali */ - 0x09F0u, /* Bengali */ - 0x0A30u, /* Gurmukhi */ /* No Reph */ - 0x0AB0u, /* Gujarati */ - 0x0B30u, /* Oriya */ - 0x0BB0u, /* Tamil */ /* No Reph */ - 0x0C30u, /* Telugu */ /* Reph formed only with ZWJ */ - 0x0CB0u, /* Kannada */ - 0x0D30u, /* Malayalam */ /* No Reph, Logical Repha */ - - 0x0DBBu, /* Sinhala */ /* Reph formed only with ZWJ */ -}; - -static inline bool -is_ra (hb_codepoint_t u) -{ - return hb_array (ra_chars).lfind (u); -} - -static inline void -set_indic_properties (hb_glyph_info_t &info) -{ - hb_codepoint_t u = info.codepoint; - unsigned int type = hb_indic_get_categories (u); - indic_category_t cat = (indic_category_t) (type & 0xFFu); - indic_position_t pos = (indic_position_t) (type >> 8); - - - /* - * Re-assign category - */ - - /* The following act more like the Bindus. */ - if (unlikely (hb_in_range<hb_codepoint_t> (u, 0x0953u, 0x0954u))) - cat = OT_SM; - /* The following act like consonants. */ - else if (unlikely (hb_in_ranges<hb_codepoint_t> (u, 0x0A72u, 0x0A73u, - 0x1CF5u, 0x1CF6u))) - cat = OT_C; - /* TODO: The following should only be allowed after a Visarga. - * For now, just treat them like regular tone marks. */ - else if (unlikely (hb_in_range<hb_codepoint_t> (u, 0x1CE2u, 0x1CE8u))) - cat = OT_A; - /* TODO: The following should only be allowed after some of - * the nasalization marks, maybe only for U+1CE9..U+1CF1. - * For now, just treat them like tone marks. */ - else if (unlikely (u == 0x1CEDu)) - cat = OT_A; - /* The following take marks in standalone clusters, similar to Avagraha. 
*/ - else if (unlikely (hb_in_ranges<hb_codepoint_t> (u, 0xA8F2u, 0xA8F7u, - 0x1CE9u, 0x1CECu, - 0x1CEEu, 0x1CF1u))) - { - cat = OT_Symbol; - static_assert (((int) INDIC_SYLLABIC_CATEGORY_AVAGRAHA == OT_Symbol), ""); - } - else if (unlikely (u == 0x0A51u)) - { - /* https://github.com/harfbuzz/harfbuzz/issues/524 */ - cat = OT_M; - pos = POS_BELOW_C; - } - - /* According to ScriptExtensions.txt, these Grantha marks may also be used in Tamil, - * so the Indic shaper needs to know their categories. */ - else if (unlikely (u == 0x11301u || u == 0x11303u)) cat = OT_SM; - else if (unlikely (u == 0x1133Bu || u == 0x1133Cu)) cat = OT_N; - - else if (unlikely (u == 0x0AFBu)) cat = OT_N; /* https://github.com/harfbuzz/harfbuzz/issues/552 */ - else if (unlikely (u == 0x0B55u)) cat = OT_N; /* https://github.com/harfbuzz/harfbuzz/issues/2849 */ - - else if (unlikely (u == 0x0980u)) cat = OT_PLACEHOLDER; /* https://github.com/harfbuzz/harfbuzz/issues/538 */ - else if (unlikely (u == 0x09FCu)) cat = OT_PLACEHOLDER; /* https://github.com/harfbuzz/harfbuzz/pull/1613 */ - else if (unlikely (u == 0x0C80u)) cat = OT_PLACEHOLDER; /* https://github.com/harfbuzz/harfbuzz/pull/623 */ - else if (unlikely (u == 0x0D04u)) cat = OT_PLACEHOLDER; /* https://github.com/harfbuzz/harfbuzz/pull/3511 */ - else if (unlikely (hb_in_range<hb_codepoint_t> (u, 0x2010u, 0x2011u))) - cat = OT_PLACEHOLDER; - else if (unlikely (u == 0x25CCu)) cat = OT_DOTTEDCIRCLE; - - - /* - * Re-assign position. - */ - - if ((FLAG_UNSAFE (cat) & CONSONANT_FLAGS)) - { - pos = POS_BASE_C; - if (is_ra (u)) - cat = OT_Ra; - } - else if (cat == OT_M) - { - pos = matra_position_indic (u, pos); - } - else if ((FLAG_UNSAFE (cat) & (FLAG (OT_SM) /* | FLAG (OT_VD) */ | FLAG (OT_A) | FLAG (OT_Symbol)))) - { - pos = POS_SMVD; - } - - if (unlikely (u == 0x0B01u)) pos = POS_BEFORE_SUB; /* Oriya Bindu is BeforeSub in the spec. */ - - - - info.indic_category() = cat; - info.indic_position() = pos; -} - -struct hb_indic_would_substitute_feature_t -{ - void init (const hb_ot_map_t *map, hb_tag_t feature_tag, bool zero_context_) - { - zero_context = zero_context_; - map->get_stage_lookups (0/*GSUB*/, - map->get_feature_stage (0/*GSUB*/, feature_tag), - &lookups, &count); - } - - bool would_substitute (const hb_codepoint_t *glyphs, - unsigned int glyphs_count, - hb_face_t *face) const - { - for (unsigned int i = 0; i < count; i++) - if (hb_ot_layout_lookup_would_substitute (face, lookups[i].index, glyphs, glyphs_count, zero_context)) - return true; - return false; - } - - private: - const hb_ot_map_t::lookup_map_t *lookups; - unsigned int count; - bool zero_context; -}; - - -#endif /* HB_OT_SHAPE_COMPLEX_INDIC_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-khmer-machine.hh b/thirdparty/harfbuzz/src/hb-ot-shape-complex-khmer-machine.hh deleted file mode 100644 index c52f72f394..0000000000 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-khmer-machine.hh +++ /dev/null @@ -1,396 +0,0 @@ - -#line 1 "hb-ot-shape-complex-khmer-machine.rl" -/* - * Copyright © 2011,2012 Google, Inc. - * - * This is part of HarfBuzz, a text shaping library. - * - * Permission is hereby granted, without written agreement and without - * license or royalty fees, to use, copy, modify, and distribute this - * software and its documentation for any purpose, provided that the - * above copyright notice and the following two paragraphs appear in - * all copies of this software. 
- * - * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR - * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES - * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN - * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - * - * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, - * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS - * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO - * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. - * - * Google Author(s): Behdad Esfahbod - */ - -#ifndef HB_OT_SHAPE_COMPLEX_KHMER_MACHINE_HH -#define HB_OT_SHAPE_COMPLEX_KHMER_MACHINE_HH - -#include "hb.hh" - -enum khmer_syllable_type_t { - khmer_consonant_syllable, - khmer_broken_cluster, - khmer_non_khmer_cluster, -}; - - -#line 42 "hb-ot-shape-complex-khmer-machine.hh" -#define khmer_syllable_machine_ex_C 1u -#define khmer_syllable_machine_ex_Coeng 14u -#define khmer_syllable_machine_ex_DOTTEDCIRCLE 12u -#define khmer_syllable_machine_ex_PLACEHOLDER 11u -#define khmer_syllable_machine_ex_Ra 16u -#define khmer_syllable_machine_ex_Robatic 20u -#define khmer_syllable_machine_ex_V 2u -#define khmer_syllable_machine_ex_VAbv 26u -#define khmer_syllable_machine_ex_VBlw 27u -#define khmer_syllable_machine_ex_VPre 28u -#define khmer_syllable_machine_ex_VPst 29u -#define khmer_syllable_machine_ex_Xgroup 21u -#define khmer_syllable_machine_ex_Ygroup 22u -#define khmer_syllable_machine_ex_ZWJ 6u -#define khmer_syllable_machine_ex_ZWNJ 5u - - -#line 60 "hb-ot-shape-complex-khmer-machine.hh" -static const unsigned char _khmer_syllable_machine_trans_keys[] = { - 5u, 26u, 5u, 21u, 5u, 26u, 5u, 21u, 1u, 16u, 5u, 21u, 5u, 26u, 5u, 21u, - 5u, 26u, 5u, 21u, 5u, 21u, 5u, 26u, 5u, 21u, 1u, 16u, 5u, 21u, 5u, 26u, - 5u, 21u, 5u, 26u, 5u, 21u, 5u, 26u, 1u, 29u, 5u, 29u, 5u, 29u, 5u, 29u, - 22u, 22u, 5u, 22u, 5u, 29u, 5u, 29u, 5u, 29u, 1u, 16u, 5u, 26u, 5u, 29u, - 5u, 29u, 22u, 22u, 5u, 22u, 5u, 29u, 5u, 29u, 1u, 16u, 5u, 29u, 5u, 29u, - 0 -}; - -static const char _khmer_syllable_machine_key_spans[] = { - 22, 17, 22, 17, 16, 17, 22, 17, - 22, 17, 17, 22, 17, 16, 17, 22, - 17, 22, 17, 22, 29, 25, 25, 25, - 1, 18, 25, 25, 25, 16, 22, 25, - 25, 1, 18, 25, 25, 16, 25, 25 -}; - -static const short _khmer_syllable_machine_index_offsets[] = { - 0, 23, 41, 64, 82, 99, 117, 140, - 158, 181, 199, 217, 240, 258, 275, 293, - 316, 334, 357, 375, 398, 428, 454, 480, - 506, 508, 527, 553, 579, 605, 622, 645, - 671, 697, 699, 718, 744, 770, 787, 813 -}; - -static const char _khmer_syllable_machine_indicies[] = { - 1, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 2, - 3, 0, 0, 0, 0, 4, 0, 1, - 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 3, - 0, 1, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 3, 0, 0, 0, 0, 4, 0, - 5, 5, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 4, 0, 6, 6, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 6, 0, 7, 7, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 8, 0, 9, 9, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 10, 0, 0, - 0, 0, 4, 0, 9, 9, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 10, 0, 11, 11, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 12, 0, - 0, 0, 0, 4, 0, 11, 11, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 12, 0, 14, - 14, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 15, - 13, 14, 14, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 15, 16, 16, 16, 16, 
17, 16, - 18, 18, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 17, 16, 19, 19, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 19, 16, 20, 20, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 21, 16, 22, 22, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 23, 16, 16, - 16, 16, 17, 16, 22, 22, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 23, 16, 24, 24, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 25, 16, - 16, 16, 16, 17, 16, 24, 24, 16, - 16, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 25, 16, 14, - 14, 16, 16, 16, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 26, 15, - 16, 16, 16, 16, 17, 16, 28, 28, - 27, 27, 29, 29, 27, 27, 27, 27, - 2, 2, 27, 30, 27, 28, 27, 27, - 27, 27, 15, 19, 27, 27, 27, 17, - 23, 25, 21, 27, 32, 32, 31, 31, - 31, 31, 31, 31, 31, 33, 31, 31, - 31, 31, 31, 2, 3, 6, 31, 31, - 31, 4, 10, 12, 8, 31, 34, 34, - 31, 31, 31, 31, 31, 31, 31, 35, - 31, 31, 31, 31, 31, 31, 3, 6, - 31, 31, 31, 4, 10, 12, 8, 31, - 5, 5, 31, 31, 31, 31, 31, 31, - 31, 35, 31, 31, 31, 31, 31, 31, - 4, 6, 31, 31, 31, 31, 31, 31, - 8, 31, 6, 31, 7, 7, 31, 31, - 31, 31, 31, 31, 31, 35, 31, 31, - 31, 31, 31, 31, 8, 6, 31, 36, - 36, 31, 31, 31, 31, 31, 31, 31, - 35, 31, 31, 31, 31, 31, 31, 10, - 6, 31, 31, 31, 4, 31, 31, 8, - 31, 37, 37, 31, 31, 31, 31, 31, - 31, 31, 35, 31, 31, 31, 31, 31, - 31, 12, 6, 31, 31, 31, 4, 10, - 31, 8, 31, 34, 34, 31, 31, 31, - 31, 31, 31, 31, 33, 31, 31, 31, - 31, 31, 31, 3, 6, 31, 31, 31, - 4, 10, 12, 8, 31, 28, 28, 31, - 31, 31, 31, 31, 31, 31, 31, 31, - 31, 31, 31, 31, 28, 31, 14, 14, - 38, 38, 38, 38, 38, 38, 38, 38, - 38, 38, 38, 38, 38, 38, 15, 38, - 38, 38, 38, 17, 38, 40, 40, 39, - 39, 39, 39, 39, 39, 39, 41, 39, - 39, 39, 39, 39, 39, 15, 19, 39, - 39, 39, 17, 23, 25, 21, 39, 18, - 18, 39, 39, 39, 39, 39, 39, 39, - 41, 39, 39, 39, 39, 39, 39, 17, - 19, 39, 39, 39, 39, 39, 39, 21, - 39, 19, 39, 20, 20, 39, 39, 39, - 39, 39, 39, 39, 41, 39, 39, 39, - 39, 39, 39, 21, 19, 39, 42, 42, - 39, 39, 39, 39, 39, 39, 39, 41, - 39, 39, 39, 39, 39, 39, 23, 19, - 39, 39, 39, 17, 39, 39, 21, 39, - 43, 43, 39, 39, 39, 39, 39, 39, - 39, 41, 39, 39, 39, 39, 39, 39, - 25, 19, 39, 39, 39, 17, 23, 39, - 21, 39, 44, 44, 39, 39, 39, 39, - 39, 39, 39, 39, 39, 39, 39, 39, - 39, 44, 39, 45, 45, 39, 39, 39, - 39, 39, 39, 39, 30, 39, 39, 39, - 39, 39, 26, 15, 19, 39, 39, 39, - 17, 23, 25, 21, 39, 40, 40, 39, - 39, 39, 39, 39, 39, 39, 30, 39, - 39, 39, 39, 39, 39, 15, 19, 39, - 39, 39, 17, 23, 25, 21, 39, 0 -}; - -static const char _khmer_syllable_machine_trans_targs[] = { - 20, 1, 28, 22, 23, 3, 24, 5, - 25, 7, 26, 9, 27, 20, 10, 31, - 20, 32, 12, 33, 14, 34, 16, 35, - 18, 36, 39, 20, 21, 30, 37, 20, - 0, 29, 2, 4, 6, 8, 20, 20, - 11, 13, 15, 17, 38, 19 -}; - -static const char _khmer_syllable_machine_trans_actions[] = { - 1, 0, 2, 2, 2, 0, 0, 0, - 2, 0, 2, 0, 2, 3, 0, 4, - 5, 2, 0, 0, 0, 2, 0, 2, - 0, 2, 4, 8, 2, 9, 0, 10, - 0, 0, 0, 0, 0, 0, 11, 12, - 0, 0, 0, 0, 4, 0 -}; - -static const char _khmer_syllable_machine_to_state_actions[] = { - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 6, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 -}; - -static const char _khmer_syllable_machine_from_state_actions[] = { - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 7, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 -}; - -static const unsigned char _khmer_syllable_machine_eof_trans[] = { - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 14, 17, 17, 17, 17, 17, - 17, 17, 17, 17, 0, 
32, 32, 32, - 32, 32, 32, 32, 32, 32, 39, 40, - 40, 40, 40, 40, 40, 40, 40, 40 -}; - -static const int khmer_syllable_machine_start = 20; -static const int khmer_syllable_machine_first_final = 20; -static const int khmer_syllable_machine_error = -1; - -static const int khmer_syllable_machine_en_main = 20; - - -#line 43 "hb-ot-shape-complex-khmer-machine.rl" - - - -#line 86 "hb-ot-shape-complex-khmer-machine.rl" - - -#define found_syllable(syllable_type) \ - HB_STMT_START { \ - if (0) fprintf (stderr, "syllable %d..%d %s\n", ts, te, #syllable_type); \ - for (unsigned int i = ts; i < te; i++) \ - info[i].syllable() = (syllable_serial << 4) | syllable_type; \ - syllable_serial++; \ - if (unlikely (syllable_serial == 16)) syllable_serial = 1; \ - } HB_STMT_END - -static void -find_syllables_khmer (hb_buffer_t *buffer) -{ - unsigned int p, pe, eof, ts, te, act HB_UNUSED; - int cs; - hb_glyph_info_t *info = buffer->info; - -#line 266 "hb-ot-shape-complex-khmer-machine.hh" - { - cs = khmer_syllable_machine_start; - ts = 0; - te = 0; - act = 0; - } - -#line 106 "hb-ot-shape-complex-khmer-machine.rl" - - - p = 0; - pe = eof = buffer->len; - - unsigned int syllable_serial = 1; - -#line 282 "hb-ot-shape-complex-khmer-machine.hh" - { - int _slen; - int _trans; - const unsigned char *_keys; - const char *_inds; - if ( p == pe ) - goto _test_eof; -_resume: - switch ( _khmer_syllable_machine_from_state_actions[cs] ) { - case 7: -#line 1 "NONE" - {ts = p;} - break; -#line 296 "hb-ot-shape-complex-khmer-machine.hh" - } - - _keys = _khmer_syllable_machine_trans_keys + (cs<<1); - _inds = _khmer_syllable_machine_indicies + _khmer_syllable_machine_index_offsets[cs]; - - _slen = _khmer_syllable_machine_key_spans[cs]; - _trans = _inds[ _slen > 0 && _keys[0] <=( info[p].khmer_category()) && - ( info[p].khmer_category()) <= _keys[1] ? 
- ( info[p].khmer_category()) - _keys[0] : _slen ]; - -_eof_trans: - cs = _khmer_syllable_machine_trans_targs[_trans]; - - if ( _khmer_syllable_machine_trans_actions[_trans] == 0 ) - goto _again; - - switch ( _khmer_syllable_machine_trans_actions[_trans] ) { - case 2: -#line 1 "NONE" - {te = p+1;} - break; - case 8: -#line 82 "hb-ot-shape-complex-khmer-machine.rl" - {te = p+1;{ found_syllable (khmer_non_khmer_cluster); }} - break; - case 10: -#line 80 "hb-ot-shape-complex-khmer-machine.rl" - {te = p;p--;{ found_syllable (khmer_consonant_syllable); }} - break; - case 12: -#line 81 "hb-ot-shape-complex-khmer-machine.rl" - {te = p;p--;{ found_syllable (khmer_broken_cluster); }} - break; - case 11: -#line 82 "hb-ot-shape-complex-khmer-machine.rl" - {te = p;p--;{ found_syllable (khmer_non_khmer_cluster); }} - break; - case 1: -#line 80 "hb-ot-shape-complex-khmer-machine.rl" - {{p = ((te))-1;}{ found_syllable (khmer_consonant_syllable); }} - break; - case 5: -#line 81 "hb-ot-shape-complex-khmer-machine.rl" - {{p = ((te))-1;}{ found_syllable (khmer_broken_cluster); }} - break; - case 3: -#line 1 "NONE" - { switch( act ) { - case 2: - {{p = ((te))-1;} found_syllable (khmer_broken_cluster); } - break; - case 3: - {{p = ((te))-1;} found_syllable (khmer_non_khmer_cluster); } - break; - } - } - break; - case 4: -#line 1 "NONE" - {te = p+1;} -#line 81 "hb-ot-shape-complex-khmer-machine.rl" - {act = 2;} - break; - case 9: -#line 1 "NONE" - {te = p+1;} -#line 82 "hb-ot-shape-complex-khmer-machine.rl" - {act = 3;} - break; -#line 366 "hb-ot-shape-complex-khmer-machine.hh" - } - -_again: - switch ( _khmer_syllable_machine_to_state_actions[cs] ) { - case 6: -#line 1 "NONE" - {ts = 0;} - break; -#line 375 "hb-ot-shape-complex-khmer-machine.hh" - } - - if ( ++p != pe ) - goto _resume; - _test_eof: {} - if ( p == eof ) - { - if ( _khmer_syllable_machine_eof_trans[cs] > 0 ) { - _trans = _khmer_syllable_machine_eof_trans[cs] - 1; - goto _eof_trans; - } - } - - } - -#line 114 "hb-ot-shape-complex-khmer-machine.rl" - -} - -#undef found_syllable - -#endif /* HB_OT_SHAPE_COMPLEX_KHMER_MACHINE_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-khmer.hh b/thirdparty/harfbuzz/src/hb-ot-shape-complex-khmer.hh deleted file mode 100644 index 35bfbb64d5..0000000000 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-khmer.hh +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright © 2018 Google, Inc. - * - * This is part of HarfBuzz, a text shaping library. - * - * Permission is hereby granted, without written agreement and without - * license or royalty fees, to use, copy, modify, and distribute this - * software and its documentation for any purpose, provided that the - * above copyright notice and the following two paragraphs appear in - * all copies of this software. - * - * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR - * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES - * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN - * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - * - * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, - * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS - * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO - * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
- * - * Google Author(s): Behdad Esfahbod - */ - -#ifndef HB_OT_SHAPE_COMPLEX_KHMER_HH -#define HB_OT_SHAPE_COMPLEX_KHMER_HH - -#include "hb.hh" - -#include "hb-ot-shape-complex-indic.hh" - - -/* buffer var allocations */ -#define khmer_category() indic_category() /* khmer_category_t */ - - -/* Note: This enum is duplicated in the -machine.rl source file. - * Not sure how to avoid duplication. */ -enum khmer_category_t -{ - OT_Robatic = 20, - OT_Xgroup = 21, - OT_Ygroup = 22, - //OT_VAbv = 26, - //OT_VBlw = 27, - //OT_VPre = 28, - //OT_VPst = 29, -}; - -using khmer_position_t = indic_position_t; - -static inline void -set_khmer_properties (hb_glyph_info_t &info) -{ - hb_codepoint_t u = info.codepoint; - unsigned int type = hb_indic_get_categories (u); - khmer_category_t cat = (khmer_category_t) (type & 0xFFu); - khmer_position_t pos = (khmer_position_t) (type >> 8); - - - /* - * Re-assign category - * - * These categories are experimentally extracted from what Uniscribe allows. - */ - switch (u) - { - case 0x179Au: - cat = (khmer_category_t) OT_Ra; - break; - - case 0x17CCu: - case 0x17C9u: - case 0x17CAu: - cat = OT_Robatic; - break; - - case 0x17C6u: - case 0x17CBu: - case 0x17CDu: - case 0x17CEu: - case 0x17CFu: - case 0x17D0u: - case 0x17D1u: - cat = OT_Xgroup; - break; - - case 0x17C7u: - case 0x17C8u: - case 0x17DDu: - case 0x17D3u: /* Just guessing. Uniscribe doesn't categorize it. */ - cat = OT_Ygroup; - break; - } - - /* - * Re-assign position. - */ - if (cat == (khmer_category_t) OT_M) - switch ((int) pos) - { - case POS_PRE_C: cat = (khmer_category_t) OT_VPre; break; - case POS_BELOW_C: cat = (khmer_category_t) OT_VBlw; break; - case POS_ABOVE_C: cat = (khmer_category_t) OT_VAbv; break; - case POS_POST_C: cat = (khmer_category_t) OT_VPst; break; - default: assert (0); - } - - info.khmer_category() = cat; -} - - -#endif /* HB_OT_SHAPE_COMPLEX_KHMER_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-myanmar-machine.hh b/thirdparty/harfbuzz/src/hb-ot-shape-complex-myanmar-machine.hh deleted file mode 100644 index f4ef33004d..0000000000 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-myanmar-machine.hh +++ /dev/null @@ -1,492 +0,0 @@ - -#line 1 "hb-ot-shape-complex-myanmar-machine.rl" -/* - * Copyright © 2011,2012 Google, Inc. - * - * This is part of HarfBuzz, a text shaping library. - * - * Permission is hereby granted, without written agreement and without - * license or royalty fees, to use, copy, modify, and distribute this - * software and its documentation for any purpose, provided that the - * above copyright notice and the following two paragraphs appear in - * all copies of this software. - * - * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR - * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES - * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN - * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - * - * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, - * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS - * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO - * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
- * - * Google Author(s): Behdad Esfahbod - */ - -#ifndef HB_OT_SHAPE_COMPLEX_MYANMAR_MACHINE_HH -#define HB_OT_SHAPE_COMPLEX_MYANMAR_MACHINE_HH - -#include "hb.hh" - -enum myanmar_syllable_type_t { - myanmar_consonant_syllable, - myanmar_punctuation_cluster, - myanmar_broken_cluster, - myanmar_non_myanmar_cluster, -}; - - -#line 43 "hb-ot-shape-complex-myanmar-machine.hh" -#define myanmar_syllable_machine_ex_A 10u -#define myanmar_syllable_machine_ex_As 18u -#define myanmar_syllable_machine_ex_C 1u -#define myanmar_syllable_machine_ex_CS 19u -#define myanmar_syllable_machine_ex_D 32u -#define myanmar_syllable_machine_ex_D0 20u -#define myanmar_syllable_machine_ex_DB 3u -#define myanmar_syllable_machine_ex_GB 11u -#define myanmar_syllable_machine_ex_H 4u -#define myanmar_syllable_machine_ex_IV 2u -#define myanmar_syllable_machine_ex_MH 21u -#define myanmar_syllable_machine_ex_ML 33u -#define myanmar_syllable_machine_ex_MR 22u -#define myanmar_syllable_machine_ex_MW 23u -#define myanmar_syllable_machine_ex_MY 24u -#define myanmar_syllable_machine_ex_P 31u -#define myanmar_syllable_machine_ex_PT 25u -#define myanmar_syllable_machine_ex_Ra 16u -#define myanmar_syllable_machine_ex_V 8u -#define myanmar_syllable_machine_ex_VAbv 26u -#define myanmar_syllable_machine_ex_VBlw 27u -#define myanmar_syllable_machine_ex_VPre 28u -#define myanmar_syllable_machine_ex_VPst 29u -#define myanmar_syllable_machine_ex_VS 30u -#define myanmar_syllable_machine_ex_ZWJ 6u -#define myanmar_syllable_machine_ex_ZWNJ 5u - - -#line 72 "hb-ot-shape-complex-myanmar-machine.hh" -static const unsigned char _myanmar_syllable_machine_trans_keys[] = { - 1u, 33u, 3u, 33u, 5u, 29u, 5u, 8u, 5u, 29u, 3u, 25u, 5u, 25u, 5u, 25u, - 3u, 33u, 3u, 29u, 3u, 29u, 3u, 29u, 3u, 33u, 1u, 16u, 3u, 33u, 3u, 33u, - 3u, 29u, 3u, 29u, 3u, 29u, 3u, 30u, 3u, 29u, 3u, 33u, 3u, 33u, 3u, 33u, - 3u, 33u, 3u, 33u, 5u, 29u, 5u, 8u, 5u, 29u, 3u, 25u, 5u, 25u, 5u, 25u, - 3u, 33u, 3u, 29u, 3u, 29u, 3u, 29u, 3u, 33u, 1u, 16u, 3u, 33u, 3u, 33u, - 3u, 33u, 3u, 29u, 3u, 29u, 3u, 29u, 3u, 30u, 3u, 29u, 3u, 33u, 3u, 33u, - 3u, 33u, 3u, 33u, 3u, 33u, 3u, 33u, 3u, 33u, 1u, 33u, 1u, 32u, 8u, 8u, - 0 -}; - -static const char _myanmar_syllable_machine_key_spans[] = { - 33, 31, 25, 4, 25, 23, 21, 21, - 31, 27, 27, 27, 31, 16, 31, 31, - 27, 27, 27, 28, 27, 31, 31, 31, - 31, 31, 25, 4, 25, 23, 21, 21, - 31, 27, 27, 27, 31, 16, 31, 31, - 31, 27, 27, 27, 28, 27, 31, 31, - 31, 31, 31, 31, 31, 33, 32, 1 -}; - -static const short _myanmar_syllable_machine_index_offsets[] = { - 0, 34, 66, 92, 97, 123, 147, 169, - 191, 223, 251, 279, 307, 339, 356, 388, - 420, 448, 476, 504, 533, 561, 593, 625, - 657, 689, 721, 747, 752, 778, 802, 824, - 846, 878, 906, 934, 962, 994, 1011, 1043, - 1075, 1107, 1135, 1163, 1191, 1220, 1248, 1280, - 1312, 1344, 1376, 1408, 1440, 1472, 1506, 1539 -}; - -static const char _myanmar_syllable_machine_indicies[] = { - 1, 1, 2, 3, 4, 4, 0, 5, - 0, 6, 1, 0, 0, 0, 0, 7, - 0, 8, 9, 0, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 20, 1, - 21, 0, 23, 24, 25, 25, 22, 26, - 22, 27, 22, 22, 22, 22, 22, 22, - 22, 28, 22, 22, 29, 30, 31, 32, - 33, 34, 35, 36, 37, 38, 22, 22, - 39, 22, 25, 25, 22, 26, 22, 22, - 22, 22, 22, 22, 22, 22, 22, 40, - 22, 22, 22, 22, 22, 22, 33, 22, - 22, 22, 37, 22, 25, 25, 22, 26, - 22, 25, 25, 22, 26, 22, 22, 22, - 22, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 22, 22, 22, 33, 22, 22, - 22, 37, 22, 41, 22, 25, 25, 22, - 26, 22, 33, 22, 22, 22, 22, 22, - 22, 22, 42, 22, 22, 22, 22, 22, - 22, 33, 22, 25, 25, 22, 26, 22, - 22, 22, 22, 22, 22, 
22, 22, 22, - 42, 22, 22, 22, 22, 22, 22, 33, - 22, 25, 25, 22, 26, 22, 22, 22, - 22, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 22, 22, 22, 33, 22, 23, - 22, 25, 25, 22, 26, 22, 27, 22, - 22, 22, 22, 22, 22, 22, 43, 22, - 22, 44, 22, 22, 22, 33, 45, 22, - 22, 37, 22, 22, 22, 43, 22, 23, - 22, 25, 25, 22, 26, 22, 27, 22, - 22, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 22, 22, 22, 33, 22, 22, - 22, 37, 22, 23, 22, 25, 25, 22, - 26, 22, 27, 22, 22, 22, 22, 22, - 22, 22, 43, 22, 22, 22, 22, 22, - 22, 33, 45, 22, 22, 37, 22, 23, - 22, 25, 25, 22, 26, 22, 27, 22, - 22, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 22, 22, 22, 33, 45, 22, - 22, 37, 22, 23, 22, 25, 25, 22, - 26, 22, 27, 22, 22, 22, 22, 22, - 22, 22, 43, 22, 22, 22, 22, 22, - 22, 33, 45, 22, 22, 37, 22, 22, - 22, 43, 22, 1, 1, 22, 22, 22, - 22, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 1, 22, 23, 22, 25, 25, - 22, 26, 22, 27, 22, 22, 22, 22, - 22, 22, 22, 28, 22, 22, 29, 30, - 31, 32, 33, 34, 35, 36, 37, 22, - 22, 22, 39, 22, 23, 22, 25, 25, - 22, 26, 22, 27, 22, 22, 22, 22, - 22, 22, 22, 46, 22, 22, 22, 22, - 22, 22, 33, 34, 35, 36, 37, 22, - 22, 22, 39, 22, 23, 22, 25, 25, - 22, 26, 22, 27, 22, 22, 22, 22, - 22, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 33, 34, 35, 36, 37, 22, - 23, 22, 25, 25, 22, 26, 22, 27, - 22, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 22, 22, 22, 22, 33, 34, - 35, 22, 37, 22, 23, 22, 25, 25, - 22, 26, 22, 27, 22, 22, 22, 22, - 22, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 33, 22, 35, 22, 37, 22, - 23, 22, 25, 25, 22, 26, 22, 27, - 22, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 22, 22, 22, 22, 33, 34, - 35, 36, 37, 46, 22, 23, 22, 25, - 25, 22, 26, 22, 27, 22, 22, 22, - 22, 22, 22, 22, 46, 22, 22, 22, - 22, 22, 22, 33, 34, 35, 36, 37, - 22, 23, 22, 25, 25, 22, 26, 22, - 27, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 22, 29, 22, 31, 22, 33, - 34, 35, 36, 37, 22, 22, 22, 39, - 22, 23, 22, 25, 25, 22, 26, 22, - 27, 22, 22, 22, 22, 22, 22, 22, - 46, 22, 22, 29, 22, 22, 22, 33, - 34, 35, 36, 37, 22, 22, 22, 39, - 22, 23, 22, 25, 25, 22, 26, 22, - 27, 22, 22, 22, 22, 22, 22, 22, - 47, 22, 22, 29, 30, 31, 22, 33, - 34, 35, 36, 37, 22, 22, 22, 39, - 22, 23, 22, 25, 25, 22, 26, 22, - 27, 22, 22, 22, 22, 22, 22, 22, - 22, 22, 22, 29, 30, 31, 22, 33, - 34, 35, 36, 37, 22, 22, 22, 39, - 22, 23, 24, 25, 25, 22, 26, 22, - 27, 22, 22, 22, 22, 22, 22, 22, - 28, 22, 22, 29, 30, 31, 32, 33, - 34, 35, 36, 37, 22, 22, 22, 39, - 22, 49, 49, 48, 5, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 50, 48, - 48, 48, 48, 48, 48, 14, 48, 48, - 48, 18, 48, 49, 49, 48, 5, 48, - 49, 49, 48, 5, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 14, 48, 48, 48, - 18, 48, 51, 48, 49, 49, 48, 5, - 48, 14, 48, 48, 48, 48, 48, 48, - 48, 52, 48, 48, 48, 48, 48, 48, - 14, 48, 49, 49, 48, 5, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 52, - 48, 48, 48, 48, 48, 48, 14, 48, - 49, 49, 48, 5, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 14, 48, 2, 48, - 49, 49, 48, 5, 48, 6, 48, 48, - 48, 48, 48, 48, 48, 53, 48, 48, - 54, 48, 48, 48, 14, 55, 48, 48, - 18, 48, 48, 48, 53, 48, 2, 48, - 49, 49, 48, 5, 48, 6, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 14, 48, 48, 48, - 18, 48, 2, 48, 49, 49, 48, 5, - 48, 6, 48, 48, 48, 48, 48, 48, - 48, 53, 48, 48, 48, 48, 48, 48, - 14, 55, 48, 48, 18, 48, 2, 48, - 49, 49, 48, 5, 48, 6, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 14, 55, 48, 48, - 18, 48, 2, 48, 49, 49, 48, 5, - 48, 6, 48, 48, 48, 48, 48, 48, - 48, 53, 48, 48, 48, 48, 48, 48, - 14, 55, 48, 48, 18, 48, 48, 48, - 53, 48, 56, 56, 48, 48, 48, 
48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 56, 48, 2, 3, 49, 49, 48, - 5, 48, 6, 48, 48, 48, 48, 48, - 48, 48, 8, 48, 48, 10, 11, 12, - 13, 14, 15, 16, 17, 18, 19, 48, - 48, 21, 48, 2, 48, 49, 49, 48, - 5, 48, 6, 48, 48, 48, 48, 48, - 48, 48, 8, 48, 48, 10, 11, 12, - 13, 14, 15, 16, 17, 18, 48, 48, - 48, 21, 48, 2, 48, 49, 49, 48, - 5, 48, 6, 48, 48, 48, 48, 48, - 48, 48, 57, 48, 48, 48, 48, 48, - 48, 14, 15, 16, 17, 18, 48, 48, - 48, 21, 48, 2, 48, 49, 49, 48, - 5, 48, 6, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 14, 15, 16, 17, 18, 48, 2, - 48, 49, 49, 48, 5, 48, 6, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 14, 15, 16, - 48, 18, 48, 2, 48, 49, 49, 48, - 5, 48, 6, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 14, 48, 16, 48, 18, 48, 2, - 48, 49, 49, 48, 5, 48, 6, 48, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 48, 48, 48, 14, 15, 16, - 17, 18, 57, 48, 2, 48, 49, 49, - 48, 5, 48, 6, 48, 48, 48, 48, - 48, 48, 48, 57, 48, 48, 48, 48, - 48, 48, 14, 15, 16, 17, 18, 48, - 2, 48, 49, 49, 48, 5, 48, 6, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 10, 48, 12, 48, 14, 15, - 16, 17, 18, 48, 48, 48, 21, 48, - 2, 48, 49, 49, 48, 5, 48, 6, - 48, 48, 48, 48, 48, 48, 48, 57, - 48, 48, 10, 48, 48, 48, 14, 15, - 16, 17, 18, 48, 48, 48, 21, 48, - 2, 48, 49, 49, 48, 5, 48, 6, - 48, 48, 48, 48, 48, 48, 48, 58, - 48, 48, 10, 11, 12, 48, 14, 15, - 16, 17, 18, 48, 48, 48, 21, 48, - 2, 48, 49, 49, 48, 5, 48, 6, - 48, 48, 48, 48, 48, 48, 48, 48, - 48, 48, 10, 11, 12, 48, 14, 15, - 16, 17, 18, 48, 48, 48, 21, 48, - 2, 3, 49, 49, 48, 5, 48, 6, - 48, 48, 48, 48, 48, 48, 48, 8, - 48, 48, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 48, 48, 48, 21, 48, - 23, 24, 25, 25, 22, 26, 22, 27, - 22, 22, 22, 22, 22, 22, 22, 59, - 22, 22, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 38, 22, 22, 39, 22, - 23, 60, 25, 25, 22, 26, 22, 27, - 22, 22, 22, 22, 22, 22, 22, 28, - 22, 22, 29, 30, 31, 32, 33, 34, - 35, 36, 37, 22, 22, 22, 39, 22, - 1, 1, 2, 3, 49, 49, 48, 5, - 48, 6, 1, 48, 48, 48, 48, 1, - 48, 8, 48, 48, 10, 11, 12, 13, - 14, 15, 16, 17, 18, 19, 48, 1, - 21, 48, 1, 1, 61, 61, 61, 61, - 61, 61, 61, 61, 1, 61, 61, 61, - 61, 1, 61, 61, 61, 61, 61, 61, - 61, 61, 61, 61, 61, 61, 61, 61, - 61, 1, 61, 62, 61, 0 -}; - -static const char _myanmar_syllable_machine_trans_targs[] = { - 0, 1, 26, 37, 0, 27, 33, 51, - 39, 54, 40, 46, 47, 48, 29, 42, - 43, 44, 32, 50, 55, 45, 0, 2, - 13, 0, 3, 9, 14, 15, 21, 22, - 23, 5, 17, 18, 19, 8, 25, 20, - 4, 6, 7, 10, 12, 11, 16, 24, - 0, 0, 28, 30, 31, 34, 36, 35, - 38, 41, 49, 52, 53, 0, 0 -}; - -static const char _myanmar_syllable_machine_trans_actions[] = { - 3, 0, 0, 0, 4, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, 0, - 0, 6, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 7, 8, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 9, 10 -}; - -static const char _myanmar_syllable_machine_to_state_actions[] = { - 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 -}; - -static const char _myanmar_syllable_machine_from_state_actions[] = { - 2, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 -}; - -static const short _myanmar_syllable_machine_eof_trans[] = { - 0, 23, 23, 23, 23, 23, 23, 23, - 23, 23, 23, 23, 23, 23, 23, 23, - 23, 23, 23, 23, 23, 23, 23, 23, - 23, 23, 49, 49, 49, 49, 49, 
49, - 49, 49, 49, 49, 49, 49, 49, 49, - 49, 49, 49, 49, 49, 49, 49, 49, - 49, 49, 49, 23, 23, 49, 62, 62 -}; - -static const int myanmar_syllable_machine_start = 0; -static const int myanmar_syllable_machine_first_final = 0; -static const int myanmar_syllable_machine_error = -1; - -static const int myanmar_syllable_machine_en_main = 0; - - -#line 44 "hb-ot-shape-complex-myanmar-machine.rl" - - - -#line 102 "hb-ot-shape-complex-myanmar-machine.rl" - - -#define found_syllable(syllable_type) \ - HB_STMT_START { \ - if (0) fprintf (stderr, "syllable %d..%d %s\n", ts, te, #syllable_type); \ - for (unsigned int i = ts; i < te; i++) \ - info[i].syllable() = (syllable_serial << 4) | syllable_type; \ - syllable_serial++; \ - if (unlikely (syllable_serial == 16)) syllable_serial = 1; \ - } HB_STMT_END - -static void -find_syllables_myanmar (hb_buffer_t *buffer) -{ - unsigned int p, pe, eof, ts, te, act HB_UNUSED; - int cs; - hb_glyph_info_t *info = buffer->info; - -#line 382 "hb-ot-shape-complex-myanmar-machine.hh" - { - cs = myanmar_syllable_machine_start; - ts = 0; - te = 0; - act = 0; - } - -#line 122 "hb-ot-shape-complex-myanmar-machine.rl" - - - p = 0; - pe = eof = buffer->len; - - unsigned int syllable_serial = 1; - -#line 398 "hb-ot-shape-complex-myanmar-machine.hh" - { - int _slen; - int _trans; - const unsigned char *_keys; - const char *_inds; - if ( p == pe ) - goto _test_eof; -_resume: - switch ( _myanmar_syllable_machine_from_state_actions[cs] ) { - case 2: -#line 1 "NONE" - {ts = p;} - break; -#line 412 "hb-ot-shape-complex-myanmar-machine.hh" - } - - _keys = _myanmar_syllable_machine_trans_keys + (cs<<1); - _inds = _myanmar_syllable_machine_indicies + _myanmar_syllable_machine_index_offsets[cs]; - - _slen = _myanmar_syllable_machine_key_spans[cs]; - _trans = _inds[ _slen > 0 && _keys[0] <=( info[p].myanmar_category()) && - ( info[p].myanmar_category()) <= _keys[1] ? 
- ( info[p].myanmar_category()) - _keys[0] : _slen ]; - -_eof_trans: - cs = _myanmar_syllable_machine_trans_targs[_trans]; - - if ( _myanmar_syllable_machine_trans_actions[_trans] == 0 ) - goto _again; - - switch ( _myanmar_syllable_machine_trans_actions[_trans] ) { - case 6: -#line 94 "hb-ot-shape-complex-myanmar-machine.rl" - {te = p+1;{ found_syllable (myanmar_consonant_syllable); }} - break; - case 4: -#line 95 "hb-ot-shape-complex-myanmar-machine.rl" - {te = p+1;{ found_syllable (myanmar_non_myanmar_cluster); }} - break; - case 10: -#line 96 "hb-ot-shape-complex-myanmar-machine.rl" - {te = p+1;{ found_syllable (myanmar_punctuation_cluster); }} - break; - case 8: -#line 97 "hb-ot-shape-complex-myanmar-machine.rl" - {te = p+1;{ found_syllable (myanmar_broken_cluster); }} - break; - case 3: -#line 98 "hb-ot-shape-complex-myanmar-machine.rl" - {te = p+1;{ found_syllable (myanmar_non_myanmar_cluster); }} - break; - case 5: -#line 94 "hb-ot-shape-complex-myanmar-machine.rl" - {te = p;p--;{ found_syllable (myanmar_consonant_syllable); }} - break; - case 7: -#line 97 "hb-ot-shape-complex-myanmar-machine.rl" - {te = p;p--;{ found_syllable (myanmar_broken_cluster); }} - break; - case 9: -#line 98 "hb-ot-shape-complex-myanmar-machine.rl" - {te = p;p--;{ found_syllable (myanmar_non_myanmar_cluster); }} - break; -#line 462 "hb-ot-shape-complex-myanmar-machine.hh" - } - -_again: - switch ( _myanmar_syllable_machine_to_state_actions[cs] ) { - case 1: -#line 1 "NONE" - {ts = 0;} - break; -#line 471 "hb-ot-shape-complex-myanmar-machine.hh" - } - - if ( ++p != pe ) - goto _resume; - _test_eof: {} - if ( p == eof ) - { - if ( _myanmar_syllable_machine_eof_trans[cs] > 0 ) { - _trans = _myanmar_syllable_machine_eof_trans[cs] - 1; - goto _eof_trans; - } - } - - } - -#line 130 "hb-ot-shape-complex-myanmar-machine.rl" - -} - -#undef found_syllable - -#endif /* HB_OT_SHAPE_COMPLEX_MYANMAR_MACHINE_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-myanmar.hh b/thirdparty/harfbuzz/src/hb-ot-shape-complex-myanmar.hh deleted file mode 100644 index 7fbca3878f..0000000000 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-myanmar.hh +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Copyright © 2018 Google, Inc. - * - * This is part of HarfBuzz, a text shaping library. - * - * Permission is hereby granted, without written agreement and without - * license or royalty fees, to use, copy, modify, and distribute this - * software and its documentation for any purpose, provided that the - * above copyright notice and the following two paragraphs appear in - * all copies of this software. - * - * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR - * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES - * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN - * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - * - * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, - * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS - * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO - * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
- * - * Google Author(s): Behdad Esfahbod - */ - -#ifndef HB_OT_SHAPE_COMPLEX_MYANMAR_HH -#define HB_OT_SHAPE_COMPLEX_MYANMAR_HH - -#include "hb.hh" - -#include "hb-ot-shape-complex-indic.hh" - - -/* buffer var allocations */ -#define myanmar_category() indic_category() /* myanmar_category_t */ -#define myanmar_position() indic_position() /* myanmar_position_t */ - - -/* Note: This enum is duplicated in the -machine.rl source file. - * Not sure how to avoid duplication. */ -enum myanmar_category_t { - OT_As = 18, /* Asat */ - OT_D0 = 20, /* Digit zero */ - OT_DB = OT_N, /* Dot below */ - OT_GB = OT_PLACEHOLDER, - OT_MH = 21, /* Various consonant medial types */ - OT_MR = 22, /* Various consonant medial types */ - OT_MW = 23, /* Various consonant medial types */ - OT_MY = 24, /* Various consonant medial types */ - OT_PT = 25, /* Pwo and other tones */ - //OT_VAbv = 26, - //OT_VBlw = 27, - //OT_VPre = 28, - //OT_VPst = 29, - OT_VS = 30, /* Variation selectors */ - OT_P = 31, /* Punctuation */ - OT_D = 32, /* Digits except zero */ - OT_ML = 33, /* Various consonant medial types */ -}; - -using myanmar_position_t = indic_position_t; - -static inline void -set_myanmar_properties (hb_glyph_info_t &info) -{ - hb_codepoint_t u = info.codepoint; - unsigned int type = hb_indic_get_categories (u); - unsigned int cat = type & 0xFFu; - myanmar_position_t pos = (myanmar_position_t) (type >> 8); - - /* Myanmar - * https://docs.microsoft.com/en-us/typography/script-development/myanmar#analyze - */ - if (unlikely (hb_in_range<hb_codepoint_t> (u, 0xFE00u, 0xFE0Fu))) - cat = OT_VS; - - switch (u) - { - case 0x104Eu: - cat = OT_C; /* The spec says C, IndicSyllableCategory doesn't have. */ - break; - - case 0x002Du: case 0x00A0u: case 0x00D7u: case 0x2012u: - case 0x2013u: case 0x2014u: case 0x2015u: case 0x2022u: - case 0x25CCu: case 0x25FBu: case 0x25FCu: case 0x25FDu: - case 0x25FEu: - cat = OT_GB; - break; - - case 0x1004u: case 0x101Bu: case 0x105Au: - cat = OT_Ra; - break; - - case 0x1032u: case 0x1036u: - cat = OT_A; - break; - - case 0x1039u: - cat = OT_H; - break; - - case 0x103Au: - cat = OT_As; - break; - - case 0x1041u: case 0x1042u: case 0x1043u: case 0x1044u: - case 0x1045u: case 0x1046u: case 0x1047u: case 0x1048u: - case 0x1049u: case 0x1090u: case 0x1091u: case 0x1092u: - case 0x1093u: case 0x1094u: case 0x1095u: case 0x1096u: - case 0x1097u: case 0x1098u: case 0x1099u: - cat = OT_D; - break; - - case 0x1040u: - cat = OT_D; /* XXX The spec says D0, but Uniscribe doesn't seem to do. 
*/ - break; - - case 0x103Eu: - cat = OT_MH; - break; - - case 0x1060u: - cat = OT_ML; - break; - - case 0x103Cu: - cat = OT_MR; - break; - - case 0x103Du: case 0x1082u: - cat = OT_MW; - break; - - case 0x103Bu: case 0x105Eu: case 0x105Fu: - cat = OT_MY; - break; - - case 0x1063u: case 0x1064u: case 0x1069u: case 0x106Au: - case 0x106Bu: case 0x106Cu: case 0x106Du: case 0xAA7Bu: - cat = OT_PT; - break; - - case 0x1038u: case 0x1087u: case 0x1088u: case 0x1089u: - case 0x108Au: case 0x108Bu: case 0x108Cu: case 0x108Du: - case 0x108Fu: case 0x109Au: case 0x109Bu: case 0x109Cu: - cat = OT_SM; - break; - - case 0x104Au: case 0x104Bu: - cat = OT_P; - break; - - case 0xAA74u: case 0xAA75u: case 0xAA76u: - /* https://github.com/harfbuzz/harfbuzz/issues/218 */ - cat = OT_C; - break; - } - - if (cat == OT_M) - { - switch ((int) pos) - { - case POS_PRE_C: cat = (myanmar_category_t) OT_VPre; - pos = POS_PRE_M; break; - case POS_ABOVE_C: cat = (myanmar_category_t) OT_VAbv; break; - case POS_BELOW_C: cat = (myanmar_category_t) OT_VBlw; break; - case POS_POST_C: cat = (myanmar_category_t) OT_VPst; break; - } - } - - info.myanmar_category() = cat; - info.myanmar_position() = pos; -} - - -#endif /* HB_OT_SHAPE_COMPLEX_MYANMAR_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-use-machine.hh b/thirdparty/harfbuzz/src/hb-ot-shape-complex-use-machine.hh deleted file mode 100644 index 468bd95c3f..0000000000 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-use-machine.hh +++ /dev/null @@ -1,849 +0,0 @@ - -#line 1 "hb-ot-shape-complex-use-machine.rl" -/* - * Copyright © 2015 Mozilla Foundation. - * Copyright © 2015 Google, Inc. - * - * This is part of HarfBuzz, a text shaping library. - * - * Permission is hereby granted, without written agreement and without - * license or royalty fees, to use, copy, modify, and distribute this - * software and its documentation for any purpose, provided that the - * above copyright notice and the following two paragraphs appear in - * all copies of this software. - * - * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR - * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES - * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN - * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH - * DAMAGE. - * - * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, - * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND - * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS - * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO - * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
- * - * Mozilla Author(s): Jonathan Kew - * Google Author(s): Behdad Esfahbod - */ - -#ifndef HB_OT_SHAPE_COMPLEX_USE_MACHINE_HH -#define HB_OT_SHAPE_COMPLEX_USE_MACHINE_HH - -#include "hb.hh" - -#include "hb-ot-shape-complex-syllabic.hh" - -/* buffer var allocations */ -#define use_category() complex_var_u8_category() - -#define USE(Cat) use_syllable_machine_ex_##Cat - -enum use_syllable_type_t { - use_virama_terminated_cluster, - use_sakot_terminated_cluster, - use_standard_cluster, - use_number_joiner_terminated_cluster, - use_numeral_cluster, - use_symbol_cluster, - use_hieroglyph_cluster, - use_broken_cluster, - use_non_cluster, -}; - - -#line 57 "hb-ot-shape-complex-use-machine.hh" -#define use_syllable_machine_ex_B 1u -#define use_syllable_machine_ex_CGJ 6u -#define use_syllable_machine_ex_CMAbv 31u -#define use_syllable_machine_ex_CMBlw 32u -#define use_syllable_machine_ex_CS 43u -#define use_syllable_machine_ex_FAbv 24u -#define use_syllable_machine_ex_FBlw 25u -#define use_syllable_machine_ex_FMAbv 45u -#define use_syllable_machine_ex_FMBlw 46u -#define use_syllable_machine_ex_FMPst 47u -#define use_syllable_machine_ex_FPst 26u -#define use_syllable_machine_ex_G 49u -#define use_syllable_machine_ex_GB 5u -#define use_syllable_machine_ex_H 12u -#define use_syllable_machine_ex_HN 13u -#define use_syllable_machine_ex_IS 44u -#define use_syllable_machine_ex_J 50u -#define use_syllable_machine_ex_MAbv 27u -#define use_syllable_machine_ex_MBlw 28u -#define use_syllable_machine_ex_MPre 30u -#define use_syllable_machine_ex_MPst 29u -#define use_syllable_machine_ex_N 4u -#define use_syllable_machine_ex_O 0u -#define use_syllable_machine_ex_R 18u -#define use_syllable_machine_ex_SB 51u -#define use_syllable_machine_ex_SE 52u -#define use_syllable_machine_ex_SMAbv 41u -#define use_syllable_machine_ex_SMBlw 42u -#define use_syllable_machine_ex_SUB 11u -#define use_syllable_machine_ex_Sk 48u -#define use_syllable_machine_ex_VAbv 33u -#define use_syllable_machine_ex_VBlw 34u -#define use_syllable_machine_ex_VMAbv 37u -#define use_syllable_machine_ex_VMBlw 38u -#define use_syllable_machine_ex_VMPre 23u -#define use_syllable_machine_ex_VMPst 39u -#define use_syllable_machine_ex_VPre 22u -#define use_syllable_machine_ex_VPst 35u -#define use_syllable_machine_ex_WJ 16u -#define use_syllable_machine_ex_ZWNJ 14u - - -#line 100 "hb-ot-shape-complex-use-machine.hh" -static const unsigned char _use_syllable_machine_trans_keys[] = { - 0u, 51u, 11u, 48u, 11u, 48u, 1u, 48u, 23u, 48u, 24u, 47u, 25u, 47u, 26u, 47u, - 45u, 46u, 46u, 46u, 24u, 48u, 24u, 48u, 24u, 48u, 1u, 1u, 24u, 48u, 22u, 48u, - 23u, 48u, 23u, 48u, 23u, 48u, 12u, 48u, 12u, 48u, 12u, 48u, 12u, 48u, 11u, 48u, - 1u, 1u, 11u, 48u, 41u, 42u, 42u, 42u, 11u, 48u, 11u, 48u, 1u, 48u, 23u, 48u, - 24u, 47u, 25u, 47u, 26u, 47u, 45u, 46u, 46u, 46u, 24u, 48u, 24u, 48u, 24u, 48u, - 1u, 1u, 24u, 48u, 22u, 48u, 23u, 48u, 23u, 48u, 23u, 48u, 12u, 48u, 12u, 48u, - 12u, 48u, 12u, 48u, 11u, 48u, 1u, 1u, 13u, 13u, 4u, 4u, 11u, 48u, 11u, 48u, - 1u, 48u, 23u, 48u, 24u, 47u, 25u, 47u, 26u, 47u, 45u, 46u, 46u, 46u, 24u, 48u, - 24u, 48u, 24u, 48u, 1u, 1u, 24u, 48u, 22u, 48u, 23u, 48u, 23u, 48u, 23u, 48u, - 12u, 48u, 12u, 48u, 12u, 48u, 12u, 48u, 11u, 48u, 1u, 1u, 11u, 48u, 11u, 48u, - 1u, 48u, 23u, 48u, 24u, 47u, 25u, 47u, 26u, 47u, 45u, 46u, 46u, 46u, 24u, 48u, - 24u, 48u, 24u, 48u, 1u, 1u, 24u, 48u, 22u, 48u, 23u, 48u, 23u, 48u, 23u, 48u, - 12u, 48u, 12u, 48u, 12u, 48u, 12u, 48u, 11u, 48u, 1u, 1u, 4u, 4u, 13u, 13u, - 1u, 48u, 11u, 48u, 41u, 42u, 42u, 42u, 1u, 5u, 
50u, 52u, 49u, 52u, 49u, 51u, - 0 -}; - -static const char _use_syllable_machine_key_spans[] = { - 52, 38, 38, 48, 26, 24, 23, 22, - 2, 1, 25, 25, 25, 1, 25, 27, - 26, 26, 26, 37, 37, 37, 37, 38, - 1, 38, 2, 1, 38, 38, 48, 26, - 24, 23, 22, 2, 1, 25, 25, 25, - 1, 25, 27, 26, 26, 26, 37, 37, - 37, 37, 38, 1, 1, 1, 38, 38, - 48, 26, 24, 23, 22, 2, 1, 25, - 25, 25, 1, 25, 27, 26, 26, 26, - 37, 37, 37, 37, 38, 1, 38, 38, - 48, 26, 24, 23, 22, 2, 1, 25, - 25, 25, 1, 25, 27, 26, 26, 26, - 37, 37, 37, 37, 38, 1, 1, 1, - 48, 38, 2, 1, 5, 3, 4, 3 -}; - -static const short _use_syllable_machine_index_offsets[] = { - 0, 53, 92, 131, 180, 207, 232, 256, - 279, 282, 284, 310, 336, 362, 364, 390, - 418, 445, 472, 499, 537, 575, 613, 651, - 690, 692, 731, 734, 736, 775, 814, 863, - 890, 915, 939, 962, 965, 967, 993, 1019, - 1045, 1047, 1073, 1101, 1128, 1155, 1182, 1220, - 1258, 1296, 1334, 1373, 1375, 1377, 1379, 1418, - 1457, 1506, 1533, 1558, 1582, 1605, 1608, 1610, - 1636, 1662, 1688, 1690, 1716, 1744, 1771, 1798, - 1825, 1863, 1901, 1939, 1977, 2016, 2018, 2057, - 2096, 2145, 2172, 2197, 2221, 2244, 2247, 2249, - 2275, 2301, 2327, 2329, 2355, 2383, 2410, 2437, - 2464, 2502, 2540, 2578, 2616, 2655, 2657, 2659, - 2661, 2710, 2749, 2752, 2754, 2760, 2764, 2769 -}; - -static const char _use_syllable_machine_indicies[] = { - 0, 1, 2, 2, 3, 4, 2, 2, - 2, 2, 2, 5, 6, 7, 2, 2, - 2, 2, 8, 2, 2, 2, 9, 10, - 11, 12, 13, 14, 15, 16, 17, 18, - 19, 20, 21, 22, 2, 23, 24, 25, - 2, 26, 27, 28, 29, 30, 31, 32, - 29, 33, 2, 34, 2, 36, 37, 35, - 35, 35, 35, 35, 35, 35, 35, 35, - 38, 39, 40, 41, 42, 43, 44, 45, - 46, 47, 48, 49, 50, 51, 35, 52, - 53, 54, 35, 55, 56, 35, 57, 58, - 59, 60, 57, 35, 36, 37, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 38, - 39, 40, 41, 42, 43, 44, 45, 46, - 48, 48, 49, 50, 51, 35, 52, 53, - 54, 35, 35, 35, 35, 57, 58, 59, - 60, 57, 35, 36, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, - 35, 39, 40, 41, 42, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 52, - 53, 54, 35, 35, 35, 35, 35, 58, - 59, 60, 61, 35, 39, 40, 41, 42, - 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 52, 53, 54, 35, 35, 35, - 35, 35, 58, 59, 60, 61, 35, 40, - 41, 42, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 58, 59, 60, 35, - 41, 42, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 58, 59, 60, 35, - 42, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 58, 59, 60, 35, 58, - 59, 35, 59, 35, 40, 41, 42, 35, - 35, 35, 35, 35, 35, 35, 35, 35, - 35, 52, 53, 54, 35, 35, 35, 35, - 35, 58, 59, 60, 61, 35, 40, 41, - 42, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 53, 54, 35, 35, - 35, 35, 35, 58, 59, 60, 61, 35, - 40, 41, 42, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 54, - 35, 35, 35, 35, 35, 58, 59, 60, - 61, 35, 62, 35, 40, 41, 42, 35, - 35, 35, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 35, - 35, 58, 59, 60, 61, 35, 38, 39, - 40, 41, 42, 35, 35, 35, 35, 35, - 35, 49, 50, 51, 35, 52, 53, 54, - 35, 35, 35, 35, 35, 58, 59, 60, - 61, 35, 39, 40, 41, 42, 35, 35, - 35, 35, 35, 35, 49, 50, 51, 35, - 52, 53, 54, 35, 35, 35, 35, 35, - 58, 59, 60, 61, 35, 39, 40, 41, - 42, 35, 35, 35, 35, 35, 35, 35, - 50, 51, 35, 52, 53, 54, 35, 35, - 35, 35, 35, 58, 59, 60, 61, 35, - 39, 40, 41, 42, 35, 35, 35, 35, - 35, 35, 35, 35, 51, 35, 52, 53, - 54, 35, 35, 35, 35, 35, 58, 59, - 60, 61, 35, 39, 35, 35, 35, 35, - 35, 35, 35, 35, 35, 38, 39, 40, - 41, 42, 35, 44, 45, 35, 35, 35, - 49, 50, 51, 35, 52, 53, 54, 35, - 35, 
35, 35, 35, 58, 59, 60, 61, - 35, 39, 35, 35, 35, 35, 35, 35, - 35, 35, 35, 38, 39, 40, 41, 42, - 35, 35, 45, 35, 35, 35, 49, 50, - 51, 35, 52, 53, 54, 35, 35, 35, - 35, 35, 58, 59, 60, 61, 35, 39, - 35, 35, 35, 35, 35, 35, 35, 35, - 35, 38, 39, 40, 41, 42, 35, 35, - 35, 35, 35, 35, 49, 50, 51, 35, - 52, 53, 54, 35, 35, 35, 35, 35, - 58, 59, 60, 61, 35, 39, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 38, - 39, 40, 41, 42, 43, 44, 45, 35, - 35, 35, 49, 50, 51, 35, 52, 53, - 54, 35, 35, 35, 35, 35, 58, 59, - 60, 61, 35, 36, 37, 35, 35, 35, - 35, 35, 35, 35, 35, 35, 38, 39, - 40, 41, 42, 43, 44, 45, 46, 35, - 48, 49, 50, 51, 35, 52, 53, 54, - 35, 35, 35, 35, 57, 58, 59, 60, - 57, 35, 36, 35, 36, 37, 35, 35, - 35, 35, 35, 35, 35, 35, 35, 38, - 39, 40, 41, 42, 43, 44, 45, 46, - 47, 48, 49, 50, 51, 35, 52, 53, - 54, 35, 35, 35, 35, 57, 58, 59, - 60, 57, 35, 55, 56, 35, 56, 35, - 64, 65, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 66, 67, 68, 69, 70, - 71, 72, 73, 74, 1, 75, 76, 77, - 78, 63, 79, 80, 81, 63, 63, 63, - 63, 82, 83, 84, 85, 86, 63, 64, - 65, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 75, 76, 77, 78, - 63, 79, 80, 81, 63, 63, 63, 63, - 82, 83, 84, 85, 86, 63, 64, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 67, 68, 69, 70, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 79, 80, 81, 63, 63, 63, - 63, 63, 83, 84, 85, 87, 63, 67, - 68, 69, 70, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 79, 80, 81, - 63, 63, 63, 63, 63, 83, 84, 85, - 87, 63, 68, 69, 70, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 83, - 84, 85, 63, 69, 70, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 83, - 84, 85, 63, 70, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 83, 84, - 85, 63, 83, 84, 63, 84, 63, 68, - 69, 70, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 79, 80, 81, 63, - 63, 63, 63, 63, 83, 84, 85, 87, - 63, 68, 69, 70, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 80, - 81, 63, 63, 63, 63, 63, 83, 84, - 85, 87, 63, 68, 69, 70, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 81, 63, 63, 63, 63, 63, - 83, 84, 85, 87, 63, 89, 88, 68, - 69, 70, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 83, 84, 85, 87, - 63, 66, 67, 68, 69, 70, 63, 63, - 63, 63, 63, 63, 76, 77, 78, 63, - 79, 80, 81, 63, 63, 63, 63, 63, - 83, 84, 85, 87, 63, 67, 68, 69, - 70, 63, 63, 63, 63, 63, 63, 76, - 77, 78, 63, 79, 80, 81, 63, 63, - 63, 63, 63, 83, 84, 85, 87, 63, - 67, 68, 69, 70, 63, 63, 63, 63, - 63, 63, 63, 77, 78, 63, 79, 80, - 81, 63, 63, 63, 63, 63, 83, 84, - 85, 87, 63, 67, 68, 69, 70, 63, - 63, 63, 63, 63, 63, 63, 63, 78, - 63, 79, 80, 81, 63, 63, 63, 63, - 63, 83, 84, 85, 87, 63, 67, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 66, 67, 68, 69, 70, 63, 72, 73, - 63, 63, 63, 76, 77, 78, 63, 79, - 80, 81, 63, 63, 63, 63, 63, 83, - 84, 85, 87, 63, 67, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 66, 67, - 68, 69, 70, 63, 63, 73, 63, 63, - 63, 76, 77, 78, 63, 79, 80, 81, - 63, 63, 63, 63, 63, 83, 84, 85, - 87, 63, 67, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 66, 67, 68, 69, - 70, 63, 63, 63, 63, 63, 63, 76, - 77, 78, 63, 79, 80, 81, 63, 63, - 63, 63, 63, 83, 84, 85, 87, 63, - 67, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 66, 67, 68, 69, 70, 71, - 72, 73, 63, 63, 63, 76, 77, 78, - 63, 79, 80, 81, 63, 63, 63, 63, - 63, 83, 84, 85, 87, 63, 64, 65, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 66, 67, 68, 69, 70, 71, 72, - 73, 74, 63, 75, 76, 77, 78, 63, - 79, 80, 81, 63, 63, 63, 
63, 82, - 83, 84, 85, 86, 63, 64, 90, 92, - 91, 3, 93, 94, 95, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 96, 97, - 98, 99, 100, 101, 102, 103, 104, 105, - 106, 107, 108, 109, 63, 110, 111, 112, - 63, 55, 56, 63, 113, 114, 115, 85, - 116, 63, 94, 95, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 96, 97, 98, - 99, 100, 101, 102, 103, 104, 106, 106, - 107, 108, 109, 63, 110, 111, 112, 63, - 63, 63, 63, 113, 114, 115, 85, 116, - 63, 94, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 97, - 98, 99, 100, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 110, 111, 112, - 63, 63, 63, 63, 63, 114, 115, 85, - 117, 63, 97, 98, 99, 100, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 110, 111, 112, 63, 63, 63, 63, 63, - 114, 115, 85, 117, 63, 98, 99, 100, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 114, 115, 85, 63, 99, 100, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 114, 115, 85, 63, 100, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 114, 115, 85, 63, 114, 115, 63, - 115, 63, 98, 99, 100, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 110, - 111, 112, 63, 63, 63, 63, 63, 114, - 115, 85, 117, 63, 98, 99, 100, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 111, 112, 63, 63, 63, 63, - 63, 114, 115, 85, 117, 63, 98, 99, - 100, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 112, 63, 63, - 63, 63, 63, 114, 115, 85, 117, 63, - 118, 88, 98, 99, 100, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 114, - 115, 85, 117, 63, 96, 97, 98, 99, - 100, 63, 63, 63, 63, 63, 63, 107, - 108, 109, 63, 110, 111, 112, 63, 63, - 63, 63, 63, 114, 115, 85, 117, 63, - 97, 98, 99, 100, 63, 63, 63, 63, - 63, 63, 107, 108, 109, 63, 110, 111, - 112, 63, 63, 63, 63, 63, 114, 115, - 85, 117, 63, 97, 98, 99, 100, 63, - 63, 63, 63, 63, 63, 63, 108, 109, - 63, 110, 111, 112, 63, 63, 63, 63, - 63, 114, 115, 85, 117, 63, 97, 98, - 99, 100, 63, 63, 63, 63, 63, 63, - 63, 63, 109, 63, 110, 111, 112, 63, - 63, 63, 63, 63, 114, 115, 85, 117, - 63, 97, 63, 63, 63, 63, 63, 63, - 63, 63, 63, 96, 97, 98, 99, 100, - 63, 102, 103, 63, 63, 63, 107, 108, - 109, 63, 110, 111, 112, 63, 63, 63, - 63, 63, 114, 115, 85, 117, 63, 97, - 63, 63, 63, 63, 63, 63, 63, 63, - 63, 96, 97, 98, 99, 100, 63, 63, - 103, 63, 63, 63, 107, 108, 109, 63, - 110, 111, 112, 63, 63, 63, 63, 63, - 114, 115, 85, 117, 63, 97, 63, 63, - 63, 63, 63, 63, 63, 63, 63, 96, - 97, 98, 99, 100, 63, 63, 63, 63, - 63, 63, 107, 108, 109, 63, 110, 111, - 112, 63, 63, 63, 63, 63, 114, 115, - 85, 117, 63, 97, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 96, 97, 98, - 99, 100, 101, 102, 103, 63, 63, 63, - 107, 108, 109, 63, 110, 111, 112, 63, - 63, 63, 63, 63, 114, 115, 85, 117, - 63, 94, 95, 63, 63, 63, 63, 63, - 63, 63, 63, 63, 96, 97, 98, 99, - 100, 101, 102, 103, 104, 63, 106, 107, - 108, 109, 63, 110, 111, 112, 63, 63, - 63, 63, 113, 114, 115, 85, 116, 63, - 94, 90, 94, 95, 63, 63, 63, 63, - 63, 63, 63, 63, 63, 96, 97, 98, - 99, 100, 101, 102, 103, 104, 105, 106, - 107, 108, 109, 63, 110, 111, 112, 63, - 63, 63, 63, 113, 114, 115, 85, 116, - 63, 5, 6, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 9, 10, 11, 12, - 13, 14, 15, 16, 17, 19, 19, 20, - 21, 22, 119, 23, 24, 25, 119, 119, - 119, 119, 29, 30, 31, 32, 29, 119, - 5, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 10, 11, - 12, 13, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 23, 24, 25, 119, - 119, 119, 119, 119, 30, 31, 32, 120, - 
119, 10, 11, 12, 13, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 119, 23, - 24, 25, 119, 119, 119, 119, 119, 30, - 31, 32, 120, 119, 11, 12, 13, 119, - 119, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 119, 119, - 119, 30, 31, 32, 119, 12, 13, 119, - 119, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 119, 119, - 119, 30, 31, 32, 119, 13, 119, 119, - 119, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 119, 119, - 30, 31, 32, 119, 30, 31, 119, 31, - 119, 11, 12, 13, 119, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 23, 24, - 25, 119, 119, 119, 119, 119, 30, 31, - 32, 120, 119, 11, 12, 13, 119, 119, - 119, 119, 119, 119, 119, 119, 119, 119, - 119, 24, 25, 119, 119, 119, 119, 119, - 30, 31, 32, 120, 119, 11, 12, 13, - 119, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 25, 119, 119, 119, - 119, 119, 30, 31, 32, 120, 119, 121, - 119, 11, 12, 13, 119, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 30, 31, - 32, 120, 119, 9, 10, 11, 12, 13, - 119, 119, 119, 119, 119, 119, 20, 21, - 22, 119, 23, 24, 25, 119, 119, 119, - 119, 119, 30, 31, 32, 120, 119, 10, - 11, 12, 13, 119, 119, 119, 119, 119, - 119, 20, 21, 22, 119, 23, 24, 25, - 119, 119, 119, 119, 119, 30, 31, 32, - 120, 119, 10, 11, 12, 13, 119, 119, - 119, 119, 119, 119, 119, 21, 22, 119, - 23, 24, 25, 119, 119, 119, 119, 119, - 30, 31, 32, 120, 119, 10, 11, 12, - 13, 119, 119, 119, 119, 119, 119, 119, - 119, 22, 119, 23, 24, 25, 119, 119, - 119, 119, 119, 30, 31, 32, 120, 119, - 10, 119, 119, 119, 119, 119, 119, 119, - 119, 119, 9, 10, 11, 12, 13, 119, - 15, 16, 119, 119, 119, 20, 21, 22, - 119, 23, 24, 25, 119, 119, 119, 119, - 119, 30, 31, 32, 120, 119, 10, 119, - 119, 119, 119, 119, 119, 119, 119, 119, - 9, 10, 11, 12, 13, 119, 119, 16, - 119, 119, 119, 20, 21, 22, 119, 23, - 24, 25, 119, 119, 119, 119, 119, 30, - 31, 32, 120, 119, 10, 119, 119, 119, - 119, 119, 119, 119, 119, 119, 9, 10, - 11, 12, 13, 119, 119, 119, 119, 119, - 119, 20, 21, 22, 119, 23, 24, 25, - 119, 119, 119, 119, 119, 30, 31, 32, - 120, 119, 10, 119, 119, 119, 119, 119, - 119, 119, 119, 119, 9, 10, 11, 12, - 13, 14, 15, 16, 119, 119, 119, 20, - 21, 22, 119, 23, 24, 25, 119, 119, - 119, 119, 119, 30, 31, 32, 120, 119, - 5, 6, 119, 119, 119, 119, 119, 119, - 119, 119, 119, 9, 10, 11, 12, 13, - 14, 15, 16, 17, 119, 19, 20, 21, - 22, 119, 23, 24, 25, 119, 119, 119, - 119, 29, 30, 31, 32, 29, 119, 5, - 119, 122, 119, 7, 119, 1, 119, 119, - 119, 1, 119, 119, 119, 119, 119, 5, - 6, 7, 119, 119, 119, 119, 119, 119, - 119, 119, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, - 119, 23, 24, 25, 119, 26, 27, 119, - 29, 30, 31, 32, 29, 119, 5, 6, - 119, 119, 119, 119, 119, 119, 119, 119, - 119, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 119, - 23, 24, 25, 119, 119, 119, 119, 29, - 30, 31, 32, 29, 119, 26, 27, 119, - 27, 119, 1, 123, 123, 123, 1, 123, - 125, 124, 33, 124, 33, 125, 124, 125, - 124, 33, 124, 34, 124, 0 -}; - -static const char _use_syllable_machine_trans_targs[] = { - 1, 28, 0, 52, 54, 79, 80, 102, - 104, 92, 81, 82, 83, 84, 96, 97, - 98, 99, 105, 100, 93, 94, 95, 87, - 88, 89, 106, 107, 108, 101, 85, 86, - 0, 109, 111, 0, 2, 3, 15, 4, - 5, 6, 7, 19, 20, 21, 22, 25, - 23, 16, 17, 18, 10, 11, 12, 26, - 27, 24, 8, 9, 0, 13, 14, 0, - 29, 30, 42, 31, 32, 33, 34, 46, - 47, 48, 49, 50, 43, 44, 45, 37, - 38, 39, 51, 35, 36, 0, 51, 40, - 0, 41, 0, 0, 53, 0, 55, 56, - 68, 57, 58, 59, 60, 72, 73, 74, - 75, 78, 76, 69, 70, 71, 63, 64, - 
65, 77, 61, 62, 77, 66, 67, 0, - 90, 91, 103, 0, 0, 110 -}; - -static const char _use_syllable_machine_trans_actions[] = { - 0, 0, 3, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 4, 0, 0, 5, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 6, 0, 0, 7, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 8, 0, 0, 9, 10, 0, - 11, 0, 12, 13, 0, 14, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 8, 0, 0, 10, 0, 0, 15, - 0, 0, 0, 16, 17, 0 -}; - -static const char _use_syllable_machine_to_state_actions[] = { - 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 -}; - -static const char _use_syllable_machine_from_state_actions[] = { - 2, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0 -}; - -static const short _use_syllable_machine_eof_trans[] = { - 0, 36, 36, 36, 36, 36, 36, 36, - 36, 36, 36, 36, 36, 36, 36, 36, - 36, 36, 36, 36, 36, 36, 36, 36, - 36, 36, 36, 36, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 64, 64, 64, - 89, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 91, 92, 94, 64, 64, - 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 89, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 91, 64, 120, - 120, 120, 120, 120, 120, 120, 120, 120, - 120, 120, 120, 120, 120, 120, 120, 120, - 120, 120, 120, 120, 120, 120, 120, 120, - 120, 120, 120, 120, 124, 125, 125, 125 -}; - -static const int use_syllable_machine_start = 0; -static const int use_syllable_machine_first_final = 0; -static const int use_syllable_machine_error = -1; - -static const int use_syllable_machine_en_main = 0; - - -#line 58 "hb-ot-shape-complex-use-machine.rl" - - - -#line 181 "hb-ot-shape-complex-use-machine.rl" - - -#define found_syllable(syllable_type) \ - HB_STMT_START { \ - if (0) fprintf (stderr, "syllable %d..%d %s\n", (*ts).second.first, (*te).second.first, #syllable_type); \ - for (unsigned i = (*ts).second.first; i < (*te).second.first; ++i) \ - info[i].syllable() = (syllable_serial << 4) | syllable_type; \ - syllable_serial++; \ - if (unlikely (syllable_serial == 16)) syllable_serial = 1; \ - } HB_STMT_END - - -template <typename Iter> -struct machine_index_t : - hb_iter_with_fallback_t<machine_index_t<Iter>, - typename Iter::item_t> -{ - machine_index_t (const Iter& it) : it (it) {} - machine_index_t (const machine_index_t& o) : hb_iter_with_fallback_t<machine_index_t<Iter>, - typename Iter::item_t> (), - it (o.it), is_null (o.is_null) {} - - static constexpr bool is_random_access_iterator = Iter::is_random_access_iterator; - static constexpr bool is_sorted_iterator = Iter::is_sorted_iterator; - - typename Iter::item_t __item__ () const { return *it; } - typename Iter::item_t __item_at__ (unsigned i) const { return it[i]; } - unsigned __len__ () const { return it.len (); } - void __next__ () { ++it; } - void __forward__ (unsigned n) { it += n; } - void __prev__ () { --it; } - void __rewind__ (unsigned n) { it -= n; } - - void 
operator = (unsigned n) - { - assert (n == 0); - is_null = true; - } - explicit operator bool () { return !is_null; } - - void operator = (const machine_index_t& o) - { - is_null = o.is_null; - unsigned index = (*it).first; - unsigned n = (*o.it).first; - if (index < n) it += n - index; else if (index > n) it -= index - n; - } - bool operator == (const machine_index_t& o) const - { return is_null ? o.is_null : !o.is_null && (*it).first == (*o.it).first; } - bool operator != (const machine_index_t& o) const { return !(*this == o); } - - private: - Iter it; - bool is_null = false; -}; -struct -{ - template <typename Iter, - hb_requires (hb_is_iterable (Iter))> - machine_index_t<hb_iter_type<Iter>> - operator () (Iter&& it) const - { return machine_index_t<hb_iter_type<Iter>> (hb_iter (it)); } -} -HB_FUNCOBJ (machine_index); - - - -static bool -not_ccs_default_ignorable (const hb_glyph_info_t &i) -{ return i.use_category() != USE(CGJ); } - -static inline void -find_syllables_use (hb_buffer_t *buffer) -{ - hb_glyph_info_t *info = buffer->info; - auto p = - + hb_iter (info, buffer->len) - | hb_enumerate - | hb_filter ([] (const hb_glyph_info_t &i) { return not_ccs_default_ignorable (i); }, - hb_second) - | hb_filter ([&] (const hb_pair_t<unsigned, const hb_glyph_info_t &> p) - { - if (p.second.use_category() == USE(ZWNJ)) - for (unsigned i = p.first + 1; i < buffer->len; ++i) - if (not_ccs_default_ignorable (info[i])) - return !_hb_glyph_info_is_unicode_mark (&info[i]); - return true; - }) - | hb_enumerate - | machine_index - ; - auto pe = p + p.len (); - auto eof = +pe; - auto ts = +p; - auto te = +p; - unsigned int act HB_UNUSED; - int cs; - -#line 702 "hb-ot-shape-complex-use-machine.hh" - { - cs = use_syllable_machine_start; - ts = 0; - te = 0; - act = 0; - } - -#line 281 "hb-ot-shape-complex-use-machine.rl" - - - unsigned int syllable_serial = 1; - -#line 715 "hb-ot-shape-complex-use-machine.hh" - { - int _slen; - int _trans; - const unsigned char *_keys; - const char *_inds; - if ( p == pe ) - goto _test_eof; -_resume: - switch ( _use_syllable_machine_from_state_actions[cs] ) { - case 2: -#line 1 "NONE" - {ts = p;} - break; -#line 729 "hb-ot-shape-complex-use-machine.hh" - } - - _keys = _use_syllable_machine_trans_keys + (cs<<1); - _inds = _use_syllable_machine_indicies + _use_syllable_machine_index_offsets[cs]; - - _slen = _use_syllable_machine_key_spans[cs]; - _trans = _inds[ _slen > 0 && _keys[0] <=( (*p).second.second.use_category()) && - ( (*p).second.second.use_category()) <= _keys[1] ? 
- ( (*p).second.second.use_category()) - _keys[0] : _slen ]; - -_eof_trans: - cs = _use_syllable_machine_trans_targs[_trans]; - - if ( _use_syllable_machine_trans_actions[_trans] == 0 ) - goto _again; - - switch ( _use_syllable_machine_trans_actions[_trans] ) { - case 9: -#line 171 "hb-ot-shape-complex-use-machine.rl" - {te = p+1;{ found_syllable (use_standard_cluster); }} - break; - case 6: -#line 174 "hb-ot-shape-complex-use-machine.rl" - {te = p+1;{ found_syllable (use_symbol_cluster); }} - break; - case 4: -#line 176 "hb-ot-shape-complex-use-machine.rl" - {te = p+1;{ found_syllable (use_broken_cluster); }} - break; - case 3: -#line 177 "hb-ot-shape-complex-use-machine.rl" - {te = p+1;{ found_syllable (use_non_cluster); }} - break; - case 11: -#line 170 "hb-ot-shape-complex-use-machine.rl" - {te = p;p--;{ found_syllable (use_sakot_terminated_cluster); }} - break; - case 7: -#line 171 "hb-ot-shape-complex-use-machine.rl" - {te = p;p--;{ found_syllable (use_standard_cluster); }} - break; - case 14: -#line 172 "hb-ot-shape-complex-use-machine.rl" - {te = p;p--;{ found_syllable (use_number_joiner_terminated_cluster); }} - break; - case 13: -#line 173 "hb-ot-shape-complex-use-machine.rl" - {te = p;p--;{ found_syllable (use_numeral_cluster); }} - break; - case 5: -#line 174 "hb-ot-shape-complex-use-machine.rl" - {te = p;p--;{ found_syllable (use_symbol_cluster); }} - break; - case 17: -#line 175 "hb-ot-shape-complex-use-machine.rl" - {te = p;p--;{ found_syllable (use_hieroglyph_cluster); }} - break; - case 15: -#line 176 "hb-ot-shape-complex-use-machine.rl" - {te = p;p--;{ found_syllable (use_broken_cluster); }} - break; - case 16: -#line 177 "hb-ot-shape-complex-use-machine.rl" - {te = p;p--;{ found_syllable (use_non_cluster); }} - break; - case 12: -#line 1 "NONE" - { switch( act ) { - case 1: - {{p = ((te))-1;} found_syllable (use_virama_terminated_cluster); } - break; - case 2: - {{p = ((te))-1;} found_syllable (use_sakot_terminated_cluster); } - break; - } - } - break; - case 8: -#line 1 "NONE" - {te = p+1;} -#line 169 "hb-ot-shape-complex-use-machine.rl" - {act = 1;} - break; - case 10: -#line 1 "NONE" - {te = p+1;} -#line 170 "hb-ot-shape-complex-use-machine.rl" - {act = 2;} - break; -#line 819 "hb-ot-shape-complex-use-machine.hh" - } - -_again: - switch ( _use_syllable_machine_to_state_actions[cs] ) { - case 1: -#line 1 "NONE" - {ts = 0;} - break; -#line 828 "hb-ot-shape-complex-use-machine.hh" - } - - if ( ++p != pe ) - goto _resume; - _test_eof: {} - if ( p == eof ) - { - if ( _use_syllable_machine_eof_trans[cs] > 0 ) { - _trans = _use_syllable_machine_eof_trans[cs] - 1; - goto _eof_trans; - } - } - - } - -#line 286 "hb-ot-shape-complex-use-machine.rl" - -} - -#undef found_syllable - -#endif /* HB_OT_SHAPE_COMPLEX_USE_MACHINE_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-normalize.cc b/thirdparty/harfbuzz/src/hb-ot-shape-normalize.cc index aa5a8eeaa3..7db0b25b73 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-normalize.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shape-normalize.cc @@ -29,7 +29,7 @@ #ifndef HB_NO_OT_SHAPE #include "hb-ot-shape-normalize.hh" -#include "hb-ot-shape-complex.hh" +#include "hb-ot-shaper.hh" #include "hb-ot-shape.hh" @@ -69,7 +69,7 @@ * - When a font does not support a character but supports its canonical * decomposition, well, use the decomposition. 
* - * - The complex shapers can customize the compose and decompose functions to + * - The shapers can customize the compose and decompose functions to * offload some of their requirements to the normalizer. For example, the * Indic shaper may want to disallow recomposing of two matras. */ @@ -143,8 +143,7 @@ decompose (const hb_ot_shape_normalize_context_t *c, bool shortest, hb_codepoint return 1; } - unsigned int ret; - if ((ret = decompose (c, shortest, a))) { + if (unsigned ret = decompose (c, shortest, a)) { if (b) { output_char (buffer, b, b_glyph); return ret + 1; @@ -223,7 +222,7 @@ handle_variation_selector_cluster (const hb_ot_shape_normalize_context_t *c, unsigned int end, bool short_circuit HB_UNUSED) { - /* TODO Currently if there's a variation-selector we give-up, it's just too hard. */ + /* Currently if there's a variation-selector we give-up on normalization, it's just too hard. */ hb_buffer_t * const buffer = c->buffer; hb_font_t * const font = c->font; for (; buffer->idx < end - 1 && buffer->successful;) { @@ -395,7 +394,7 @@ _hb_ot_shape_normalize (const hb_ot_shape_plan_t *plan, break; /* We are going to do a O(n^2). Only do this if the sequence is short. */ - if (end - i > HB_OT_SHAPE_COMPLEX_MAX_COMBINING_MARKS) { + if (end - i > HB_OT_SHAPE_MAX_COMBINING_MARKS) { i = end; continue; } diff --git a/thirdparty/harfbuzz/src/hb-ot-shape.cc b/thirdparty/harfbuzz/src/hb-ot-shape.cc index 99aadab3f0..0806abb7dd 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shape.cc @@ -37,7 +37,7 @@ #include "hb-shaper-impl.hh" #include "hb-ot-shape.hh" -#include "hb-ot-shape-complex.hh" +#include "hb-ot-shaper.hh" #include "hb-ot-shape-fallback.hh" #include "hb-ot-shape-normalize.hh" @@ -86,14 +86,14 @@ hb_ot_shape_planner_t::hb_ot_shape_planner_t (hb_face_t *fac , apply_morx (_hb_apply_morx (face, props)) #endif { - shaper = hb_ot_shape_complex_categorize (this); + shaper = hb_ot_shaper_categorize (this); script_zero_marks = shaper->zero_width_marks != HB_OT_SHAPE_ZERO_WIDTH_MARKS_NONE; script_fallback_mark_positioning = shaper->fallback_position; /* https://github.com/harfbuzz/harfbuzz/issues/1528 */ - if (apply_morx && shaper != &_hb_ot_complex_shaper_default) - shaper = &_hb_ot_complex_shaper_dumber; + if (apply_morx && shaper != &_hb_ot_shaper_default) + shaper = &_hb_ot_shaper_dumber; } void @@ -927,7 +927,7 @@ hb_ot_substitute_default (const hb_ot_shape_context_t *c) } static inline void -hb_ot_substitute_complex (const hb_ot_shape_context_t *c) +hb_ot_substitute_plan (const hb_ot_shape_context_t *c) { hb_buffer_t *buffer = c->buffer; @@ -946,7 +946,7 @@ hb_ot_substitute_pre (const hb_ot_shape_context_t *c) _hb_buffer_allocate_gsubgpos_vars (c->buffer); - hb_ot_substitute_complex (c); + hb_ot_substitute_plan (c); #ifndef HB_NO_AAT_SHAPE if (c->plan->apply_morx && c->plan->apply_gpos) @@ -1039,7 +1039,7 @@ hb_ot_position_default (const hb_ot_shape_context_t *c) } static inline void -hb_ot_position_complex (const hb_ot_shape_context_t *c) +hb_ot_position_plan (const hb_ot_shape_context_t *c) { unsigned int count = c->buffer->len; hb_glyph_info_t *info = c->buffer->info; @@ -1124,7 +1124,7 @@ hb_ot_position (const hb_ot_shape_context_t *c) hb_ot_position_default (c); - hb_ot_position_complex (c); + hb_ot_position_plan (c); if (HB_DIRECTION_IS_BACKWARD (c->buffer->props.direction)) hb_buffer_reverse (c->buffer); @@ -1159,8 +1159,6 @@ hb_propagate_flags (hb_buffer_t *buffer) static void hb_ot_shape_internal (hb_ot_shape_context_t *c) { - 
c->buffer->enter (); - /* Save the original direction, we use it later. */ c->target_direction = c->buffer->props.direction; diff --git a/thirdparty/harfbuzz/src/hb-ot-shape.hh b/thirdparty/harfbuzz/src/hb-ot-shape.hh index e8c81015c7..17fa58b337 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shape.hh @@ -61,7 +61,7 @@ struct hb_shape_plan_key_t; struct hb_ot_shape_plan_t { hb_segment_properties_t props; - const struct hb_ot_complex_shaper_t *shaper; + const struct hb_ot_shaper_t *shaper; hb_ot_map_t map; hb_aat_map_t aat_map; const void *data; @@ -158,7 +158,7 @@ struct hb_ot_shape_planner_t #endif bool script_zero_marks : 1; bool script_fallback_mark_positioning : 1; - const struct hb_ot_complex_shaper_t *shaper; + const struct hb_ot_shaper_t *shaper; HB_INTERNAL hb_ot_shape_planner_t (hb_face_t *face, const hb_segment_properties_t *props); diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic-fallback.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-fallback.hh index 78f46c1cac..b9f92f72d6 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic-fallback.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-fallback.hh @@ -24,8 +24,8 @@ * Google Author(s): Behdad Esfahbod */ -#ifndef HB_OT_SHAPE_COMPLEX_ARABIC_FALLBACK_HH -#define HB_OT_SHAPE_COMPLEX_ARABIC_FALLBACK_HH +#ifndef HB_OT_SHAPER_ARABIC_FALLBACK_HH +#define HB_OT_SHAPER_ARABIC_FALLBACK_HH #include "hb.hh" @@ -34,7 +34,11 @@ /* Features ordered the same as the entries in shaping_table rows, - * followed by rlig. Don't change. */ + * followed by rlig. Don't change. + * + * We currently support one subtable per lookup, and one lookup + * per feature. But we allow duplicate features, so we use that! + */ static const hb_tag_t arabic_fallback_features[] = { HB_TAG('i','n','i','t'), @@ -42,6 +46,8 @@ static const hb_tag_t arabic_fallback_features[] = HB_TAG('f','i','n','a'), HB_TAG('i','s','o','l'), HB_TAG('r','l','i','g'), + HB_TAG('r','l','i','g'), + HB_TAG('r','l','i','g'), }; static OT::SubstLookup * @@ -95,20 +101,25 @@ arabic_fallback_synthesize_lookup_single (const hb_ot_shape_plan_t *plan HB_UNUS return ret && !c.in_error () ? c.copy<OT::SubstLookup> () : nullptr; } +template <typename T> static OT::SubstLookup * arabic_fallback_synthesize_lookup_ligature (const hb_ot_shape_plan_t *plan HB_UNUSED, - hb_font_t *font) + hb_font_t *font, + const T &ligature_table, + unsigned lookup_flags) { OT::HBGlyphID16 first_glyphs[ARRAY_LENGTH_CONST (ligature_table)]; unsigned int first_glyphs_indirection[ARRAY_LENGTH_CONST (ligature_table)]; unsigned int ligature_per_first_glyph_count_list[ARRAY_LENGTH_CONST (first_glyphs)]; unsigned int num_first_glyphs = 0; - /* We know that all our ligatures are 2-component */ + /* We know that all our ligatures have the same number of components. 
*/ OT::HBGlyphID16 ligature_list[ARRAY_LENGTH_CONST (first_glyphs) * ARRAY_LENGTH_CONST(ligature_table[0].ligatures)]; unsigned int component_count_list[ARRAY_LENGTH_CONST (ligature_list)]; - OT::HBGlyphID16 component_list[ARRAY_LENGTH_CONST (ligature_list) * 1/* One extra component per ligature */]; + OT::HBGlyphID16 component_list[ARRAY_LENGTH_CONST (ligature_list) * + ARRAY_LENGTH_CONST (ligature_table[0].ligatures[0].components)]; unsigned int num_ligatures = 0; + unsigned int num_components = 0; /* Populate arrays */ @@ -133,21 +144,32 @@ arabic_fallback_synthesize_lookup_ligature (const hb_ot_shape_plan_t *plan HB_UN { unsigned int first_glyph_idx = first_glyphs_indirection[i]; - for (unsigned int second_glyph_idx = 0; second_glyph_idx < ARRAY_LENGTH (ligature_table[0].ligatures); second_glyph_idx++) + for (unsigned int ligature_idx = 0; ligature_idx < ARRAY_LENGTH (ligature_table[0].ligatures); ligature_idx++) { - hb_codepoint_t second_u = ligature_table[first_glyph_idx].ligatures[second_glyph_idx].second; - hb_codepoint_t ligature_u = ligature_table[first_glyph_idx].ligatures[second_glyph_idx].ligature; - hb_codepoint_t second_glyph, ligature_glyph; - if (!second_u || - !hb_font_get_glyph (font, second_u, 0, &second_glyph) || - !hb_font_get_glyph (font, ligature_u, 0, &ligature_glyph)) + hb_codepoint_t ligature_u = ligature_table[first_glyph_idx].ligatures[ligature_idx].ligature; + hb_codepoint_t ligature_glyph; + if (!hb_font_get_glyph (font, ligature_u, 0, &ligature_glyph)) continue; - ligature_per_first_glyph_count_list[i]++; + const auto &components = ligature_table[first_glyph_idx].ligatures[ligature_idx].components; + unsigned component_count = ARRAY_LENGTH_CONST (components); + + for (unsigned i = 0; i < component_count; i++) + { + hb_codepoint_t component_u = ligature_table[first_glyph_idx].ligatures[ligature_idx].components[i]; + hb_codepoint_t component_glyph; + if (!component_u || + !hb_font_get_glyph (font, component_u, 0, &component_glyph)) + continue; + component_list[num_components++] = component_glyph; + } + + component_count_list[num_ligatures] = 1 + component_count; ligature_list[num_ligatures] = ligature_glyph; - component_count_list[num_ligatures] = 2; - component_list[num_ligatures] = second_glyph; + + ligature_per_first_glyph_count_list[i]++; + num_ligatures++; } } @@ -161,14 +183,13 @@ arabic_fallback_synthesize_lookup_ligature (const hb_ot_shape_plan_t *plan HB_UN hb_serialize_context_t c (buf, sizeof (buf)); OT::SubstLookup *lookup = c.start_serialize<OT::SubstLookup> (); bool ret = lookup->serialize_ligature (&c, - OT::LookupFlag::IgnoreMarks, + lookup_flags, hb_sorted_array (first_glyphs, num_first_glyphs), hb_array (ligature_per_first_glyph_count_list, num_first_glyphs), hb_array (ligature_list, num_ligatures), hb_array (component_count_list, num_ligatures), - hb_array (component_list, num_ligatures)); + hb_array (component_list, num_components)); c.end_serialize (); - /* TODO sanitize the results? */ return ret && !c.in_error () ? 
c.copy<OT::SubstLookup> () : nullptr; } @@ -181,10 +202,18 @@ arabic_fallback_synthesize_lookup (const hb_ot_shape_plan_t *plan, if (feature_index < 4) return arabic_fallback_synthesize_lookup_single (plan, font, feature_index); else - return arabic_fallback_synthesize_lookup_ligature (plan, font); + { + switch (feature_index) { + case 4: return arabic_fallback_synthesize_lookup_ligature (plan, font, ligature_3_table, OT::LookupFlag::IgnoreMarks); + case 5: return arabic_fallback_synthesize_lookup_ligature (plan, font, ligature_table, OT::LookupFlag::IgnoreMarks); + case 6: return arabic_fallback_synthesize_lookup_ligature (plan, font, ligature_mark_table, 0); + } + } + assert (false); + return nullptr; } -#define ARABIC_FALLBACK_MAX_LOOKUPS 5 +#define ARABIC_FALLBACK_MAX_LOOKUPS ARRAY_LENGTH_CONST (arabic_fallback_features) struct arabic_fallback_plan_t { @@ -201,7 +230,7 @@ struct arabic_fallback_plan_t #endif #ifdef HB_WITH_WIN1256 -#include "hb-ot-shape-complex-arabic-win1256.hh" +#include "hb-ot-shaper-arabic-win1256.hh" #endif struct ManifestLookup @@ -230,9 +259,8 @@ arabic_fallback_plan_init_win1256 (arabic_fallback_plan_t *fallback_plan HB_UNUS return false; const Manifest &manifest = reinterpret_cast<const Manifest&> (arabic_win1256_gsub_lookups.manifest); - static_assert (sizeof (arabic_win1256_gsub_lookups.manifestData) == + static_assert (sizeof (arabic_win1256_gsub_lookups.manifestData) <= ARABIC_FALLBACK_MAX_LOOKUPS * sizeof (ManifestLookup), ""); - /* TODO sanitize the table? */ unsigned j = 0; unsigned int count = manifest.len; @@ -264,7 +292,7 @@ arabic_fallback_plan_init_unicode (arabic_fallback_plan_t *fallback_plan, const hb_ot_shape_plan_t *plan, hb_font_t *font) { - static_assert ((ARRAY_LENGTH_CONST(arabic_fallback_features) <= ARABIC_FALLBACK_MAX_LOOKUPS), ""); + static_assert ((ARRAY_LENGTH_CONST (arabic_fallback_features) <= ARABIC_FALLBACK_MAX_LOOKUPS), ""); unsigned int j = 0; for (unsigned int i = 0; i < ARRAY_LENGTH(arabic_fallback_features) ; i++) { @@ -345,4 +373,4 @@ arabic_fallback_plan_shape (arabic_fallback_plan_t *fallback_plan, } -#endif /* HB_OT_SHAPE_COMPLEX_ARABIC_FALLBACK_HH */ +#endif /* HB_OT_SHAPER_ARABIC_FALLBACK_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic-joining-list.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-joining-list.hh index e6339ee72b..a5ce3a6c93 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic-joining-list.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-joining-list.hh @@ -12,8 +12,8 @@ * # Date: 2021-07-10, 00:35:31 GMT */ -#ifndef HB_OT_SHAPE_COMPLEX_ARABIC_JOINING_LIST_HH -#define HB_OT_SHAPE_COMPLEX_ARABIC_JOINING_LIST_HH +#ifndef HB_OT_SHAPER_ARABIC_JOINING_LIST_HH +#define HB_OT_SHAPER_ARABIC_JOINING_LIST_HH static bool has_arabic_joining (hb_script_t script) @@ -42,6 +42,6 @@ has_arabic_joining (hb_script_t script) } -#endif /* HB_OT_SHAPE_COMPLEX_ARABIC_JOINING_LIST_HH */ +#endif /* HB_OT_SHAPER_ARABIC_JOINING_LIST_HH */ /* == End of generated function == */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-pua.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-pua.hh new file mode 100644 index 0000000000..ba86772f84 --- /dev/null +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-pua.hh @@ -0,0 +1,118 @@ +/* == Start of generated table == */ +/* + * The following table is generated by running: + * + * ./gen-arabic-pua.py + * + */ + +#ifndef HB_OT_SHAPER_ARABIC_PUA_HH +#define HB_OT_SHAPER_ARABIC_PUA_HH + +static const uint8_t +_hb_arabic_u8[464] = +{ + 84, 86, 85, 85, 
85, 85, 85,213, 16, 34, 34, 34, 34, 34, 35, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 36, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 82, 16, 0, 0, 0, 0, 1, 2, 3, 4, + 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 7, + 0, 0, 8, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 0, 0, 0, 22, 0, 23, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 16, 34, 34, 34, 35, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, + 34, 34, 34, 34, 34, 34, 34, 66, 16, 50, 68, 68, 68, 68, 68, 68, + 68, 68, 68, 68,101, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, + 71, 68, 68, 68, 68, 68, 68, 68,152,186, 76, 77, 68,254, 16, 50, + 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 0, 0, 5, 6, + 0, 0, 0, 0, 0, 0, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 0, 0, 10, 0, + 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 13, 0, 0, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 23, 23, 29, 30, 31, 32, 33, 0, 0, 0, 0, + 0, 0, 0, 34, 0, 0, 0, 35, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 36, 37, 38, 0, 0, 0, 0, 0, 0, 0, 39, 0, 0, 40, + 41, 42, 0, 43, 44, 0, 0, 45, 46, 0, 47, 48, 49, 0, 0, 0, + 0, 50, 0, 0, 51, 52, 0, 53, 54, 55, 56, 57, 58, 0, 0, 0, + 0, 0, 59, 60, 61, 62, 63, 64, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 65, 0, 0, 66, + 0, 0, 67, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, +}; +static const uint16_t +_hb_arabic_u16[720] = +{ + 0, 0, 0, 0, 0, 0, 0, 0,61728,61729,61730, 0, 0,61733, 0, 0, + 61736,61737,61738,61739,61790,61741,61742,61743,61872,61873,61874,61875,61876,61877,61878,61879, + 61880,61881,61754,61755, 0,61757, 0,61759, 0, 0, 0,61787,61788,61789, 0, 0, + 0, 0, 0,61731, 0, 0, 0, 0, 0, 0, 0,61732, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0,61734, 0, 0, 0, 0, 0, 0, 0,61735, + 0, 0, 0, 0,61740, 0, 0, 0, 0, 0, 0,61755, 0, 0, 0,61759, + 0,61869,61765,61763,61883,61767,61882,61761,61770,61865,61772,61774,61777,61780,61783,61784, + 61785,61786,61792,61794,61796,61798,61800,61801,61802,61806,61810,61696,61696,61696,61696,61696, + 61791,61813,61816,61818,61820,61822,61921,61860,61861,61868,61864,61895,61896,61899,61892,61893, + 61898,61897,61894,61696,61696,61696,61696,61696,61696,61696,61696,61696,61696,61696,61696, 0, + 61744,61745,61746,61747,61748,61749,61750,61751,61752,61753, 0,61790,61790, 0, 0, 0, + 0, 0, 0, 0,61708,61709,61710,61711,61756,61758, 0, 0, 0, 0, 0, 0, + 0,61765,61766,61763,61764,61883,61883,61767,61768,61882,61871,61870,61870,61761,61762,61770, + 61770,61769,61769,61865,61866,61772,61772,61771,61771,61774,61774,61773,61773,61777,61776,61775, + 61775,61780,61779,61778,61778,61783,61782,61781,61781,61784,61784,61785,61785,61786,61786,61792, + 61792,61794,61794,61793,61793,61796,61796,61795,61795,61798,61798,61797,61797,61800,61800,61799, + 61799,61801,61801,61801,61801,61802,61802,61802,61802,61806,61805,61803,61804,61810,61809,61807, + 61808,61813,61813,61811,61812,61816,61816,61814,61815,61818,61818,61817,61817,61820,61820,61819, + 61819,61822,61822,61821,61821,61921,61921,61823,61823,61860,61859,61857,61858,61861,61861,61868, + 61867,61864,61863,61862,61862,61888,61889,61886,61887,61890,61891,61885,61884, 0, 0, 0, + 0, 0, 0, 0,61984,61985,61986, 0, 0,61989, 0, 0,61992,61993,61994,61995, + 62046,61997,61998,61999, 
0, 0,62010,62011, 0,62013, 0,62015, 0, 0, 0,62043, + 0,62045, 0, 0, 0, 0, 0,61987, 0, 0, 0,61988, 0, 0, 0,61990, + 0, 0, 0,61991,61996, 0, 0, 0, 0, 0, 0,62011, 0, 0, 0,62015, + 0,62165,62021,62019,62170,62023,62169,62017,62028,62161,62032,62036,62040,62048,62052,62053, + 62055,62057,62059,62064,62068,62072,62078,62114,62115,62122,62126,61952,61952,61952,61952,61952, + 62047,62130,62134,62138,62142,62146,62150,62154,62155,62164,62160,62183,62184,62187,62180,62181, + 62186,62185,62182,61952,61952,61952,61952, 0,62000,62001,62002,62003,62004,62005,62006,62007, + 62008,62009, 0,62046,62046, 0, 0, 0,61964,61965,61966,61967,62012,62014, 0, 0, + 61954, 0,61981, 0, 0, 0,61955, 0,61982, 0,61956, 0, 0, 0,62111, 0, + 0, 0, 0,61970,61971,61972,61957, 0,61980, 0, 0, 0, 0, 0,61958, 0, + 61983, 0, 0, 0, 0, 0,62191, 0,62188,62189,62192, 0, 0, 0,61973, 0, + 0,62098, 0, 0,61974, 0, 0,62099, 0, 0,62101, 0, 0,61975, 0, 0, + 62100, 0, 0, 0,62080,62081,62082,62102, 0,62083,62084,62085,62103, 0, 0, 0, + 62106, 0,62107, 0,62108, 0, 0, 0,61976, 0, 0, 0, 0,62086,62087,62088, + 62109,61978,62089,62090,62091,62110,62093,62094, 0,62104, 0, 0, 0, 0,62095,62096, + 62097,62105, 0, 0,61977, 0, 0, 0, 0, 0,62075,62077,61968, 0, 0, 0, + 0,62021,62022,62019,62020,62170,62171,62023,62024,62169,62168,62166,62167,62017,62018,62028, + 62027,62025,62026,62161,62162,62032,62031,62029,62030,62036,62035,62033,62034,62040,62039,62037, + 62038,62048,62044,62041,62042,62052,62051,62049,62050,62053,62054,62055,62056,62057,62058,62059, + 62060,62064,62063,62061,62062,62068,62067,62065,62066,62072,62071,62069,62070,62078,62076,62073, + 62074,62114,62113,62079,62193,62118,62117,62115,62116,62122,62121,62119,62120,62126,62125,62123, + 62124,62130,62129,62127,62128,62134,62133,62131,62132,62138,62137,62135,62136,62142,62141,62139, + 62140,62146,62145,62143,62144,62150,62149,62147,62148,62154,62153,62151,62152,62155,62156,62164, + 62163,62160,62159,62157,62158,62176,62177,62174,62175,62178,62179,62172,62173, 0, 0, 0, +}; + +static inline unsigned +_hb_arabic_b2 (const uint8_t* a, unsigned i) +{ + return (a[i>>2]>>((i&3u)<<1))&3u; +} +static inline unsigned +_hb_arabic_b4 (const uint8_t* a, unsigned i) +{ + return (a[i>>1]>>((i&1u)<<2))&15u; +} +static inline uint_fast16_t +_hb_arabic_pua_simp_map (unsigned u) +{ + return u<65277u?_hb_arabic_u16[((_hb_arabic_u8[40+(((_hb_arabic_b4(8+_hb_arabic_u8,((_hb_arabic_b2(_hb_arabic_u8,u>>3>>4>>4))<<4)+((u>>3>>4)&15u)))<<4)+((u>>3)&15u))])<<3)+((u)&7u)]:0; +} +static inline uint_fast16_t +_hb_arabic_pua_trad_map (unsigned u) +{ + return u<65277u?_hb_arabic_u16[320+(((_hb_arabic_u8[208+(((_hb_arabic_b4(168+_hb_arabic_u8,((_hb_arabic_b4(136+_hb_arabic_u8,u>>2>>4>>4))<<4)+((u>>2>>4)&15u)))<<4)+((u>>2)&15u))])<<2)+((u)&3u))]:0; +} + +#endif /* HB_OT_SHAPER_ARABIC_PUA_HH */ + +/* == End of generated table == */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic-table.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-table.hh index c158964f2c..fd3d8645d1 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-table.hh @@ -13,8 +13,8 @@ * UnicodeData.txt does not have a header. 
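The `_hb_arabic_b2` and `_hb_arabic_b4` accessors in the generated PUA table above read the `_hb_arabic_u8` array as packed 2-bit and 4-bit fields (four, respectively two, entries per byte, low bits first); `_hb_arabic_pua_simp_map` and `_hb_arabic_pua_trad_map` then chain those reads into a multi-level trie lookup. A minimal standalone sketch of that bit layout follows, assuming a hypothetical `b2_set` inverse added only for illustration:

#include <assert.h>
#include <stdint.h>

/* Same layout as _hb_arabic_b2 above: byte i>>2 holds four 2-bit entries,
 * entry i&3 at bit offset (i&3)*2. */
static inline unsigned
b2 (const uint8_t *a, unsigned i)
{
  return (a[i >> 2] >> ((i & 3u) << 1)) & 3u;
}

/* Hypothetical inverse, for illustration only: store 2-bit value v at index i
 * using the same four-per-byte, low-bits-first layout. */
static inline void
b2_set (uint8_t *a, unsigned i, unsigned v)
{
  unsigned shift = (i & 3u) << 1;
  a[i >> 2] = (uint8_t) ((a[i >> 2] & ~(3u << shift)) | ((v & 3u) << shift));
}

int
main (void)
{
  uint8_t packed[2] = {0, 0};                  /* eight 2-bit slots */
  for (unsigned i = 0; i < 8; i++)
    b2_set (packed, i, (i * 3) & 3u);
  for (unsigned i = 0; i < 8; i++)
    assert (b2 (packed, i) == ((i * 3) & 3u)); /* each slot round-trips */
  return 0;
}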
*/ -#ifndef HB_OT_SHAPE_COMPLEX_ARABIC_TABLE_HH -#define HB_OT_SHAPE_COMPLEX_ARABIC_TABLE_HH +#ifndef HB_OT_SHAPER_ARABIC_TABLE_HH +#define HB_OT_SHAPER_ARABIC_TABLE_HH #define A JOINING_GROUP_ALAPH @@ -416,26 +416,141 @@ static const uint16_t shaping_table[][4] = static const struct ligature_set_t { uint16_t first; struct ligature_pairs_t { - uint16_t second; + uint16_t components[1]; uint16_t ligature; - } ligatures[4]; + } ligatures[14]; } ligature_table[] = { + { 0xFE91u, { + { {0xFEE2u}, 0xFC08u }, /* ARABIC LIGATURE BEH WITH MEEM ISOLATED FORM */ + { {0xFEE4u}, 0xFC9Fu }, /* ARABIC LIGATURE BEH WITH MEEM INITIAL FORM */ + { {0xFEA0u}, 0xFC9Cu }, /* ARABIC LIGATURE BEH WITH JEEM INITIAL FORM */ + { {0xFEA4u}, 0xFC9Du }, /* ARABIC LIGATURE BEH WITH HAH INITIAL FORM */ + { {0xFEA8u}, 0xFC9Eu }, /* ARABIC LIGATURE BEH WITH KHAH INITIAL FORM */ + }}, + { 0xFE92u, { + { {0xFEAEu}, 0xFC6Au }, /* ARABIC LIGATURE BEH WITH REH FINAL FORM */ + { {0xFEE6u}, 0xFC6Du }, /* ARABIC LIGATURE BEH WITH NOON FINAL FORM */ + { {0xFEF2u}, 0xFC6Fu }, /* ARABIC LIGATURE BEH WITH YEH FINAL FORM */ + }}, + { 0xFE97u, { + { {0xFEE2u}, 0xFC0Eu }, /* ARABIC LIGATURE TEH WITH MEEM ISOLATED FORM */ + { {0xFEE4u}, 0xFCA4u }, /* ARABIC LIGATURE TEH WITH MEEM INITIAL FORM */ + { {0xFEA0u}, 0xFCA1u }, /* ARABIC LIGATURE TEH WITH JEEM INITIAL FORM */ + { {0xFEA4u}, 0xFCA2u }, /* ARABIC LIGATURE TEH WITH HAH INITIAL FORM */ + { {0xFEA8u}, 0xFCA3u }, /* ARABIC LIGATURE TEH WITH KHAH INITIAL FORM */ + }}, + { 0xFE98u, { + { {0xFEAEu}, 0xFC70u }, /* ARABIC LIGATURE TEH WITH REH FINAL FORM */ + { {0xFEE6u}, 0xFC73u }, /* ARABIC LIGATURE TEH WITH NOON FINAL FORM */ + { {0xFEF2u}, 0xFC75u }, /* ARABIC LIGATURE TEH WITH YEH FINAL FORM */ + }}, + { 0xFE9Bu, { + { {0xFEE2u}, 0xFC12u }, /* ARABIC LIGATURE THEH WITH MEEM ISOLATED FORM */ + }}, + { 0xFE9Fu, { + { {0xFEE4u}, 0xFCA8u }, /* ARABIC LIGATURE JEEM WITH MEEM INITIAL FORM */ + }}, + { 0xFEA3u, { + { {0xFEE4u}, 0xFCAAu }, /* ARABIC LIGATURE HAH WITH MEEM INITIAL FORM */ + }}, + { 0xFEA7u, { + { {0xFEE4u}, 0xFCACu }, /* ARABIC LIGATURE KHAH WITH MEEM INITIAL FORM */ + }}, + { 0xFEB3u, { + { {0xFEE4u}, 0xFCB0u }, /* ARABIC LIGATURE SEEN WITH MEEM INITIAL FORM */ + }}, + { 0xFEB7u, { + { {0xFEE4u}, 0xFD30u }, /* ARABIC LIGATURE SHEEN WITH MEEM INITIAL FORM */ + }}, + { 0xFED3u, { + { {0xFEF2u}, 0xFC32u }, /* ARABIC LIGATURE FEH WITH YEH ISOLATED FORM */ + }}, { 0xFEDFu, { - { 0xFE82u, 0xFEF5u }, /* ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM */ - { 0xFE84u, 0xFEF7u }, /* ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM */ - { 0xFE88u, 0xFEF9u }, /* ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW ISOLATED FORM */ - { 0xFE8Eu, 0xFEFBu }, /* ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM */ + { {0xFE9Eu}, 0xFC3Fu }, /* ARABIC LIGATURE LAM WITH JEEM ISOLATED FORM */ + { {0xFEA0u}, 0xFCC9u }, /* ARABIC LIGATURE LAM WITH JEEM INITIAL FORM */ + { {0xFEA2u}, 0xFC40u }, /* ARABIC LIGATURE LAM WITH HAH ISOLATED FORM */ + { {0xFEA4u}, 0xFCCAu }, /* ARABIC LIGATURE LAM WITH HAH INITIAL FORM */ + { {0xFEA6u}, 0xFC41u }, /* ARABIC LIGATURE LAM WITH KHAH ISOLATED FORM */ + { {0xFEA8u}, 0xFCCBu }, /* ARABIC LIGATURE LAM WITH KHAH INITIAL FORM */ + { {0xFEE2u}, 0xFC42u }, /* ARABIC LIGATURE LAM WITH MEEM ISOLATED FORM */ + { {0xFEE4u}, 0xFCCCu }, /* ARABIC LIGATURE LAM WITH MEEM INITIAL FORM */ + { {0xFEF2u}, 0xFC44u }, /* ARABIC LIGATURE LAM WITH YEH ISOLATED FORM */ + { {0xFEECu}, 0xFCCDu }, /* ARABIC LIGATURE LAM WITH HEH INITIAL FORM */ + { {0xFE82u}, 0xFEF5u }, /* 
ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM */ + { {0xFE84u}, 0xFEF7u }, /* ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM */ + { {0xFE88u}, 0xFEF9u }, /* ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW ISOLATED FORM */ + { {0xFE8Eu}, 0xFEFBu }, /* ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM */ }}, { 0xFEE0u, { - { 0xFE82u, 0xFEF6u }, /* ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM */ - { 0xFE84u, 0xFEF8u }, /* ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM */ - { 0xFE88u, 0xFEFAu }, /* ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW FINAL FORM */ - { 0xFE8Eu, 0xFEFCu }, /* ARABIC LIGATURE LAM WITH ALEF FINAL FORM */ + { {0xFEF0u}, 0xFC86u }, /* ARABIC LIGATURE LAM WITH ALEF MAKSURA FINAL FORM */ + { {0xFE82u}, 0xFEF6u }, /* ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM */ + { {0xFE84u}, 0xFEF8u }, /* ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM */ + { {0xFE88u}, 0xFEFAu }, /* ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW FINAL FORM */ + { {0xFE8Eu}, 0xFEFCu }, /* ARABIC LIGATURE LAM WITH ALEF FINAL FORM */ + }}, + { 0xFEE3u, { + { {0xFEA0u}, 0xFCCEu }, /* ARABIC LIGATURE MEEM WITH JEEM INITIAL FORM */ + { {0xFEA4u}, 0xFCCFu }, /* ARABIC LIGATURE MEEM WITH HAH INITIAL FORM */ + { {0xFEA8u}, 0xFCD0u }, /* ARABIC LIGATURE MEEM WITH KHAH INITIAL FORM */ + { {0xFEE4u}, 0xFCD1u }, /* ARABIC LIGATURE MEEM WITH MEEM INITIAL FORM */ + }}, + { 0xFEE7u, { + { {0xFEE2u}, 0xFC4Eu }, /* ARABIC LIGATURE NOON WITH MEEM ISOLATED FORM */ + { {0xFEE4u}, 0xFCD5u }, /* ARABIC LIGATURE NOON WITH MEEM INITIAL FORM */ + { {0xFEA0u}, 0xFCD2u }, /* ARABIC LIGATURE NOON WITH JEEM INITIAL FORM */ + { {0xFEA4u}, 0xFCD3u }, /* ARABIC LIGATURE NOON WITH HAH INITIAL FORM */ + }}, + { 0xFEE8u, { + { {0xFEF2u}, 0xFC8Fu }, /* ARABIC LIGATURE NOON WITH YEH FINAL FORM */ + }}, + { 0xFEF3u, { + { {0xFEA0u}, 0xFCDAu }, /* ARABIC LIGATURE YEH WITH JEEM INITIAL FORM */ + { {0xFEA4u}, 0xFCDBu }, /* ARABIC LIGATURE YEH WITH HAH INITIAL FORM */ + { {0xFEA8u}, 0xFCDCu }, /* ARABIC LIGATURE YEH WITH KHAH INITIAL FORM */ + { {0xFEE4u}, 0xFCDDu }, /* ARABIC LIGATURE YEH WITH MEEM INITIAL FORM */ + }}, + { 0xFEF4u, { + { {0xFEAEu}, 0xFC91u }, /* ARABIC LIGATURE YEH WITH REH FINAL FORM */ + { {0xFEE6u}, 0xFC94u }, /* ARABIC LIGATURE YEH WITH NOON FINAL FORM */ + }}, +}; + + +static const struct ligature_mark_set_t { + uint16_t first; + struct ligature_pairs_t { + uint16_t components[1]; + uint16_t ligature; + } ligatures[5]; +} ligature_mark_table[] = +{ + { 0x0651u, { + { {0x064Cu}, 0xFC5Eu }, /* ARABIC LIGATURE SHADDA WITH DAMMATAN ISOLATED FORM */ + { {0x064Eu}, 0xFC60u }, /* ARABIC LIGATURE SHADDA WITH FATHA ISOLATED FORM */ + { {0x064Fu}, 0xFC61u }, /* ARABIC LIGATURE SHADDA WITH DAMMA ISOLATED FORM */ + { {0x0650u}, 0xFC62u }, /* ARABIC LIGATURE SHADDA WITH KASRA ISOLATED FORM */ + { {0x064Bu}, 0xF2EEu }, /* PUA ARABIC LIGATURE SHADDA WITH FATHATAN ISOLATED FORM */ + }}, +}; + + +static const struct ligature_3_set_t { + uint16_t first; + struct ligature_triplets_t { + uint16_t components[2]; + uint16_t ligature; + } ligatures[3]; +} ligature_3_table[] = +{ + { 0xFEDFu, { + { {0xFEE4u, 0xFEA4u}, 0xFD88u}, /* ARABIC LIGATURE LAM WITH MEEM WITH HAH INITIAL FORM */ + { {0xFEE0u, 0xFEEAu}, 0xF201u}, /* PUA ARABIC LIGATURE LELLAH ISOLATED FORM */ + { {0xFEE4u, 0xFEA0u}, 0xF211u}, /* PUA ARABIC LIGATURE LAM WITH MEEM WITH JEEM INITIAL FORM */ }}, }; -#endif /* HB_OT_SHAPE_COMPLEX_ARABIC_TABLE_HH */ +#endif /* HB_OT_SHAPER_ARABIC_TABLE_HH */ /* == End of 
generated table == */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic-win1256.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-win1256.hh index 429974d05b..b8d481c813 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic-win1256.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic-win1256.hh @@ -24,7 +24,7 @@ * Google Author(s): Behdad Esfahbod */ -#ifndef HB_OT_SHAPE_COMPLEX_ARABIC_WIN1256_HH +#ifndef HB_OT_SHAPER_ARABIC_WIN1256_HH /* @@ -342,8 +342,8 @@ OT_TABLE_END #include "hb.hh" /* Make check-includes.sh happy. */ #endif #ifdef OT_MEASURE -#include "hb-ot-shape-complex-arabic-win1256.hh" +#include "hb-ot-shaper-arabic-win1256.hh" #endif -#define HB_OT_SHAPE_COMPLEX_ARABIC_WIN1256_HH -#endif /* HB_OT_SHAPE_COMPLEX_ARABIC_WIN1256_HH */ +#define HB_OT_SHAPER_ARABIC_WIN1256_HH +#endif /* HB_OT_SHAPER_ARABIC_WIN1256_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic.cc index 224f8b842e..e869d78509 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic.cc @@ -28,14 +28,14 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex-arabic.hh" +#include "hb-ot-shaper-arabic.hh" #include "hb-ot-shape.hh" /* buffer var allocations */ -#define arabic_shaping_action() complex_var_u8_auxiliary() /* arabic shaping action */ +#define arabic_shaping_action() ot_shaper_var_u8_auxiliary() /* arabic shaping action */ -#define HB_BUFFER_SCRATCH_FLAG_ARABIC_HAS_STCH HB_BUFFER_SCRATCH_FLAG_COMPLEX0 +#define HB_BUFFER_SCRATCH_FLAG_ARABIC_HAS_STCH HB_BUFFER_SCRATCH_FLAG_SHAPER0 /* See: * https://github.com/harfbuzz/harfbuzz/commit/6e6f82b6f3dde0fc6c3c7d991d9ec6cfff57823d#commitcomment-14248516 */ @@ -81,7 +81,7 @@ enum hb_arabic_joining_type_t { JOINING_TYPE_X = 8 /* means: use general-category to choose between U or T. */ }; -#include "hb-ot-shape-complex-arabic-table.hh" +#include "hb-ot-shaper-arabic-table.hh" static unsigned int get_joining_type (hb_codepoint_t u, hb_unicode_general_category_t gen_cat) { @@ -172,6 +172,14 @@ record_stch (const hb_ot_shape_plan_t *plan, hb_buffer_t *buffer); static void +deallocate_buffer_var (const hb_ot_shape_plan_t *plan, + hb_font_t *font, + hb_buffer_t *buffer) +{ + HB_BUFFER_DEALLOCATE_VAR (buffer, arabic_shaping_action); +} + +static void collect_features_arabic (hb_ot_shape_planner_t *plan) { hb_ot_map_builder_t *map = &plan->map; @@ -213,6 +221,7 @@ collect_features_arabic (hb_ot_shape_planner_t *plan) map->add_feature (arabic_features[i], has_fallback ? F_HAS_FALLBACK : F_NONE); map->add_gsub_pause (nullptr); } + map->add_gsub_pause (deallocate_buffer_var); /* Normally, Unicode says a ZWNJ means "don't ligate". In Arabic script * however, it says a ZWJ should also mean "don't ligate". 
So we run @@ -240,7 +249,7 @@ collect_features_arabic (hb_ot_shape_planner_t *plan) map->enable_feature (HB_TAG('m','s','e','t')); } -#include "hb-ot-shape-complex-arabic-fallback.hh" +#include "hb-ot-shaper-arabic-fallback.hh" struct arabic_shape_plan_t { @@ -405,7 +414,7 @@ arabic_fallback_shape (const hb_ot_shape_plan_t *plan, hb_font_t *font, hb_buffer_t *buffer) { -#ifdef HB_NO_OT_SHAPE_COMPLEX_ARABIC_FALLBACK +#ifdef HB_NO_OT_SHAPER_ARABIC_FALLBACK return; #endif @@ -619,8 +628,6 @@ postprocess_glyphs_arabic (const hb_ot_shape_plan_t *plan, hb_font_t *font) { apply_stch (plan, buffer, font); - - HB_BUFFER_DEALLOCATE_VAR (buffer, arabic_shaping_action); } /* https://www.unicode.org/reports/tr53/ */ @@ -689,7 +696,7 @@ reorder_marks_arabic (const hb_ot_shape_plan_t *plan HB_UNUSED, /* Shift it! */ DEBUG_MSG (ARABIC, buffer, "Shifting %d's: %d %d", cc, i, j); - hb_glyph_info_t temp[HB_OT_SHAPE_COMPLEX_MAX_COMBINING_MARKS]; + hb_glyph_info_t temp[HB_OT_SHAPE_MAX_COMBINING_MARKS]; assert (j - i <= ARRAY_LENGTH (temp)); buffer->merge_clusters (start, j); memmove (temp, &info[i], (j - i) * sizeof (hb_glyph_info_t)); @@ -720,7 +727,7 @@ reorder_marks_arabic (const hb_ot_shape_plan_t *plan HB_UNUSED, } } -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_arabic = +const hb_ot_shaper_t _hb_ot_shaper_arabic = { collect_features_arabic, nullptr, /* override_features */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic.hh index 5bf6ff6338..a025b1a399 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-arabic.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-arabic.hh @@ -26,12 +26,12 @@ * Google Author(s): Behdad Esfahbod */ -#ifndef HB_OT_SHAPE_COMPLEX_ARABIC_HH -#define HB_OT_SHAPE_COMPLEX_ARABIC_HH +#ifndef HB_OT_SHAPER_ARABIC_HH +#define HB_OT_SHAPER_ARABIC_HH #include "hb.hh" -#include "hb-ot-shape-complex.hh" +#include "hb-ot-shaper.hh" struct arabic_shape_plan_t; @@ -47,4 +47,4 @@ setup_masks_arabic_plan (const arabic_shape_plan_t *arabic_plan, hb_buffer_t *buffer, hb_script_t script); -#endif /* HB_OT_SHAPE_COMPLEX_ARABIC_HH */ +#endif /* HB_OT_SHAPER_ARABIC_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-default.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-default.cc index a755aea098..25716aa81f 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-default.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-default.cc @@ -28,10 +28,10 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex.hh" +#include "hb-ot-shaper.hh" -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_default = +const hb_ot_shaper_t _hb_ot_shaper_default = { nullptr, /* collect_features */ nullptr, /* override_features */ @@ -51,7 +51,7 @@ const hb_ot_complex_shaper_t _hb_ot_complex_shaper_default = /* Same as default but no mark advance zeroing / fallback positioning. * Dumbest shaper ever, basically. 
*/ -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_dumber = +const hb_ot_shaper_t _hb_ot_shaper_dumber = { nullptr, /* collect_features */ nullptr, /* override_features */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-hangul.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-hangul.cc index 3bc9e9b961..aa507c75ca 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-hangul.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-hangul.cc @@ -28,7 +28,7 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex.hh" +#include "hb-ot-shaper.hh" /* Hangul shaper */ @@ -119,7 +119,7 @@ data_destroy_hangul (void *data) #define isHangulTone(u) (hb_in_range<hb_codepoint_t> ((u), 0x302Eu, 0x302Fu)) /* buffer var allocations */ -#define hangul_shaping_feature() complex_var_u8_auxiliary() /* hangul jamo shaping feature */ +#define hangul_shaping_feature() ot_shaper_var_u8_auxiliary() /* hangul jamo shaping feature */ static bool is_zero_width_char (hb_font_t *font, @@ -414,7 +414,7 @@ setup_masks_hangul (const hb_ot_shape_plan_t *plan, } -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_hangul = +const hb_ot_shaper_t _hb_ot_shaper_hangul = { collect_features_hangul, override_features_hangul, diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-hebrew.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-hebrew.cc index 334d3ded82..f3b6cde179 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-hebrew.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-hebrew.cc @@ -28,7 +28,7 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex.hh" +#include "hb-ot-shaper.hh" static bool @@ -74,7 +74,7 @@ compose_hebrew (const hb_ot_shape_normalize_context_t *c, bool found = (bool) c->unicode->compose (a, b, ab); -#ifdef HB_NO_OT_SHAPE_COMPLEX_HEBREW_FALLBACK +#ifdef HB_NO_OT_SHAPER_HEBREW_FALLBACK return found; #endif @@ -163,7 +163,7 @@ compose_hebrew (const hb_ot_shape_normalize_context_t *c, } -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_hebrew = +const hb_ot_shaper_t _hb_ot_shaper_hebrew = { nullptr, /* collect_features */ nullptr, /* override_features */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shaper-indic-machine.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-indic-machine.hh new file mode 100644 index 0000000000..d52b13f616 --- /dev/null +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-indic-machine.hh @@ -0,0 +1,589 @@ + +#line 1 "hb-ot-shaper-indic-machine.rl" +/* + * Copyright © 2011,2012 Google, Inc. + * + * This is part of HarfBuzz, a text shaping library. + * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. + * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
+ * + * Google Author(s): Behdad Esfahbod + */ + +#ifndef HB_OT_SHAPER_INDIC_MACHINE_HH +#define HB_OT_SHAPER_INDIC_MACHINE_HH + +#include "hb.hh" + +#include "hb-ot-layout.hh" +#include "hb-ot-shaper-indic.hh" + +/* buffer var allocations */ +#define indic_category() ot_shaper_var_u8_category() /* indic_category_t */ +#define indic_position() ot_shaper_var_u8_auxiliary() /* indic_position_t */ + +using indic_category_t = unsigned; +using indic_position_t = ot_position_t; + +#define I_Cat(Cat) indic_syllable_machine_ex_##Cat + +enum indic_syllable_type_t { + indic_consonant_syllable, + indic_vowel_syllable, + indic_standalone_cluster, + indic_symbol_cluster, + indic_broken_cluster, + indic_non_indic_cluster, +}; + + +#line 57 "hb-ot-shaper-indic-machine.hh" +#define indic_syllable_machine_ex_A 9u +#define indic_syllable_machine_ex_C 1u +#define indic_syllable_machine_ex_CM 16u +#define indic_syllable_machine_ex_CS 18u +#define indic_syllable_machine_ex_DOTTEDCIRCLE 11u +#define indic_syllable_machine_ex_H 4u +#define indic_syllable_machine_ex_M 7u +#define indic_syllable_machine_ex_N 3u +#define indic_syllable_machine_ex_PLACEHOLDER 10u +#define indic_syllable_machine_ex_RS 12u +#define indic_syllable_machine_ex_Ra 15u +#define indic_syllable_machine_ex_Repha 14u +#define indic_syllable_machine_ex_SM 8u +#define indic_syllable_machine_ex_Symbol 17u +#define indic_syllable_machine_ex_V 2u +#define indic_syllable_machine_ex_VD 9u +#define indic_syllable_machine_ex_X 0u +#define indic_syllable_machine_ex_ZWJ 6u +#define indic_syllable_machine_ex_ZWNJ 5u + + +#line 79 "hb-ot-shaper-indic-machine.hh" +static const unsigned char _indic_syllable_machine_trans_keys[] = { + 8u, 8u, 4u, 8u, 5u, 7u, 5u, 8u, 4u, 8u, 4u, 12u, 4u, 8u, 8u, 8u, + 5u, 7u, 5u, 8u, 4u, 8u, 4u, 12u, 4u, 12u, 4u, 12u, 8u, 8u, 5u, 7u, + 5u, 8u, 4u, 8u, 4u, 8u, 4u, 12u, 8u, 8u, 5u, 7u, 5u, 8u, 4u, 8u, + 4u, 8u, 5u, 8u, 8u, 8u, 1u, 18u, 3u, 16u, 3u, 16u, 4u, 16u, 1u, 15u, + 5u, 9u, 5u, 9u, 9u, 9u, 5u, 9u, 1u, 15u, 1u, 15u, 1u, 15u, 3u, 9u, + 4u, 9u, 5u, 9u, 4u, 9u, 5u, 9u, 3u, 9u, 5u, 9u, 3u, 16u, 3u, 16u, + 3u, 16u, 3u, 16u, 4u, 16u, 1u, 15u, 3u, 16u, 3u, 16u, 4u, 16u, 1u, 15u, + 5u, 9u, 9u, 9u, 5u, 9u, 1u, 15u, 1u, 15u, 3u, 9u, 4u, 9u, 5u, 9u, + 4u, 9u, 5u, 9u, 5u, 9u, 3u, 9u, 5u, 9u, 3u, 16u, 3u, 16u, 4u, 8u, + 3u, 16u, 3u, 16u, 4u, 16u, 1u, 15u, 3u, 16u, 1u, 15u, 5u, 9u, 9u, 9u, + 5u, 9u, 1u, 15u, 1u, 15u, 3u, 9u, 4u, 9u, 5u, 9u, 3u, 16u, 4u, 9u, + 5u, 9u, 5u, 9u, 3u, 9u, 5u, 9u, 3u, 16u, 4u, 12u, 4u, 8u, 3u, 16u, + 3u, 16u, 4u, 16u, 1u, 15u, 3u, 16u, 1u, 15u, 5u, 9u, 9u, 9u, 5u, 9u, + 1u, 15u, 1u, 15u, 3u, 9u, 4u, 9u, 5u, 9u, 3u, 16u, 4u, 9u, 5u, 9u, + 5u, 9u, 3u, 9u, 5u, 9u, 1u, 16u, 3u, 16u, 1u, 16u, 4u, 12u, 5u, 9u, + 9u, 9u, 5u, 9u, 1u, 15u, 3u, 9u, 5u, 9u, 5u, 9u, 9u, 9u, 5u, 9u, + 1u, 15u, 0 +}; + +static const char _indic_syllable_machine_key_spans[] = { + 1, 5, 3, 4, 5, 9, 5, 1, + 3, 4, 5, 9, 9, 9, 1, 3, + 4, 5, 5, 9, 1, 3, 4, 5, + 5, 4, 1, 18, 14, 14, 13, 15, + 5, 5, 1, 5, 15, 15, 15, 7, + 6, 5, 6, 5, 7, 5, 14, 14, + 14, 14, 13, 15, 14, 14, 13, 15, + 5, 1, 5, 15, 15, 7, 6, 5, + 6, 5, 5, 7, 5, 14, 14, 5, + 14, 14, 13, 15, 14, 15, 5, 1, + 5, 15, 15, 7, 6, 5, 14, 6, + 5, 5, 7, 5, 14, 9, 5, 14, + 14, 13, 15, 14, 15, 5, 1, 5, + 15, 15, 7, 6, 5, 14, 6, 5, + 5, 7, 5, 16, 14, 16, 9, 5, + 1, 5, 15, 7, 5, 5, 1, 5, + 15 +}; + +static const short _indic_syllable_machine_index_offsets[] = { + 0, 2, 8, 12, 17, 23, 33, 39, + 41, 45, 50, 56, 66, 76, 86, 88, + 92, 97, 103, 109, 119, 121, 125, 130, + 136, 142, 147, 149, 168, 183, 
198, 212, + 228, 234, 240, 242, 248, 264, 280, 296, + 304, 311, 317, 324, 330, 338, 344, 359, + 374, 389, 404, 418, 434, 449, 464, 478, + 494, 500, 502, 508, 524, 540, 548, 555, + 561, 568, 574, 580, 588, 594, 609, 624, + 630, 645, 660, 674, 690, 705, 721, 727, + 729, 735, 751, 767, 775, 782, 788, 803, + 810, 816, 822, 830, 836, 851, 861, 867, + 882, 897, 911, 927, 942, 958, 964, 966, + 972, 988, 1004, 1012, 1019, 1025, 1040, 1047, + 1053, 1059, 1067, 1073, 1090, 1105, 1122, 1132, + 1138, 1140, 1146, 1162, 1170, 1176, 1182, 1184, + 1190 +}; + +static const unsigned char _indic_syllable_machine_indicies[] = { + 1, 0, 2, 3, 3, 4, 1, 0, + 3, 3, 4, 0, 3, 3, 4, 1, + 0, 5, 3, 3, 4, 1, 0, 2, + 3, 3, 4, 1, 0, 0, 0, 6, + 0, 8, 9, 9, 10, 11, 7, 11, + 7, 9, 9, 10, 7, 9, 9, 10, + 11, 7, 12, 9, 9, 10, 11, 7, + 8, 9, 9, 10, 11, 7, 7, 7, + 13, 7, 8, 9, 9, 10, 11, 7, + 7, 7, 14, 7, 16, 17, 17, 18, + 19, 15, 15, 15, 20, 15, 19, 15, + 17, 17, 18, 21, 17, 17, 18, 19, + 15, 16, 17, 17, 18, 19, 15, 22, + 17, 17, 18, 19, 15, 24, 25, 25, + 26, 27, 23, 23, 23, 28, 23, 27, + 23, 25, 25, 26, 23, 25, 25, 26, + 27, 23, 24, 25, 25, 26, 27, 23, + 29, 25, 25, 26, 27, 23, 17, 17, + 18, 1, 0, 31, 30, 33, 34, 35, + 36, 37, 38, 18, 19, 39, 40, 40, + 20, 32, 41, 42, 43, 44, 45, 32, + 47, 48, 49, 50, 4, 1, 51, 46, + 46, 6, 46, 46, 46, 52, 46, 53, + 48, 54, 54, 4, 1, 51, 46, 46, + 46, 46, 46, 46, 52, 46, 48, 54, + 54, 4, 1, 51, 46, 46, 46, 46, + 46, 46, 52, 46, 33, 46, 46, 46, + 55, 56, 46, 1, 51, 46, 46, 46, + 46, 46, 33, 46, 57, 57, 46, 1, + 51, 46, 51, 46, 46, 58, 51, 46, + 51, 46, 51, 46, 46, 46, 51, 46, + 33, 46, 59, 46, 57, 57, 46, 1, + 51, 46, 46, 46, 46, 46, 33, 46, + 33, 46, 46, 46, 57, 57, 46, 1, + 51, 46, 46, 46, 46, 46, 33, 46, + 33, 46, 46, 46, 57, 56, 46, 1, + 51, 46, 46, 46, 46, 46, 33, 46, + 60, 61, 62, 62, 4, 1, 51, 46, + 61, 62, 62, 4, 1, 51, 46, 62, + 62, 4, 1, 51, 46, 63, 64, 64, + 4, 1, 51, 46, 55, 65, 46, 1, + 51, 46, 55, 46, 57, 57, 46, 1, + 51, 46, 57, 65, 46, 1, 51, 46, + 47, 48, 54, 54, 4, 1, 51, 46, + 46, 46, 46, 46, 46, 52, 46, 47, + 48, 49, 54, 4, 1, 51, 46, 46, + 6, 46, 46, 46, 52, 46, 67, 68, + 69, 70, 10, 11, 71, 66, 66, 14, + 66, 66, 66, 72, 66, 73, 68, 74, + 70, 10, 11, 71, 66, 66, 66, 66, + 66, 66, 72, 66, 68, 74, 70, 10, + 11, 71, 66, 66, 66, 66, 66, 66, + 72, 66, 75, 66, 66, 66, 76, 77, + 66, 11, 71, 66, 66, 66, 66, 66, + 75, 66, 78, 68, 79, 80, 10, 11, + 71, 66, 66, 13, 66, 66, 66, 72, + 66, 81, 68, 74, 74, 10, 11, 71, + 66, 66, 66, 66, 66, 66, 72, 66, + 68, 74, 74, 10, 11, 71, 66, 66, + 66, 66, 66, 66, 72, 66, 75, 66, + 66, 66, 82, 77, 66, 11, 71, 66, + 66, 66, 66, 66, 75, 66, 71, 66, + 66, 83, 71, 66, 71, 66, 71, 66, + 66, 66, 71, 66, 75, 66, 84, 66, + 82, 82, 66, 11, 71, 66, 66, 66, + 66, 66, 75, 66, 75, 66, 66, 66, + 82, 82, 66, 11, 71, 66, 66, 66, + 66, 66, 75, 66, 85, 86, 87, 87, + 10, 11, 71, 66, 86, 87, 87, 10, + 11, 71, 66, 87, 87, 10, 11, 71, + 66, 88, 89, 89, 10, 11, 71, 66, + 76, 90, 66, 11, 71, 66, 82, 82, + 66, 11, 71, 66, 76, 66, 82, 82, + 66, 11, 71, 66, 82, 90, 66, 11, + 71, 66, 78, 68, 74, 74, 10, 11, + 71, 66, 66, 66, 66, 66, 66, 72, + 66, 78, 68, 79, 74, 10, 11, 71, + 66, 66, 13, 66, 66, 66, 72, 66, + 8, 9, 9, 10, 11, 66, 67, 68, + 74, 70, 10, 11, 71, 66, 66, 66, + 66, 66, 66, 72, 66, 92, 36, 93, + 93, 18, 19, 39, 91, 91, 91, 91, + 91, 91, 43, 91, 36, 93, 93, 18, + 19, 39, 91, 91, 91, 91, 91, 91, + 43, 91, 94, 91, 91, 91, 95, 96, + 91, 19, 39, 91, 91, 91, 91, 91, + 94, 91, 35, 36, 97, 98, 18, 19, + 39, 91, 91, 20, 91, 91, 91, 43, + 91, 94, 91, 91, 91, 99, 96, 91, + 19, 
39, 91, 91, 91, 91, 91, 94, + 91, 39, 91, 91, 100, 39, 91, 39, + 91, 39, 91, 91, 91, 39, 91, 94, + 91, 101, 91, 99, 99, 91, 19, 39, + 91, 91, 91, 91, 91, 94, 91, 94, + 91, 91, 91, 99, 99, 91, 19, 39, + 91, 91, 91, 91, 91, 94, 91, 102, + 103, 104, 104, 18, 19, 39, 91, 103, + 104, 104, 18, 19, 39, 91, 104, 104, + 18, 19, 39, 91, 35, 36, 93, 93, + 18, 19, 39, 91, 91, 91, 91, 91, + 91, 43, 91, 105, 106, 106, 18, 19, + 39, 91, 95, 107, 91, 19, 39, 91, + 99, 99, 91, 19, 39, 91, 95, 91, + 99, 99, 91, 19, 39, 91, 99, 107, + 91, 19, 39, 91, 35, 36, 97, 93, + 18, 19, 39, 91, 91, 20, 91, 91, + 91, 43, 91, 16, 17, 17, 18, 19, + 108, 108, 108, 20, 108, 16, 17, 17, + 18, 19, 108, 110, 111, 112, 113, 26, + 27, 114, 109, 109, 28, 109, 109, 109, + 115, 109, 116, 111, 113, 113, 26, 27, + 114, 109, 109, 109, 109, 109, 109, 115, + 109, 111, 113, 113, 26, 27, 114, 109, + 109, 109, 109, 109, 109, 115, 109, 117, + 109, 109, 109, 118, 119, 109, 27, 114, + 109, 109, 109, 109, 109, 117, 109, 110, + 111, 112, 40, 26, 27, 114, 109, 109, + 28, 109, 109, 109, 115, 109, 117, 109, + 109, 109, 120, 119, 109, 27, 114, 109, + 109, 109, 109, 109, 117, 109, 114, 109, + 109, 121, 114, 109, 114, 109, 114, 109, + 109, 109, 114, 109, 117, 109, 122, 109, + 120, 120, 109, 27, 114, 109, 109, 109, + 109, 109, 117, 109, 117, 109, 109, 109, + 120, 120, 109, 27, 114, 109, 109, 109, + 109, 109, 117, 109, 123, 124, 125, 125, + 26, 27, 114, 109, 124, 125, 125, 26, + 27, 114, 109, 125, 125, 26, 27, 114, + 109, 110, 111, 113, 113, 26, 27, 114, + 109, 109, 109, 109, 109, 109, 115, 109, + 126, 127, 127, 26, 27, 114, 109, 118, + 128, 109, 27, 114, 109, 120, 120, 109, + 27, 114, 109, 118, 109, 120, 120, 109, + 27, 114, 109, 120, 128, 109, 27, 114, + 109, 33, 34, 35, 36, 97, 93, 18, + 19, 39, 40, 40, 20, 91, 91, 33, + 43, 91, 47, 129, 49, 50, 4, 1, + 51, 46, 46, 6, 46, 46, 46, 52, + 46, 33, 34, 35, 36, 130, 131, 18, + 132, 133, 46, 40, 20, 46, 46, 33, + 43, 46, 16, 134, 134, 18, 132, 51, + 46, 46, 20, 46, 133, 46, 46, 135, + 133, 46, 133, 46, 133, 46, 46, 46, + 133, 46, 33, 46, 59, 16, 134, 134, + 18, 132, 51, 46, 46, 46, 46, 46, + 33, 46, 137, 136, 138, 138, 136, 31, + 139, 136, 138, 138, 136, 31, 139, 136, + 139, 136, 136, 140, 139, 136, 139, 136, + 139, 136, 136, 136, 139, 136, 33, 108, + 108, 108, 108, 108, 108, 108, 108, 40, + 108, 108, 108, 108, 33, 108, 0 +}; + +static const unsigned char _indic_syllable_machine_trans_targs[] = { + 27, 33, 38, 2, 39, 45, 46, 27, + 55, 8, 61, 56, 68, 69, 72, 27, + 77, 15, 83, 78, 86, 27, 91, 27, + 100, 21, 106, 101, 109, 114, 27, 125, + 27, 28, 48, 73, 75, 93, 94, 79, + 95, 115, 116, 87, 123, 128, 27, 29, + 31, 5, 47, 34, 42, 30, 1, 32, + 36, 0, 35, 37, 40, 41, 3, 43, + 4, 44, 27, 49, 51, 12, 71, 57, + 64, 50, 6, 52, 66, 59, 53, 11, + 70, 54, 7, 58, 60, 62, 63, 9, + 65, 10, 67, 27, 74, 17, 76, 89, + 81, 13, 92, 14, 80, 82, 84, 85, + 16, 88, 18, 90, 27, 27, 96, 98, + 19, 23, 102, 110, 97, 99, 112, 104, + 20, 103, 105, 107, 108, 22, 111, 24, + 113, 117, 118, 122, 119, 120, 25, 121, + 27, 124, 26, 126, 127 +}; + +static const char _indic_syllable_machine_trans_actions[] = { + 1, 0, 2, 0, 2, 2, 2, 3, + 2, 0, 2, 0, 2, 2, 2, 4, + 2, 0, 5, 0, 5, 6, 2, 7, + 2, 0, 2, 0, 2, 2, 8, 0, + 11, 2, 2, 5, 0, 12, 12, 0, + 2, 5, 2, 5, 2, 0, 13, 2, + 0, 0, 2, 0, 2, 2, 0, 2, + 2, 0, 0, 2, 2, 2, 0, 0, + 0, 2, 14, 2, 0, 0, 2, 0, + 2, 2, 0, 2, 2, 2, 2, 0, + 2, 2, 0, 0, 2, 2, 2, 0, + 0, 0, 2, 15, 5, 0, 5, 2, + 2, 0, 5, 0, 0, 2, 5, 5, + 0, 0, 0, 2, 16, 17, 2, 0, + 0, 0, 0, 2, 2, 2, 2, 2, + 0, 0, 2, 2, 2, 0, 0, 0, + 2, 0, 18, 18, 0, 
0, 0, 0, + 19, 2, 0, 0, 0 +}; + +static const char _indic_syllable_machine_to_state_actions[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 9, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0 +}; + +static const char _indic_syllable_machine_from_state_actions[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 10, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0 +}; + +static const short _indic_syllable_machine_eof_trans[] = { + 1, 1, 1, 1, 1, 1, 8, 8, + 8, 8, 8, 8, 8, 16, 16, 22, + 16, 16, 16, 24, 24, 24, 24, 24, + 24, 1, 31, 0, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 67, 67, 67, 67, 67, 67, 67, 67, + 67, 67, 67, 67, 67, 67, 67, 67, + 67, 67, 67, 67, 67, 67, 67, 67, + 67, 92, 92, 92, 92, 92, 92, 92, + 92, 92, 92, 92, 92, 92, 92, 92, + 92, 92, 92, 92, 92, 109, 109, 110, + 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, 110, 92, 47, 47, 47, 47, + 47, 47, 47, 137, 137, 137, 137, 137, + 109 +}; + +static const int indic_syllable_machine_start = 27; +static const int indic_syllable_machine_first_final = 27; +static const int indic_syllable_machine_error = -1; + +static const int indic_syllable_machine_en_main = 27; + + +#line 58 "hb-ot-shaper-indic-machine.rl" + + + +#line 117 "hb-ot-shaper-indic-machine.rl" + + +#define found_syllable(syllable_type) \ + HB_STMT_START { \ + if (0) fprintf (stderr, "syllable %d..%d %s\n", ts, te, #syllable_type); \ + for (unsigned int i = ts; i < te; i++) \ + info[i].syllable() = (syllable_serial << 4) | syllable_type; \ + syllable_serial++; \ + if (unlikely (syllable_serial == 16)) syllable_serial = 1; \ + } HB_STMT_END + +inline void +find_syllables_indic (hb_buffer_t *buffer) +{ + unsigned int p, pe, eof, ts, te, act; + int cs; + hb_glyph_info_t *info = buffer->info; + +#line 426 "hb-ot-shaper-indic-machine.hh" + { + cs = indic_syllable_machine_start; + ts = 0; + te = 0; + act = 0; + } + +#line 137 "hb-ot-shaper-indic-machine.rl" + + + p = 0; + pe = eof = buffer->len; + + unsigned int syllable_serial = 1; + +#line 442 "hb-ot-shaper-indic-machine.hh" + { + int _slen; + int _trans; + const unsigned char *_keys; + const unsigned char *_inds; + if ( p == pe ) + goto _test_eof; +_resume: + switch ( _indic_syllable_machine_from_state_actions[cs] ) { + case 10: +#line 1 "NONE" + {ts = p;} + break; +#line 456 "hb-ot-shaper-indic-machine.hh" + } + + _keys = _indic_syllable_machine_trans_keys + (cs<<1); + _inds = _indic_syllable_machine_indicies + _indic_syllable_machine_index_offsets[cs]; + + _slen = _indic_syllable_machine_key_spans[cs]; + _trans = _inds[ _slen > 0 && _keys[0] <=( info[p].indic_category()) && + ( info[p].indic_category()) <= _keys[1] ? 
+ ( info[p].indic_category()) - _keys[0] : _slen ]; + +_eof_trans: + cs = _indic_syllable_machine_trans_targs[_trans]; + + if ( _indic_syllable_machine_trans_actions[_trans] == 0 ) + goto _again; + + switch ( _indic_syllable_machine_trans_actions[_trans] ) { + case 2: +#line 1 "NONE" + {te = p+1;} + break; + case 11: +#line 113 "hb-ot-shaper-indic-machine.rl" + {te = p+1;{ found_syllable (indic_non_indic_cluster); }} + break; + case 13: +#line 108 "hb-ot-shaper-indic-machine.rl" + {te = p;p--;{ found_syllable (indic_consonant_syllable); }} + break; + case 14: +#line 109 "hb-ot-shaper-indic-machine.rl" + {te = p;p--;{ found_syllable (indic_vowel_syllable); }} + break; + case 17: +#line 110 "hb-ot-shaper-indic-machine.rl" + {te = p;p--;{ found_syllable (indic_standalone_cluster); }} + break; + case 19: +#line 111 "hb-ot-shaper-indic-machine.rl" + {te = p;p--;{ found_syllable (indic_symbol_cluster); }} + break; + case 15: +#line 112 "hb-ot-shaper-indic-machine.rl" + {te = p;p--;{ found_syllable (indic_broken_cluster); buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE; }} + break; + case 16: +#line 113 "hb-ot-shaper-indic-machine.rl" + {te = p;p--;{ found_syllable (indic_non_indic_cluster); }} + break; + case 1: +#line 108 "hb-ot-shaper-indic-machine.rl" + {{p = ((te))-1;}{ found_syllable (indic_consonant_syllable); }} + break; + case 3: +#line 109 "hb-ot-shaper-indic-machine.rl" + {{p = ((te))-1;}{ found_syllable (indic_vowel_syllable); }} + break; + case 7: +#line 110 "hb-ot-shaper-indic-machine.rl" + {{p = ((te))-1;}{ found_syllable (indic_standalone_cluster); }} + break; + case 8: +#line 111 "hb-ot-shaper-indic-machine.rl" + {{p = ((te))-1;}{ found_syllable (indic_symbol_cluster); }} + break; + case 4: +#line 112 "hb-ot-shaper-indic-machine.rl" + {{p = ((te))-1;}{ found_syllable (indic_broken_cluster); buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE; }} + break; + case 6: +#line 1 "NONE" + { switch( act ) { + case 1: + {{p = ((te))-1;} found_syllable (indic_consonant_syllable); } + break; + case 5: + {{p = ((te))-1;} found_syllable (indic_broken_cluster); buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE; } + break; + case 6: + {{p = ((te))-1;} found_syllable (indic_non_indic_cluster); } + break; + } + } + break; + case 18: +#line 1 "NONE" + {te = p+1;} +#line 108 "hb-ot-shaper-indic-machine.rl" + {act = 1;} + break; + case 5: +#line 1 "NONE" + {te = p+1;} +#line 112 "hb-ot-shaper-indic-machine.rl" + {act = 5;} + break; + case 12: +#line 1 "NONE" + {te = p+1;} +#line 113 "hb-ot-shaper-indic-machine.rl" + {act = 6;} + break; +#line 559 "hb-ot-shaper-indic-machine.hh" + } + +_again: + switch ( _indic_syllable_machine_to_state_actions[cs] ) { + case 9: +#line 1 "NONE" + {ts = 0;} + break; +#line 568 "hb-ot-shaper-indic-machine.hh" + } + + if ( ++p != pe ) + goto _resume; + _test_eof: {} + if ( p == eof ) + { + if ( _indic_syllable_machine_eof_trans[cs] > 0 ) { + _trans = _indic_syllable_machine_eof_trans[cs] - 1; + goto _eof_trans; + } + } + + } + +#line 145 "hb-ot-shaper-indic-machine.rl" + +} + +#undef found_syllable + +#endif /* HB_OT_SHAPER_INDIC_MACHINE_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shaper-indic-table.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-indic-table.cc new file mode 100644 index 0000000000..8994f3ca59 --- /dev/null +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-indic-table.cc @@ -0,0 +1,560 @@ +/* == Start of generated table == */ +/* + * The following table is generated by running: + * + * ./gen-indic-table.py 
IndicSyllabicCategory.txt IndicPositionalCategory.txt Blocks.txt + * + * on files with these headers: + * + * # IndicSyllabicCategory-14.0.0.txt + * # Date: 2021-05-22, 01:01:00 GMT [KW, RP] + * # IndicPositionalCategory-14.0.0.txt + * # Date: 2021-05-22, 01:01:00 GMT [KW, RP] + * # Blocks-14.0.0.txt + * # Date: 2021-01-22, 23:29:00 GMT [KW] + */ + +#include "hb.hh" + +#ifndef HB_NO_OT_SHAPE + +#include "hb-ot-shaper-indic.hh" + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-macros" + +#include "hb-ot-shaper-indic-machine.hh" +#include "hb-ot-shaper-khmer-machine.hh" +#include "hb-ot-shaper-myanmar-machine.hh" + +/* indic */ +#define OT_X I_Cat(X) +#define OT_C I_Cat(C) +#define OT_V I_Cat(V) +#define OT_N I_Cat(N) +#define OT_H I_Cat(H) +#define OT_ZWNJ I_Cat(ZWNJ) +#define OT_ZWJ I_Cat(ZWJ) +#define OT_M I_Cat(M) +#define OT_SM I_Cat(SM) +#define OT_A I_Cat(A) +#define OT_VD I_Cat(VD) +#define OT_PLACEHOLDER I_Cat(PLACEHOLDER) +#define OT_DOTTEDCIRCLE I_Cat(DOTTEDCIRCLE) +#define OT_RS I_Cat(RS) +#define OT_Repha I_Cat(Repha) +#define OT_Ra I_Cat(Ra) +#define OT_CM I_Cat(CM) +#define OT_Symbol I_Cat(Symbol) +#define OT_CS I_Cat(CS) +/* khmer */ +#define OT_VAbv K_Cat(VAbv) +#define OT_VBlw K_Cat(VBlw) +#define OT_VPre K_Cat(VPre) +#define OT_VPst K_Cat(VPst) +#define OT_Robatic K_Cat(Robatic) +#define OT_Xgroup K_Cat(Xgroup) +#define OT_Ygroup K_Cat(Ygroup) +/* myanmar */ +static_assert (OT_VAbv == M_Cat(VAbv), ""); +static_assert (OT_VBlw == M_Cat(VBlw), ""); +static_assert (OT_VPre == M_Cat(VPre), ""); +static_assert (OT_VPst == M_Cat(VPst), ""); +#define OT_IV M_Cat(IV) +#define OT_As M_Cat(As) +#define OT_DB M_Cat(DB) +#define OT_GB M_Cat(GB) +#define OT_MH M_Cat(MH) +#define OT_MR M_Cat(MR) +#define OT_MW M_Cat(MW) +#define OT_MY M_Cat(MY) +#define OT_PT M_Cat(PT) +#define OT_VS M_Cat(VS) +#define OT_ML M_Cat(ML) + + +#define _OT_A OT_A /* 53 chars; A */ +#define _OT_As OT_As /* 1 chars; As */ +#define _OT_C OT_C /* 478 chars; C */ +#define _OT_CM OT_CM /* 1 chars; CM */ +#define _OT_CS OT_CS /* 2 chars; CS */ +#define _OT_DC OT_DOTTEDCIRCLE /* 1 chars; DOTTEDCIRCLE */ +#define _OT_H OT_H /* 11 chars; H */ +#define _OT_M OT_M /* 143 chars; M */ +#define _OT_MH OT_MH /* 1 chars; MH */ +#define _OT_ML OT_ML /* 1 chars; ML */ +#define _OT_MR OT_MR /* 1 chars; MR */ +#define _OT_MW OT_MW /* 2 chars; MW */ +#define _OT_MY OT_MY /* 3 chars; MY */ +#define _OT_N OT_N /* 17 chars; N */ +#define _OT_GB OT_PLACEHOLDER /* 165 chars; PLACEHOLDER */ +#define _OT_PT OT_PT /* 8 chars; PT */ +#define _OT_R OT_Ra /* 14 chars; Ra */ +#define _OT_Rf OT_Repha /* 1 chars; Repha */ +#define _OT_Rt OT_Robatic /* 3 chars; Robatic */ +#define _OT_SM OT_SM /* 55 chars; SM */ +#define _OT_S OT_Symbol /* 22 chars; Symbol */ +#define _OT_V OT_V /* 172 chars; V */ +#define _OT_VA OT_VAbv /* 18 chars; VAbv */ +#define _OT_VB OT_VBlw /* 7 chars; VBlw */ +#define _OT_VL OT_VPre /* 5 chars; VPre */ +#define _OT_VR OT_VPst /* 13 chars; VPst */ +#define _OT_VS OT_VS /* 16 chars; VS */ +#define _OT_X OT_X /* 2 chars; X */ +#define _OT_Xg OT_Xgroup /* 7 chars; Xgroup */ +#define _OT_Yg OT_Ygroup /* 4 chars; Ygroup */ +#define _OT_ZWJ OT_ZWJ /* 1 chars; ZWJ */ +#define _OT_ZWNJ OT_ZWNJ /* 1 chars; ZWNJ */ + +#define _POS_T POS_ABOVE_C /* 22 chars; ABOVE_C */ +#define _POS_A POS_AFTER_MAIN /* 3 chars; AFTER_MAIN */ +#define _POS_AP POS_AFTER_POST /* 50 chars; AFTER_POST */ +#define _POS_AS POS_AFTER_SUB /* 51 chars; AFTER_SUB */ +#define _POS_C POS_BASE_C /* 833 chars; BASE_C */ +#define _POS_BS 
POS_BEFORE_SUB /* 25 chars; BEFORE_SUB */ +#define _POS_B POS_BELOW_C /* 13 chars; BELOW_C */ +#define _POS_X POS_END /* 71 chars; END */ +#define _POS_R POS_POST_C /* 13 chars; POST_C */ +#define _POS_L POS_PRE_C /* 5 chars; PRE_C */ +#define _POS_LM POS_PRE_M /* 14 chars; PRE_M */ +#define _POS_SM POS_SMVD /* 129 chars; SMVD */ + +#pragma GCC diagnostic pop + +#define INDIC_COMBINE_CATEGORIES(S,M) ((S) | ((M) << 8)) + +#define _(S,M) INDIC_COMBINE_CATEGORIES (_OT_##S, _POS_##M) + + +static const uint16_t indic_table[] = { + + +#define indic_offset_0x0028u 0 + + + /* Basic Latin */ + + /* 0028 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(GB,C), _(X,X), _(X,X), + /* 0030 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 0038 */ _(GB,C), _(GB,C), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + +#define indic_offset_0x00b0u 24 + + + /* Latin-1 Supplement */ + + /* 00B0 */ _(X,X), _(X,X),_(SM,SM),_(SM,SM), _(X,X), _(X,X), _(X,X), _(X,X), + /* 00B8 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 00C0 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 00C8 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 00D0 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(GB,C), + +#define indic_offset_0x0900u 64 + + + /* Devanagari */ + + /* 0900 */_(SM,SM),_(SM,SM),_(SM,SM),_(SM,SM), _(V,C), _(V,C), _(V,C), _(V,C), + /* 0908 */ _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), + /* 0910 */ _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(C,C), _(C,C), _(C,C), + /* 0918 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0920 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0928 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0930 */ _(R,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0938 */ _(C,C), _(C,C), _(M,AS), _(M,AS), _(N,X), _(S,SM), _(M,AS), _(M,LM), + /* 0940 */ _(M,AS), _(M,AS), _(M,AS), _(M,AS), _(M,AS), _(M,AS), _(M,AS), _(M,AS), + /* 0948 */ _(M,AS), _(M,AS), _(M,AS), _(M,AS), _(M,AS), _(H,B), _(M,LM), _(M,AS), + /* 0950 */ _(X,X), _(A,SM), _(A,SM),_(SM,SM),_(SM,SM), _(M,AS), _(M,AS), _(M,AS), + /* 0958 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0960 */ _(V,C), _(V,C), _(M,AS), _(M,AS), _(X,X), _(X,X), _(GB,C), _(GB,C), + /* 0968 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 0970 */ _(X,X), _(X,X), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), + /* 0978 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + + /* Bengali */ + + /* 0980 */ _(GB,C),_(SM,SM),_(SM,SM),_(SM,SM), _(X,X), _(V,C), _(V,C), _(V,C), + /* 0988 */ _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(X,X), _(X,X), _(V,C), + /* 0990 */ _(V,C), _(X,X), _(X,X), _(V,C), _(V,C), _(C,C), _(C,C), _(C,C), + /* 0998 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 09A0 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 09A8 */ _(C,C), _(X,X), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 09B0 */ _(R,C), _(X,X), _(C,C), _(X,X), _(X,X), _(X,X), _(C,C), _(C,C), + /* 09B8 */ _(C,C), _(C,C), _(X,X), _(X,X), _(N,X), _(S,SM), _(M,AP), _(M,LM), + /* 09C0 */ _(M,AP), _(M,AS), _(M,AS), _(M,AS), _(M,AS), _(X,X), _(X,X), _(M,LM), + /* 09C8 */ _(M,LM), _(X,X), _(X,X), _(M,AP), _(M,AP), _(H,B), _(C,C), _(X,X), + /* 09D0 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(M,AP), + /* 09D8 */ _(X,X), _(X,X), _(X,X), _(X,X), 
_(C,C), _(C,C), _(X,X), _(C,C), + /* 09E0 */ _(V,C), _(V,C), _(M,AS), _(M,AS), _(X,X), _(X,X), _(GB,C), _(GB,C), + /* 09E8 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 09F0 */ _(R,C), _(C,C), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 09F8 */ _(X,X), _(X,X), _(X,X), _(X,X), _(GB,C), _(X,X),_(SM,SM), _(X,X), + + /* Gurmukhi */ + + /* 0A00 */ _(X,X),_(SM,SM),_(SM,SM),_(SM,SM), _(X,X), _(V,C), _(V,C), _(V,C), + /* 0A08 */ _(V,C), _(V,C), _(V,C), _(X,X), _(X,X), _(X,X), _(X,X), _(V,C), + /* 0A10 */ _(V,C), _(X,X), _(X,X), _(V,C), _(V,C), _(C,C), _(C,C), _(C,C), + /* 0A18 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0A20 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0A28 */ _(C,C), _(X,X), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0A30 */ _(R,C), _(X,X), _(C,C), _(C,C), _(X,X), _(C,C), _(C,C), _(X,X), + /* 0A38 */ _(C,C), _(C,C), _(X,X), _(X,X), _(N,X), _(X,X), _(M,AP), _(M,LM), + /* 0A40 */ _(M,AP), _(M,AP), _(M,AP), _(X,X), _(X,X), _(X,X), _(X,X), _(M,AP), + /* 0A48 */ _(M,AP), _(X,X), _(X,X), _(M,AP), _(M,AP), _(H,B), _(X,X), _(X,X), + /* 0A50 */ _(X,X), _(M,B), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 0A58 */ _(X,X), _(C,C), _(C,C), _(C,C), _(C,C), _(X,X), _(C,C), _(X,X), + /* 0A60 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(GB,C), _(GB,C), + /* 0A68 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 0A70 */_(SM,SM),_(SM,SM), _(C,C), _(C,C), _(X,X), _(CM,C), _(X,X), _(X,X), + /* 0A78 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + + /* Gujarati */ + + /* 0A80 */ _(X,X),_(SM,SM),_(SM,SM),_(SM,SM), _(X,X), _(V,C), _(V,C), _(V,C), + /* 0A88 */ _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(X,X), _(V,C), + /* 0A90 */ _(V,C), _(V,C), _(X,X), _(V,C), _(V,C), _(C,C), _(C,C), _(C,C), + /* 0A98 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0AA0 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0AA8 */ _(C,C), _(X,X), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0AB0 */ _(R,C), _(X,X), _(C,C), _(C,C), _(X,X), _(C,C), _(C,C), _(C,C), + /* 0AB8 */ _(C,C), _(C,C), _(X,X), _(X,X), _(N,X), _(S,SM), _(M,AP), _(M,LM), + /* 0AC0 */ _(M,AP), _(M,AP), _(M,AP), _(M,AP), _(M,AP), _(M,AS), _(X,X), _(M,AS), + /* 0AC8 */ _(M,AS), _(M,AP), _(X,X), _(M,AP), _(M,AP), _(H,B), _(X,X), _(X,X), + /* 0AD0 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 0AD8 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 0AE0 */ _(V,C), _(V,C), _(M,AP), _(M,AP), _(X,X), _(X,X), _(GB,C), _(GB,C), + /* 0AE8 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 0AF0 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 0AF8 */ _(X,X), _(C,C), _(A,SM), _(N,X), _(A,SM), _(N,X), _(N,X), _(N,X), + + /* Oriya */ + + /* 0B00 */ _(X,X),_(SM,BS),_(SM,SM),_(SM,SM), _(X,X), _(V,C), _(V,C), _(V,C), + /* 0B08 */ _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(X,X), _(X,X), _(V,C), + /* 0B10 */ _(V,C), _(X,X), _(X,X), _(V,C), _(V,C), _(C,C), _(C,C), _(C,C), + /* 0B18 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0B20 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0B28 */ _(C,C), _(X,X), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0B30 */ _(R,C), _(X,X), _(C,C), _(C,C), _(X,X), _(C,C), _(C,C), _(C,C), + /* 0B38 */ _(C,C), _(C,C), _(X,X), _(X,X), _(N,X), _(S,SM), _(M,AP), _(M,A), 
+ /* 0B40 */ _(M,AP), _(M,AS), _(M,AS), _(M,AS), _(M,AS), _(X,X), _(X,X), _(M,LM), + /* 0B48 */ _(M,A), _(X,X), _(X,X), _(M,AP), _(M,AP), _(H,B), _(X,X), _(X,X), + /* 0B50 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(N,X), _(M,A), _(M,AP), + /* 0B58 */ _(X,X), _(X,X), _(X,X), _(X,X), _(C,C), _(C,C), _(X,X), _(C,C), + /* 0B60 */ _(V,C), _(V,C), _(M,AS), _(M,AS), _(X,X), _(X,X), _(GB,C), _(GB,C), + /* 0B68 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 0B70 */ _(X,X), _(C,C), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 0B78 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + + /* Tamil */ + + /* 0B80 */ _(X,X), _(X,X),_(SM,SM), _(X,X), _(X,X), _(V,C), _(V,C), _(V,C), + /* 0B88 */ _(V,C), _(V,C), _(V,C), _(X,X), _(X,X), _(X,X), _(V,C), _(V,C), + /* 0B90 */ _(V,C), _(X,X), _(V,C), _(V,C), _(V,C), _(C,C), _(X,X), _(X,X), + /* 0B98 */ _(X,X), _(C,C), _(C,C), _(X,X), _(C,C), _(X,X), _(C,C), _(C,C), + /* 0BA0 */ _(X,X), _(X,X), _(X,X), _(C,C), _(C,C), _(X,X), _(X,X), _(X,X), + /* 0BA8 */ _(C,C), _(C,C), _(C,C), _(X,X), _(X,X), _(X,X), _(C,C), _(C,C), + /* 0BB0 */ _(R,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0BB8 */ _(C,C), _(C,C), _(X,X), _(X,X), _(X,X), _(X,X), _(M,AP), _(M,AP), + /* 0BC0 */ _(M,AS), _(M,AP), _(M,AP), _(X,X), _(X,X), _(X,X), _(M,LM), _(M,LM), + /* 0BC8 */ _(M,LM), _(X,X), _(M,AP), _(M,AP), _(M,AP), _(H,T), _(X,X), _(X,X), + /* 0BD0 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(M,AP), + /* 0BD8 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 0BE0 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(GB,C), _(GB,C), + /* 0BE8 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 0BF0 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 0BF8 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + + /* Telugu */ + + /* 0C00 */_(SM,SM),_(SM,SM),_(SM,SM),_(SM,SM),_(SM,SM), _(V,C), _(V,C), _(V,C), + /* 0C08 */ _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(X,X), _(V,C), _(V,C), + /* 0C10 */ _(V,C), _(X,X), _(V,C), _(V,C), _(V,C), _(C,C), _(C,C), _(C,C), + /* 0C18 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0C20 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0C28 */ _(C,C), _(X,X), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0C30 */ _(R,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0C38 */ _(C,C), _(C,C), _(X,X), _(X,X), _(N,X), _(S,SM), _(M,BS), _(M,BS), + /* 0C40 */ _(M,BS), _(M,BS), _(M,BS), _(M,AS), _(M,AS), _(X,X), _(M,BS), _(M,BS), + /* 0C48 */ _(M,BS), _(X,X), _(M,BS), _(M,BS), _(M,BS), _(H,T), _(X,X), _(X,X), + /* 0C50 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(M,BS), _(M,BS), _(X,X), + /* 0C58 */ _(C,C), _(C,C), _(C,C), _(X,X), _(X,X), _(C,C), _(X,X), _(X,X), + /* 0C60 */ _(V,C), _(V,C), _(M,BS), _(M,BS), _(X,X), _(X,X), _(GB,C), _(GB,C), + /* 0C68 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 0C70 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 0C78 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + + /* Kannada */ + + /* 0C80 */ _(GB,C),_(SM,SM),_(SM,SM),_(SM,SM), _(X,X), _(V,C), _(V,C), _(V,C), + /* 0C88 */ _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(X,X), _(V,C), _(V,C), + /* 0C90 */ _(V,C), _(X,X), _(V,C), _(V,C), _(V,C), _(C,C), _(C,C), _(C,C), + /* 0C98 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0CA0 */ _(C,C), _(C,C), 
_(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0CA8 */ _(C,C), _(X,X), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0CB0 */ _(R,C), _(C,C), _(C,C), _(C,C), _(X,X), _(C,C), _(C,C), _(C,C), + /* 0CB8 */ _(C,C), _(C,C), _(X,X), _(X,X), _(N,X), _(S,SM), _(M,BS), _(M,BS), + /* 0CC0 */ _(M,BS), _(M,BS), _(M,BS), _(M,AS), _(M,AS), _(X,X), _(M,BS), _(M,AS), + /* 0CC8 */ _(M,AS), _(X,X), _(M,AS), _(M,AS), _(M,BS), _(H,T), _(X,X), _(X,X), + /* 0CD0 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(M,AS), _(M,AS), _(X,X), + /* 0CD8 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(C,C), _(C,C), _(X,X), + /* 0CE0 */ _(V,C), _(V,C), _(M,BS), _(M,BS), _(X,X), _(X,X), _(GB,C), _(GB,C), + /* 0CE8 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 0CF0 */ _(X,X), _(CS,C), _(CS,C), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 0CF8 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + + /* Malayalam */ + + /* 0D00 */_(SM,SM),_(SM,SM),_(SM,SM),_(SM,SM), _(GB,C), _(V,C), _(V,C), _(V,C), + /* 0D08 */ _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(X,X), _(V,C), _(V,C), + /* 0D10 */ _(V,C), _(X,X), _(V,C), _(V,C), _(V,C), _(C,C), _(C,C), _(C,C), + /* 0D18 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0D20 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0D28 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0D30 */ _(R,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 0D38 */ _(C,C), _(C,C), _(C,C), _(M,AS), _(M,AS), _(S,SM), _(M,AP), _(M,AP), + /* 0D40 */ _(M,AP), _(M,AP), _(M,AP), _(M,AP), _(M,AP), _(X,X), _(M,LM), _(M,LM), + /* 0D48 */ _(M,LM), _(X,X), _(M,AP), _(M,AP), _(M,AP), _(H,T), _(Rf,X), _(X,X), + /* 0D50 */ _(X,X), _(X,X), _(X,X), _(X,X), _(C,C), _(C,C), _(C,C), _(M,AP), + /* 0D58 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(V,C), + /* 0D60 */ _(V,C), _(V,C), _(M,AP), _(M,AP), _(X,X), _(X,X), _(GB,C), _(GB,C), + /* 0D68 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 0D70 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 0D78 */ _(X,X), _(X,X), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + +#define indic_offset_0x1000u 1216 + + + /* Myanmar */ + + /* 1000 */ _(C,C), _(C,C), _(C,C), _(C,C), _(R,C), _(C,C), _(C,C), _(C,C), + /* 1008 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 1010 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 1018 */ _(C,C), _(C,C), _(C,C), _(R,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 1020 */ _(C,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), + /* 1028 */ _(V,C), _(V,C), _(V,C), _(VR,R), _(VR,R), _(VA,T), _(VA,T), _(VB,B), + /* 1030 */ _(VB,B), _(VL,L), _(A,SM), _(VA,T), _(VA,T), _(VA,T), _(A,SM), _(N,X), + /* 1038 */_(SM,SM), _(H,X), _(As,X), _(MY,X), _(MR,X), _(MW,X), _(MH,X), _(C,C), + /* 1040 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 1048 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(X,X), _(X,X), _(C,C), _(X,X), + /* 1050 */ _(C,C), _(C,C), _(V,C), _(V,C), _(V,C), _(V,C), _(VR,R), _(VR,R), + /* 1058 */ _(VB,B), _(VB,B), _(R,C), _(C,C), _(C,C), _(C,C), _(MY,X), _(MY,X), + /* 1060 */ _(ML,X), _(C,C), _(VR,R), _(PT,X), _(PT,X), _(C,C), _(C,C), _(VR,R), + /* 1068 */ _(VR,R), _(PT,X), _(PT,X), _(PT,X), _(PT,X), _(PT,X), _(C,C), _(C,C), + /* 1070 */ _(C,C), _(VA,T), _(VA,T), _(VA,T), _(VA,T), _(C,C), _(C,C), _(C,C), + /* 1078 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), 
_(C,C), + /* 1080 */ _(C,C), _(C,C), _(MW,X), _(VR,R), _(VL,L), _(VA,T), _(VA,T),_(SM,SM), + /* 1088 */_(SM,SM),_(SM,SM),_(SM,SM),_(SM,SM),_(SM,SM),_(SM,SM), _(C,C),_(SM,SM), + /* 1090 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 1098 */ _(GB,C), _(GB,C),_(SM,SM),_(SM,SM),_(SM,SM), _(VA,T), _(X,X), _(X,X), + +#define indic_offset_0x1780u 1376 + + + /* Khmer */ + + /* 1780 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 1788 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 1790 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 1798 */ _(C,C), _(C,C), _(R,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* 17A0 */ _(C,C), _(C,C), _(C,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), + /* 17A8 */ _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), _(V,C), + /* 17B0 */ _(V,C), _(V,C), _(V,C), _(V,C), _(X,X), _(X,X), _(VR,R), _(VA,T), + /* 17B8 */ _(VA,T), _(VA,T), _(VA,T), _(VB,B), _(VB,B), _(VB,B), _(VA,T), _(VR,R), + /* 17C0 */ _(VR,R), _(VL,L), _(VL,L), _(VL,L), _(VR,R), _(VR,R), _(Xg,X), _(Yg,X), + /* 17C8 */ _(Yg,X), _(Rt,X), _(Rt,X), _(Xg,X), _(Rt,X), _(Xg,X), _(Xg,X), _(Xg,X), + /* 17D0 */ _(Xg,X), _(Xg,X), _(H,X), _(Yg,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 17D8 */ _(X,X), _(GB,C), _(X,X), _(X,X), _(S,SM), _(Yg,X), _(X,X), _(X,X), + /* 17E0 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* 17E8 */ _(GB,C), _(GB,C), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + +#define indic_offset_0x1cd0u 1488 + + + /* Vedic Extensions */ + + /* 1CD0 */ _(A,SM), _(A,SM), _(A,SM), _(X,X), _(A,SM), _(A,SM), _(A,SM), _(A,SM), + /* 1CD8 */ _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), + /* 1CE0 */ _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), + /* 1CE8 */ _(A,SM), _(S,SM), _(S,SM), _(S,SM), _(S,SM), _(A,SM), _(S,SM), _(S,SM), + /* 1CF0 */ _(S,SM), _(S,SM), _(C,C), _(C,C), _(A,SM), _(C,C), _(C,C), _(A,SM), + /* 1CF8 */ _(A,SM), _(A,SM), _(GB,C), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + +#define indic_offset_0x2008u 1536 + + + /* General Punctuation */ + + /* 2008 */ _(X,X), _(X,X), _(X,X), _(X,X),_(ZWNJ,X),_(ZWJ,X), _(X,X), _(X,X), + /* 2010 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(X,X), _(X,X), + /* 2018 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 2020 */ _(X,X), _(X,X), _(GB,C), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + +#define indic_offset_0x2070u 1568 + + + /* Superscripts and Subscripts */ + + /* 2070 */ _(X,X), _(X,X), _(X,X), _(X,X),_(SM,SM), _(X,X), _(X,X), _(X,X), + /* 2078 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 2080 */ _(X,X), _(X,X),_(SM,SM),_(SM,SM),_(SM,SM), _(X,X), _(X,X), _(X,X), + +#define indic_offset_0x25f8u 1592 + + + /* Geometric Shapes */ + + /* 25F8 */ _(X,X), _(X,X), _(X,X), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(X,X), + +#define indic_offset_0xa8e0u 1600 + + + /* Devanagari Extended */ + + /* A8E0 */ _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), + /* A8E8 */ _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), _(A,SM), + /* A8F0 */ _(A,SM), _(A,SM), _(S,SM), _(S,SM), _(S,SM), _(S,SM), _(S,SM), _(S,SM), + /* A8F8 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(V,C), _(M,AS), + +#define indic_offset_0xa9e0u 1632 + + + /* Myanmar Extended-B */ + + /* A9E0 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(VA,T), _(X,X), _(C,C), + /* A9E8 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), 
_(C,C), _(C,C), + /* A9F0 */ _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), _(GB,C), + /* A9F8 */ _(GB,C), _(GB,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(X,X), + +#define indic_offset_0xaa60u 1664 + + + /* Myanmar Extended-A */ + + /* AA60 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* AA68 */ _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), _(C,C), + /* AA70 */ _(X,X), _(C,C), _(C,C), _(C,C), _(GB,C), _(GB,C), _(GB,C), _(X,X), + /* AA78 */ _(X,X), _(X,X), _(C,C), _(PT,X), _(N,X), _(N,X), _(C,C), _(C,C), + +#define indic_offset_0xfe00u 1696 + + + /* Variation Selectors */ + + /* FE00 */ _(VS,X), _(VS,X), _(VS,X), _(VS,X), _(VS,X), _(VS,X), _(VS,X), _(VS,X), + /* FE08 */ _(VS,X), _(VS,X), _(VS,X), _(VS,X), _(VS,X), _(VS,X), _(VS,X), _(VS,X), + +#define indic_offset_0x11300u 1712 + + + /* Grantha */ + + /* 11300 */ _(X,X),_(SM,SM),_(SM,SM),_(SM,SM), _(X,X), _(X,X), _(X,X), _(X,X), + /* 11308 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 11310 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 11318 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 11320 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 11328 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 11330 */ _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), _(X,X), + /* 11338 */ _(X,X), _(X,X), _(X,X), _(N,X), _(N,X), _(X,X), _(X,X), _(X,X), + +}; /* Table items: 1776; occupancy: 69% */ + +uint16_t +hb_indic_get_categories (hb_codepoint_t u) +{ + switch (u >> 12) + { + case 0x0u: + if (unlikely (u == 0x00A0u)) return _(GB,C); + if (hb_in_range<hb_codepoint_t> (u, 0x0028u, 0x003Fu)) return indic_table[u - 0x0028u + indic_offset_0x0028u]; + if (hb_in_range<hb_codepoint_t> (u, 0x00B0u, 0x00D7u)) return indic_table[u - 0x00B0u + indic_offset_0x00b0u]; + if (hb_in_range<hb_codepoint_t> (u, 0x0900u, 0x0D7Fu)) return indic_table[u - 0x0900u + indic_offset_0x0900u]; + break; + + case 0x1u: + if (hb_in_range<hb_codepoint_t> (u, 0x1000u, 0x109Fu)) return indic_table[u - 0x1000u + indic_offset_0x1000u]; + if (hb_in_range<hb_codepoint_t> (u, 0x1780u, 0x17EFu)) return indic_table[u - 0x1780u + indic_offset_0x1780u]; + if (hb_in_range<hb_codepoint_t> (u, 0x1CD0u, 0x1CFFu)) return indic_table[u - 0x1CD0u + indic_offset_0x1cd0u]; + break; + + case 0x2u: + if (unlikely (u == 0x25CCu)) return _(DC,C); + if (hb_in_range<hb_codepoint_t> (u, 0x2008u, 0x2027u)) return indic_table[u - 0x2008u + indic_offset_0x2008u]; + if (hb_in_range<hb_codepoint_t> (u, 0x2070u, 0x2087u)) return indic_table[u - 0x2070u + indic_offset_0x2070u]; + if (hb_in_range<hb_codepoint_t> (u, 0x25F8u, 0x25FFu)) return indic_table[u - 0x25F8u + indic_offset_0x25f8u]; + break; + + case 0xAu: + if (hb_in_range<hb_codepoint_t> (u, 0xA8E0u, 0xA8FFu)) return indic_table[u - 0xA8E0u + indic_offset_0xa8e0u]; + if (hb_in_range<hb_codepoint_t> (u, 0xA9E0u, 0xA9FFu)) return indic_table[u - 0xA9E0u + indic_offset_0xa9e0u]; + if (hb_in_range<hb_codepoint_t> (u, 0xAA60u, 0xAA7Fu)) return indic_table[u - 0xAA60u + indic_offset_0xaa60u]; + break; + + case 0xFu: + if (hb_in_range<hb_codepoint_t> (u, 0xFE00u, 0xFE0Fu)) return indic_table[u - 0xFE00u + indic_offset_0xfe00u]; + break; + + case 0x11u: + if (hb_in_range<hb_codepoint_t> (u, 0x11300u, 0x1133Fu)) return indic_table[u - 0x11300u + indic_offset_0x11300u]; + break; + + default: + break; + } + return _(X,X); +} + +#undef _ +#undef INDIC_COMBINE_CATEGORIES + +#undef _OT_A 
+#undef _OT_As +#undef _OT_C +#undef _OT_CM +#undef _OT_CS +#undef _OT_DC +#undef _OT_H +#undef _OT_M +#undef _OT_MH +#undef _OT_ML +#undef _OT_MR +#undef _OT_MW +#undef _OT_MY +#undef _OT_N +#undef _OT_GB +#undef _OT_PT +#undef _OT_R +#undef _OT_Rf +#undef _OT_Rt +#undef _OT_SM +#undef _OT_S +#undef _OT_V +#undef _OT_VA +#undef _OT_VB +#undef _OT_VL +#undef _OT_VR +#undef _OT_VS +#undef _OT_X +#undef _OT_Xg +#undef _OT_Yg +#undef _OT_ZWJ +#undef _OT_ZWNJ + +#undef _POS_T +#undef _POS_A +#undef _POS_AP +#undef _POS_AS +#undef _POS_C +#undef _POS_BS +#undef _POS_B +#undef _POS_X +#undef _POS_R +#undef _POS_L +#undef _POS_LM +#undef _POS_SM + +#endif + +/* == End of generated table == */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-indic.cc index c80f7df6a9..48a3c74463 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-indic.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-indic.cc @@ -28,9 +28,9 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex-indic.hh" -#include "hb-ot-shape-complex-indic-machine.hh" -#include "hb-ot-shape-complex-vowel-constraints.hh" +#include "hb-ot-shaper-indic.hh" +#include "hb-ot-shaper-indic-machine.hh" +#include "hb-ot-shaper-vowel-constraints.hh" #include "hb-ot-layout.hh" @@ -39,6 +39,81 @@ */ +static inline void +set_indic_properties (hb_glyph_info_t &info) +{ + hb_codepoint_t u = info.codepoint; + unsigned int type = hb_indic_get_categories (u); + + info.indic_category() = (indic_category_t) (type & 0xFFu); + info.indic_position() = (indic_position_t) (type >> 8); +} + + +static inline bool +is_one_of (const hb_glyph_info_t &info, unsigned int flags) +{ + /* If it ligated, all bets are off. */ + if (_hb_glyph_info_ligated (&info)) return false; + return !!(FLAG_UNSAFE (info.indic_category()) & flags); +} + +/* Note: + * + * We treat Vowels and placeholders as if they were consonants. This is safe because Vowels + * cannot happen in a consonant syllable. The plus side however is, we can call the + * consonant syllable logic from the vowel syllable function and get it all right! + * + * Keep in sync with consonant_categories in the generator. */ +#define CONSONANT_FLAGS_INDIC (FLAG (I_Cat(C)) | FLAG (I_Cat(CS)) | FLAG (I_Cat(Ra)) | FLAG (I_Cat(CM)) | FLAG (I_Cat(V)) | FLAG (I_Cat(PLACEHOLDER)) | FLAG (I_Cat(DOTTEDCIRCLE))) + +static inline bool +is_consonant (const hb_glyph_info_t &info) +{ + return is_one_of (info, CONSONANT_FLAGS_INDIC); +} + +#define JOINER_FLAGS (FLAG (I_Cat(ZWJ)) | FLAG (I_Cat(ZWNJ))) + +static inline bool +is_joiner (const hb_glyph_info_t &info) +{ + return is_one_of (info, JOINER_FLAGS); +} + +static inline bool +is_halant (const hb_glyph_info_t &info) +{ + return is_one_of (info, FLAG (I_Cat(H))); +} + +struct hb_indic_would_substitute_feature_t +{ + void init (const hb_ot_map_t *map, hb_tag_t feature_tag, bool zero_context_) + { + zero_context = zero_context_; + map->get_stage_lookups (0/*GSUB*/, + map->get_feature_stage (0/*GSUB*/, feature_tag), + &lookups, &count); + } + + bool would_substitute (const hb_codepoint_t *glyphs, + unsigned int glyphs_count, + hb_face_t *face) const + { + for (unsigned int i = 0; i < count; i++) + if (hb_ot_layout_lookup_would_substitute (face, lookups[i].index, glyphs, glyphs_count, zero_context)) + return true; + return false; + } + + private: + const hb_ot_map_t::lookup_map_t *lookups; + unsigned int count; + bool zero_context; +}; + + /* * Indic configurations. 
Note that we do not want to keep every single script-specific * behavior in these tables necessarily. This should mainly be used for per-script @@ -47,10 +122,6 @@ * instead of adding a new flag in these structs. */ -enum base_position_t { - BASE_POS_LAST_SINHALA, - BASE_POS_LAST -}; enum reph_position_t { REPH_POS_AFTER_MAIN = POS_AFTER_MAIN, REPH_POS_BEFORE_SUB = POS_BEFORE_SUB, @@ -72,7 +143,6 @@ struct indic_config_t hb_script_t script; bool has_old_spec; hb_codepoint_t virama; - base_position_t base_pos; reph_position_t reph_pos; reph_mode_t reph_mode; blwf_mode_t blwf_mode; @@ -81,26 +151,19 @@ struct indic_config_t static const indic_config_t indic_configs[] = { /* Default. Should be first. */ - {HB_SCRIPT_INVALID, false, 0,BASE_POS_LAST, REPH_POS_BEFORE_POST,REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, - {HB_SCRIPT_DEVANAGARI,true, 0x094Du,BASE_POS_LAST, REPH_POS_BEFORE_POST,REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, - {HB_SCRIPT_BENGALI, true, 0x09CDu,BASE_POS_LAST, REPH_POS_AFTER_SUB, REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, - {HB_SCRIPT_GURMUKHI, true, 0x0A4Du,BASE_POS_LAST, REPH_POS_BEFORE_SUB, REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, - {HB_SCRIPT_GUJARATI, true, 0x0ACDu,BASE_POS_LAST, REPH_POS_BEFORE_POST,REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, - {HB_SCRIPT_ORIYA, true, 0x0B4Du,BASE_POS_LAST, REPH_POS_AFTER_MAIN, REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, - {HB_SCRIPT_TAMIL, true, 0x0BCDu,BASE_POS_LAST, REPH_POS_AFTER_POST, REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, - {HB_SCRIPT_TELUGU, true, 0x0C4Du,BASE_POS_LAST, REPH_POS_AFTER_POST, REPH_MODE_EXPLICIT, BLWF_MODE_POST_ONLY}, - {HB_SCRIPT_KANNADA, true, 0x0CCDu,BASE_POS_LAST, REPH_POS_AFTER_POST, REPH_MODE_IMPLICIT, BLWF_MODE_POST_ONLY}, - {HB_SCRIPT_MALAYALAM, true, 0x0D4Du,BASE_POS_LAST, REPH_POS_AFTER_MAIN, REPH_MODE_LOG_REPHA,BLWF_MODE_PRE_AND_POST}, - {HB_SCRIPT_SINHALA, false,0x0DCAu,BASE_POS_LAST_SINHALA, - REPH_POS_AFTER_POST, REPH_MODE_EXPLICIT, BLWF_MODE_PRE_AND_POST}, + {HB_SCRIPT_INVALID, false, 0,REPH_POS_BEFORE_POST,REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, + {HB_SCRIPT_DEVANAGARI,true, 0x094Du,REPH_POS_BEFORE_POST,REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, + {HB_SCRIPT_BENGALI, true, 0x09CDu,REPH_POS_AFTER_SUB, REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, + {HB_SCRIPT_GURMUKHI, true, 0x0A4Du,REPH_POS_BEFORE_SUB, REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, + {HB_SCRIPT_GUJARATI, true, 0x0ACDu,REPH_POS_BEFORE_POST,REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, + {HB_SCRIPT_ORIYA, true, 0x0B4Du,REPH_POS_AFTER_MAIN, REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, + {HB_SCRIPT_TAMIL, true, 0x0BCDu,REPH_POS_AFTER_POST, REPH_MODE_IMPLICIT, BLWF_MODE_PRE_AND_POST}, + {HB_SCRIPT_TELUGU, true, 0x0C4Du,REPH_POS_AFTER_POST, REPH_MODE_EXPLICIT, BLWF_MODE_POST_ONLY}, + {HB_SCRIPT_KANNADA, true, 0x0CCDu,REPH_POS_AFTER_POST, REPH_MODE_IMPLICIT, BLWF_MODE_POST_ONLY}, + {HB_SCRIPT_MALAYALAM, true, 0x0D4Du,REPH_POS_AFTER_MAIN, REPH_MODE_LOG_REPHA,BLWF_MODE_PRE_AND_POST}, }; - -/* - * Indic shaper. 
- */ - static const hb_ot_map_feature_t indic_features[] = { @@ -207,6 +270,7 @@ static void override_features_indic (hb_ot_shape_planner_t *plan) { plan->map.disable_feature (HB_TAG('l','i','g','a')); + plan->map.add_gsub_pause (hb_syllabic_clear_var); // Don't need syllables anymore, use stop to free buffer var } @@ -356,6 +420,7 @@ setup_syllables_indic (const hb_ot_shape_plan_t *plan HB_UNUSED, hb_font_t *font HB_UNUSED, hb_buffer_t *buffer) { + HB_BUFFER_ALLOCATE_VAR (buffer, syllable); find_syllables_indic (buffer); foreach_syllable (buffer, start, end) buffer->unsafe_to_break (start, end); @@ -367,7 +432,7 @@ compare_indic_order (const hb_glyph_info_t *pa, const hb_glyph_info_t *pb) int a = pa->indic_position(); int b = pb->indic_position(); - return a < b ? -1 : a == b ? 0 : +1; + return (int) a - (int) b; } @@ -379,9 +444,6 @@ update_consonant_positions_indic (const hb_ot_shape_plan_t *plan, { const indic_shape_plan_t *indic_plan = (const indic_shape_plan_t *) plan->data; - if (indic_plan->config->base_pos != BASE_POS_LAST) - return; - hb_codepoint_t virama; if (indic_plan->load_virama_glyph (font, &virama)) { @@ -416,9 +478,9 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan, */ if (buffer->props.script == HB_SCRIPT_KANNADA && start + 3 <= end && - is_one_of (info[start ], FLAG (OT_Ra)) && - is_one_of (info[start+1], FLAG (OT_H)) && - is_one_of (info[start+2], FLAG (OT_ZWJ))) + is_one_of (info[start ], FLAG (I_Cat(Ra))) && + is_one_of (info[start+1], FLAG (I_Cat(H))) && + is_one_of (info[start+2], FLAG (I_Cat(ZWJ)))) { buffer->merge_clusters (start+1, start+3); hb_glyph_info_t tmp = info[start+1]; @@ -452,7 +514,7 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan, start + 3 <= end && ( (indic_plan->config->reph_mode == REPH_MODE_IMPLICIT && !is_joiner (info[start + 2])) || - (indic_plan->config->reph_mode == REPH_MODE_EXPLICIT && info[start + 2].indic_category() == OT_ZWJ) + (indic_plan->config->reph_mode == REPH_MODE_EXPLICIT && info[start + 2].indic_category() == I_Cat(ZWJ)) )) { /* See if it matches the 'rphf' feature. 
*/ @@ -470,7 +532,7 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan, base = start; has_reph = true; } - } else if (indic_plan->config->reph_mode == REPH_MODE_LOG_REPHA && info[start].indic_category() == OT_Repha) + } else if (indic_plan->config->reph_mode == REPH_MODE_LOG_REPHA && info[start].indic_category() == I_Cat(Repha)) { limit += 1; while (limit < end && is_joiner (info[limit])) @@ -479,84 +541,51 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan, has_reph = true; } - switch (indic_plan->config->base_pos) { - case BASE_POS_LAST: - { - /* -> starting from the end of the syllable, move backwards */ - unsigned int i = end; - bool seen_below = false; - do { - i--; - /* -> until a consonant is found */ - if (is_consonant (info[i])) + /* -> starting from the end of the syllable, move backwards */ + unsigned int i = end; + bool seen_below = false; + do { + i--; + /* -> until a consonant is found */ + if (is_consonant (info[i])) + { + /* -> that does not have a below-base or post-base form + * (post-base forms have to follow below-base forms), */ + if (info[i].indic_position() != POS_BELOW_C && + (info[i].indic_position() != POS_POST_C || seen_below)) { - /* -> that does not have a below-base or post-base form - * (post-base forms have to follow below-base forms), */ - if (info[i].indic_position() != POS_BELOW_C && - (info[i].indic_position() != POS_POST_C || seen_below)) - { - base = i; - break; - } - if (info[i].indic_position() == POS_BELOW_C) - seen_below = true; - - /* -> or that is not a pre-base-reordering Ra, - * - * IMPLEMENTATION NOTES: - * - * Our pre-base-reordering Ra's are marked POS_POST_C, so will be skipped - * by the logic above already. - */ - - /* -> or arrive at the first consonant. The consonant stopped at will - * be the base. */ base = i; + break; } - else - { - /* A ZWJ after a Halant stops the base search, and requests an explicit - * half form. - * A ZWJ before a Halant, requests a subjoined form instead, and hence - * search continues. This is particularly important for Bengali - * sequence Ra,H,Ya that should form Ya-Phalaa by subjoining Ya. */ - if (start < i && - info[i].indic_category() == OT_ZWJ && - info[i - 1].indic_category() == OT_H) - break; - } - } while (i > limit); - } - break; - - case BASE_POS_LAST_SINHALA: - { - /* Sinhala base positioning is slightly different from main Indic, in that: - * 1. Its ZWJ behavior is different, - * 2. We don't need to look into the font for consonant positions. - */ - - if (!has_reph) - base = limit; + if (info[i].indic_position() == POS_BELOW_C) + seen_below = true; - /* Find the last base consonant that is not blocked by ZWJ. If there is - * a ZWJ right before a base consonant, that would request a subjoined form. */ - for (unsigned int i = limit; i < end; i++) - if (is_consonant (info[i])) - { - if (limit < i && info[i - 1].indic_category() == OT_ZWJ) - break; - else - base = i; - } + /* -> or that is not a pre-base-reordering Ra, + * + * IMPLEMENTATION NOTES: + * + * Our pre-base-reordering Ra's are marked POS_POST_C, so will be skipped + * by the logic above already. + */ - /* Mark all subsequent consonants as below. */ - for (unsigned int i = base + 1; i < end; i++) - if (is_consonant (info[i])) - info[i].indic_position() = POS_BELOW_C; - } - break; + /* -> or arrive at the first consonant. The consonant stopped at will + * be the base. */ + base = i; + } + else + { + /* A ZWJ after a Halant stops the base search, and requests an explicit + * half form. 
+ * A ZWJ before a Halant, requests a subjoined form instead, and hence + * search continues. This is particularly important for Bengali + * sequence Ra,H,Ya that should form Ya-Phalaa by subjoining Ya. */ + if (start < i && + info[i].indic_category() == I_Cat(ZWJ) && + info[i - 1].indic_category() == I_Cat(H)) + break; + } + } while (i > limit); } /* -> If the syllable starts with Ra + Halant (in a script that has Reph) @@ -611,18 +640,6 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan, if (base < end) info[base].indic_position() = POS_BASE_C; - /* Mark final consonants. A final consonant is one appearing after a matra. - * Happens in Sinhala. */ - for (unsigned int i = base + 1; i < end; i++) - if (info[i].indic_category() == OT_M) { - for (unsigned int j = i + 1; j < end; j++) - if (is_consonant (info[j])) { - info[j].indic_position() = POS_FINAL_C; - break; - } - break; - } - /* Handle beginning Ra */ if (has_reph) info[start].indic_position() = POS_RA_TO_BECOME_REPH; @@ -659,14 +676,14 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan, { bool disallow_double_halants = buffer->props.script == HB_SCRIPT_KANNADA; for (unsigned int i = base + 1; i < end; i++) - if (info[i].indic_category() == OT_H) + if (info[i].indic_category() == I_Cat(H)) { unsigned int j; for (j = end - 1; j > i; j--) if (is_consonant (info[j]) || - (disallow_double_halants && info[j].indic_category() == OT_H)) + (disallow_double_halants && info[j].indic_category() == I_Cat(H))) break; - if (info[j].indic_category() != OT_H && j > i) { + if (info[j].indic_category() != I_Cat(H) && j > i) { /* Move Halant to after last consonant. */ hb_glyph_info_t t = info[i]; memmove (&info[i], &info[i + 1], (j - i) * sizeof (info[0])); @@ -681,20 +698,16 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan, indic_position_t last_pos = POS_START; for (unsigned int i = start; i < end; i++) { - if ((FLAG_UNSAFE (info[i].indic_category()) & (JOINER_FLAGS | FLAG (OT_N) | FLAG (OT_RS) | MEDIAL_FLAGS | FLAG (OT_H)))) + if ((FLAG_UNSAFE (info[i].indic_category()) & (JOINER_FLAGS | FLAG (I_Cat(N)) | FLAG (I_Cat(RS)) | FLAG (I_Cat(CM)) | FLAG (I_Cat(H))))) { info[i].indic_position() = last_pos; - if (unlikely (info[i].indic_category() == OT_H && + if (unlikely (info[i].indic_category() == I_Cat(H) && info[i].indic_position() == POS_PRE_M)) { /* * Uniscribe doesn't move the Halant with Left Matra. - * TEST: U+092B,U+093F,U+094DE - * We follow. This is important for the Sinhala - * U+0DDA split matra since it decomposes to U+0DD9,U+0DCA - * where U+0DD9 is a left matra and U+0DCA is the virama. - * We don't want to move the virama with the left matra. - * TEST: U+0D9A,U+0DDA + * TEST: U+092B,U+093F,U+094D + * We follow. 
*/ for (unsigned int j = i; j > start; j--) if (info[j - 1].indic_position() != POS_PRE_M) { @@ -718,7 +731,7 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan, if (info[j].indic_position() < POS_SMVD) info[j].indic_position() = info[i].indic_position(); last = i; - } else if (info[i].indic_category() == OT_M) + } else if (info[i].indic_category() == I_Cat(M)) last = i; } @@ -849,10 +862,10 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan, * Test case: U+0924,U+094D,U+0930,U+094d,U+200D,U+0915 */ for (unsigned int i = start; i + 1 < base; i++) - if (info[i ].indic_category() == OT_Ra && - info[i+1].indic_category() == OT_H && + if (info[i ].indic_category() == I_Cat(Ra) && + info[i+1].indic_category() == I_Cat(H) && (i + 2 == base || - info[i+2].indic_category() != OT_ZWJ)) + info[i+2].indic_category() != I_Cat(ZWJ))) { info[i ].mask |= indic_plan->mask_array[INDIC_BLWF]; info[i+1].mask |= indic_plan->mask_array[INDIC_BLWF]; @@ -879,7 +892,7 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan, /* Apply ZWJ/ZWNJ effects */ for (unsigned int i = start + 1; i < end; i++) if (is_joiner (info[i])) { - bool non_joiner = info[i].indic_category() == OT_ZWNJ; + bool non_joiner = info[i].indic_category() == I_Cat(ZWNJ); unsigned int j = i; do { @@ -912,7 +925,7 @@ initial_reordering_standalone_cluster (const hb_ot_shape_plan_t *plan, /* For dotted-circle, this is what Uniscribe does: * If dotted-circle is the last glyph, it just does nothing. * Ie. It doesn't form Reph. */ - if (buffer->info[end - 1].indic_category() == OT_DOTTEDCIRCLE) + if (buffer->info[end - 1].indic_category() == I_Cat(DOTTEDCIRCLE)) return; } @@ -955,8 +968,8 @@ initial_reordering_indic (const hb_ot_shape_plan_t *plan, update_consonant_positions_indic (plan, font, buffer); hb_syllabic_insert_dotted_circles (font, buffer, indic_broken_cluster, - OT_DOTTEDCIRCLE, - OT_Repha, + I_Cat(DOTTEDCIRCLE), + I_Cat(Repha), POS_END); foreach_syllable (buffer, start, end) @@ -978,7 +991,7 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan, * and possibly multiple substitutions happened prior to this * phase, and that might have messed up our properties. Recover * from a particular case of that where we're fairly sure that a - * class of OT_H is desired but has been lost. */ + * class of I_Cat(H) is desired but has been lost. */ /* We don't call load_virama_glyph(), since we know it's already * loaded. */ hb_codepoint_t virama_glyph = indic_plan->virama_glyph.get_relaxed (); @@ -990,7 +1003,7 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan, _hb_glyph_info_multiplied (&info[i])) { /* This will make sure that this glyph passes is_halant() test. 
*/ - info[i].indic_category() = OT_H; + info[i].indic_category() = I_Cat(H); _hb_glyph_info_clear_ligated_and_multiplied (&info[i]); } } @@ -1056,11 +1069,11 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan, break; } if (base == end && start < base && - is_one_of (info[base - 1], FLAG (OT_ZWJ))) + is_one_of (info[base - 1], FLAG (I_Cat(ZWJ)))) base--; if (base < end) while (start < base && - is_one_of (info[base], (FLAG (OT_N) | FLAG (OT_H)))) + is_one_of (info[base], (FLAG (I_Cat(N)) | FLAG (I_Cat(H))))) base--; @@ -1105,7 +1118,7 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan, { search: while (new_pos > start && - !(is_one_of (info[new_pos], (FLAG (OT_M) | FLAG (OT_H))))) + !(is_one_of (info[new_pos], (FLAG (I_Cat(M)) | FLAG (I_Cat(H)))))) new_pos--; /* If we found no Halant we are done. @@ -1122,7 +1135,7 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan, if (new_pos + 1 < end) { /* -> If ZWJ follows this halant, matra is NOT repositioned after this halant. */ - if (info[new_pos + 1].indic_category() == OT_ZWJ) + if (info[new_pos + 1].indic_category() == I_Cat(ZWJ)) { /* Keep searching. */ if (new_pos > start) @@ -1195,7 +1208,7 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan, */ if (start + 1 < end && info[start].indic_position() == POS_RA_TO_BECOME_REPH && - ((info[start].indic_category() == OT_Repha) ^ + ((info[start].indic_category() == I_Cat(Repha)) ^ _hb_glyph_info_ligated_and_didnt_multiply (&info[start]))) { unsigned int new_reph_pos; @@ -1305,7 +1318,7 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan, unlikely (is_halant (info[new_reph_pos]))) { for (unsigned int i = base + 1; i < new_reph_pos; i++) - if (info[i].indic_category() == OT_M) { + if (info[i].indic_category() == I_Cat(M)) { /* Ok, got it. */ new_reph_pos--; } @@ -1365,7 +1378,7 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan, if (buffer->props.script != HB_SCRIPT_MALAYALAM && buffer->props.script != HB_SCRIPT_TAMIL) { while (new_pos > start && - !(is_one_of (info[new_pos - 1], FLAG(OT_M) | FLAG (OT_H)))) + !(is_one_of (info[new_pos - 1], FLAG(I_Cat(M)) | FLAG (I_Cat(H))))) new_pos--; } @@ -1414,11 +1427,10 @@ final_reordering_syllable_indic (const hb_ot_shape_plan_t *plan, switch ((hb_tag_t) plan->props.script) { case HB_SCRIPT_TAMIL: - case HB_SCRIPT_SINHALA: break; default: - /* Uniscribe merges the entire syllable into a single cluster... Except for Tamil & Sinhala. + /* Uniscribe merges the entire syllable into a single cluster... Except for Tamil. * This means, half forms are submerged into the main consonant's cluster. * This is unnecessary, and makes cursor positioning harder, but that's what * Uniscribe does. */ @@ -1453,7 +1465,9 @@ preprocess_text_indic (const hb_ot_shape_plan_t *plan, hb_buffer_t *buffer, hb_font_t *font) { - _hb_preprocess_text_vowel_constraints (plan, buffer, font); + const indic_shape_plan_t *indic_plan = (const indic_shape_plan_t *) plan->data; + if (!indic_plan->uniscribe_bug_compatible) + _hb_preprocess_text_vowel_constraints (plan, buffer, font); } static bool @@ -1486,48 +1500,6 @@ decompose_indic (const hb_ot_shape_normalize_context_t *c, #endif } - if ((ab == 0x0DDAu || hb_in_range<hb_codepoint_t> (ab, 0x0DDCu, 0x0DDEu))) - { - /* - * Sinhala split matras... Let the fun begin. - * - * These four characters have Unicode decompositions. However, Uniscribe - * decomposes them "Khmer-style", that is, it uses the character itself to - * get the second half. 
The first half of all four decompositions is always - * U+0DD9. - * - * Now, there are buggy fonts, namely, the widely used lklug.ttf, that are - * broken with Uniscribe. But we need to support them. As such, we only - * do the Uniscribe-style decomposition if the character is transformed into - * its "sec.half" form by the 'pstf' feature. Otherwise, we fall back to - * Unicode decomposition. - * - * Note that we can't unconditionally use Unicode decomposition. That would - * break some other fonts, that are designed to work with Uniscribe, and - * don't have positioning features for the Unicode-style decomposition. - * - * Argh... - * - * The Uniscribe behavior is now documented in the newly published Sinhala - * spec in 2012: - * - * https://docs.microsoft.com/en-us/typography/script-development/sinhala#shaping - */ - - - const indic_shape_plan_t *indic_plan = (const indic_shape_plan_t *) c->plan->data; - hb_codepoint_t glyph; - if (indic_plan->uniscribe_bug_compatible || - (c->font->get_nominal_glyph (ab, &glyph) && - indic_plan->pstf.would_substitute (&glyph, 1, c->font->face))) - { - /* Ok, safe to use Uniscribe-style decomposition. */ - *a = 0x0DD9u; - *b = ab; - return true; - } - } - return (bool) c->unicode->decompose (ab, a, b); } @@ -1548,7 +1520,7 @@ compose_indic (const hb_ot_shape_normalize_context_t *c, } -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_indic = +const hb_ot_shaper_t _hb_ot_shaper_indic = { collect_features_indic, override_features_indic, diff --git a/thirdparty/harfbuzz/src/hb-ot-shaper-indic.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-indic.hh new file mode 100644 index 0000000000..4f822c26e9 --- /dev/null +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-indic.hh @@ -0,0 +1,66 @@ +/* + * Copyright © 2012 Google, Inc. + * + * This is part of HarfBuzz, a text shaping library. + * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. + * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + * + * Google Author(s): Behdad Esfahbod + */ + +#ifndef HB_OT_SHAPER_INDIC_HH +#define HB_OT_SHAPER_INDIC_HH + +#include "hb.hh" + +#include "hb-ot-shaper-syllabic.hh" + + +/* Visual positions in a syllable from left to right. 
*/ +enum ot_position_t { + POS_START = 0, + + POS_RA_TO_BECOME_REPH = 1, + POS_PRE_M = 2, + POS_PRE_C = 3, + + POS_BASE_C = 4, + POS_AFTER_MAIN = 5, + + POS_ABOVE_C = 6, + + POS_BEFORE_SUB = 7, + POS_BELOW_C = 8, + POS_AFTER_SUB = 9, + + POS_BEFORE_POST = 10, + POS_POST_C = 11, + POS_AFTER_POST = 12, + + POS_SMVD = 13, + + POS_END = 14 +}; + + +HB_INTERNAL uint16_t +hb_indic_get_categories (hb_codepoint_t u); + + +#endif /* HB_OT_SHAPER_INDIC_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shaper-khmer-machine.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-khmer-machine.hh new file mode 100644 index 0000000000..e18bd75ef1 --- /dev/null +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-khmer-machine.hh @@ -0,0 +1,428 @@ + +#line 1 "hb-ot-shaper-khmer-machine.rl" +/* + * Copyright © 2011,2012 Google, Inc. + * + * This is part of HarfBuzz, a text shaping library. + * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. + * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
+ * + * Google Author(s): Behdad Esfahbod + */ + +#ifndef HB_OT_SHAPER_KHMER_MACHINE_HH +#define HB_OT_SHAPER_KHMER_MACHINE_HH + +#include "hb.hh" + +#include "hb-ot-layout.hh" +#include "hb-ot-shaper-indic.hh" + +/* buffer var allocations */ +#define khmer_category() ot_shaper_var_u8_category() /* khmer_category_t */ + +using khmer_category_t = unsigned; + +#define K_Cat(Cat) khmer_syllable_machine_ex_##Cat + +enum khmer_syllable_type_t { + khmer_consonant_syllable, + khmer_broken_cluster, + khmer_non_khmer_cluster, +}; + + +#line 52 "hb-ot-shaper-khmer-machine.hh" +#define khmer_syllable_machine_ex_C 1u +#define khmer_syllable_machine_ex_DOTTEDCIRCLE 11u +#define khmer_syllable_machine_ex_H 4u +#define khmer_syllable_machine_ex_PLACEHOLDER 10u +#define khmer_syllable_machine_ex_Ra 15u +#define khmer_syllable_machine_ex_Robatic 25u +#define khmer_syllable_machine_ex_V 2u +#define khmer_syllable_machine_ex_VAbv 20u +#define khmer_syllable_machine_ex_VBlw 21u +#define khmer_syllable_machine_ex_VPre 22u +#define khmer_syllable_machine_ex_VPst 23u +#define khmer_syllable_machine_ex_Xgroup 26u +#define khmer_syllable_machine_ex_Ygroup 27u +#define khmer_syllable_machine_ex_ZWJ 6u +#define khmer_syllable_machine_ex_ZWNJ 5u + + +#line 70 "hb-ot-shaper-khmer-machine.hh" +static const unsigned char _khmer_syllable_machine_trans_keys[] = { + 5u, 26u, 5u, 26u, 1u, 15u, 5u, 26u, 5u, 26u, 5u, 26u, 5u, 26u, 5u, 26u, + 5u, 26u, 5u, 26u, 5u, 26u, 5u, 26u, 5u, 26u, 1u, 15u, 5u, 26u, 5u, 26u, + 5u, 26u, 5u, 26u, 5u, 26u, 5u, 26u, 5u, 26u, 1u, 27u, 4u, 27u, 1u, 15u, + 4u, 27u, 4u, 27u, 27u, 27u, 4u, 27u, 4u, 27u, 4u, 27u, 4u, 27u, 4u, 27u, + 4u, 27u, 1u, 15u, 4u, 27u, 4u, 27u, 27u, 27u, 4u, 27u, 4u, 27u, 4u, 27u, + 4u, 27u, 4u, 27u, 5u, 26u, 0 +}; + +static const char _khmer_syllable_machine_key_spans[] = { + 22, 22, 15, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 15, 22, 22, + 22, 22, 22, 22, 22, 27, 24, 15, + 24, 24, 1, 24, 24, 24, 24, 24, + 24, 15, 24, 24, 1, 24, 24, 24, + 24, 24, 22 +}; + +static const short _khmer_syllable_machine_index_offsets[] = { + 0, 23, 46, 62, 85, 108, 131, 154, + 177, 200, 223, 246, 269, 292, 308, 331, + 354, 377, 400, 423, 446, 469, 497, 522, + 538, 563, 588, 590, 615, 640, 665, 690, + 715, 740, 756, 781, 806, 808, 833, 858, + 883, 908, 933 +}; + +static const char _khmer_syllable_machine_indicies[] = { + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 3, 4, 0, 1, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 4, 0, 5, 5, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 5, 0, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 2, 0, 0, + 0, 0, 0, 4, 0, 6, 6, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2, 0, 7, 7, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 8, 0, 9, 9, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 2, 0, 0, 0, 0, 0, + 10, 0, 9, 9, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 10, + 0, 11, 11, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 2, 0, 0, 0, 0, 0, 12, 0, + 11, 11, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 12, 0, 1, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 2, 0, + 0, 0, 0, 13, 4, 0, 15, 15, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 16, 14, 14, + 14, 14, 17, 18, 14, 15, 15, 19, + 19, 19, 19, 19, 19, 19, 19, 19, + 19, 19, 19, 19, 19, 19, 19, 19, + 19, 19, 18, 19, 20, 20, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 20, 14, 15, 15, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 16, 14, 14, 14, 14, + 14, 18, 14, 21, 21, 14, 14, 14, + 14, 
14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 16, 14, 22, 22, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 23, + 14, 24, 24, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 16, 14, 14, 14, 14, 14, 25, 14, + 24, 24, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 25, 14, 26, + 26, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 16, 14, + 14, 14, 14, 14, 27, 14, 26, 26, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 14, 14, 14, 14, 14, + 14, 14, 14, 27, 14, 29, 29, 28, + 30, 31, 31, 28, 28, 28, 13, 13, + 28, 28, 28, 29, 28, 28, 28, 28, + 16, 25, 27, 23, 28, 17, 18, 20, + 28, 33, 34, 34, 32, 32, 32, 32, + 32, 32, 32, 32, 32, 32, 32, 32, + 32, 2, 10, 12, 8, 32, 13, 4, + 5, 32, 35, 35, 32, 32, 32, 32, + 32, 32, 32, 32, 32, 32, 32, 32, + 35, 32, 33, 36, 36, 32, 32, 32, + 32, 32, 32, 32, 32, 32, 32, 32, + 32, 32, 2, 10, 12, 8, 32, 3, + 4, 5, 32, 37, 38, 38, 32, 32, + 32, 32, 32, 32, 32, 32, 32, 32, + 32, 32, 32, 2, 10, 12, 8, 32, + 32, 4, 5, 32, 5, 32, 37, 6, + 6, 32, 32, 32, 32, 32, 32, 32, + 32, 32, 32, 32, 32, 32, 32, 32, + 32, 8, 32, 32, 2, 5, 32, 37, + 7, 7, 32, 32, 32, 32, 32, 32, + 32, 32, 32, 32, 32, 32, 32, 32, + 32, 32, 32, 32, 32, 8, 5, 32, + 37, 39, 39, 32, 32, 32, 32, 32, + 32, 32, 32, 32, 32, 32, 32, 32, + 2, 32, 32, 8, 32, 32, 10, 5, + 32, 37, 40, 40, 32, 32, 32, 32, + 32, 32, 32, 32, 32, 32, 32, 32, + 32, 2, 10, 32, 8, 32, 32, 12, + 5, 32, 33, 38, 38, 32, 32, 32, + 32, 32, 32, 32, 32, 32, 32, 32, + 32, 32, 2, 10, 12, 8, 32, 32, + 4, 5, 32, 33, 38, 38, 32, 32, + 32, 32, 32, 32, 32, 32, 32, 32, + 32, 32, 32, 2, 10, 12, 8, 32, + 3, 4, 5, 32, 42, 42, 41, 41, + 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 42, 41, 30, 43, 43, 41, + 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 41, 41, 16, 25, 27, 23, + 41, 17, 18, 20, 41, 44, 45, 45, + 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 41, 41, 41, 16, 25, 27, + 23, 41, 41, 18, 20, 41, 20, 41, + 44, 21, 21, 41, 41, 41, 41, 41, + 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 41, 23, 41, 41, 16, 20, + 41, 44, 22, 22, 41, 41, 41, 41, + 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 41, 41, 41, 41, 41, 23, + 20, 41, 44, 46, 46, 41, 41, 41, + 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 16, 41, 41, 23, 41, 41, + 25, 20, 41, 44, 47, 47, 41, 41, + 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 41, 16, 25, 41, 23, 41, + 41, 27, 20, 41, 30, 45, 45, 41, + 41, 41, 41, 41, 41, 41, 41, 41, + 41, 41, 41, 41, 16, 25, 27, 23, + 41, 41, 18, 20, 41, 15, 15, 48, + 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 16, 48, 48, 48, + 48, 48, 18, 48, 0 +}; + +static const char _khmer_syllable_machine_trans_targs[] = { + 21, 1, 27, 31, 25, 26, 4, 5, + 28, 7, 29, 9, 30, 32, 21, 12, + 37, 41, 35, 21, 36, 15, 16, 38, + 18, 39, 20, 40, 21, 22, 33, 42, + 21, 23, 10, 24, 0, 2, 3, 6, + 8, 21, 34, 11, 13, 14, 17, 19, + 21 +}; + +static const char _khmer_syllable_machine_trans_actions[] = { + 1, 0, 2, 2, 2, 0, 0, 0, + 2, 0, 2, 0, 2, 2, 3, 0, + 2, 4, 4, 5, 0, 0, 0, 2, + 0, 2, 0, 2, 8, 2, 0, 9, + 10, 0, 0, 2, 0, 0, 0, 0, + 0, 11, 4, 0, 0, 0, 0, 0, + 12 +}; + +static const char _khmer_syllable_machine_to_state_actions[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 6, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0 +}; + +static const char _khmer_syllable_machine_from_state_actions[] = { + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 7, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0 +}; + +static const short 
_khmer_syllable_machine_eof_trans[] = { + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 15, 20, 15, 15, 15, + 15, 15, 15, 15, 15, 0, 33, 33, + 33, 33, 33, 33, 33, 33, 33, 33, + 33, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 49 +}; + +static const int khmer_syllable_machine_start = 21; +static const int khmer_syllable_machine_first_final = 21; +static const int khmer_syllable_machine_error = -1; + +static const int khmer_syllable_machine_en_main = 21; + + +#line 53 "hb-ot-shaper-khmer-machine.rl" + + + +#line 102 "hb-ot-shaper-khmer-machine.rl" + + +#define found_syllable(syllable_type) \ + HB_STMT_START { \ + if (0) fprintf (stderr, "syllable %d..%d %s\n", ts, te, #syllable_type); \ + for (unsigned int i = ts; i < te; i++) \ + info[i].syllable() = (syllable_serial << 4) | syllable_type; \ + syllable_serial++; \ + if (unlikely (syllable_serial == 16)) syllable_serial = 1; \ + } HB_STMT_END + +inline void +find_syllables_khmer (hb_buffer_t *buffer) +{ + unsigned int p, pe, eof, ts, te, act HB_UNUSED; + int cs; + hb_glyph_info_t *info = buffer->info; + +#line 298 "hb-ot-shaper-khmer-machine.hh" + { + cs = khmer_syllable_machine_start; + ts = 0; + te = 0; + act = 0; + } + +#line 122 "hb-ot-shaper-khmer-machine.rl" + + + p = 0; + pe = eof = buffer->len; + + unsigned int syllable_serial = 1; + +#line 314 "hb-ot-shaper-khmer-machine.hh" + { + int _slen; + int _trans; + const unsigned char *_keys; + const char *_inds; + if ( p == pe ) + goto _test_eof; +_resume: + switch ( _khmer_syllable_machine_from_state_actions[cs] ) { + case 7: +#line 1 "NONE" + {ts = p;} + break; +#line 328 "hb-ot-shaper-khmer-machine.hh" + } + + _keys = _khmer_syllable_machine_trans_keys + (cs<<1); + _inds = _khmer_syllable_machine_indicies + _khmer_syllable_machine_index_offsets[cs]; + + _slen = _khmer_syllable_machine_key_spans[cs]; + _trans = _inds[ _slen > 0 && _keys[0] <=( info[p].khmer_category()) && + ( info[p].khmer_category()) <= _keys[1] ? 
+ ( info[p].khmer_category()) - _keys[0] : _slen ]; + +_eof_trans: + cs = _khmer_syllable_machine_trans_targs[_trans]; + + if ( _khmer_syllable_machine_trans_actions[_trans] == 0 ) + goto _again; + + switch ( _khmer_syllable_machine_trans_actions[_trans] ) { + case 2: +#line 1 "NONE" + {te = p+1;} + break; + case 8: +#line 98 "hb-ot-shaper-khmer-machine.rl" + {te = p+1;{ found_syllable (khmer_non_khmer_cluster); }} + break; + case 10: +#line 96 "hb-ot-shaper-khmer-machine.rl" + {te = p;p--;{ found_syllable (khmer_consonant_syllable); }} + break; + case 11: +#line 97 "hb-ot-shaper-khmer-machine.rl" + {te = p;p--;{ found_syllable (khmer_broken_cluster); buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE; }} + break; + case 12: +#line 98 "hb-ot-shaper-khmer-machine.rl" + {te = p;p--;{ found_syllable (khmer_non_khmer_cluster); }} + break; + case 1: +#line 96 "hb-ot-shaper-khmer-machine.rl" + {{p = ((te))-1;}{ found_syllable (khmer_consonant_syllable); }} + break; + case 3: +#line 97 "hb-ot-shaper-khmer-machine.rl" + {{p = ((te))-1;}{ found_syllable (khmer_broken_cluster); buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE; }} + break; + case 5: +#line 1 "NONE" + { switch( act ) { + case 2: + {{p = ((te))-1;} found_syllable (khmer_broken_cluster); buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE; } + break; + case 3: + {{p = ((te))-1;} found_syllable (khmer_non_khmer_cluster); } + break; + } + } + break; + case 4: +#line 1 "NONE" + {te = p+1;} +#line 97 "hb-ot-shaper-khmer-machine.rl" + {act = 2;} + break; + case 9: +#line 1 "NONE" + {te = p+1;} +#line 98 "hb-ot-shaper-khmer-machine.rl" + {act = 3;} + break; +#line 398 "hb-ot-shaper-khmer-machine.hh" + } + +_again: + switch ( _khmer_syllable_machine_to_state_actions[cs] ) { + case 6: +#line 1 "NONE" + {ts = 0;} + break; +#line 407 "hb-ot-shaper-khmer-machine.hh" + } + + if ( ++p != pe ) + goto _resume; + _test_eof: {} + if ( p == eof ) + { + if ( _khmer_syllable_machine_eof_trans[cs] > 0 ) { + _trans = _khmer_syllable_machine_eof_trans[cs] - 1; + goto _eof_trans; + } + } + + } + +#line 130 "hb-ot-shaper-khmer-machine.rl" + +} + +#undef found_syllable + +#endif /* HB_OT_SHAPER_KHMER_MACHINE_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-khmer.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-khmer.cc index a7d5bf574b..e04d633195 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-khmer.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-khmer.cc @@ -28,8 +28,8 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex-khmer.hh" -#include "hb-ot-shape-complex-khmer-machine.hh" +#include "hb-ot-shaper-khmer-machine.hh" +#include "hb-ot-shaper-indic.hh" #include "hb-ot-layout.hh" @@ -37,6 +37,7 @@ * Khmer shaper. */ + static const hb_ot_map_feature_t khmer_features[] = { @@ -79,6 +80,15 @@ enum { KHMER_BASIC_FEATURES = _KHMER_PRES, /* Don't forget to update this! 
*/ }; +static inline void +set_khmer_properties (hb_glyph_info_t &info) +{ + hb_codepoint_t u = info.codepoint; + unsigned int type = hb_indic_get_categories (u); + + info.khmer_category() = (khmer_category_t) (type & 0xFFu); +} + static void setup_syllables_khmer (const hb_ot_shape_plan_t *plan, hb_font_t *font, @@ -115,7 +125,7 @@ collect_features_khmer (hb_ot_shape_planner_t *plan) map->add_feature (khmer_features[i]); /* https://github.com/harfbuzz/harfbuzz/issues/3531 */ - map->add_gsub_pause (nullptr); + map->add_gsub_pause (hb_syllabic_clear_var); // Don't need syllables anymore, use stop to free buffer var for (; i < KHMER_NUM_FEATURES; i++) map->add_feature (khmer_features[i]); @@ -187,6 +197,7 @@ setup_syllables_khmer (const hb_ot_shape_plan_t *plan HB_UNUSED, hb_font_t *font HB_UNUSED, hb_buffer_t *buffer) { + HB_BUFFER_ALLOCATE_VAR (buffer, syllable); find_syllables_khmer (buffer); foreach_syllable (buffer, start, end) buffer->unsafe_to_break (start, end); @@ -230,11 +241,11 @@ reorder_consonant_syllable (const hb_ot_shape_plan_t *plan, * the 'pref' OpenType feature applied to them. * """ */ - if (info[i].khmer_category() == OT_Coeng && num_coengs <= 2 && i + 1 < end) + if (info[i].khmer_category() == K_Cat(H) && num_coengs <= 2 && i + 1 < end) { num_coengs++; - if (info[i + 1].khmer_category() == OT_Ra) + if (info[i + 1].khmer_category() == K_Cat(Ra)) { for (unsigned int j = 0; j < 2; j++) info[i + j].mask |= khmer_plan->mask_array[KHMER_PREF]; @@ -262,7 +273,7 @@ reorder_consonant_syllable (const hb_ot_shape_plan_t *plan, } /* Reorder left matra piece. */ - else if (info[i].khmer_category() == OT_VPre) + else if (info[i].khmer_category() == K_Cat(VPre)) { /* Move to the start. */ buffer->merge_clusters (start, i + 1); @@ -301,8 +312,8 @@ reorder_khmer (const hb_ot_shape_plan_t *plan, { hb_syllabic_insert_dotted_circles (font, buffer, khmer_broken_cluster, - OT_DOTTEDCIRCLE, - OT_Repha); + K_Cat(DOTTEDCIRCLE), + (unsigned) -1); foreach_syllable (buffer, start, end) reorder_syllable_khmer (plan, font->face, buffer, start, end); @@ -349,7 +360,7 @@ compose_khmer (const hb_ot_shape_normalize_context_t *c, } -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_khmer = +const hb_ot_shaper_t _hb_ot_shaper_khmer = { collect_features_khmer, override_features_khmer, diff --git a/thirdparty/harfbuzz/src/hb-ot-shaper-myanmar-machine.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-myanmar-machine.hh new file mode 100644 index 0000000000..b109708937 --- /dev/null +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-myanmar-machine.hh @@ -0,0 +1,553 @@ + +#line 1 "hb-ot-shaper-myanmar-machine.rl" +/* + * Copyright © 2011,2012 Google, Inc. + * + * This is part of HarfBuzz, a text shaping library. + * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. + * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + * + * Google Author(s): Behdad Esfahbod + */ + +#ifndef HB_OT_SHAPER_MYANMAR_MACHINE_HH +#define HB_OT_SHAPER_MYANMAR_MACHINE_HH + +#include "hb.hh" + +#include "hb-ot-layout.hh" +#include "hb-ot-shaper-indic.hh" + +/* buffer var allocations */ +#define myanmar_category() ot_shaper_var_u8_category() /* myanmar_category_t */ +#define myanmar_position() ot_shaper_var_u8_auxiliary() /* myanmar_position_t */ + +using myanmar_category_t = unsigned; +using myanmar_position_t = ot_position_t; + +#define M_Cat(Cat) myanmar_syllable_machine_ex_##Cat + +enum myanmar_syllable_type_t { + myanmar_consonant_syllable, + myanmar_broken_cluster, + myanmar_non_myanmar_cluster, +}; + + +#line 54 "hb-ot-shaper-myanmar-machine.hh" +#define myanmar_syllable_machine_ex_A 9u +#define myanmar_syllable_machine_ex_As 32u +#define myanmar_syllable_machine_ex_C 1u +#define myanmar_syllable_machine_ex_CS 18u +#define myanmar_syllable_machine_ex_DB 3u +#define myanmar_syllable_machine_ex_DOTTEDCIRCLE 11u +#define myanmar_syllable_machine_ex_GB 10u +#define myanmar_syllable_machine_ex_H 4u +#define myanmar_syllable_machine_ex_IV 2u +#define myanmar_syllable_machine_ex_MH 35u +#define myanmar_syllable_machine_ex_ML 41u +#define myanmar_syllable_machine_ex_MR 36u +#define myanmar_syllable_machine_ex_MW 37u +#define myanmar_syllable_machine_ex_MY 38u +#define myanmar_syllable_machine_ex_PT 39u +#define myanmar_syllable_machine_ex_Ra 15u +#define myanmar_syllable_machine_ex_SM 8u +#define myanmar_syllable_machine_ex_VAbv 20u +#define myanmar_syllable_machine_ex_VBlw 21u +#define myanmar_syllable_machine_ex_VPre 22u +#define myanmar_syllable_machine_ex_VPst 23u +#define myanmar_syllable_machine_ex_VS 40u +#define myanmar_syllable_machine_ex_ZWJ 6u +#define myanmar_syllable_machine_ex_ZWNJ 5u + + +#line 81 "hb-ot-shaper-myanmar-machine.hh" +static const unsigned char _myanmar_syllable_machine_trans_keys[] = { + 1u, 41u, 3u, 41u, 5u, 39u, 5u, 8u, 3u, 41u, 3u, 39u, 3u, 39u, 5u, 39u, + 5u, 39u, 3u, 39u, 3u, 39u, 3u, 41u, 5u, 39u, 1u, 15u, 3u, 39u, 3u, 39u, + 3u, 40u, 3u, 39u, 3u, 41u, 3u, 41u, 3u, 39u, 3u, 41u, 3u, 41u, 3u, 41u, + 3u, 41u, 3u, 41u, 5u, 39u, 5u, 8u, 3u, 41u, 3u, 39u, 3u, 39u, 5u, 39u, + 5u, 39u, 3u, 39u, 3u, 39u, 3u, 41u, 5u, 39u, 1u, 15u, 3u, 41u, 3u, 39u, + 3u, 39u, 3u, 40u, 3u, 39u, 3u, 41u, 3u, 41u, 3u, 39u, 3u, 41u, 3u, 41u, + 3u, 41u, 3u, 41u, 3u, 41u, 3u, 41u, 3u, 41u, 1u, 41u, 1u, 15u, 0 +}; + +static const char _myanmar_syllable_machine_key_spans[] = { + 41, 39, 35, 4, 39, 37, 37, 35, + 35, 37, 37, 39, 35, 15, 37, 37, + 38, 37, 39, 39, 37, 39, 39, 39, + 39, 39, 35, 4, 39, 37, 37, 35, + 35, 37, 37, 39, 35, 15, 39, 37, + 37, 38, 37, 39, 39, 37, 39, 39, + 39, 39, 39, 39, 39, 41, 15 +}; + +static const short _myanmar_syllable_machine_index_offsets[] = { + 0, 42, 82, 118, 123, 163, 201, 239, + 275, 311, 349, 387, 427, 463, 479, 517, + 555, 594, 632, 672, 712, 750, 790, 830, + 870, 910, 950, 986, 991, 1031, 1069, 1107, + 1143, 1179, 1217, 1255, 1295, 1331, 1347, 1387, + 1425, 1463, 1502, 1540, 1580, 1620, 1658, 1698, + 1738, 1778, 1818, 1858, 1898, 1938, 1980 +}; + +static const char _myanmar_syllable_machine_indicies[] = { + 1, 1, 2, 3, 4, 4, 0, 5, + 6, 1, 1, 0, 0, 0, 7, 0, + 0, 8, 0, 9, 10, 11, 12, 0, + 0, 0, 0, 0, 0, 0, 0, 13, + 0, 0, 14, 15, 16, 17, 18, 19, + 20, 0, 22, 23, 24, 24, 21, 25, + 26, 21, 21, 21, 21, 21, 21, 21, + 
21, 21, 21, 27, 28, 29, 30, 21, + 21, 21, 21, 21, 21, 21, 21, 31, + 21, 21, 32, 33, 34, 35, 36, 37, + 38, 21, 24, 24, 21, 25, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 30, 21, 21, 21, + 21, 21, 21, 21, 21, 39, 21, 21, + 21, 21, 21, 21, 36, 21, 24, 24, + 21, 25, 21, 22, 21, 24, 24, 21, + 25, 26, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 40, 21, 21, 30, + 21, 21, 21, 21, 21, 21, 21, 21, + 41, 21, 21, 42, 21, 21, 21, 36, + 21, 41, 21, 22, 21, 24, 24, 21, + 25, 26, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 30, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 36, + 21, 43, 21, 24, 24, 21, 25, 36, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 44, 21, + 21, 21, 21, 21, 21, 36, 21, 24, + 24, 21, 25, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 44, 21, 21, 21, 21, 21, + 21, 36, 21, 24, 24, 21, 25, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 36, 21, 22, + 21, 24, 24, 21, 25, 26, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 40, 21, 21, 30, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 36, 21, 22, 21, 24, + 24, 21, 25, 26, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 40, 21, + 21, 30, 21, 21, 21, 21, 21, 21, + 21, 21, 41, 21, 21, 21, 21, 21, + 21, 36, 21, 22, 21, 24, 24, 21, + 25, 26, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 40, 21, 21, 30, + 21, 21, 21, 21, 21, 21, 21, 21, + 41, 21, 21, 21, 21, 21, 21, 36, + 21, 41, 21, 24, 24, 21, 25, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 30, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 36, 21, 1, + 1, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 1, 21, 22, + 21, 24, 24, 21, 25, 26, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 27, 28, 21, 30, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 36, 21, 22, 21, 24, + 24, 21, 25, 26, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 28, + 21, 30, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 36, 21, 22, 21, 24, 24, 21, + 25, 26, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 27, 28, 29, 30, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 36, + 45, 21, 22, 21, 24, 24, 21, 25, + 26, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 27, 28, 29, 30, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 36, 21, + 22, 21, 24, 24, 21, 25, 26, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 27, 28, 29, 30, 21, 21, 21, + 21, 21, 21, 21, 21, 31, 21, 21, + 32, 33, 34, 35, 36, 21, 38, 21, + 22, 21, 24, 24, 21, 25, 26, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 27, 28, 29, 30, 21, 21, 21, + 21, 21, 21, 21, 21, 45, 21, 21, + 21, 21, 21, 21, 36, 21, 38, 21, + 22, 21, 24, 24, 21, 25, 26, 21, + 21, 21, 21, 21, 21, 21, 21, 21, + 21, 27, 28, 29, 30, 21, 21, 21, + 21, 21, 21, 21, 21, 45, 21, 21, + 21, 21, 21, 21, 36, 21, 22, 21, + 24, 24, 21, 25, 26, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 27, + 28, 29, 30, 21, 21, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 32, 21, + 34, 21, 36, 21, 38, 21, 22, 21, + 24, 24, 21, 25, 26, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 27, + 28, 29, 30, 21, 21, 21, 21, 21, + 21, 21, 21, 45, 21, 21, 32, 21, + 21, 21, 36, 21, 38, 21, 22, 21, + 24, 24, 21, 25, 26, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 27, + 28, 29, 30, 21, 21, 21, 21, 21, + 21, 21, 21, 46, 21, 21, 32, 33, + 34, 21, 36, 21, 38, 21, 22, 21, + 24, 24, 21, 25, 26, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 27, + 28, 29, 30, 21, 21, 
21, 21, 21, + 21, 21, 21, 21, 21, 21, 32, 33, + 34, 21, 36, 21, 38, 21, 22, 23, + 24, 24, 21, 25, 26, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 27, + 28, 29, 30, 21, 21, 21, 21, 21, + 21, 21, 21, 31, 21, 21, 32, 33, + 34, 35, 36, 21, 38, 21, 48, 48, + 47, 5, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 12, 47, 47, 47, 47, 47, 47, 47, + 47, 49, 47, 47, 47, 47, 47, 47, + 18, 47, 48, 48, 47, 5, 47, 2, + 47, 48, 48, 47, 5, 6, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 50, 47, 47, 12, 47, 47, 47, 47, + 47, 47, 47, 47, 51, 47, 47, 52, + 47, 47, 47, 18, 47, 51, 47, 2, + 47, 48, 48, 47, 5, 6, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 12, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 18, 47, 53, 47, 48, + 48, 47, 5, 18, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 54, 47, 47, 47, 47, 47, + 47, 18, 47, 48, 48, 47, 5, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 54, 47, + 47, 47, 47, 47, 47, 18, 47, 48, + 48, 47, 5, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 18, 47, 2, 47, 48, 48, 47, + 5, 6, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 50, 47, 47, 12, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 18, + 47, 2, 47, 48, 48, 47, 5, 6, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 50, 47, 47, 12, 47, 47, + 47, 47, 47, 47, 47, 47, 51, 47, + 47, 47, 47, 47, 47, 18, 47, 2, + 47, 48, 48, 47, 5, 6, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 50, 47, 47, 12, 47, 47, 47, 47, + 47, 47, 47, 47, 51, 47, 47, 47, + 47, 47, 47, 18, 47, 51, 47, 48, + 48, 47, 5, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 12, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 18, 47, 55, 55, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 55, 47, 2, 3, 48, 48, 47, + 5, 6, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 9, 10, 11, 12, + 47, 47, 47, 47, 47, 47, 47, 47, + 13, 47, 47, 14, 15, 16, 17, 18, + 19, 20, 47, 2, 47, 48, 48, 47, + 5, 6, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 9, 10, 47, 12, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 18, + 47, 2, 47, 48, 48, 47, 5, 6, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 10, 47, 12, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 18, 47, 2, + 47, 48, 48, 47, 5, 6, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 9, 10, 11, 12, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 18, 56, 47, 2, 47, + 48, 48, 47, 5, 6, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 9, + 10, 11, 12, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 18, 47, 2, 47, 48, 48, + 47, 5, 6, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 9, 10, 11, + 12, 47, 47, 47, 47, 47, 47, 47, + 47, 13, 47, 47, 14, 15, 16, 17, + 18, 47, 20, 47, 2, 47, 48, 48, + 47, 5, 6, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 9, 10, 11, + 12, 47, 47, 47, 47, 47, 47, 47, + 47, 56, 47, 47, 47, 47, 47, 47, + 18, 47, 20, 47, 2, 47, 48, 48, + 47, 5, 6, 47, 47, 47, 47, 47, + 47, 47, 47, 47, 47, 9, 10, 11, + 12, 47, 47, 47, 47, 47, 47, 47, + 47, 56, 47, 47, 47, 47, 47, 47, + 18, 47, 2, 47, 48, 48, 47, 5, + 6, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 9, 10, 11, 12, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 14, 47, 16, 47, 18, 47, + 20, 47, 2, 47, 48, 48, 47, 5, + 6, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 9, 10, 11, 12, 47, + 47, 47, 47, 47, 47, 47, 47, 56, + 47, 47, 14, 47, 47, 47, 18, 47, + 20, 47, 2, 47, 48, 48, 47, 5, + 
6, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 9, 10, 11, 12, 47, + 47, 47, 47, 47, 47, 47, 47, 57, + 47, 47, 14, 15, 16, 47, 18, 47, + 20, 47, 2, 47, 48, 48, 47, 5, + 6, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 9, 10, 11, 12, 47, + 47, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 14, 15, 16, 47, 18, 47, + 20, 47, 2, 3, 48, 48, 47, 5, + 6, 47, 47, 47, 47, 47, 47, 47, + 47, 47, 47, 9, 10, 11, 12, 47, + 47, 47, 47, 47, 47, 47, 47, 13, + 47, 47, 14, 15, 16, 17, 18, 47, + 20, 47, 22, 23, 24, 24, 21, 25, + 26, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 27, 28, 29, 30, 21, + 21, 21, 21, 21, 21, 21, 21, 58, + 21, 21, 32, 33, 34, 35, 36, 37, + 38, 21, 22, 59, 24, 24, 21, 25, + 26, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 27, 28, 29, 30, 21, + 21, 21, 21, 21, 21, 21, 21, 31, + 21, 21, 32, 33, 34, 35, 36, 21, + 38, 21, 1, 1, 2, 3, 48, 48, + 47, 5, 6, 1, 1, 47, 47, 47, + 1, 47, 47, 47, 47, 9, 10, 11, + 12, 47, 47, 47, 47, 47, 47, 47, + 47, 13, 47, 47, 14, 15, 16, 17, + 18, 19, 20, 47, 1, 1, 60, 60, + 60, 60, 60, 60, 60, 1, 1, 60, + 60, 60, 1, 60, 0 +}; + +static const char _myanmar_syllable_machine_trans_targs[] = { + 0, 1, 26, 37, 0, 27, 29, 51, + 54, 39, 40, 41, 28, 43, 44, 46, + 47, 48, 30, 50, 45, 0, 2, 13, + 0, 3, 5, 14, 15, 16, 4, 18, + 19, 21, 22, 23, 6, 25, 20, 12, + 9, 10, 11, 7, 8, 17, 24, 0, + 0, 36, 33, 34, 35, 31, 32, 38, + 42, 49, 52, 53, 0 +}; + +static const char _myanmar_syllable_machine_trans_actions[] = { + 3, 0, 0, 0, 4, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 5, 0, 0, + 6, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 7, + 8, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 9 +}; + +static const char _myanmar_syllable_machine_to_state_actions[] = { + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 +}; + +static const char _myanmar_syllable_machine_from_state_actions[] = { + 2, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0 +}; + +static const short _myanmar_syllable_machine_eof_trans[] = { + 0, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 22, 22, 22, 22, 22, 22, + 22, 22, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 48, 48, 48, 48, 48, + 48, 48, 48, 22, 22, 48, 61 +}; + +static const int myanmar_syllable_machine_start = 0; +static const int myanmar_syllable_machine_first_final = 0; +static const int myanmar_syllable_machine_error = -1; + +static const int myanmar_syllable_machine_en_main = 0; + + +#line 55 "hb-ot-shaper-myanmar-machine.rl" + + + +#line 117 "hb-ot-shaper-myanmar-machine.rl" + + +#define found_syllable(syllable_type) \ + HB_STMT_START { \ + if (0) fprintf (stderr, "syllable %d..%d %s\n", ts, te, #syllable_type); \ + for (unsigned int i = ts; i < te; i++) \ + info[i].syllable() = (syllable_serial << 4) | syllable_type; \ + syllable_serial++; \ + if (unlikely (syllable_serial == 16)) syllable_serial = 1; \ + } HB_STMT_END + +inline void +find_syllables_myanmar (hb_buffer_t *buffer) +{ + unsigned int p, pe, eof, ts, te, act HB_UNUSED; + int cs; + hb_glyph_info_t *info = buffer->info; + +#line 447 "hb-ot-shaper-myanmar-machine.hh" + { + cs = myanmar_syllable_machine_start; + ts = 0; + te = 0; + act = 0; + } + +#line 137 "hb-ot-shaper-myanmar-machine.rl" + + + p = 0; + pe = eof = buffer->len; + + unsigned int syllable_serial = 1; + +#line 463 
"hb-ot-shaper-myanmar-machine.hh" + { + int _slen; + int _trans; + const unsigned char *_keys; + const char *_inds; + if ( p == pe ) + goto _test_eof; +_resume: + switch ( _myanmar_syllable_machine_from_state_actions[cs] ) { + case 2: +#line 1 "NONE" + {ts = p;} + break; +#line 477 "hb-ot-shaper-myanmar-machine.hh" + } + + _keys = _myanmar_syllable_machine_trans_keys + (cs<<1); + _inds = _myanmar_syllable_machine_indicies + _myanmar_syllable_machine_index_offsets[cs]; + + _slen = _myanmar_syllable_machine_key_spans[cs]; + _trans = _inds[ _slen > 0 && _keys[0] <=( info[p].myanmar_category()) && + ( info[p].myanmar_category()) <= _keys[1] ? + ( info[p].myanmar_category()) - _keys[0] : _slen ]; + +_eof_trans: + cs = _myanmar_syllable_machine_trans_targs[_trans]; + + if ( _myanmar_syllable_machine_trans_actions[_trans] == 0 ) + goto _again; + + switch ( _myanmar_syllable_machine_trans_actions[_trans] ) { + case 6: +#line 110 "hb-ot-shaper-myanmar-machine.rl" + {te = p+1;{ found_syllable (myanmar_consonant_syllable); }} + break; + case 4: +#line 111 "hb-ot-shaper-myanmar-machine.rl" + {te = p+1;{ found_syllable (myanmar_non_myanmar_cluster); }} + break; + case 8: +#line 112 "hb-ot-shaper-myanmar-machine.rl" + {te = p+1;{ found_syllable (myanmar_broken_cluster); buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE; }} + break; + case 3: +#line 113 "hb-ot-shaper-myanmar-machine.rl" + {te = p+1;{ found_syllable (myanmar_non_myanmar_cluster); }} + break; + case 5: +#line 110 "hb-ot-shaper-myanmar-machine.rl" + {te = p;p--;{ found_syllable (myanmar_consonant_syllable); }} + break; + case 7: +#line 112 "hb-ot-shaper-myanmar-machine.rl" + {te = p;p--;{ found_syllable (myanmar_broken_cluster); buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE; }} + break; + case 9: +#line 113 "hb-ot-shaper-myanmar-machine.rl" + {te = p;p--;{ found_syllable (myanmar_non_myanmar_cluster); }} + break; +#line 523 "hb-ot-shaper-myanmar-machine.hh" + } + +_again: + switch ( _myanmar_syllable_machine_to_state_actions[cs] ) { + case 1: +#line 1 "NONE" + {ts = 0;} + break; +#line 532 "hb-ot-shaper-myanmar-machine.hh" + } + + if ( ++p != pe ) + goto _resume; + _test_eof: {} + if ( p == eof ) + { + if ( _myanmar_syllable_machine_eof_trans[cs] > 0 ) { + _trans = _myanmar_syllable_machine_eof_trans[cs] - 1; + goto _eof_trans; + } + } + + } + +#line 145 "hb-ot-shaper-myanmar-machine.rl" + +} + +#undef found_syllable + +#endif /* HB_OT_SHAPER_MYANMAR_MACHINE_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-myanmar.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-myanmar.cc index 13beaf4d4c..1ccafbca7e 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-myanmar.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-myanmar.cc @@ -28,14 +28,16 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex-myanmar.hh" -#include "hb-ot-shape-complex-myanmar-machine.hh" +#include "hb-ot-shaper-myanmar-machine.hh" +#include "hb-ot-shaper-indic.hh" +#include "hb-ot-layout.hh" /* * Myanmar shaper. */ + static const hb_tag_t myanmar_basic_features[] = { @@ -62,6 +64,40 @@ myanmar_other_features[] = HB_TAG('p','s','t','s'), }; +static inline void +set_myanmar_properties (hb_glyph_info_t &info) +{ + hb_codepoint_t u = info.codepoint; + unsigned int type = hb_indic_get_categories (u); + + info.myanmar_category() = (myanmar_category_t) (type & 0xFFu); +} + + +static inline bool +is_one_of_myanmar (const hb_glyph_info_t &info, unsigned int flags) +{ + /* If it ligated, all bets are off. 
*/ + if (_hb_glyph_info_ligated (&info)) return false; + return !!(FLAG_UNSAFE (info.myanmar_category()) & flags); +} + +/* Note: + * + * We treat Vowels and placeholders as if they were consonants. This is safe because Vowels + * cannot happen in a consonant syllable. The plus side however is, we can call the + * consonant syllable logic from the vowel syllable function and get it all right! + * + * Keep in sync with consonant_categories in the generator. */ +#define CONSONANT_FLAGS_MYANMAR (FLAG (M_Cat(C)) | FLAG (M_Cat(CS)) | FLAG (M_Cat(Ra)) | /* FLAG (M_Cat(CM)) | */ FLAG (M_Cat(IV)) | FLAG (M_Cat(GB)) | FLAG (M_Cat(DOTTEDCIRCLE))) + +static inline bool +is_consonant_myanmar (const hb_glyph_info_t &info) +{ + return is_one_of_myanmar (info, CONSONANT_FLAGS_MYANMAR); +} + + static void setup_syllables_myanmar (const hb_ot_shape_plan_t *plan, hb_font_t *font, @@ -92,6 +128,7 @@ collect_features_myanmar (hb_ot_shape_planner_t *plan) map->enable_feature (myanmar_basic_features[i], F_MANUAL_ZWJ | F_PER_SYLLABLE); map->add_gsub_pause (nullptr); } + map->add_gsub_pause (hb_syllabic_clear_var); // Don't need syllables anymore, use stop to free buffer var for (unsigned int i = 0; i < ARRAY_LENGTH (myanmar_other_features); i++) map->enable_feature (myanmar_other_features[i], F_MANUAL_ZWJ); @@ -118,6 +155,7 @@ setup_syllables_myanmar (const hb_ot_shape_plan_t *plan HB_UNUSED, hb_font_t *font HB_UNUSED, hb_buffer_t *buffer) { + HB_BUFFER_ALLOCATE_VAR (buffer, syllable); find_syllables_myanmar (buffer); foreach_syllable (buffer, start, end) buffer->unsafe_to_break (start, end); @@ -129,7 +167,7 @@ compare_myanmar_order (const hb_glyph_info_t *pa, const hb_glyph_info_t *pb) int a = pa->myanmar_position(); int b = pb->myanmar_position(); - return a < b ? -1 : a == b ? 0 : +1; + return (int) a - (int) b; } @@ -148,9 +186,9 @@ initial_reordering_consonant_syllable (hb_buffer_t *buffer, { unsigned int limit = start; if (start + 3 <= end && - info[start ].myanmar_category() == OT_Ra && - info[start+1].myanmar_category() == OT_As && - info[start+2].myanmar_category() == OT_H) + info[start ].myanmar_category() == M_Cat(Ra) && + info[start+1].myanmar_category() == M_Cat(As) && + info[start+2].myanmar_category() == M_Cat(H)) { limit += 3; base = start; @@ -162,7 +200,7 @@ initial_reordering_consonant_syllable (hb_buffer_t *buffer, base = limit; for (unsigned int i = limit; i < end; i++) - if (is_consonant (info[i])) + if (is_consonant_myanmar (info[i])) { base = i; break; @@ -187,39 +225,40 @@ initial_reordering_consonant_syllable (hb_buffer_t *buffer, * Myanmar reordering! 
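* (The loop below only assigns each glyph a myanmar_position_t; the
* actual move happens afterwards, when the syllable is sorted with
* compare_myanmar_order, rewritten above to a plain (int) a - (int) b,
* a valid comparator since positions are small non-negative enum values.)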
*/ for (; i < end; i++) { - if (info[i].myanmar_category() == OT_MR) /* Pre-base reordering */ + if (info[i].myanmar_category() == M_Cat(MR)) /* Pre-base reordering */ { info[i].myanmar_position() = POS_PRE_C; continue; } - if (info[i].myanmar_position() < POS_BASE_C) /* Left matra */ + if (info[i].myanmar_category() == M_Cat(VPre)) /* Left matra */ { + info[i].myanmar_position() = POS_PRE_M; continue; } - if (info[i].myanmar_category() == OT_VS) + if (info[i].myanmar_category() == M_Cat(VS)) { info[i].myanmar_position() = info[i - 1].myanmar_position(); continue; } - if (pos == POS_AFTER_MAIN && info[i].myanmar_category() == OT_VBlw) + if (pos == POS_AFTER_MAIN && info[i].myanmar_category() == M_Cat(VBlw)) { pos = POS_BELOW_C; info[i].myanmar_position() = pos; continue; } - if (pos == POS_BELOW_C && info[i].myanmar_category() == OT_A) + if (pos == POS_BELOW_C && info[i].myanmar_category() == M_Cat(A)) { info[i].myanmar_position() = POS_BEFORE_SUB; continue; } - if (pos == POS_BELOW_C && info[i].myanmar_category() == OT_VBlw) + if (pos == POS_BELOW_C && info[i].myanmar_category() == M_Cat(VBlw)) { info[i].myanmar_position() = pos; continue; } - if (pos == POS_BELOW_C && info[i].myanmar_category() != OT_A) + if (pos == POS_BELOW_C && info[i].myanmar_category() != M_Cat(A)) { pos = POS_AFTER_SUB; info[i].myanmar_position() = pos; @@ -247,7 +286,6 @@ reorder_syllable_myanmar (const hb_ot_shape_plan_t *plan HB_UNUSED, initial_reordering_consonant_syllable (buffer, start, end); break; - case myanmar_punctuation_cluster: case myanmar_non_myanmar_cluster: break; } @@ -262,7 +300,7 @@ reorder_myanmar (const hb_ot_shape_plan_t *plan, { hb_syllabic_insert_dotted_circles (font, buffer, myanmar_broken_cluster, - OT_GB); + M_Cat(DOTTEDCIRCLE)); foreach_syllable (buffer, start, end) reorder_syllable_myanmar (plan, font->face, buffer, start, end); @@ -274,7 +312,7 @@ reorder_myanmar (const hb_ot_shape_plan_t *plan, } -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_myanmar = +const hb_ot_shaper_t _hb_ot_shaper_myanmar = { collect_features_myanmar, nullptr, /* override_features */ @@ -296,7 +334,7 @@ const hb_ot_complex_shaper_t _hb_ot_complex_shaper_myanmar = /* Ugly Zawgyi encoding. * Disable all auto processing. * https://github.com/harfbuzz/harfbuzz/issues/1162 */ -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_myanmar_zawgyi = +const hb_ot_shaper_t _hb_ot_shaper_myanmar_zawgyi = { nullptr, /* collect_features */ nullptr, /* override_features */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-syllabic.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-syllabic.cc index 76092c7f38..a8e0d8e8c1 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-syllabic.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-syllabic.cc @@ -26,7 +26,7 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex-syllabic.hh" +#include "hb-ot-shaper-syllabic.hh" void @@ -39,31 +39,18 @@ hb_syllabic_insert_dotted_circles (hb_font_t *font, { if (unlikely (buffer->flags & HB_BUFFER_FLAG_DO_NOT_INSERT_DOTTED_CIRCLE)) return; - - /* Note: This loop is extra overhead, but should not be measurable. - * TODO Use a buffer scratch flag to remove the loop. 
*/ - bool has_broken_syllables = false; - unsigned int count = buffer->len; - hb_glyph_info_t *info = buffer->info; - for (unsigned int i = 0; i < count; i++) - if ((info[i].syllable() & 0x0F) == broken_syllable_type) - { - has_broken_syllables = true; - break; - } - if (likely (!has_broken_syllables)) + if (likely (!(buffer->scratch_flags & HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE))) return; - hb_codepoint_t dottedcircle_glyph; if (!font->get_nominal_glyph (0x25CCu, &dottedcircle_glyph)) return; hb_glyph_info_t dottedcircle = {0}; dottedcircle.codepoint = 0x25CCu; - dottedcircle.complex_var_u8_category() = dottedcircle_category; + dottedcircle.ot_shaper_var_u8_category() = dottedcircle_category; if (dottedcircle_position != -1) - dottedcircle.complex_var_u8_auxiliary() = dottedcircle_position; + dottedcircle.ot_shaper_var_u8_auxiliary() = dottedcircle_position; dottedcircle.codepoint = dottedcircle_glyph; buffer->clear_output (); @@ -87,7 +74,7 @@ hb_syllabic_insert_dotted_circles (hb_font_t *font, { while (buffer->idx < buffer->len && buffer->successful && last_syllable == buffer->cur().syllable() && - buffer->cur().complex_var_u8_category() == (unsigned) repha_category) + buffer->cur().ot_shaper_var_u8_category() == (unsigned) repha_category) (void) buffer->next_glyph (); } @@ -99,5 +86,13 @@ hb_syllabic_insert_dotted_circles (hb_font_t *font, buffer->sync (); } +HB_INTERNAL void +hb_syllabic_clear_var (const hb_ot_shape_plan_t *plan, + hb_font_t *font, + hb_buffer_t *buffer) +{ + HB_BUFFER_DEALLOCATE_VAR (buffer, syllable); +} + #endif diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-syllabic.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-syllabic.hh index b901a660d3..e8a15bb48a 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-syllabic.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-syllabic.hh @@ -22,12 +22,12 @@ * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. */ -#ifndef HB_OT_SHAPE_COMPLEX_SYLLABIC_HH -#define HB_OT_SHAPE_COMPLEX_SYLLABIC_HH +#ifndef HB_OT_SHAPER_SYLLABIC_HH +#define HB_OT_SHAPER_SYLLABIC_HH #include "hb.hh" -#include "hb-ot-shape-complex.hh" +#include "hb-ot-shaper.hh" HB_INTERNAL void @@ -38,5 +38,10 @@ hb_syllabic_insert_dotted_circles (hb_font_t *font, int repha_category = -1, int dottedcircle_position = -1); +HB_INTERNAL void +hb_syllabic_clear_var (const hb_ot_shape_plan_t *plan, + hb_font_t *font, + hb_buffer_t *buffer); + -#endif /* HB_OT_SHAPE_COMPLEX_SYLLABIC_HH */ +#endif /* HB_OT_SHAPER_SYLLABIC_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-thai.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-thai.cc index a1e27a83be..1280e7ed17 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-thai.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-thai.cc @@ -28,7 +28,7 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex.hh" +#include "hb-ot-shaper.hh" /* Thai / Lao shaper */ @@ -222,7 +222,7 @@ do_thai_pua_shaping (const hb_ot_shape_plan_t *plan HB_UNUSED, hb_buffer_t *buffer, hb_font_t *font) { -#ifdef HB_NO_OT_SHAPE_COMPLEX_THAI_FALLBACK +#ifdef HB_NO_OT_SHAPER_THAI_FALLBACK return; #endif @@ -279,7 +279,7 @@ preprocess_text_thai (const hb_ot_shape_plan_t *plan, * to be what Uniscribe and other engines implement. According to Eric Muller: * * When you have a SARA AM, decompose it in NIKHAHIT + SARA AA, *and* move the - * NIKHAHIT backwards over any tone mark (0E48-0E4B). + * NIKHAHIT backwards over any above-base marks. 
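+ * ("Above-base" here is whatever IS_ABOVE_BASE_MARK below matches:
+ * 0E31, 0E34..0E37 and 0E47..0E4E, plus 0E3B so that Lao vowel sign O
+ * (0EBB) is caught after the & ~0x0080u fold.)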
* * <0E14, 0E4B, 0E33> -> <0E14, 0E4D, 0E4B, 0E32> * @@ -308,8 +308,8 @@ preprocess_text_thai (const hb_ot_shape_plan_t *plan, * Nikhahit: U+0E4D U+0ECD * * Testing shows that Uniscribe reorder the following marks: - * Thai: <0E31,0E34..0E37,0E47..0E4E> - * Lao: <0EB1,0EB4..0EB7,0EC7..0ECE> + * Thai: <0E31,0E34..0E37, 0E47..0E4E> + * Lao: <0EB1,0EB4..0EB7,0EBB,0EC8..0ECD> * * Note how the Lao versions are the same as Thai + 0x80. */ @@ -319,7 +319,7 @@ preprocess_text_thai (const hb_ot_shape_plan_t *plan, #define IS_SARA_AM(x) (((x) & ~0x0080u) == 0x0E33u) #define NIKHAHIT_FROM_SARA_AM(x) ((x) - 0x0E33u + 0x0E4Du) #define SARA_AA_FROM_SARA_AM(x) ((x) - 1) -#define IS_TONE_MARK(x) (hb_in_ranges<hb_codepoint_t> ((x) & ~0x0080u, 0x0E34u, 0x0E37u, 0x0E47u, 0x0E4Eu, 0x0E31u, 0x0E31u)) +#define IS_ABOVE_BASE_MARK(x) (hb_in_ranges<hb_codepoint_t> ((x) & ~0x0080u, 0x0E34u, 0x0E37u, 0x0E47u, 0x0E4Eu, 0x0E31u, 0x0E31u, 0x0E3Bu, 0x0E3Bu)) buffer->clear_output (); unsigned int count = buffer->len; @@ -343,7 +343,7 @@ preprocess_text_thai (const hb_ot_shape_plan_t *plan, /* Ok, let's see... */ unsigned int start = end - 2; - while (start > 0 && IS_TONE_MARK (buffer->out_info[start - 1].codepoint)) + while (start > 0 && IS_ABOVE_BASE_MARK (buffer->out_info[start - 1].codepoint)) start--; if (start + 2 < end) @@ -371,7 +371,7 @@ preprocess_text_thai (const hb_ot_shape_plan_t *plan, do_thai_pua_shaping (plan, buffer, font); } -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_thai = +const hb_ot_shaper_t _hb_ot_shaper_thai = { nullptr, /* collect_features */ nullptr, /* override_features */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shaper-use-machine.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-use-machine.hh new file mode 100644 index 0000000000..65e65ff39d --- /dev/null +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-use-machine.hh @@ -0,0 +1,931 @@ + +#line 1 "hb-ot-shaper-use-machine.rl" +/* + * Copyright © 2015 Mozilla Foundation. + * Copyright © 2015 Google, Inc. + * + * This is part of HarfBuzz, a text shaping library. + * + * Permission is hereby granted, without written agreement and without + * license or royalty fees, to use, copy, modify, and distribute this + * software and its documentation for any purpose, provided that the + * above copyright notice and the following two paragraphs appear in + * all copies of this software. + * + * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR + * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES + * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN + * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, + * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS + * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO + * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
+ * + * Mozilla Author(s): Jonathan Kew + * Google Author(s): Behdad Esfahbod + */ + +#ifndef HB_OT_SHAPER_USE_MACHINE_HH +#define HB_OT_SHAPER_USE_MACHINE_HH + +#include "hb.hh" + +#include "hb-ot-shaper-syllabic.hh" + +/* buffer var allocations */ +#define use_category() ot_shaper_var_u8_category() + +#define USE(Cat) use_syllable_machine_ex_##Cat + +enum use_syllable_type_t { + use_virama_terminated_cluster, + use_sakot_terminated_cluster, + use_standard_cluster, + use_number_joiner_terminated_cluster, + use_numeral_cluster, + use_symbol_cluster, + use_hieroglyph_cluster, + use_broken_cluster, + use_non_cluster, +}; + + +#line 57 "hb-ot-shaper-use-machine.hh" +#define use_syllable_machine_ex_B 1u +#define use_syllable_machine_ex_CGJ 6u +#define use_syllable_machine_ex_CMAbv 31u +#define use_syllable_machine_ex_CMBlw 32u +#define use_syllable_machine_ex_CS 43u +#define use_syllable_machine_ex_FAbv 24u +#define use_syllable_machine_ex_FBlw 25u +#define use_syllable_machine_ex_FMAbv 45u +#define use_syllable_machine_ex_FMBlw 46u +#define use_syllable_machine_ex_FMPst 47u +#define use_syllable_machine_ex_FPst 26u +#define use_syllable_machine_ex_G 49u +#define use_syllable_machine_ex_GB 5u +#define use_syllable_machine_ex_H 12u +#define use_syllable_machine_ex_HN 13u +#define use_syllable_machine_ex_HVM 53u +#define use_syllable_machine_ex_IS 44u +#define use_syllable_machine_ex_J 50u +#define use_syllable_machine_ex_MAbv 27u +#define use_syllable_machine_ex_MBlw 28u +#define use_syllable_machine_ex_MPre 30u +#define use_syllable_machine_ex_MPst 29u +#define use_syllable_machine_ex_N 4u +#define use_syllable_machine_ex_O 0u +#define use_syllable_machine_ex_R 18u +#define use_syllable_machine_ex_SB 51u +#define use_syllable_machine_ex_SE 52u +#define use_syllable_machine_ex_SMAbv 41u +#define use_syllable_machine_ex_SMBlw 42u +#define use_syllable_machine_ex_SUB 11u +#define use_syllable_machine_ex_Sk 48u +#define use_syllable_machine_ex_VAbv 33u +#define use_syllable_machine_ex_VBlw 34u +#define use_syllable_machine_ex_VMAbv 37u +#define use_syllable_machine_ex_VMBlw 38u +#define use_syllable_machine_ex_VMPre 23u +#define use_syllable_machine_ex_VMPst 39u +#define use_syllable_machine_ex_VPre 22u +#define use_syllable_machine_ex_VPst 35u +#define use_syllable_machine_ex_WJ 16u +#define use_syllable_machine_ex_ZWNJ 14u + + +#line 101 "hb-ot-shaper-use-machine.hh" +static const unsigned char _use_syllable_machine_trans_keys[] = { + 0u, 53u, 11u, 53u, 11u, 53u, 1u, 53u, 23u, 48u, 24u, 47u, 25u, 47u, 26u, 47u, + 45u, 46u, 46u, 46u, 24u, 48u, 24u, 48u, 24u, 48u, 1u, 1u, 24u, 48u, 22u, 53u, + 23u, 53u, 23u, 53u, 23u, 53u, 12u, 53u, 23u, 53u, 12u, 53u, 12u, 53u, 12u, 53u, + 11u, 53u, 1u, 1u, 1u, 48u, 11u, 53u, 41u, 42u, 42u, 42u, 11u, 53u, 11u, 53u, + 1u, 53u, 23u, 48u, 24u, 47u, 25u, 47u, 26u, 47u, 45u, 46u, 46u, 46u, 24u, 48u, + 24u, 48u, 24u, 48u, 1u, 1u, 24u, 48u, 22u, 53u, 23u, 53u, 23u, 53u, 23u, 53u, + 12u, 53u, 23u, 53u, 12u, 53u, 12u, 53u, 12u, 53u, 11u, 53u, 1u, 1u, 1u, 48u, + 13u, 13u, 4u, 4u, 11u, 53u, 11u, 53u, 1u, 53u, 23u, 48u, 24u, 47u, 25u, 47u, + 26u, 47u, 45u, 46u, 46u, 46u, 24u, 48u, 24u, 48u, 24u, 48u, 1u, 1u, 24u, 48u, + 22u, 53u, 23u, 53u, 23u, 53u, 23u, 53u, 12u, 53u, 23u, 53u, 12u, 53u, 12u, 53u, + 12u, 53u, 11u, 53u, 1u, 1u, 1u, 48u, 11u, 53u, 11u, 53u, 1u, 53u, 23u, 48u, + 24u, 47u, 25u, 47u, 26u, 47u, 45u, 46u, 46u, 46u, 24u, 48u, 24u, 48u, 24u, 48u, + 1u, 1u, 24u, 48u, 22u, 53u, 23u, 53u, 23u, 53u, 23u, 53u, 12u, 53u, 23u, 53u, + 12u, 53u, 12u, 53u, 12u, 53u, 11u, 53u, 
1u, 1u, 1u, 48u, 4u, 4u, 13u, 13u, + 1u, 53u, 11u, 53u, 41u, 42u, 42u, 42u, 1u, 5u, 50u, 52u, 49u, 52u, 49u, 51u, + 0 +}; + +static const char _use_syllable_machine_key_spans[] = { + 54, 43, 43, 53, 26, 24, 23, 22, + 2, 1, 25, 25, 25, 1, 25, 32, + 31, 31, 31, 42, 31, 42, 42, 42, + 43, 1, 48, 43, 2, 1, 43, 43, + 53, 26, 24, 23, 22, 2, 1, 25, + 25, 25, 1, 25, 32, 31, 31, 31, + 42, 31, 42, 42, 42, 43, 1, 48, + 1, 1, 43, 43, 53, 26, 24, 23, + 22, 2, 1, 25, 25, 25, 1, 25, + 32, 31, 31, 31, 42, 31, 42, 42, + 42, 43, 1, 48, 43, 43, 53, 26, + 24, 23, 22, 2, 1, 25, 25, 25, + 1, 25, 32, 31, 31, 31, 42, 31, + 42, 42, 42, 43, 1, 48, 1, 1, + 53, 43, 2, 1, 5, 3, 4, 3 +}; + +static const short _use_syllable_machine_index_offsets[] = { + 0, 55, 99, 143, 197, 224, 249, 273, + 296, 299, 301, 327, 353, 379, 381, 407, + 440, 472, 504, 536, 579, 611, 654, 697, + 740, 784, 786, 835, 879, 882, 884, 928, + 972, 1026, 1053, 1078, 1102, 1125, 1128, 1130, + 1156, 1182, 1208, 1210, 1236, 1269, 1301, 1333, + 1365, 1408, 1440, 1483, 1526, 1569, 1613, 1615, + 1664, 1666, 1668, 1712, 1756, 1810, 1837, 1862, + 1886, 1909, 1912, 1914, 1940, 1966, 1992, 1994, + 2020, 2053, 2085, 2117, 2149, 2192, 2224, 2267, + 2310, 2353, 2397, 2399, 2448, 2492, 2536, 2590, + 2617, 2642, 2666, 2689, 2692, 2694, 2720, 2746, + 2772, 2774, 2800, 2833, 2865, 2897, 2929, 2972, + 3004, 3047, 3090, 3133, 3177, 3179, 3228, 3230, + 3232, 3286, 3330, 3333, 3335, 3341, 3345, 3350 +}; + +static const unsigned char _use_syllable_machine_indicies[] = { + 0, 1, 2, 2, 3, 4, 2, 2, + 2, 2, 2, 5, 6, 7, 2, 2, + 2, 2, 8, 2, 2, 2, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 2, 23, 24, 25, + 2, 26, 27, 28, 29, 30, 31, 32, + 29, 33, 2, 34, 2, 35, 2, 37, + 38, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 39, 40, 41, 42, 43, 44, + 45, 46, 47, 48, 49, 50, 51, 52, + 36, 53, 54, 55, 36, 56, 57, 36, + 58, 59, 60, 61, 58, 36, 36, 36, + 36, 62, 36, 37, 38, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 49, + 49, 50, 51, 52, 36, 53, 54, 55, + 36, 36, 36, 36, 58, 59, 60, 61, + 58, 36, 36, 36, 36, 62, 36, 37, + 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 40, 41, 42, + 43, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 53, 54, 55, 36, 36, + 36, 36, 36, 59, 60, 61, 63, 36, + 36, 36, 36, 40, 36, 40, 41, 42, + 43, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 53, 54, 55, 36, 36, + 36, 36, 36, 59, 60, 61, 63, 36, + 41, 42, 43, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 59, 60, 61, + 36, 42, 43, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 59, 60, 61, + 36, 43, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 59, 60, 61, 36, + 59, 60, 36, 60, 36, 41, 42, 43, + 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 53, 54, 55, 36, 36, 36, + 36, 36, 59, 60, 61, 63, 36, 41, + 42, 43, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 54, 55, 36, + 36, 36, 36, 36, 59, 60, 61, 63, + 36, 41, 42, 43, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, + 55, 36, 36, 36, 36, 36, 59, 60, + 61, 63, 36, 64, 36, 41, 42, 43, + 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 59, 60, 61, 63, 36, 39, + 40, 41, 42, 43, 36, 36, 36, 36, + 36, 36, 50, 51, 52, 36, 53, 54, + 55, 36, 36, 36, 36, 36, 59, 60, + 61, 63, 36, 36, 36, 36, 40, 36, + 40, 41, 42, 43, 36, 36, 36, 36, + 36, 36, 50, 51, 52, 36, 53, 54, + 55, 36, 36, 36, 36, 36, 59, 60, + 61, 63, 36, 36, 36, 36, 40, 36, + 40, 41, 42, 43, 36, 36, 36, 36, + 36, 36, 36, 51, 52, 36, 53, 
54, + 55, 36, 36, 36, 36, 36, 59, 60, + 61, 63, 36, 36, 36, 36, 40, 36, + 40, 41, 42, 43, 36, 36, 36, 36, + 36, 36, 36, 36, 52, 36, 53, 54, + 55, 36, 36, 36, 36, 36, 59, 60, + 61, 63, 36, 36, 36, 36, 40, 36, + 65, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 39, 40, 41, 42, 43, 36, + 45, 46, 36, 36, 36, 50, 51, 52, + 36, 53, 54, 55, 36, 36, 36, 36, + 36, 59, 60, 61, 63, 36, 36, 36, + 36, 40, 36, 40, 41, 42, 43, 36, + 36, 36, 36, 36, 36, 36, 36, 36, + 36, 53, 54, 55, 36, 36, 36, 36, + 36, 59, 60, 61, 63, 36, 36, 36, + 36, 40, 36, 65, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 39, 40, 41, + 42, 43, 36, 36, 46, 36, 36, 36, + 50, 51, 52, 36, 53, 54, 55, 36, + 36, 36, 36, 36, 59, 60, 61, 63, + 36, 36, 36, 36, 40, 36, 65, 36, + 36, 36, 36, 36, 36, 36, 36, 36, + 39, 40, 41, 42, 43, 36, 36, 36, + 36, 36, 36, 50, 51, 52, 36, 53, + 54, 55, 36, 36, 36, 36, 36, 59, + 60, 61, 63, 36, 36, 36, 36, 40, + 36, 65, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 39, 40, 41, 42, 43, + 44, 45, 46, 36, 36, 36, 50, 51, + 52, 36, 53, 54, 55, 36, 36, 36, + 36, 36, 59, 60, 61, 63, 36, 36, + 36, 36, 40, 36, 37, 38, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 39, + 40, 41, 42, 43, 44, 45, 46, 47, + 36, 49, 50, 51, 52, 36, 53, 54, + 55, 36, 36, 36, 36, 58, 59, 60, + 61, 58, 36, 36, 36, 36, 62, 36, + 37, 36, 37, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 36, 36, + 40, 41, 42, 43, 36, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 53, 54, + 55, 36, 36, 36, 36, 36, 59, 60, + 61, 63, 36, 37, 38, 36, 36, 36, + 36, 36, 36, 36, 36, 36, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 36, 53, 54, 55, + 36, 36, 36, 36, 58, 59, 60, 61, + 58, 36, 36, 36, 36, 62, 36, 56, + 57, 36, 57, 36, 67, 68, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 69, + 70, 71, 72, 73, 74, 75, 76, 77, + 1, 78, 79, 80, 81, 66, 82, 83, + 84, 66, 66, 66, 66, 85, 86, 87, + 88, 89, 66, 66, 66, 66, 90, 66, + 67, 68, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 78, 79, 80, + 81, 66, 82, 83, 84, 66, 66, 66, + 66, 85, 86, 87, 88, 89, 66, 66, + 66, 66, 90, 66, 67, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 70, 71, 72, 73, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 82, 83, 84, 66, 66, 66, 66, 66, + 86, 87, 88, 91, 66, 66, 66, 66, + 70, 66, 70, 71, 72, 73, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 82, 83, 84, 66, 66, 66, 66, 66, + 86, 87, 88, 91, 66, 71, 72, 73, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 86, 87, 88, 66, 72, 73, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 86, 87, 88, 66, 73, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 86, 87, 88, 66, 86, 87, 66, + 87, 66, 71, 72, 73, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 82, + 83, 84, 66, 66, 66, 66, 66, 86, + 87, 88, 91, 66, 71, 72, 73, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 83, 84, 66, 66, 66, 66, + 66, 86, 87, 88, 91, 66, 71, 72, + 73, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 84, 66, 66, + 66, 66, 66, 86, 87, 88, 91, 66, + 93, 92, 71, 72, 73, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 86, + 87, 88, 91, 66, 69, 70, 71, 72, + 73, 66, 66, 66, 66, 66, 66, 79, + 80, 81, 66, 82, 83, 84, 66, 66, + 66, 66, 66, 86, 87, 88, 91, 66, + 66, 66, 66, 70, 66, 70, 71, 72, + 73, 66, 66, 66, 66, 66, 66, 79, + 80, 81, 66, 82, 83, 84, 66, 66, + 66, 66, 66, 86, 87, 88, 91, 66, + 66, 66, 66, 70, 66, 70, 71, 72, + 73, 66, 66, 66, 66, 66, 66, 66, + 80, 81, 66, 82, 83, 84, 66, 66, + 66, 66, 66, 
86, 87, 88, 91, 66, + 66, 66, 66, 70, 66, 70, 71, 72, + 73, 66, 66, 66, 66, 66, 66, 66, + 66, 81, 66, 82, 83, 84, 66, 66, + 66, 66, 66, 86, 87, 88, 91, 66, + 66, 66, 66, 70, 66, 94, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 69, + 70, 71, 72, 73, 66, 75, 76, 66, + 66, 66, 79, 80, 81, 66, 82, 83, + 84, 66, 66, 66, 66, 66, 86, 87, + 88, 91, 66, 66, 66, 66, 70, 66, + 70, 71, 72, 73, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 82, 83, + 84, 66, 66, 66, 66, 66, 86, 87, + 88, 91, 66, 66, 66, 66, 70, 66, + 94, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 69, 70, 71, 72, 73, 66, + 66, 76, 66, 66, 66, 79, 80, 81, + 66, 82, 83, 84, 66, 66, 66, 66, + 66, 86, 87, 88, 91, 66, 66, 66, + 66, 70, 66, 94, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 69, 70, 71, + 72, 73, 66, 66, 66, 66, 66, 66, + 79, 80, 81, 66, 82, 83, 84, 66, + 66, 66, 66, 66, 86, 87, 88, 91, + 66, 66, 66, 66, 70, 66, 94, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 69, 70, 71, 72, 73, 74, 75, 76, + 66, 66, 66, 79, 80, 81, 66, 82, + 83, 84, 66, 66, 66, 66, 66, 86, + 87, 88, 91, 66, 66, 66, 66, 70, + 66, 67, 68, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 69, 70, 71, 72, + 73, 74, 75, 76, 77, 66, 78, 79, + 80, 81, 66, 82, 83, 84, 66, 66, + 66, 66, 85, 86, 87, 88, 89, 66, + 66, 66, 66, 90, 66, 67, 95, 67, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 70, 71, 72, + 73, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 82, 83, 84, 66, 66, + 66, 66, 66, 86, 87, 88, 91, 66, + 97, 96, 3, 98, 99, 100, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 101, + 102, 103, 104, 105, 106, 107, 108, 109, + 110, 111, 112, 113, 114, 66, 115, 116, + 117, 66, 56, 57, 66, 118, 119, 120, + 88, 121, 66, 66, 66, 66, 122, 66, + 99, 100, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 111, 111, 112, 113, + 114, 66, 115, 116, 117, 66, 66, 66, + 66, 118, 119, 120, 88, 121, 66, 66, + 66, 66, 122, 66, 99, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 102, 103, 104, 105, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 115, 116, 117, 66, 66, 66, 66, 66, + 119, 120, 88, 123, 66, 66, 66, 66, + 102, 66, 102, 103, 104, 105, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 115, 116, 117, 66, 66, 66, 66, 66, + 119, 120, 88, 123, 66, 103, 104, 105, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 119, 120, 88, 66, 104, 105, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 119, 120, 88, 66, 105, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 119, 120, 88, 66, 119, 120, 66, + 120, 66, 103, 104, 105, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 115, + 116, 117, 66, 66, 66, 66, 66, 119, + 120, 88, 123, 66, 103, 104, 105, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 116, 117, 66, 66, 66, 66, + 66, 119, 120, 88, 123, 66, 103, 104, + 105, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 117, 66, 66, + 66, 66, 66, 119, 120, 88, 123, 66, + 124, 92, 103, 104, 105, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 119, + 120, 88, 123, 66, 101, 102, 103, 104, + 105, 66, 66, 66, 66, 66, 66, 112, + 113, 114, 66, 115, 116, 117, 66, 66, + 66, 66, 66, 119, 120, 88, 123, 66, + 66, 66, 66, 102, 66, 102, 103, 104, + 105, 66, 66, 66, 66, 66, 66, 112, + 113, 114, 66, 115, 116, 117, 66, 66, + 66, 66, 66, 119, 120, 88, 123, 66, + 66, 66, 66, 102, 66, 102, 103, 104, + 105, 66, 66, 66, 66, 66, 66, 66, + 113, 114, 66, 115, 116, 117, 66, 66, + 66, 66, 66, 119, 120, 88, 123, 66, + 66, 66, 66, 102, 66, 102, 103, 104, + 105, 66, 
66, 66, 66, 66, 66, 66, + 66, 114, 66, 115, 116, 117, 66, 66, + 66, 66, 66, 119, 120, 88, 123, 66, + 66, 66, 66, 102, 66, 125, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 101, + 102, 103, 104, 105, 66, 107, 108, 66, + 66, 66, 112, 113, 114, 66, 115, 116, + 117, 66, 66, 66, 66, 66, 119, 120, + 88, 123, 66, 66, 66, 66, 102, 66, + 102, 103, 104, 105, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 115, 116, + 117, 66, 66, 66, 66, 66, 119, 120, + 88, 123, 66, 66, 66, 66, 102, 66, + 125, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 101, 102, 103, 104, 105, 66, + 66, 108, 66, 66, 66, 112, 113, 114, + 66, 115, 116, 117, 66, 66, 66, 66, + 66, 119, 120, 88, 123, 66, 66, 66, + 66, 102, 66, 125, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 101, 102, 103, + 104, 105, 66, 66, 66, 66, 66, 66, + 112, 113, 114, 66, 115, 116, 117, 66, + 66, 66, 66, 66, 119, 120, 88, 123, + 66, 66, 66, 66, 102, 66, 125, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 101, 102, 103, 104, 105, 106, 107, 108, + 66, 66, 66, 112, 113, 114, 66, 115, + 116, 117, 66, 66, 66, 66, 66, 119, + 120, 88, 123, 66, 66, 66, 66, 102, + 66, 99, 100, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 66, 111, 112, + 113, 114, 66, 115, 116, 117, 66, 66, + 66, 66, 118, 119, 120, 88, 121, 66, + 66, 66, 66, 122, 66, 99, 95, 99, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 66, 66, 102, 103, 104, + 105, 66, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 115, 116, 117, 66, 66, + 66, 66, 66, 119, 120, 88, 123, 66, + 99, 100, 66, 66, 66, 66, 66, 66, + 66, 66, 66, 101, 102, 103, 104, 105, + 106, 107, 108, 109, 110, 111, 112, 113, + 114, 66, 115, 116, 117, 66, 66, 66, + 66, 118, 119, 120, 88, 121, 66, 66, + 66, 66, 122, 66, 5, 6, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 9, + 10, 11, 12, 13, 14, 15, 16, 17, + 19, 19, 20, 21, 22, 126, 23, 24, + 25, 126, 126, 126, 126, 29, 30, 31, + 32, 29, 126, 126, 126, 126, 35, 126, + 5, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 10, 11, + 12, 13, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 23, 24, 25, 126, + 126, 126, 126, 126, 30, 31, 32, 127, + 126, 126, 126, 126, 10, 126, 10, 11, + 12, 13, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 23, 24, 25, 126, + 126, 126, 126, 126, 30, 31, 32, 127, + 126, 11, 12, 13, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 30, 31, + 32, 126, 12, 13, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 30, 31, + 32, 126, 13, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 30, 31, 32, + 126, 30, 31, 126, 31, 126, 11, 12, + 13, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 23, 24, 25, 126, 126, + 126, 126, 126, 30, 31, 32, 127, 126, + 11, 12, 13, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 24, 25, + 126, 126, 126, 126, 126, 30, 31, 32, + 127, 126, 11, 12, 13, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 25, 126, 126, 126, 126, 126, 30, + 31, 32, 127, 126, 128, 126, 11, 12, + 13, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 30, 31, 32, 127, 126, + 9, 10, 11, 12, 13, 126, 126, 126, + 126, 126, 126, 20, 21, 22, 126, 23, + 24, 25, 126, 126, 126, 126, 126, 30, + 31, 32, 127, 126, 126, 126, 126, 10, + 126, 10, 11, 12, 13, 126, 126, 126, + 126, 126, 126, 20, 21, 22, 126, 23, + 24, 25, 126, 126, 126, 126, 126, 30, + 31, 32, 127, 126, 126, 126, 126, 10, + 126, 10, 11, 
12, 13, 126, 126, 126, + 126, 126, 126, 126, 21, 22, 126, 23, + 24, 25, 126, 126, 126, 126, 126, 30, + 31, 32, 127, 126, 126, 126, 126, 10, + 126, 10, 11, 12, 13, 126, 126, 126, + 126, 126, 126, 126, 126, 22, 126, 23, + 24, 25, 126, 126, 126, 126, 126, 30, + 31, 32, 127, 126, 126, 126, 126, 10, + 126, 129, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 9, 10, 11, 12, 13, + 126, 15, 16, 126, 126, 126, 20, 21, + 22, 126, 23, 24, 25, 126, 126, 126, + 126, 126, 30, 31, 32, 127, 126, 126, + 126, 126, 10, 126, 10, 11, 12, 13, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 23, 24, 25, 126, 126, 126, + 126, 126, 30, 31, 32, 127, 126, 126, + 126, 126, 10, 126, 129, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 9, 10, + 11, 12, 13, 126, 126, 16, 126, 126, + 126, 20, 21, 22, 126, 23, 24, 25, + 126, 126, 126, 126, 126, 30, 31, 32, + 127, 126, 126, 126, 126, 10, 126, 129, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 9, 10, 11, 12, 13, 126, 126, + 126, 126, 126, 126, 20, 21, 22, 126, + 23, 24, 25, 126, 126, 126, 126, 126, + 30, 31, 32, 127, 126, 126, 126, 126, + 10, 126, 129, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 9, 10, 11, 12, + 13, 14, 15, 16, 126, 126, 126, 20, + 21, 22, 126, 23, 24, 25, 126, 126, + 126, 126, 126, 30, 31, 32, 127, 126, + 126, 126, 126, 10, 126, 5, 6, 126, + 126, 126, 126, 126, 126, 126, 126, 126, + 9, 10, 11, 12, 13, 14, 15, 16, + 17, 126, 19, 20, 21, 22, 126, 23, + 24, 25, 126, 126, 126, 126, 29, 30, + 31, 32, 29, 126, 126, 126, 126, 35, + 126, 5, 126, 5, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 10, 11, 12, 13, 126, 126, 126, + 126, 126, 126, 126, 126, 126, 126, 23, + 24, 25, 126, 126, 126, 126, 126, 30, + 31, 32, 127, 126, 130, 126, 7, 126, + 1, 126, 126, 126, 1, 126, 126, 126, + 126, 126, 5, 6, 7, 126, 126, 126, + 126, 126, 126, 126, 126, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, + 20, 21, 22, 126, 23, 24, 25, 126, + 26, 27, 126, 29, 30, 31, 32, 29, + 126, 126, 126, 126, 35, 126, 5, 6, + 126, 126, 126, 126, 126, 126, 126, 126, + 126, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 126, + 23, 24, 25, 126, 126, 126, 126, 29, + 30, 31, 32, 29, 126, 126, 126, 126, + 35, 126, 26, 27, 126, 27, 126, 1, + 131, 131, 131, 1, 131, 133, 132, 33, + 132, 33, 133, 132, 133, 132, 33, 132, + 34, 132, 0 +}; + +static const char _use_syllable_machine_trans_targs[] = { + 1, 30, 0, 56, 58, 85, 86, 110, + 112, 98, 87, 88, 89, 90, 102, 104, + 105, 106, 113, 107, 99, 100, 101, 93, + 94, 95, 114, 115, 116, 108, 91, 92, + 0, 117, 119, 109, 0, 2, 3, 15, + 4, 5, 6, 7, 19, 21, 22, 23, + 27, 24, 16, 17, 18, 10, 11, 12, + 28, 29, 25, 8, 9, 0, 26, 13, + 14, 20, 0, 31, 32, 44, 33, 34, + 35, 36, 48, 50, 51, 52, 53, 45, + 46, 47, 39, 40, 41, 54, 37, 38, + 0, 54, 55, 42, 0, 43, 49, 0, + 0, 57, 0, 59, 60, 72, 61, 62, + 63, 64, 76, 78, 79, 80, 84, 81, + 73, 74, 75, 67, 68, 69, 82, 65, + 66, 82, 83, 70, 71, 77, 0, 96, + 97, 103, 111, 0, 0, 118 +}; + +static const char _use_syllable_machine_trans_actions[] = { + 0, 0, 3, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 4, 0, 0, 0, 5, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 6, 0, 0, + 0, 0, 7, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 8, 0, 0, + 9, 10, 0, 0, 11, 0, 0, 12, + 13, 0, 14, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 8, 0, + 0, 10, 0, 0, 0, 0, 15, 0, + 0, 0, 0, 16, 17, 0 +}; + +static const char _use_syllable_machine_to_state_actions[] = { + 1, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 +}; + +static const char _use_syllable_machine_from_state_actions[] = { + 2, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0 +}; + +static const short _use_syllable_machine_eof_trans[] = { + 0, 37, 37, 37, 37, 37, 37, 37, + 37, 37, 37, 37, 37, 37, 37, 37, + 37, 37, 37, 37, 37, 37, 37, 37, + 37, 37, 37, 37, 37, 37, 67, 67, + 67, 67, 67, 67, 67, 67, 67, 67, + 67, 67, 93, 67, 67, 67, 67, 67, + 67, 67, 67, 67, 67, 67, 96, 67, + 97, 99, 67, 67, 67, 67, 67, 67, + 67, 67, 67, 67, 67, 67, 93, 67, + 67, 67, 67, 67, 67, 67, 67, 67, + 67, 67, 96, 67, 67, 127, 127, 127, + 127, 127, 127, 127, 127, 127, 127, 127, + 127, 127, 127, 127, 127, 127, 127, 127, + 127, 127, 127, 127, 127, 127, 127, 127, + 127, 127, 127, 127, 132, 133, 133, 133 +}; + +static const int use_syllable_machine_start = 0; +static const int use_syllable_machine_first_final = 0; +static const int use_syllable_machine_error = -1; + +static const int use_syllable_machine_en_main = 0; + + +#line 58 "hb-ot-shaper-use-machine.rl" + + + +#line 182 "hb-ot-shaper-use-machine.rl" + + +#define found_syllable(syllable_type) \ + HB_STMT_START { \ + if (0) fprintf (stderr, "syllable %d..%d %s\n", (*ts).second.first, (*te).second.first, #syllable_type); \ + for (unsigned i = (*ts).second.first; i < (*te).second.first; ++i) \ + info[i].syllable() = (syllable_serial << 4) | syllable_type; \ + syllable_serial++; \ + if (unlikely (syllable_serial == 16)) syllable_serial = 1; \ + } HB_STMT_END + + +template <typename Iter> +struct machine_index_t : + hb_iter_with_fallback_t<machine_index_t<Iter>, + typename Iter::item_t> +{ + machine_index_t (const Iter& it) : it (it) {} + machine_index_t (const machine_index_t& o) : hb_iter_with_fallback_t<machine_index_t<Iter>, + typename Iter::item_t> (), + it (o.it), is_null (o.is_null) {} + + static constexpr bool is_random_access_iterator = Iter::is_random_access_iterator; + static constexpr bool is_sorted_iterator = Iter::is_sorted_iterator; + + typename Iter::item_t __item__ () const { return *it; } + typename Iter::item_t __item_at__ (unsigned i) const { return it[i]; } + unsigned __len__ () const { return it.len (); } + void __next__ () { ++it; } + void __forward__ (unsigned n) { it += n; } + void __prev__ () { --it; } + void __rewind__ (unsigned n) { it -= n; } + + void operator = (unsigned n) + { + assert (n == 0); + is_null = true; + } + explicit operator bool () { return !is_null; } + + void operator = (const machine_index_t& o) + { + is_null = o.is_null; + unsigned index = (*it).first; + unsigned n = (*o.it).first; + if (index < n) it += n - index; else if (index > n) it -= index - n; + } + bool operator == (const machine_index_t& o) const + { return is_null ? 
o.is_null : !o.is_null && (*it).first == (*o.it).first; } + bool operator != (const machine_index_t& o) const { return !(*this == o); } + + private: + Iter it; + bool is_null = false; +}; +struct +{ + template <typename Iter, + hb_requires (hb_is_iterable (Iter))> + machine_index_t<hb_iter_type<Iter>> + operator () (Iter&& it) const + { return machine_index_t<hb_iter_type<Iter>> (hb_iter (it)); } +} +HB_FUNCOBJ (machine_index); + + + +static bool +not_ccs_default_ignorable (const hb_glyph_info_t &i) +{ return i.use_category() != USE(CGJ); } + +static inline void +find_syllables_use (hb_buffer_t *buffer) +{ + hb_glyph_info_t *info = buffer->info; + auto p = + + hb_iter (info, buffer->len) + | hb_enumerate + | hb_filter ([] (const hb_glyph_info_t &i) { return not_ccs_default_ignorable (i); }, + hb_second) + | hb_filter ([&] (const hb_pair_t<unsigned, const hb_glyph_info_t &> p) + { + if (p.second.use_category() == USE(ZWNJ)) + for (unsigned i = p.first + 1; i < buffer->len; ++i) + if (not_ccs_default_ignorable (info[i])) + return !_hb_glyph_info_is_unicode_mark (&info[i]); + return true; + }) + | hb_enumerate + | machine_index + ; + auto pe = p + p.len (); + auto eof = +pe; + auto ts = +p; + auto te = +p; + unsigned int act HB_UNUSED; + int cs; + +#line 784 "hb-ot-shaper-use-machine.hh" + { + cs = use_syllable_machine_start; + ts = 0; + te = 0; + act = 0; + } + +#line 282 "hb-ot-shaper-use-machine.rl" + + + unsigned int syllable_serial = 1; + +#line 797 "hb-ot-shaper-use-machine.hh" + { + int _slen; + int _trans; + const unsigned char *_keys; + const unsigned char *_inds; + if ( p == pe ) + goto _test_eof; +_resume: + switch ( _use_syllable_machine_from_state_actions[cs] ) { + case 2: +#line 1 "NONE" + {ts = p;} + break; +#line 811 "hb-ot-shaper-use-machine.hh" + } + + _keys = _use_syllable_machine_trans_keys + (cs<<1); + _inds = _use_syllable_machine_indicies + _use_syllable_machine_index_offsets[cs]; + + _slen = _use_syllable_machine_key_spans[cs]; + _trans = _inds[ _slen > 0 && _keys[0] <=( (*p).second.second.use_category()) && + ( (*p).second.second.use_category()) <= _keys[1] ? 
+ ( (*p).second.second.use_category()) - _keys[0] : _slen ]; + +_eof_trans: + cs = _use_syllable_machine_trans_targs[_trans]; + + if ( _use_syllable_machine_trans_actions[_trans] == 0 ) + goto _again; + + switch ( _use_syllable_machine_trans_actions[_trans] ) { + case 9: +#line 172 "hb-ot-shaper-use-machine.rl" + {te = p+1;{ found_syllable (use_standard_cluster); }} + break; + case 6: +#line 175 "hb-ot-shaper-use-machine.rl" + {te = p+1;{ found_syllable (use_symbol_cluster); }} + break; + case 4: +#line 177 "hb-ot-shaper-use-machine.rl" + {te = p+1;{ found_syllable (use_broken_cluster); buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE; }} + break; + case 3: +#line 178 "hb-ot-shaper-use-machine.rl" + {te = p+1;{ found_syllable (use_non_cluster); }} + break; + case 11: +#line 171 "hb-ot-shaper-use-machine.rl" + {te = p;p--;{ found_syllable (use_sakot_terminated_cluster); }} + break; + case 7: +#line 172 "hb-ot-shaper-use-machine.rl" + {te = p;p--;{ found_syllable (use_standard_cluster); }} + break; + case 14: +#line 173 "hb-ot-shaper-use-machine.rl" + {te = p;p--;{ found_syllable (use_number_joiner_terminated_cluster); }} + break; + case 13: +#line 174 "hb-ot-shaper-use-machine.rl" + {te = p;p--;{ found_syllable (use_numeral_cluster); }} + break; + case 5: +#line 175 "hb-ot-shaper-use-machine.rl" + {te = p;p--;{ found_syllable (use_symbol_cluster); }} + break; + case 17: +#line 176 "hb-ot-shaper-use-machine.rl" + {te = p;p--;{ found_syllable (use_hieroglyph_cluster); }} + break; + case 15: +#line 177 "hb-ot-shaper-use-machine.rl" + {te = p;p--;{ found_syllable (use_broken_cluster); buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE; }} + break; + case 16: +#line 178 "hb-ot-shaper-use-machine.rl" + {te = p;p--;{ found_syllable (use_non_cluster); }} + break; + case 12: +#line 1 "NONE" + { switch( act ) { + case 1: + {{p = ((te))-1;} found_syllable (use_virama_terminated_cluster); } + break; + case 2: + {{p = ((te))-1;} found_syllable (use_sakot_terminated_cluster); } + break; + } + } + break; + case 8: +#line 1 "NONE" + {te = p+1;} +#line 170 "hb-ot-shaper-use-machine.rl" + {act = 1;} + break; + case 10: +#line 1 "NONE" + {te = p+1;} +#line 171 "hb-ot-shaper-use-machine.rl" + {act = 2;} + break; +#line 901 "hb-ot-shaper-use-machine.hh" + } + +_again: + switch ( _use_syllable_machine_to_state_actions[cs] ) { + case 1: +#line 1 "NONE" + {ts = 0;} + break; +#line 910 "hb-ot-shaper-use-machine.hh" + } + + if ( ++p != pe ) + goto _resume; + _test_eof: {} + if ( p == eof ) + { + if ( _use_syllable_machine_eof_trans[cs] > 0 ) { + _trans = _use_syllable_machine_eof_trans[cs] - 1; + goto _eof_trans; + } + } + + } + +#line 287 "hb-ot-shaper-use-machine.rl" + +} + +#undef found_syllable + +#endif /* HB_OT_SHAPER_USE_MACHINE_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-use-table.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-use-table.hh index c8e5151694..5aa025fb4c 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-use-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-use-table.hh @@ -37,12 +37,12 @@ * UnicodeData.txt does not have a header. 
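The `_use_syllable_machine_*` arrays above, driven by the loop in `find_syllables_use`, form a stock Ragel flat-table scanner: each state owns a two-byte key range, a span, and a row of transition indices, with the default transition stored one slot past the row; the selected index then feeds `trans_targs` and `trans_actions`. A minimal self-contained sketch of that lookup, with toy tables and hypothetical names rather than the real HarfBuzz machine:

#include <cstdint>

// Toy two-state scanner over bytes; all tables here are illustrative only.
static const uint8_t  keys[]          = {'a', 'c',  'b', 'b'}; // lo/hi per state
static const uint8_t  key_spans[]     = {3, 1};                // hi - lo + 1
static const uint16_t index_offsets[] = {0, 4};                // row start per state
static const uint8_t  indicies[]      = {1, 0, 1, 1,  0, 0};   // row + default slot
static const uint8_t  trans_targs[]   = {0, 1};                // next state per index

static int step (int cs, uint8_t c)
{
  const uint8_t *k = keys + (cs << 1);
  int slen = key_spans[cs];
  // In-range input indexes into the row; anything else takes the default
  // transition stored just past the row, at offset slen.
  int idx = (slen > 0 && k[0] <= c && c <= k[1]) ? c - k[0] : slen;
  return trans_targs[indicies[index_offsets[cs] + idx]];
}

The real machine does the same, except its alphabet is the per-glyph `use_category()` pulled out of the enumerated pair iterator rather than raw bytes.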
*/ -#ifndef HB_OT_SHAPE_COMPLEX_USE_TABLE_HH -#define HB_OT_SHAPE_COMPLEX_USE_TABLE_HH +#ifndef HB_OT_SHAPER_USE_TABLE_HH +#define HB_OT_SHAPER_USE_TABLE_HH #include "hb.hh" -#include "hb-ot-shape-complex-use-machine.hh" +#include "hb-ot-shaper-use-machine.hh" #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-macros" @@ -53,6 +53,7 @@ #define GB USE(GB) /* BASE_OTHER */ #define H USE(H) /* HALANT */ #define HN USE(HN) /* HALANT_NUM */ +#define HVM USE(HVM) /* HALANT_OR_VOWEL_MODIFIER */ #define IS USE(IS) /* INVISIBLE_STACKER */ #define J USE(J) /* HIEROGLYPH_JOINER */ #define N USE(N) /* BASE_NUM */ @@ -246,7 +247,7 @@ static const uint8_t use_table[] = { /* 0D90 */ B, B, B, B, B, B, B, WJ, WJ, WJ, B, B, B, B, B, B, /* 0DA0 */ B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, B, /* 0DB0 */ B, B, WJ, B, B, B, B, B, B, B, B, B, WJ, B, WJ, WJ, - /* 0DC0 */ B, B, B, B, B, B, B, WJ, WJ, WJ, H, WJ, WJ, WJ, WJ, VPst, + /* 0DC0 */ B, B, B, B, B, B, B, WJ, WJ, WJ, HVM, WJ, WJ, WJ, WJ, VPst, /* 0DD0 */ VPst, VPst, VAbv, VAbv, VBlw, WJ, VBlw, WJ, VPst, VPre, VPre, VPre, VPre, VPre, VPre, VPst, /* 0DE0 */ WJ, WJ, WJ, WJ, WJ, WJ, B, B, B, B, B, B, B, B, B, B, /* 0DF0 */ WJ, WJ, VPst, VPst, O, WJ, WJ, WJ, @@ -1531,6 +1532,7 @@ hb_use_get_category (hb_glyph_info_t info) #undef GB #undef H #undef HN +#undef HVM #undef IS #undef J #undef N @@ -1566,5 +1568,5 @@ hb_use_get_category (hb_glyph_info_t info) #undef VMPre -#endif /* HB_OT_SHAPE_COMPLEX_USE_TABLE_HH */ +#endif /* HB_OT_SHAPER_USE_TABLE_HH */ /* == End of generated table == */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-use.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-use.cc index 1d13c8a126..60394ed4d8 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-use.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-use.cc @@ -30,11 +30,11 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex-use-machine.hh" -#include "hb-ot-shape-complex-use-table.hh" -#include "hb-ot-shape-complex-arabic.hh" -#include "hb-ot-shape-complex-arabic-joining-list.hh" -#include "hb-ot-shape-complex-vowel-constraints.hh" +#include "hb-ot-shaper-use-machine.hh" +#include "hb-ot-shaper-use-table.hh" +#include "hb-ot-shaper-arabic.hh" +#include "hb-ot-shaper-arabic-joining-list.hh" +#include "hb-ot-shaper-vowel-constraints.hh" /* @@ -133,6 +133,7 @@ collect_features_use (hb_ot_shape_planner_t *plan) map->enable_feature (use_basic_features[i], F_MANUAL_ZWJ | F_PER_SYLLABLE); map->add_gsub_pause (reorder_use); + map->add_gsub_pause (hb_syllabic_clear_var); // Don't need syllables anymore, use stop to free buffer var /* "Topographical features" */ for (unsigned int i = 0; i < ARRAY_LENGTH (use_topographical_features); i++) @@ -297,6 +298,7 @@ setup_syllables_use (const hb_ot_shape_plan_t *plan, hb_font_t *font HB_UNUSED, hb_buffer_t *buffer) { + HB_BUFFER_ALLOCATE_VAR (buffer, syllable); find_syllables_use (buffer); foreach_syllable (buffer, start, end) buffer->unsafe_to_break (start, end); @@ -349,7 +351,7 @@ record_pref_use (const hb_ot_shape_plan_t *plan HB_UNUSED, static inline bool is_halant_use (const hb_glyph_info_t &info) { - return (info.use_category() == USE(H) || info.use_category() == USE(IS)) && + return (info.use_category() == USE(H) || info.use_category() == USE(HVM) || info.use_category() == USE(IS)) && !_hb_glyph_info_ligated (&info); } @@ -480,7 +482,7 @@ compose_use (const hb_ot_shape_normalize_context_t *c, } -const hb_ot_complex_shaper_t _hb_ot_complex_shaper_use = +const hb_ot_shaper_t _hb_ot_shaper_use = { collect_features_use, 
nullptr, /* override_features */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-vowel-constraints.cc b/thirdparty/harfbuzz/src/hb-ot-shaper-vowel-constraints.cc index d2cca105a4..be4ac813b1 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-vowel-constraints.cc +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-vowel-constraints.cc @@ -18,7 +18,7 @@ #ifndef HB_NO_OT_SHAPE -#include "hb-ot-shape-complex-vowel-constraints.hh" +#include "hb-ot-shaper-vowel-constraints.hh" static void _output_dotted_circle (hb_buffer_t *buffer) @@ -39,7 +39,7 @@ _hb_preprocess_text_vowel_constraints (const hb_ot_shape_plan_t *plan HB_UNUSED, hb_buffer_t *buffer, hb_font_t *font HB_UNUSED) { -#ifdef HB_NO_OT_SHAPE_COMPLEX_VOWEL_CONSTRAINTS +#ifdef HB_NO_OT_SHAPER_VOWEL_CONSTRAINTS return; #endif if (buffer->flags & HB_BUFFER_FLAG_DO_NOT_INSERT_DOTTED_CIRCLE) diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex-vowel-constraints.hh b/thirdparty/harfbuzz/src/hb-ot-shaper-vowel-constraints.hh index d9082d4ead..5a7ee1b0f2 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex-vowel-constraints.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shaper-vowel-constraints.hh @@ -24,16 +24,16 @@ * Google Author(s): Behdad Esfahbod */ -#ifndef HB_OT_SHAPE_COMPLEX_VOWEL_CONSTRAINTS_HH -#define HB_OT_SHAPE_COMPLEX_VOWEL_CONSTRAINTS_HH +#ifndef HB_OT_SHAPER_VOWEL_CONSTRAINTS_HH +#define HB_OT_SHAPER_VOWEL_CONSTRAINTS_HH #include "hb.hh" -#include "hb-ot-shape-complex.hh" +#include "hb-ot-shaper.hh" HB_INTERNAL void _hb_preprocess_text_vowel_constraints (const hb_ot_shape_plan_t *plan, hb_buffer_t *buffer, hb_font_t *font); -#endif /* HB_OT_SHAPE_COMPLEX_VOWEL_CONSTRAINTS_HH */ +#endif /* HB_OT_SHAPER_VOWEL_CONSTRAINTS_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-shape-complex.hh b/thirdparty/harfbuzz/src/hb-ot-shaper.hh index 8012a9ae94..b823bf003c 100644 --- a/thirdparty/harfbuzz/src/hb-ot-shape-complex.hh +++ b/thirdparty/harfbuzz/src/hb-ot-shaper.hh @@ -24,8 +24,8 @@ * Google Author(s): Behdad Esfahbod */ -#ifndef HB_OT_SHAPE_COMPLEX_HH -#define HB_OT_SHAPE_COMPLEX_HH +#ifndef HB_OT_SHAPER_HH +#define HB_OT_SHAPER_HH #include "hb.hh" @@ -34,12 +34,12 @@ #include "hb-ot-shape-normalize.hh" -/* buffer var allocations, used by complex shapers */ -#define complex_var_u8_category() var2.u8[2] -#define complex_var_u8_auxiliary() var2.u8[3] +/* buffer var allocations, used by all OT shapers */ +#define ot_shaper_var_u8_category() var2.u8[2] +#define ot_shaper_var_u8_auxiliary() var2.u8[3] -#define HB_OT_SHAPE_COMPLEX_MAX_COMBINING_MARKS 32 +#define HB_OT_SHAPE_MAX_COMBINING_MARKS 32 enum hb_ot_shape_zero_width_marks_type_t { HB_OT_SHAPE_ZERO_WIDTH_MARKS_NONE, @@ -49,22 +49,22 @@ enum hb_ot_shape_zero_width_marks_type_t { /* Master OT shaper list */ -#define HB_COMPLEX_SHAPERS_IMPLEMENT_SHAPERS \ - HB_COMPLEX_SHAPER_IMPLEMENT (arabic) \ - HB_COMPLEX_SHAPER_IMPLEMENT (default) \ - HB_COMPLEX_SHAPER_IMPLEMENT (dumber) \ - HB_COMPLEX_SHAPER_IMPLEMENT (hangul) \ - HB_COMPLEX_SHAPER_IMPLEMENT (hebrew) \ - HB_COMPLEX_SHAPER_IMPLEMENT (indic) \ - HB_COMPLEX_SHAPER_IMPLEMENT (khmer) \ - HB_COMPLEX_SHAPER_IMPLEMENT (myanmar) \ - HB_COMPLEX_SHAPER_IMPLEMENT (myanmar_zawgyi) \ - HB_COMPLEX_SHAPER_IMPLEMENT (thai) \ - HB_COMPLEX_SHAPER_IMPLEMENT (use) \ +#define HB_OT_SHAPERS_IMPLEMENT_SHAPERS \ + HB_OT_SHAPER_IMPLEMENT (arabic) \ + HB_OT_SHAPER_IMPLEMENT (default) \ + HB_OT_SHAPER_IMPLEMENT (dumber) \ + HB_OT_SHAPER_IMPLEMENT (hangul) \ + HB_OT_SHAPER_IMPLEMENT (hebrew) \ + HB_OT_SHAPER_IMPLEMENT (indic) \ + HB_OT_SHAPER_IMPLEMENT 
(khmer) \ + HB_OT_SHAPER_IMPLEMENT (myanmar) \ + HB_OT_SHAPER_IMPLEMENT (myanmar_zawgyi) \ + HB_OT_SHAPER_IMPLEMENT (thai) \ + HB_OT_SHAPER_IMPLEMENT (use) \ /* ^--- Add new shapers here; keep sorted. */ -struct hb_ot_complex_shaper_t +struct hb_ot_shaper_t { /* collect_features() * Called during shape_plan(). @@ -168,18 +168,18 @@ struct hb_ot_complex_shaper_t bool fallback_position; }; -#define HB_COMPLEX_SHAPER_IMPLEMENT(name) extern HB_INTERNAL const hb_ot_complex_shaper_t _hb_ot_complex_shaper_##name; -HB_COMPLEX_SHAPERS_IMPLEMENT_SHAPERS -#undef HB_COMPLEX_SHAPER_IMPLEMENT +#define HB_OT_SHAPER_IMPLEMENT(name) extern HB_INTERNAL const hb_ot_shaper_t _hb_ot_shaper_##name; +HB_OT_SHAPERS_IMPLEMENT_SHAPERS +#undef HB_OT_SHAPER_IMPLEMENT -static inline const hb_ot_complex_shaper_t * -hb_ot_shape_complex_categorize (const hb_ot_shape_planner_t *planner) +static inline const hb_ot_shaper_t * +hb_ot_shaper_categorize (const hb_ot_shape_planner_t *planner) { switch ((hb_tag_t) planner->props.script) { default: - return &_hb_ot_complex_shaper_default; + return &_hb_ot_shaper_default; /* Unicode-1.1 additions */ @@ -195,28 +195,28 @@ hb_ot_shape_complex_categorize (const hb_ot_shape_planner_t *planner) if ((planner->map.chosen_script[0] != HB_OT_TAG_DEFAULT_SCRIPT || planner->props.script == HB_SCRIPT_ARABIC) && HB_DIRECTION_IS_HORIZONTAL(planner->props.direction)) - return &_hb_ot_complex_shaper_arabic; + return &_hb_ot_shaper_arabic; else - return &_hb_ot_complex_shaper_default; + return &_hb_ot_shaper_default; /* Unicode-1.1 additions */ case HB_SCRIPT_THAI: case HB_SCRIPT_LAO: - return &_hb_ot_complex_shaper_thai; + return &_hb_ot_shaper_thai; /* Unicode-1.1 additions */ case HB_SCRIPT_HANGUL: - return &_hb_ot_complex_shaper_hangul; + return &_hb_ot_shaper_hangul; /* Unicode-1.1 additions */ case HB_SCRIPT_HEBREW: - return &_hb_ot_complex_shaper_hebrew; + return &_hb_ot_shaper_hebrew; /* Unicode-1.1 additions */ @@ -230,9 +230,6 @@ hb_ot_shape_complex_categorize (const hb_ot_shape_planner_t *planner) case HB_SCRIPT_TAMIL: case HB_SCRIPT_TELUGU: - /* Unicode-3.0 additions */ - case HB_SCRIPT_SINHALA: - /* If the designer designed the font for the 'DFLT' script, * (or we ended up arbitrarily pick 'latn'), use the default shaper. * Otherwise, use the specific shaper. @@ -240,14 +237,14 @@ hb_ot_shape_complex_categorize (const hb_ot_shape_planner_t *planner) * If it's indy3 tag, send to USE. 
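An aside on the `(planner->map.chosen_script[0] & 0x000000FF) == '3'` test a few lines below: `HB_TAG` packs four ASCII bytes big-endian, so the low byte of a tag is its last character, and new-style Indic script tags such as 'dev3' or 'bng3' end in '3' and are routed to the USE shaper. A small check under that packing, with a hypothetical stand-in for the macro:

#include <cassert>
#include <cstdint>

// Stand-in for HarfBuzz's HB_TAG: four chars packed big-endian, so the
// tag's last character lands in the low byte.
constexpr uint32_t tag (char a, char b, char c, char d)
{ return ((uint32_t) a << 24) | ((uint32_t) b << 16) | ((uint32_t) c << 8) | (uint32_t) d; }

int main ()
{
  assert ((tag ('d','e','v','3') & 0x000000FFu) == '3'); // v3 tag: USE shaper
  assert ((tag ('d','e','v','a') & 0x000000FFu) != '3'); // v2 tag: Indic shaper
}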
*/ if (planner->map.chosen_script[0] == HB_TAG ('D','F','L','T') || planner->map.chosen_script[0] == HB_TAG ('l','a','t','n')) - return &_hb_ot_complex_shaper_default; + return &_hb_ot_shaper_default; else if ((planner->map.chosen_script[0] & 0x000000FF) == '3') - return &_hb_ot_complex_shaper_use; + return &_hb_ot_shaper_use; else - return &_hb_ot_complex_shaper_indic; + return &_hb_ot_shaper_indic; case HB_SCRIPT_KHMER: - return &_hb_ot_complex_shaper_khmer; + return &_hb_ot_shaper_khmer; case HB_SCRIPT_MYANMAR: /* If the designer designed the font for the 'DFLT' script, @@ -260,16 +257,16 @@ hb_ot_shape_complex_categorize (const hb_ot_shape_planner_t *planner) if (planner->map.chosen_script[0] == HB_TAG ('D','F','L','T') || planner->map.chosen_script[0] == HB_TAG ('l','a','t','n') || planner->map.chosen_script[0] == HB_TAG ('m','y','m','r')) - return &_hb_ot_complex_shaper_default; + return &_hb_ot_shaper_default; else - return &_hb_ot_complex_shaper_myanmar; + return &_hb_ot_shaper_myanmar; #define HB_SCRIPT_MYANMAR_ZAWGYI ((hb_script_t) HB_TAG ('Q','a','a','g')) case HB_SCRIPT_MYANMAR_ZAWGYI: /* https://github.com/harfbuzz/harfbuzz/issues/1162 */ - return &_hb_ot_complex_shaper_myanmar_zawgyi; + return &_hb_ot_shaper_myanmar_zawgyi; /* Unicode-2.0 additions */ @@ -277,7 +274,7 @@ hb_ot_shape_complex_categorize (const hb_ot_shape_planner_t *planner) /* Unicode-3.0 additions */ case HB_SCRIPT_MONGOLIAN: - //case HB_SCRIPT_SINHALA: + case HB_SCRIPT_SINHALA: /* Unicode-3.2 additions */ case HB_SCRIPT_BUHID: @@ -390,11 +387,11 @@ hb_ot_shape_complex_categorize (const hb_ot_shape_planner_t *planner) * GSUB/GPOS needed, so there may be no scripts found! */ if (planner->map.chosen_script[0] == HB_TAG ('D','F','L','T') || planner->map.chosen_script[0] == HB_TAG ('l','a','t','n')) - return &_hb_ot_complex_shaper_default; + return &_hb_ot_shaper_default; else - return &_hb_ot_complex_shaper_use; + return &_hb_ot_shaper_use; } } -#endif /* HB_OT_SHAPE_COMPLEX_HH */ +#endif /* HB_OT_SHAPER_HH */ diff --git a/thirdparty/harfbuzz/src/hb-ot-tag-table.hh b/thirdparty/harfbuzz/src/hb-ot-tag-table.hh index b242fba2d8..9394b90ee6 100644 --- a/thirdparty/harfbuzz/src/hb-ot-tag-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-tag-table.hh @@ -219,6 +219,7 @@ static const LangTag ot_languages2[] = { {HB_TAG('z','u',' ',' '), HB_TAG('Z','U','L',' ')}, /* Zulu */ }; +#ifndef HB_NO_LANGUAGE_LONG static const LangTag ot_languages3[] = { {HB_TAG('a','a','e',' '), HB_TAG('S','Q','I',' ')}, /* Arbëreshë Albanian -> Albanian */ {HB_TAG('a','a','o',' '), HB_TAG('A','R','A',' ')}, /* Algerian Saharan Arabic -> Arabic */ @@ -1621,6 +1622,7 @@ static const LangTag ot_languages3[] = { /*{HB_TAG('z','z','a',' '), HB_TAG('Z','Z','A',' ')},*/ /* Zazaki [macrolanguage] */ {HB_TAG('z','z','j',' '), HB_TAG('Z','H','A',' ')}, /* Zuojiang Zhuang -> Zhuang */ }; +#endif /** * hb_ot_tags_from_complex_language: @@ -1636,7 +1638,7 @@ static const LangTag ot_languages3[] = { * * Return value: Whether any language systems were retrieved. **/ -static bool +static inline bool hb_ot_tags_from_complex_language (const char *lang_str, const char *limit, unsigned int *count /* IN/OUT */, @@ -2818,7 +2820,7 @@ out: * Return value: The #hb_language_t corresponding to the BCP 47 language tag, * or #HB_LANGUAGE_INVALID if @tag is not ambiguous. 
**/ -static hb_language_t +static inline hb_language_t hb_ot_ambiguous_tag_to_language (hb_tag_t tag) { switch (tag) diff --git a/thirdparty/harfbuzz/src/hb-ot-tag.cc b/thirdparty/harfbuzz/src/hb-ot-tag.cc index a4a9515362..ce5cdce98b 100644 --- a/thirdparty/harfbuzz/src/hb-ot-tag.cc +++ b/thirdparty/harfbuzz/src/hb-ot-tag.cc @@ -263,15 +263,19 @@ hb_ot_tags_from_language (const char *lang_str, unsigned int *count, hb_tag_t *tags) { - const char *s; +#ifndef HB_NO_LANGUAGE_LONG /* Check for matches of multiple subtags. */ if (hb_ot_tags_from_complex_language (lang_str, limit, count, tags)) return; +#endif /* Find a language matching in the first component. */ - s = strchr (lang_str, '-'); +#ifndef HB_NO_LANGUAGE_LONG + const char *s; s = strchr (lang_str, '-'); +#endif { +#ifndef HB_NO_LANGUAGE_LONG if (s && limit - lang_str >= 6) { const char *extlang_end = strchr (s + 1, '-'); @@ -280,6 +284,7 @@ hb_ot_tags_from_language (const char *lang_str, ISALPHA (s[1])) lang_str = s + 1; } +#endif const LangTag *ot_languages = nullptr; unsigned ot_languages_len = 0; const char *dash = strchr (lang_str, '-'); @@ -289,21 +294,23 @@ hb_ot_tags_from_language (const char *lang_str, ot_languages = ot_languages2; ot_languages_len = ARRAY_LENGTH (ot_languages2); } +#ifndef HB_NO_LANGUAGE_LONG else if (first_len == 3) { ot_languages = ot_languages3; ot_languages_len = ARRAY_LENGTH (ot_languages3); } +#endif hb_tag_t lang_tag = hb_tag_from_string (lang_str, first_len); - static unsigned last_tag_idx; /* Poor man's cache. */ - unsigned tag_idx = last_tag_idx; + static hb_atomic_int_t last_tag_idx; /* Poor man's cache. */ + unsigned tag_idx = last_tag_idx.get_relaxed (); if (likely (tag_idx < ot_languages_len && ot_languages[tag_idx].language == lang_tag) || hb_sorted_array (ot_languages, ot_languages_len).bfind (lang_tag, &tag_idx)) { - last_tag_idx = tag_idx; + last_tag_idx.set_relaxed (tag_idx); unsigned int i; while (tag_idx != 0 && ot_languages[tag_idx].language == ot_languages[tag_idx - 1].language) @@ -320,6 +327,7 @@ hb_ot_tags_from_language (const char *lang_str, } } +#ifndef HB_NO_LANGUAGE_LONG if (!s) s = lang_str + strlen (lang_str); if (s - lang_str == 3) { @@ -328,6 +336,7 @@ hb_ot_tags_from_language (const char *lang_str, *count = 1; return; } +#endif *count = 0; } @@ -472,11 +481,13 @@ hb_ot_tag_to_language (hb_tag_t tag) if (tag == HB_OT_TAG_DEFAULT_LANGUAGE) return nullptr; +#ifndef HB_NO_LANGUAGE_LONG { hb_language_t disambiguated_tag = hb_ot_ambiguous_tag_to_language (tag); if (disambiguated_tag != HB_LANGUAGE_INVALID) return disambiguated_tag; } +#endif char buf[4]; for (i = 0; i < ARRAY_LENGTH (ot_languages2); i++) @@ -485,12 +496,14 @@ hb_ot_tag_to_language (hb_tag_t tag) hb_tag_to_string (ot_languages2[i].language, buf); return hb_language_from_string (buf, 2); } +#ifndef HB_NO_LANGUAGE_LONG for (i = 0; i < ARRAY_LENGTH (ot_languages3); i++) if (ot_languages3[i].tag == tag) { hb_tag_to_string (ot_languages3[i].language, buf); return hb_language_from_string (buf, 3); } +#endif /* Return a custom language in the form of "x-hbot-AABBCCDD". 
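The `last_tag_idx` hunk above swaps a plain `static unsigned` for `hb_atomic_int_t` with relaxed accessors: the cache is racy by design, and relaxed ordering suffices because a stale index is merely a missed hit that the code revalidates against the sorted table before trusting it. A sketch of the same pattern with std::atomic; the helper and names are hypothetical, not HarfBuzz API:

#include <algorithm>
#include <atomic>
#include <cstddef>

static std::atomic<unsigned> last_idx_cache{0}; // poor man's cache, racy on purpose

template <typename K, size_t N>
static bool cached_bfind (const K (&sorted)[N], K key, unsigned *pos)
{
  unsigned i = last_idx_cache.load (std::memory_order_relaxed);
  if (i < N && sorted[i] == key) { *pos = i; return true; }  // validated hit
  const K *it = std::lower_bound (sorted, sorted + N, key);  // miss: binary search
  if (it == sorted + N || *it != key) return false;
  *pos = unsigned (it - sorted);
  last_idx_cache.store (*pos, std::memory_order_relaxed);    // losing this race is fine
  return true;
}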
* If it's three letters long, also guess it's ISO 639-3 and lower-case and @@ -596,6 +609,7 @@ test_langs_sorted () abort(); } } +#ifndef HB_NO_LANGUAGE_LONG for (unsigned int i = 1; i < ARRAY_LENGTH (ot_languages3); i++) { int c = ot_languages3[i].cmp (&ot_languages3[i - 1]); @@ -606,6 +620,7 @@ test_langs_sorted () abort(); } } +#endif } int diff --git a/thirdparty/harfbuzz/src/hb-ot-var-fvar-table.hh b/thirdparty/harfbuzz/src/hb-ot-var-fvar-table.hh index e066558683..c5c476bc0e 100644 --- a/thirdparty/harfbuzz/src/hb-ot-var-fvar-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-var-fvar-table.hh @@ -213,7 +213,7 @@ struct fvar if (!axis_index) axis_index = &i; *axis_index = HB_OT_VAR_NO_AXIS_INDEX; auto axes = get_axes (); - return axes.lfind (tag, axis_index) && (axes[*axis_index].get_axis_deprecated (info), true); + return axes.lfind (tag, axis_index) && ((void) axes[*axis_index].get_axis_deprecated (info), true); } #endif bool @@ -221,7 +221,7 @@ struct fvar { unsigned i; auto axes = get_axes (); - return axes.lfind (tag, &i) && (axes[i].get_axis_info (i, info), true); + return axes.lfind (tag, &i) && ((void) axes[i].get_axis_info (i, info), true); } int normalize_axis_value (unsigned int axis_index, float v) const diff --git a/thirdparty/harfbuzz/src/hb-ot-var-gvar-table.hh b/thirdparty/harfbuzz/src/hb-ot-var-gvar-table.hh index 3b2a38b9a6..bf1039d1d6 100644 --- a/thirdparty/harfbuzz/src/hb-ot-var-gvar-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-var-gvar-table.hh @@ -45,9 +45,10 @@ struct contour_point_t void translate (const contour_point_t &p) { x += p.x; y += p.y; } - uint8_t flag; - float x, y; - bool is_end_point; + float x = 0.f; + float y = 0.f; + uint8_t flag = 0; + bool is_end_point = false; }; struct contour_point_vector_t : hb_vector_t<contour_point_t> @@ -55,16 +56,24 @@ struct contour_point_vector_t : hb_vector_t<contour_point_t> void extend (const hb_array_t<contour_point_t> &a) { unsigned int old_len = length; - resize (old_len + a.length); - for (unsigned int i = 0; i < a.length; i++) - (*this)[old_len + i] = a[i]; + if (unlikely (!resize (old_len + a.length))) + return; + auto arrayZ = this->arrayZ + old_len; + unsigned count = a.length; + for (unsigned int i = 0; i < count; i++) + arrayZ[i] = a.arrayZ[i]; } void transform (const float (&matrix)[4]) { - for (unsigned int i = 0; i < length; i++) + if (matrix[0] == 1.f && matrix[1] == 0.f && + matrix[2] == 0.f && matrix[3] == 1.f) + return; + auto arrayZ = this->arrayZ; + unsigned count = length; + for (unsigned i = 0; i < count; i++) { - contour_point_t &p = (*this)[i]; + contour_point_t &p = arrayZ[i]; float x_ = p.x * matrix[0] + p.y * matrix[2]; p.y = p.x * matrix[1] + p.y * matrix[3]; p.x = x_; @@ -73,8 +82,12 @@ struct contour_point_vector_t : hb_vector_t<contour_point_t> void translate (const contour_point_t& delta) { - for (unsigned int i = 0; i < length; i++) - (*this)[i].translate (delta); + if (delta.x == 0.f && delta.y == 0.f) + return; + auto arrayZ = this->arrayZ; + unsigned count = length; + for (unsigned i = 0; i < count; i++) + arrayZ[i].translate (delta); } }; @@ -89,7 +102,7 @@ struct TupleVariationHeader const TupleVariationHeader &get_next (unsigned axis_count) const { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); } - float calculate_scalar (const int *coords, unsigned int coord_count, + float calculate_scalar (hb_array_t<int> coords, unsigned int coord_count, const hb_array_t<const F2DOT14> shared_tuples) const { hb_array_t<const F2DOT14> peak_tuple; @@ -208,7 +221,7 @@ 
struct GlyphVariationData { const HBUINT8 *base = &(var_data+var_data->data); const HBUINT8 *p = base; - if (!unpack_points (p, shared_indices, var_data_bytes)) return false; + if (!unpack_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false; data_offset = p - base; } return true; @@ -258,7 +271,7 @@ struct GlyphVariationData static bool unpack_points (const HBUINT8 *&p /* IN/OUT */, hb_vector_t<unsigned int> &points /* OUT */, - const hb_bytes_t &bytes) + const HBUINT8 *end) { enum packed_point_flag_t { @@ -266,21 +279,21 @@ struct GlyphVariationData POINT_RUN_COUNT_MASK = 0x7F }; - if (unlikely (!bytes.check_range (p))) return false; + if (unlikely (p + 1 > end)) return false; uint16_t count = *p++; if (count & POINTS_ARE_WORDS) { - if (unlikely (!bytes.check_range (p))) return false; + if (unlikely (p + 1 > end)) return false; count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++; } - points.resize (count); + if (unlikely (!points.resize (count))) return false; unsigned int n = 0; uint16_t i = 0; while (i < count) { - if (unlikely (!bytes.check_range (p))) return false; + if (unlikely (p + 1 > end)) return false; uint16_t j; uint8_t control = *p++; uint16_t run_count = (control & POINT_RUN_COUNT_MASK) + 1; @@ -288,8 +301,7 @@ struct GlyphVariationData { for (j = 0; j < run_count && i < count; j++, i++) { - if (unlikely (!bytes.check_range ((const HBUINT16 *) p))) - return false; + if (unlikely (p + HBUINT16::static_size > end)) return false; n += *(const HBUINT16 *)p; points[i] = n; p += HBUINT16::static_size; @@ -299,7 +311,7 @@ struct GlyphVariationData { for (j = 0; j < run_count && i < count; j++, i++) { - if (unlikely (!bytes.check_range (p))) return false; + if (unlikely (p + 1 > end)) return false; n += *p++; points[i] = n; } @@ -311,7 +323,7 @@ struct GlyphVariationData static bool unpack_deltas (const HBUINT8 *&p /* IN/OUT */, hb_vector_t<int> &deltas /* IN/OUT */, - const hb_bytes_t &bytes) + const HBUINT8 *end) { enum packed_delta_flag_t { @@ -324,7 +336,7 @@ struct GlyphVariationData unsigned int count = deltas.length; while (i < count) { - if (unlikely (!bytes.check_range (p))) return false; + if (unlikely (p + 1 > end)) return false; uint8_t control = *p++; unsigned int run_count = (control & DELTA_RUN_COUNT_MASK) + 1; unsigned int j; @@ -334,16 +346,14 @@ struct GlyphVariationData else if (control & DELTAS_ARE_WORDS) for (j = 0; j < run_count && i < count; j++, i++) { - if (unlikely (!bytes.check_range ((const HBUINT16 *) p))) - return false; + if (unlikely (p + HBUINT16::static_size > end)) return false; deltas[i] = *(const HBINT16 *) p; p += HBUINT16::static_size; } else for (j = 0; j < run_count && i < count; j++, i++) { - if (unlikely (!bytes.check_range (p))) - return false; + if (unlikely (p + 1 > end)) return false; deltas[i] = *(const HBINT8 *) p++; } if (j < run_count) @@ -505,19 +515,17 @@ struct gvar ~accelerator_t () { table.destroy (); } private: - struct x_getter { static float get (const contour_point_t &p) { return p.x; } }; - struct y_getter { static float get (const contour_point_t &p) { return p.y; } }; - template <typename T> static float infer_delta (const hb_array_t<contour_point_t> points, const hb_array_t<contour_point_t> deltas, - unsigned int target, unsigned int prev, unsigned int next) + unsigned int target, unsigned int prev, unsigned int next, + float contour_point_t::*m) { - float target_val = T::get (points[target]); - float prev_val = T::get (points[prev]); - float next_val = T::get 
(points[next]); - float prev_delta = T::get (deltas[prev]); - float next_delta = T::get (deltas[next]); + float target_val = points[target].*m; + float prev_val = points[prev].*m; + float next_val = points[next].*m; + float prev_delta = deltas[prev].*m; + float next_delta = deltas[next].*m; if (prev_val == next_val) return (prev_delta == next_delta) ? prev_delta : 0.f; @@ -528,7 +536,7 @@ struct gvar /* linear interpolation */ float r = (target_val - prev_val) / (next_val - prev_val); - return (1.f - r) * prev_delta + r * next_delta; + return prev_delta + r * (next_delta - prev_delta); } static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end) @@ -538,8 +546,7 @@ struct gvar bool apply_deltas_to_points (hb_codepoint_t glyph, hb_font_t *font, const hb_array_t<contour_point_t> points) const { - /* num_coords should exactly match gvar's axisCount due to how GlyphVariationData tuples are aligned */ - if (!font->num_coords || font->num_coords != table->axisCount) return true; + if (!font->num_coords) return true; if (unlikely (glyph >= table->glyphCount)) return true; @@ -553,21 +560,25 @@ struct gvar /* Save original points for inferred delta calculation */ contour_point_vector_t orig_points; - orig_points.resize (points.length); + if (unlikely (!orig_points.resize (points.length))) return false; for (unsigned int i = 0; i < orig_points.length; i++) - orig_points[i] = points[i]; + orig_points.arrayZ[i] = points.arrayZ[i]; contour_point_vector_t deltas; /* flag is used to indicate referenced point */ - deltas.resize (points.length); + if (unlikely (!deltas.resize (points.length))) return false; hb_vector_t<unsigned> end_points; for (unsigned i = 0; i < points.length; ++i) if (points[i].is_end_point) end_points.push (i); - int *coords = font->coords; - unsigned num_coords = font->num_coords; + auto coords = hb_array (font->coords, font->num_coords); + unsigned num_coords = table->axisCount; hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * table->axisCount); + + hb_vector_t<unsigned int> private_indices; + hb_vector_t<int> x_deltas; + hb_vector_t<int> y_deltas; do { float scalar = iterator.current_tuple->calculate_scalar (coords, num_coords, shared_tuples); @@ -577,33 +588,30 @@ struct gvar if (unlikely (!iterator.var_data_bytes.check_range (p, length))) return false; - hb_bytes_t bytes ((const char *) p, length); - hb_vector_t<unsigned int> private_indices; + const HBUINT8 *end = p + length; + bool has_private_points = iterator.current_tuple->has_private_points (); if (has_private_points && - !GlyphVariationData::unpack_points (p, private_indices, bytes)) + !GlyphVariationData::unpack_points (p, private_indices, end)) return false; const hb_array_t<unsigned int> &indices = has_private_points ? private_indices : shared_indices; bool apply_to_all = (indices.length == 0); unsigned int num_deltas = apply_to_all ? 
points.length : indices.length; - hb_vector_t<int> x_deltas; - x_deltas.resize (num_deltas); - if (!GlyphVariationData::unpack_deltas (p, x_deltas, bytes)) - return false; - hb_vector_t<int> y_deltas; - y_deltas.resize (num_deltas); - if (!GlyphVariationData::unpack_deltas (p, y_deltas, bytes)) - return false; + if (unlikely (!x_deltas.resize (num_deltas))) return false; + if (unlikely (!GlyphVariationData::unpack_deltas (p, x_deltas, end))) return false; + if (unlikely (!y_deltas.resize (num_deltas))) return false; + if (unlikely (!GlyphVariationData::unpack_deltas (p, y_deltas, end))) return false; for (unsigned int i = 0; i < deltas.length; i++) deltas[i].init (); for (unsigned int i = 0; i < num_deltas; i++) { unsigned int pt_index = apply_to_all ? i : indices[i]; - deltas[pt_index].flag = 1; /* this point is referenced, i.e., explicit deltas specified */ - deltas[pt_index].x += x_deltas[i] * scalar; - deltas[pt_index].y += y_deltas[i] * scalar; + if (unlikely (pt_index >= deltas.length)) continue; + deltas.arrayZ[pt_index].flag = 1; /* this point is referenced, i.e., explicit deltas specified */ + deltas.arrayZ[pt_index].x += x_deltas.arrayZ[i] * scalar; + deltas.arrayZ[pt_index].y += y_deltas.arrayZ[i] * scalar; } /* infer deltas for unreferenced points */ @@ -647,20 +655,20 @@ struct gvar { i = next_index (i, start_point, end_point); if (i == next) break; - deltas[i].x = infer_delta<x_getter> (orig_points.as_array (), deltas.as_array (), i, prev, next); - deltas[i].y = infer_delta<y_getter> (orig_points.as_array (), deltas.as_array (), i, prev, next); + deltas[i].x = infer_delta (orig_points.as_array (), deltas.as_array (), i, prev, next, &contour_point_t::x); + deltas[i].y = infer_delta (orig_points.as_array (), deltas.as_array (), i, prev, next, &contour_point_t::y); if (--unref_count == 0) goto no_more_gaps; } } -no_more_gaps: + no_more_gaps: start_point = end_point + 1; } /* apply specified / inferred deltas to points */ for (unsigned int i = 0; i < points.length; i++) { - points[i].x += deltas[i].x; - points[i].y += deltas[i].y; + points.arrayZ[i].x += deltas.arrayZ[i].x; + points.arrayZ[i].y += deltas.arrayZ[i].y; } } while (iterator.move_to_next ()); diff --git a/thirdparty/harfbuzz/src/hb-ot-var-hvar-table.hh b/thirdparty/harfbuzz/src/hb-ot-var-hvar-table.hh index e9d90352f0..56efcdbee9 100644 --- a/thirdparty/harfbuzz/src/hb-ot-var-hvar-table.hh +++ b/thirdparty/harfbuzz/src/hb-ot-var-hvar-table.hh @@ -319,10 +319,15 @@ struct HVARVVAR hvar_plan.index_map_plans.as_array ())); } - float get_advance_var (hb_codepoint_t glyph, hb_font_t *font) const + float get_advance_var (hb_codepoint_t glyph, + hb_font_t *font, + VariationStore::cache_t *store_cache = nullptr) const { uint32_t varidx = (this+advMap).map (glyph); - return (this+varStore).get_delta (varidx, font->coords, font->num_coords); + return (this+varStore).get_delta (varidx, + font->coords, + font->num_coords, + store_cache); } float get_side_bearing_var (hb_codepoint_t glyph, @@ -335,7 +340,7 @@ struct HVARVVAR bool has_side_bearing_deltas () const { return lsbMap && rsbMap; } - protected: + public: FixedVersion<>version; /* Version of the metrics variation table * initially set to 0x00010000u */ Offset32To<VariationStore> diff --git a/thirdparty/harfbuzz/src/hb-repacker.hh b/thirdparty/harfbuzz/src/hb-repacker.hh index ce9ff90bb4..683a441ec3 100644 --- a/thirdparty/harfbuzz/src/hb-repacker.hh +++ b/thirdparty/harfbuzz/src/hb-repacker.hh @@ -32,1033 +32,18 @@ #include "hb-priority-queue.hh" #include "hb-serialize.hh" 
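The gvar hunks above replace the `x_getter`/`y_getter` template plumbing in `infer_delta` with a single pointer-to-member parameter, and rewrite the interpolation from `(1.f - r) * prev + r * next` to `prev + r * (next - prev)`, which is the same line algebraically with one fewer multiply. A sketch of the pointer-to-member form on a simplified point type:

struct point_t { float x, y; };

// One routine serves both axes; the axis is picked by a pointer-to-member
// instead of instantiating a template per getter struct.
static float lerp_member (const point_t &a, const point_t &b, float r,
                          float point_t::*m)
{
  return a.*m + r * (b.*m - a.*m); // same value as (1-r)*a + r*b
}

// usage: lerp_member (p0, p1, 0.25f, &point_t::x);
//        lerp_member (p0, p1, 0.25f, &point_t::y);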
#include "hb-vector.hh" +#include "graph/graph.hh" +#include "graph/serialize.hh" + +using graph::graph_t; /* * For a detailed writeup on the overflow resolution algorithm see: * docs/repacker.md */ -struct graph_t -{ - struct vertex_t - { - hb_serialize_context_t::object_t obj; - int64_t distance = 0 ; - int64_t space = 0 ; - hb_vector_t<unsigned> parents; - unsigned start = 0; - unsigned end = 0; - unsigned priority = 0; - - friend void swap (vertex_t& a, vertex_t& b) - { - hb_swap (a.obj, b.obj); - hb_swap (a.distance, b.distance); - hb_swap (a.space, b.space); - hb_swap (a.parents, b.parents); - hb_swap (a.start, b.start); - hb_swap (a.end, b.end); - hb_swap (a.priority, b.priority); - } - - bool is_shared () const - { - return parents.length > 1; - } - - unsigned incoming_edges () const - { - return parents.length; - } - - void remove_parent (unsigned parent_index) - { - for (unsigned i = 0; i < parents.length; i++) - { - if (parents[i] != parent_index) continue; - parents.remove (i); - break; - } - } - - void remap_parents (const hb_vector_t<unsigned>& id_map) - { - for (unsigned i = 0; i < parents.length; i++) - parents[i] = id_map[parents[i]]; - } - - void remap_parent (unsigned old_index, unsigned new_index) - { - for (unsigned i = 0; i < parents.length; i++) - { - if (parents[i] == old_index) - parents[i] = new_index; - } - } - - bool is_leaf () const - { - return !obj.real_links.length && !obj.virtual_links.length; - } - - bool raise_priority () - { - if (has_max_priority ()) return false; - priority++; - return true; - } - - bool has_max_priority () const { - return priority >= 3; - } - - int64_t modified_distance (unsigned order) const - { - // TODO(garretrieger): once priority is high enough, should try - // setting distance = 0 which will force to sort immediately after - // it's parent where possible. - - int64_t modified_distance = - hb_min (hb_max(distance + distance_modifier (), 0), 0x7FFFFFFFFFF); - if (has_max_priority ()) { - modified_distance = 0; - } - return (modified_distance << 18) | (0x003FFFF & order); - } - - int64_t distance_modifier () const - { - if (!priority) return 0; - int64_t table_size = obj.tail - obj.head; - - if (priority == 1) - return -table_size / 2; - - return -table_size; - } - }; - - struct overflow_record_t - { - unsigned parent; - unsigned child; - }; - - /* - * A topological sorting of an object graph. Ordered - * in reverse serialization order (first object in the - * serialization is at the end of the list). This matches - * the 'packed' object stack used internally in the - * serializer - */ - template<typename T> - graph_t (const T& objects) - : parents_invalid (true), - distance_invalid (true), - positions_invalid (true), - successful (true) - { - num_roots_for_space_.push (1); - bool removed_nil = false; - vertices_.alloc (objects.length); - vertices_scratch_.alloc (objects.length); - for (unsigned i = 0; i < objects.length; i++) - { - // TODO(grieger): check all links point to valid objects. - - // If this graph came from a serialization buffer object 0 is the - // nil object. We don't need it for our purposes here so drop it. - if (i == 0 && !objects[i]) - { - removed_nil = true; - continue; - } - - vertex_t* v = vertices_.push (); - if (check_success (!vertices_.in_error ())) - v->obj = *objects[i]; - if (!removed_nil) continue; - // Fix indices to account for removed nil object. 
- for (auto& l : v->obj.all_links_writer ()) { - l.objidx--; - } - } - } - - ~graph_t () - { - vertices_.fini (); - } - - bool in_error () const - { - return !successful || - vertices_.in_error () || - num_roots_for_space_.in_error (); - } - - const vertex_t& root () const - { - return vertices_[root_idx ()]; - } - - unsigned root_idx () const - { - // Object graphs are in reverse order, the first object is at the end - // of the vector. Since the graph is topologically sorted it's safe to - // assume the first object has no incoming edges. - return vertices_.length - 1; - } - - const hb_serialize_context_t::object_t& object(unsigned i) const - { - return vertices_[i].obj; - } - - /* - * serialize graph into the provided serialization buffer. - */ - hb_blob_t* serialize () const - { - hb_vector_t<char> buffer; - size_t size = serialized_length (); - if (!buffer.alloc (size)) { - DEBUG_MSG (SUBSET_REPACK, nullptr, "Unable to allocate output buffer."); - return nullptr; - } - hb_serialize_context_t c((void *) buffer, size); - - c.start_serialize<void> (); - for (unsigned i = 0; i < vertices_.length; i++) { - c.push (); - - size_t size = vertices_[i].obj.tail - vertices_[i].obj.head; - char* start = c.allocate_size <char> (size); - if (!start) { - DEBUG_MSG (SUBSET_REPACK, nullptr, "Buffer out of space."); - return nullptr; - } - - memcpy (start, vertices_[i].obj.head, size); - - // Only real links needs to be serialized. - for (const auto& link : vertices_[i].obj.real_links) - serialize_link (link, start, &c); - - // All duplications are already encoded in the graph, so don't - // enable sharing during packing. - c.pop_pack (false); - } - c.end_serialize (); - - if (c.in_error ()) { - DEBUG_MSG (SUBSET_REPACK, nullptr, "Error during serialization. Err flag: %d", - c.errors); - return nullptr; - } - - return c.copy_blob (); - } - - /* - * Generates a new topological sorting of graph ordered by the shortest - * distance to each node. - */ - void sort_shortest_distance () - { - positions_invalid = true; - - if (vertices_.length <= 1) { - // Graph of 1 or less doesn't need sorting. - return; - } - - update_distances (); - - hb_priority_queue_t queue; - hb_vector_t<vertex_t> &sorted_graph = vertices_scratch_; - if (unlikely (!check_success (sorted_graph.resize (vertices_.length)))) return; - hb_vector_t<unsigned> id_map; - if (unlikely (!check_success (id_map.resize (vertices_.length)))) return; - - hb_vector_t<unsigned> removed_edges; - if (unlikely (!check_success (removed_edges.resize (vertices_.length)))) return; - update_parents (); - - queue.insert (root ().modified_distance (0), root_idx ()); - int new_id = root_idx (); - unsigned order = 1; - while (!queue.in_error () && !queue.is_empty ()) - { - unsigned next_id = queue.pop_minimum().second; - - hb_swap (sorted_graph[new_id], vertices_[next_id]); - const vertex_t& next = sorted_graph[new_id]; - - id_map[next_id] = new_id--; - - for (const auto& link : next.obj.all_links ()) { - removed_edges[link.objidx]++; - if (!(vertices_[link.objidx].incoming_edges () - removed_edges[link.objidx])) - // Add the order that the links were encountered to the priority. - // This ensures that ties between priorities objects are broken in a consistent - // way. More specifically this is set up so that if a set of objects have the same - // distance they'll be added to the topological order in the order that they are - // referenced from the parent object. 
- queue.insert (vertices_[link.objidx].modified_distance (order++), - link.objidx); - } - } - - check_success (!queue.in_error ()); - check_success (!sorted_graph.in_error ()); - if (!check_success (new_id == -1)) - print_orphaned_nodes (); - - remap_all_obj_indices (id_map, &sorted_graph); - - hb_swap (vertices_, sorted_graph); - } - - /* - * Assign unique space numbers to each connected subgraph of 32 bit offset(s). - */ - bool assign_32bit_spaces () - { - unsigned root_index = root_idx (); - hb_set_t visited; - hb_set_t roots; - for (unsigned i = 0; i <= root_index; i++) - { - // Only real links can form 32 bit spaces - for (auto& l : vertices_[i].obj.real_links) - { - if (l.width == 4 && !l.is_signed) - { - roots.add (l.objidx); - find_subgraph (l.objidx, visited); - } - } - } - - // Mark everything not in the subgraphs of 32 bit roots as visited. - // This prevents 32 bit subgraphs from being connected via nodes not in the 32 bit subgraphs. - visited.invert (); - - if (!roots) return false; - - while (roots) - { - unsigned next = HB_SET_VALUE_INVALID; - if (unlikely (!check_success (!roots.in_error ()))) break; - if (!roots.next (&next)) break; - - hb_set_t connected_roots; - find_connected_nodes (next, roots, visited, connected_roots); - if (unlikely (!check_success (!connected_roots.in_error ()))) break; - - isolate_subgraph (connected_roots); - if (unlikely (!check_success (!connected_roots.in_error ()))) break; - - unsigned next_space = this->next_space (); - num_roots_for_space_.push (0); - for (unsigned root : connected_roots) - { - DEBUG_MSG (SUBSET_REPACK, nullptr, "Subgraph %u gets space %u", root, next_space); - vertices_[root].space = next_space; - num_roots_for_space_[next_space] = num_roots_for_space_[next_space] + 1; - distance_invalid = true; - positions_invalid = true; - } - - // TODO(grieger): special case for GSUB/GPOS use extension promotions to move 16 bit space - // into the 32 bit space as needed, instead of using isolation. - } - - - - return true; - } - - /* - * Isolates the subgraph of nodes reachable from root. Any links to nodes in the subgraph - * that originate from outside of the subgraph will be removed by duplicating the linked to - * object. - * - * Indices stored in roots will be updated if any of the roots are duplicated to new indices. - */ - bool isolate_subgraph (hb_set_t& roots) - { - update_parents (); - hb_hashmap_t<unsigned, unsigned> subgraph; - - // incoming edges to root_idx should be all 32 bit in length so we don't need to de-dup these - // set the subgraph incoming edge count to match all of root_idx's incoming edges - hb_set_t parents; - for (unsigned root_idx : roots) - { - subgraph.set (root_idx, wide_parents (root_idx, parents)); - find_subgraph (root_idx, subgraph); - } - - unsigned original_root_idx = root_idx (); - hb_hashmap_t<unsigned, unsigned> index_map; - bool made_changes = false; - for (auto entry : subgraph.iter ()) - { - const auto& node = vertices_[entry.first]; - unsigned subgraph_incoming_edges = entry.second; - - if (subgraph_incoming_edges < node.incoming_edges ()) - { - // Only de-dup objects with incoming links from outside the subgraph. 
- made_changes = true; - duplicate_subgraph (entry.first, index_map); - } - } - - if (!made_changes) - return false; - - if (original_root_idx != root_idx () - && parents.has (original_root_idx)) - { - // If the root idx has changed since parents was determined, update root idx in parents - parents.add (root_idx ()); - parents.del (original_root_idx); - } - - auto new_subgraph = - + subgraph.keys () - | hb_map([&] (unsigned node_idx) { - if (index_map.has (node_idx)) return index_map[node_idx]; - return node_idx; - }) - ; - - remap_obj_indices (index_map, new_subgraph); - remap_obj_indices (index_map, parents.iter (), true); - - // Update roots set with new indices as needed. - unsigned next = HB_SET_VALUE_INVALID; - while (roots.next (&next)) - { - if (index_map.has (next)) - { - roots.del (next); - roots.add (index_map[next]); - } - } - - return true; - } - - void find_subgraph (unsigned node_idx, hb_hashmap_t<unsigned, unsigned>& subgraph) - { - for (const auto& link : vertices_[node_idx].obj.all_links ()) - { - if (subgraph.has (link.objidx)) - { - subgraph.set (link.objidx, subgraph[link.objidx] + 1); - continue; - } - subgraph.set (link.objidx, 1); - find_subgraph (link.objidx, subgraph); - } - } - - void find_subgraph (unsigned node_idx, hb_set_t& subgraph) - { - if (subgraph.has (node_idx)) return; - subgraph.add (node_idx); - for (const auto& link : vertices_[node_idx].obj.all_links ()) - find_subgraph (link.objidx, subgraph); - } - - /* - * duplicates all nodes in the subgraph reachable from node_idx. Does not re-assign - * links. index_map is updated with mappings from old id to new id. If a duplication has already - * been performed for a given index, then it will be skipped. - */ - void duplicate_subgraph (unsigned node_idx, hb_hashmap_t<unsigned, unsigned>& index_map) - { - if (index_map.has (node_idx)) - return; - - index_map.set (node_idx, duplicate (node_idx)); - for (const auto& l : object (node_idx).all_links ()) { - duplicate_subgraph (l.objidx, index_map); - } - } - - /* - * Creates a copy of node_idx and returns it's new index. - */ - unsigned duplicate (unsigned node_idx) - { - positions_invalid = true; - distance_invalid = true; - - auto* clone = vertices_.push (); - auto& child = vertices_[node_idx]; - if (vertices_.in_error ()) { - return -1; - } - - clone->obj.head = child.obj.head; - clone->obj.tail = child.obj.tail; - clone->distance = child.distance; - clone->space = child.space; - clone->parents.reset (); - - unsigned clone_idx = vertices_.length - 2; - for (const auto& l : child.obj.real_links) - { - clone->obj.real_links.push (l); - vertices_[l.objidx].parents.push (clone_idx); - } - for (const auto& l : child.obj.virtual_links) - { - clone->obj.virtual_links.push (l); - vertices_[l.objidx].parents.push (clone_idx); - } - - check_success (!clone->obj.real_links.in_error ()); - check_success (!clone->obj.virtual_links.in_error ()); - - // The last object is the root of the graph, so swap back the root to the end. - // The root's obj idx does change, however since it's root nothing else refers to it. - // all other obj idx's will be unaffected. - hb_swap (vertices_[vertices_.length - 2], *clone); - - // Since the root moved, update the parents arrays of all children on the root. - for (const auto& l : root ().obj.all_links ()) - vertices_[l.objidx].remap_parent (root_idx () - 1, root_idx ()); - - return clone_idx; - } - - /* - * Creates a copy of child and re-assigns the link from - * parent to the clone. 
The copy is a shallow copy, objects - * linked from child are not duplicated. - */ - bool duplicate (unsigned parent_idx, unsigned child_idx) - { - update_parents (); - - unsigned links_to_child = 0; - for (const auto& l : vertices_[parent_idx].obj.all_links ()) - { - if (l.objidx == child_idx) links_to_child++; - } - - if (vertices_[child_idx].incoming_edges () <= links_to_child) - { - // Can't duplicate this node, doing so would orphan the original one as all remaining links - // to child are from parent. - DEBUG_MSG (SUBSET_REPACK, nullptr, " Not duplicating %d => %d", - parent_idx, child_idx); - return false; - } - - DEBUG_MSG (SUBSET_REPACK, nullptr, " Duplicating %d => %d", - parent_idx, child_idx); - - unsigned clone_idx = duplicate (child_idx); - if (clone_idx == (unsigned) -1) return false; - // duplicate shifts the root node idx, so if parent_idx was root update it. - if (parent_idx == clone_idx) parent_idx++; - - auto& parent = vertices_[parent_idx]; - for (auto& l : parent.obj.all_links_writer ()) - { - if (l.objidx != child_idx) - continue; - - reassign_link (l, parent_idx, clone_idx); - } - - return true; - } - - /* - * Raises the sorting priority of all children. - */ - bool raise_childrens_priority (unsigned parent_idx) - { - DEBUG_MSG (SUBSET_REPACK, nullptr, " Raising priority of all children of %d", - parent_idx); - // This operation doesn't change ordering until a sort is run, so no need - // to invalidate positions. It does not change graph structure so no need - // to update distances or edge counts. - auto& parent = vertices_[parent_idx].obj; - bool made_change = false; - for (auto& l : parent.all_links_writer ()) - made_change |= vertices_[l.objidx].raise_priority (); - return made_change; - } - - /* - * Will any offsets overflow on graph when it's serialized? 
- */ - bool will_overflow (hb_vector_t<overflow_record_t>* overflows = nullptr) - { - if (overflows) overflows->resize (0); - update_positions (); - - for (int parent_idx = vertices_.length - 1; parent_idx >= 0; parent_idx--) - { - // Don't need to check virtual links for overflow - for (const auto& link : vertices_[parent_idx].obj.real_links) - { - int64_t offset = compute_offset (parent_idx, link); - if (is_valid_offset (offset, link)) - continue; - - if (!overflows) return true; - - overflow_record_t r; - r.parent = parent_idx; - r.child = link.objidx; - overflows->push (r); - } - } - - if (!overflows) return false; - return overflows->length; - } - - void print_orphaned_nodes () - { - if (!DEBUG_ENABLED(SUBSET_REPACK)) return; - - DEBUG_MSG (SUBSET_REPACK, nullptr, "Graph is not fully connected."); - parents_invalid = true; - update_parents(); - - for (unsigned i = 0; i < root_idx (); i++) - { - const auto& v = vertices_[i]; - if (!v.parents) - DEBUG_MSG (SUBSET_REPACK, nullptr, "Node %u is orphaned.", i); - } - } - - void print_overflows (const hb_vector_t<overflow_record_t>& overflows) - { - if (!DEBUG_ENABLED(SUBSET_REPACK)) return; - - update_parents (); - int limit = 10; - for (const auto& o : overflows) - { - if (!limit--) break; - const auto& parent = vertices_[o.parent]; - const auto& child = vertices_[o.child]; - DEBUG_MSG (SUBSET_REPACK, nullptr, - " overflow from " - "%4d (%4d in, %4d out, space %2d) => " - "%4d (%4d in, %4d out, space %2d)", - o.parent, - parent.incoming_edges (), - parent.obj.real_links.length + parent.obj.virtual_links.length, - space_for (o.parent), - o.child, - child.incoming_edges (), - child.obj.real_links.length + child.obj.virtual_links.length, - space_for (o.child)); - } - if (overflows.length > 10) { - DEBUG_MSG (SUBSET_REPACK, nullptr, " ... plus %d more overflows.", overflows.length - 10); - } - } - - unsigned num_roots_for_space (unsigned space) const - { - return num_roots_for_space_[space]; - } - - unsigned next_space () const - { - return num_roots_for_space_.length; - } - - void move_to_new_space (const hb_set_t& indices) - { - num_roots_for_space_.push (0); - unsigned new_space = num_roots_for_space_.length - 1; - - for (unsigned index : indices) { - auto& node = vertices_[index]; - num_roots_for_space_[node.space] = num_roots_for_space_[node.space] - 1; - num_roots_for_space_[new_space] = num_roots_for_space_[new_space] + 1; - node.space = new_space; - distance_invalid = true; - positions_invalid = true; - } - } - - unsigned space_for (unsigned index, unsigned* root = nullptr) const - { - const auto& node = vertices_[index]; - if (node.space) - { - if (root != nullptr) - *root = index; - return node.space; - } - - if (!node.parents) - { - if (root) - *root = index; - return 0; - } - - return space_for (node.parents[0], root); - } - - void err_other_error () { this->successful = false; } - - private: - - size_t serialized_length () const { - size_t total_size = 0; - for (unsigned i = 0; i < vertices_.length; i++) { - size_t size = vertices_[i].obj.tail - vertices_[i].obj.head; - total_size += size; - } - return total_size; - } - - /* - * Returns the numbers of incoming edges that are 32bits wide. 
- */ - unsigned wide_parents (unsigned node_idx, hb_set_t& parents) const - { - unsigned count = 0; - hb_set_t visited; - for (unsigned p : vertices_[node_idx].parents) - { - if (visited.has (p)) continue; - visited.add (p); - - // Only real links can be wide - for (const auto& l : vertices_[p].obj.real_links) - { - if (l.objidx == node_idx && l.width == 4 && !l.is_signed) - { - count++; - parents.add (p); - } - } - } - return count; - } - - bool check_success (bool success) - { return this->successful && (success || (err_other_error (), false)); } - - /* - * Creates a map from objid to # of incoming edges. - */ - void update_parents () - { - if (!parents_invalid) return; - - for (unsigned i = 0; i < vertices_.length; i++) - vertices_[i].parents.reset (); - - for (unsigned p = 0; p < vertices_.length; p++) - { - for (auto& l : vertices_[p].obj.all_links ()) - { - vertices_[l.objidx].parents.push (p); - } - } - - parents_invalid = false; - } - - /* - * compute the serialized start and end positions for each vertex. - */ - void update_positions () - { - if (!positions_invalid) return; - - unsigned current_pos = 0; - for (int i = root_idx (); i >= 0; i--) - { - auto& v = vertices_[i]; - v.start = current_pos; - current_pos += v.obj.tail - v.obj.head; - v.end = current_pos; - } - - positions_invalid = false; - } - - /* - * Finds the distance to each object in the graph - * from the initial node. - */ - void update_distances () - { - if (!distance_invalid) return; - - // Uses Dijkstra's algorithm to find all of the shortest distances. - // https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm - // - // Implementation Note: - // Since our priority queue doesn't support fast priority decreases - // we instead just add new entries into the queue when a priority changes. - // Redundant ones are filtered out later on by the visited set. - // According to https://www3.cs.stonybrook.edu/~rezaul/papers/TR-07-54.pdf - // for practical performance this is faster then using a more advanced queue - // (such as a fibonacci queue) with a fast decrease priority. - for (unsigned i = 0; i < vertices_.length; i++) - { - if (i == vertices_.length - 1) - vertices_[i].distance = 0; - else - vertices_[i].distance = hb_int_max (int64_t); - } - - hb_priority_queue_t queue; - queue.insert (0, vertices_.length - 1); - - hb_vector_t<bool> visited; - visited.resize (vertices_.length); - - while (!queue.in_error () && !queue.is_empty ()) - { - unsigned next_idx = queue.pop_minimum ().second; - if (visited[next_idx]) continue; - const auto& next = vertices_[next_idx]; - int64_t next_distance = vertices_[next_idx].distance; - visited[next_idx] = true; - - for (const auto& link : next.obj.all_links ()) - { - if (visited[link.objidx]) continue; - - const auto& child = vertices_[link.objidx].obj; - unsigned link_width = link.width ? 
link.width : 4; // treat virtual offsets as 32 bits wide - int64_t child_weight = (child.tail - child.head) + - ((int64_t) 1 << (link_width * 8)) * (vertices_[link.objidx].space + 1); - int64_t child_distance = next_distance + child_weight; - - if (child_distance < vertices_[link.objidx].distance) - { - vertices_[link.objidx].distance = child_distance; - queue.insert (child_distance, link.objidx); - } - } - } - - check_success (!queue.in_error ()); - if (!check_success (queue.is_empty ())) - { - print_orphaned_nodes (); - return; - } - - distance_invalid = false; - } - - int64_t compute_offset ( - unsigned parent_idx, - const hb_serialize_context_t::object_t::link_t& link) const - { - const auto& parent = vertices_[parent_idx]; - const auto& child = vertices_[link.objidx]; - int64_t offset = 0; - switch ((hb_serialize_context_t::whence_t) link.whence) { - case hb_serialize_context_t::whence_t::Head: - offset = child.start - parent.start; break; - case hb_serialize_context_t::whence_t::Tail: - offset = child.start - parent.end; break; - case hb_serialize_context_t::whence_t::Absolute: - offset = child.start; break; - } - - assert (offset >= link.bias); - offset -= link.bias; - return offset; - } - - bool is_valid_offset (int64_t offset, - const hb_serialize_context_t::object_t::link_t& link) const - { - if (unlikely (!link.width)) - // Virtual links can't overflow. - return link.is_signed || offset >= 0; - - if (link.is_signed) - { - if (link.width == 4) - return offset >= -((int64_t) 1 << 31) && offset < ((int64_t) 1 << 31); - else - return offset >= -(1 << 15) && offset < (1 << 15); - } - else - { - if (link.width == 4) - return offset >= 0 && offset < ((int64_t) 1 << 32); - else if (link.width == 3) - return offset >= 0 && offset < ((int32_t) 1 << 24); - else - return offset >= 0 && offset < (1 << 16); - } - } - - /* - * Updates a link in the graph to point to a different object. Corrects the - * parents vector on the previous and new child nodes. - */ - void reassign_link (hb_serialize_context_t::object_t::link_t& link, - unsigned parent_idx, - unsigned new_idx) - { - unsigned old_idx = link.objidx; - link.objidx = new_idx; - vertices_[old_idx].remove_parent (parent_idx); - vertices_[new_idx].parents.push (parent_idx); - } - - /* - * Updates all objidx's in all links using the provided mapping. Corrects incoming edge counts. - */ - template<typename Iterator, hb_requires (hb_is_iterator (Iterator))> - void remap_obj_indices (const hb_hashmap_t<unsigned, unsigned>& id_map, - Iterator subgraph, - bool only_wide = false) - { - if (!id_map) return; - for (unsigned i : subgraph) - { - for (auto& link : vertices_[i].obj.all_links_writer ()) - { - if (!id_map.has (link.objidx)) continue; - if (only_wide && !(link.width == 4 && !link.is_signed)) continue; - - reassign_link (link, i, id_map[link.objidx]); - } - } - } - - /* - * Updates all objidx's in all links using the provided mapping. 
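`is_valid_offset` above is just a range check against the link's field width: signed fields get the symmetric two's-complement interval, unsigned fields get [0, 2^bits), and zero-width virtual links never overflow. A compact restatement of those bounds, generalized over width to match the 2-, 3- and 4-byte cases in the code:

#include <cstdint>

static bool fits (int64_t off, unsigned width, bool is_signed)
{
  if (width == 0) return is_signed || off >= 0;  // virtual link: can't overflow
  int64_t bits = 8 * (int64_t) width;
  if (is_signed)                                 // e.g. 2 bytes: [-32768, 32768)
    return off >= -(int64_t (1) << (bits - 1)) && off < (int64_t (1) << (bits - 1));
  return off >= 0 && off < (int64_t (1) << bits); // e.g. 2 bytes: [0, 65536)
}

// fits (70000, 2, false) == false: a 16-bit offset overflows, which is
// exactly the condition will_overflow records for the resolver to fix.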
- */ - void remap_all_obj_indices (const hb_vector_t<unsigned>& id_map, - hb_vector_t<vertex_t>* sorted_graph) const - { - for (unsigned i = 0; i < sorted_graph->length; i++) - { - (*sorted_graph)[i].remap_parents (id_map); - for (auto& link : (*sorted_graph)[i].obj.all_links_writer ()) - { - link.objidx = id_map[link.objidx]; - } - } - } - - template <typename O> void - serialize_link_of_type (const hb_serialize_context_t::object_t::link_t& link, - char* head, - hb_serialize_context_t* c) const - { - OT::Offset<O>* offset = reinterpret_cast<OT::Offset<O>*> (head + link.position); - *offset = 0; - c->add_link (*offset, - // serializer has an extra nil object at the start of the - // object array. So all id's are +1 of what our id's are. - link.objidx + 1, - (hb_serialize_context_t::whence_t) link.whence, - link.bias); - } - - void serialize_link (const hb_serialize_context_t::object_t::link_t& link, - char* head, - hb_serialize_context_t* c) const - { - switch (link.width) - { - case 0: - // Virtual links aren't serialized. - return; - case 4: - if (link.is_signed) - { - serialize_link_of_type<OT::HBINT32> (link, head, c); - } else { - serialize_link_of_type<OT::HBUINT32> (link, head, c); - } - return; - case 2: - if (link.is_signed) - { - serialize_link_of_type<OT::HBINT16> (link, head, c); - } else { - serialize_link_of_type<OT::HBUINT16> (link, head, c); - } - return; - case 3: - serialize_link_of_type<OT::HBUINT24> (link, head, c); - return; - default: - // Unexpected link width. - assert (0); - } - } - - /* - * Finds all nodes in targets that are reachable from start_idx, nodes in visited will be skipped. - * For this search the graph is treated as being undirected. - * - * Connected targets will be added to connected and removed from targets. All visited nodes - * will be added to visited. - */ - void find_connected_nodes (unsigned start_idx, - hb_set_t& targets, - hb_set_t& visited, - hb_set_t& connected) - { - if (unlikely (!check_success (!visited.in_error ()))) return; - if (visited.has (start_idx)) return; - visited.add (start_idx); - - if (targets.has (start_idx)) - { - targets.del (start_idx); - connected.add (start_idx); - } - - const auto& v = vertices_[start_idx]; - - // Graph is treated as undirected so search children and parents of start_idx - for (const auto& l : v.obj.all_links ()) - find_connected_nodes (l.objidx, targets, visited, connected); - - for (unsigned p : v.parents) - find_connected_nodes (p, targets, visited, connected); - } - - public: - // TODO(garretrieger): make private, will need to move most of offset overflow code into graph. 
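The `update_distances ()` implementation note above describes the classic "lazy deletion" workaround for priority queues that lack a fast decrease-key operation: push a duplicate entry whenever a distance improves, and skip stale pops via a visited set. A minimal standalone sketch of that pattern, with hypothetical names (this is not HarfBuzz's `hb_priority_queue_t`; `std::priority_queue` is used for brevity):

```cpp
#include <cstdint>
#include <queue>
#include <utility>
#include <vector>

struct Edge { unsigned to; int64_t weight; };

std::vector<int64_t> shortest_distances (const std::vector<std::vector<Edge>>& adj,
                                         unsigned source)
{
  std::vector<int64_t> dist (adj.size (), INT64_MAX);
  std::vector<bool> visited (adj.size (), false);
  // Min-heap of (distance, node); duplicate entries are allowed by design.
  using entry = std::pair<int64_t, unsigned>;
  std::priority_queue<entry, std::vector<entry>, std::greater<entry>> queue;

  dist[source] = 0;
  queue.push ({0, source});
  while (!queue.empty ())
  {
    auto [d, n] = queue.top (); queue.pop ();
    if (visited[n]) continue;   // Stale duplicate: this node is already settled.
    visited[n] = true;
    for (const auto& e : adj[n])
      if (d + e.weight < dist[e.to])
      {
        dist[e.to] = d + e.weight;
        queue.push ({dist[e.to], e.to}); // "Decrease key" = push a fresh entry.
      }
  }
  return dist;
}
```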
- hb_vector_t<vertex_t> vertices_; - hb_vector_t<vertex_t> vertices_scratch_; - private: - bool parents_invalid; - bool distance_invalid; - bool positions_invalid; - bool successful; - hb_vector_t<unsigned> num_roots_for_space_; -}; static inline -bool _try_isolating_subgraphs (const hb_vector_t<graph_t::overflow_record_t>& overflows, +bool _try_isolating_subgraphs (const hb_vector_t<graph::overflow_record_t>& overflows, graph_t& sorted_graph) { unsigned space = 0; @@ -1066,7 +51,7 @@ bool _try_isolating_subgraphs (const hb_vector_t<graph_t::overflow_record_t>& ov for (int i = overflows.length - 1; i >= 0; i--) { - const graph_t::overflow_record_t& r = overflows[i]; + const graph::overflow_record_t& r = overflows[i]; unsigned root; unsigned overflow_space = sorted_graph.space_for (r.parent, &root); @@ -1108,7 +93,7 @@ bool _try_isolating_subgraphs (const hb_vector_t<graph_t::overflow_record_t>& ov } static inline -bool _process_overflows (const hb_vector_t<graph_t::overflow_record_t>& overflows, +bool _process_overflows (const hb_vector_t<graph::overflow_record_t>& overflows, hb_set_t& priority_bumped_parents, graph_t& sorted_graph) { @@ -1117,7 +102,7 @@ bool _process_overflows (const hb_vector_t<graph_t::overflow_record_t>& overflow // Try resolving the furthest overflows first. for (int i = overflows.length - 1; i >= 0; i--) { - const graph_t::overflow_record_t& r = overflows[i]; + const graph::overflow_record_t& r = overflows[i]; const auto& child = sorted_graph.vertices_[r.child]; if (child.is_shared ()) { @@ -1173,19 +158,18 @@ inline hb_blob_t* hb_resolve_overflows (const T& packed, hb_tag_t table_tag, unsigned max_rounds = 20) { - // Kahn sort is ~twice as fast as shortest distance sort and works for many fonts - // so try it first to save time. graph_t sorted_graph (packed); - if (!sorted_graph.will_overflow ()) + sorted_graph.sort_shortest_distance (); + + bool will_overflow = graph::will_overflow (sorted_graph); + if (!will_overflow) { - return sorted_graph.serialize (); + return graph::serialize (sorted_graph); } - sorted_graph.sort_shortest_distance (); - if ((table_tag == HB_OT_TAG_GPOS || table_tag == HB_OT_TAG_GSUB) - && sorted_graph.will_overflow ()) + && will_overflow) { DEBUG_MSG (SUBSET_REPACK, nullptr, "Assigning spaces to 32 bit subgraphs."); if (sorted_graph.assign_32bit_spaces ()) @@ -1193,13 +177,13 @@ hb_resolve_overflows (const T& packed, } unsigned round = 0; - hb_vector_t<graph_t::overflow_record_t> overflows; + hb_vector_t<graph::overflow_record_t> overflows; // TODO(garretrieger): select a good limit for max rounds. 
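The loop that follows is the heart of the repacker: each round asks `graph::will_overflow ()` for the list of overflowing offsets, tries subgraph isolation and then per-overflow fixes, and re-sorts before checking again, giving up after `max_rounds`. The sketch after the `hb-set-digest.hh` hunk below illustrates a related filtering idea; the round structure itself is visible directly in the code here.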
while (!sorted_graph.in_error () - && sorted_graph.will_overflow (&overflows) + && graph::will_overflow (sorted_graph, &overflows) && round++ < max_rounds) { DEBUG_MSG (SUBSET_REPACK, nullptr, "=== Overflow resolution round %d ===", round); - sorted_graph.print_overflows (overflows); + print_overflows (sorted_graph, overflows); hb_set_t priority_bumped_parents; @@ -1221,13 +205,13 @@ hb_resolve_overflows (const T& packed, return nullptr; } - if (sorted_graph.will_overflow ()) + if (graph::will_overflow (sorted_graph)) { DEBUG_MSG (SUBSET_REPACK, nullptr, "Offset overflow resolution failed."); return nullptr; } - return sorted_graph.serialize (); + return graph::serialize (sorted_graph); } #endif /* HB_REPACKER_HH */ diff --git a/thirdparty/harfbuzz/src/hb-serialize.hh b/thirdparty/harfbuzz/src/hb-serialize.hh index 5663b290c3..cecdcdeb74 100644 --- a/thirdparty/harfbuzz/src/hb-serialize.hh +++ b/thirdparty/harfbuzz/src/hb-serialize.hh @@ -695,7 +695,7 @@ struct hb_serialize_context_t check_assign (off, offset, HB_SERIALIZE_ERROR_OFFSET_OVERFLOW); } - public: /* TODO Make private. */ + public: char *start, *head, *tail, *end; unsigned int debug_depth; hb_serialize_error_t errors; @@ -719,9 +719,7 @@ struct hb_serialize_context_t hb_vector_t<object_t *> packed; /* Map view of packed objects. */ - hb_hashmap_t<const object_t *, objidx_t, - const object_t *, objidx_t, - nullptr, 0> packed_map; + hb_hashmap_t<const object_t *, objidx_t> packed_map; }; #endif /* HB_SERIALIZE_HH */ diff --git a/thirdparty/harfbuzz/src/hb-set-digest.hh b/thirdparty/harfbuzz/src/hb-set-digest.hh index 7d4979b73b..fab36216e4 100644 --- a/thirdparty/harfbuzz/src/hb-set-digest.hh +++ b/thirdparty/harfbuzz/src/hb-set-digest.hh @@ -30,7 +30,7 @@ #include "hb.hh" /* - * The set digests here implement various "filters" that support + * The set-digests here implement various "filters" that support * "approximate member query". Conceptually these are like Bloom * Filter and Quotient Filter, however, much smaller, faster, and * designed to fit the requirements of our uses for glyph coverage @@ -40,13 +40,25 @@ * set of glyphs, but fully flooded and ineffective if coverage is * all over the place. * - * The frozen-set can be used instead of a digest, to trade more - * memory for 100% accuracy, but in practice, that doesn't look like - * an attractive trade-off. + * The way these are used is that the filter is first populated by + * a lookup's or subtable's Coverage table(s), and then when we + * want to apply the lookup or subtable to a glyph, before trying + * to apply, we ask the filter if the glyph may be covered. If it's + * not, we return early. + + * We use these filters both at the lookup-level, and then again, + * at the subtable-level. Both give a performance win. + + * The main filter we use is a combination of three bits-pattern + * filters. A bits-pattern filter checks a number of bits (5 or 6) + * of the input number (glyph-id in this case) and checks whether + * its pattern is amongst the patterns of any of the accepted values. + * The accepted patterns are represented as a "long" integer. The + * check is done using four bitwise operations only.
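+ *
+ * In sketch form (illustrative, not the exact code below): with
+ * mask_t = uint64_t and shift = 0, a glyph g maps to bit
+ * (g >> shift) & (mask_bits - 1) of the mask, so
+ *
+ *   add:      mask |= mask_t (1) << ((g >> shift) & (mask_bits - 1));
+ *   may_have: mask &  mask_t (1) << ((g >> shift) & (mask_bits - 1));
+ *
+ * a shift, an and, a shift, and an and: the four bitwise operations
+ * mentioned above. False positives are possible; false negatives are not.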
*/ template <typename mask_t, unsigned int shift> -struct hb_set_digest_lowest_bits_t +struct hb_set_digest_bits_pattern_t { static constexpr unsigned mask_bytes = sizeof (mask_t); static constexpr unsigned mask_bits = sizeof (mask_t) * 8; @@ -102,7 +114,7 @@ struct hb_set_digest_lowest_bits_t bool add_sorted_array (const hb_sorted_array_t<const T>& arr) { return add_sorted_array (&arr, arr.len ()); } bool may_have (hb_codepoint_t g) const - { return !!(mask & mask_for (g)); } + { return mask & mask_for (g); } private: @@ -171,11 +183,11 @@ struct hb_set_digest_combiner_t using hb_set_digest_t = hb_set_digest_combiner_t < - hb_set_digest_lowest_bits_t<unsigned long, 4>, + hb_set_digest_bits_pattern_t<unsigned long, 4>, hb_set_digest_combiner_t < - hb_set_digest_lowest_bits_t<unsigned long, 0>, - hb_set_digest_lowest_bits_t<unsigned long, 9> + hb_set_digest_bits_pattern_t<unsigned long, 0>, + hb_set_digest_bits_pattern_t<unsigned long, 9> > > ; diff --git a/thirdparty/harfbuzz/src/hb-set.cc b/thirdparty/harfbuzz/src/hb-set.cc index 0e2c1f77ef..2d458294f8 100644 --- a/thirdparty/harfbuzz/src/hb-set.cc +++ b/thirdparty/harfbuzz/src/hb-set.cc @@ -40,7 +40,7 @@ /** - * hb_set_create: (Xconstructor) + * hb_set_create: * * Creates a new, initially empty set. * @@ -186,6 +186,7 @@ hb_set_t * hb_set_copy (const hb_set_t *set) { hb_set_t *copy = hb_set_create (); + if (unlikely (!copy)) return nullptr; copy->set (*set); return copy; } @@ -359,6 +360,23 @@ hb_set_is_equal (const hb_set_t *set, } /** + * hb_set_hash: + * @set: A set + * + * Creates a hash representing @set. + * + * Return value: + * A hash of @set. + * + * Since: 4.4.0 + **/ +HB_EXTERN unsigned int +hb_set_hash (const hb_set_t *set) +{ + return set->hash (); +} + +/** * hb_set_is_subset: * @set: A set * @larger_set: Another set diff --git a/thirdparty/harfbuzz/src/hb-set.h b/thirdparty/harfbuzz/src/hb-set.h index 10ce7c10d4..56902c267e 100644 --- a/thirdparty/harfbuzz/src/hb-set.h +++ b/thirdparty/harfbuzz/src/hb-set.h @@ -128,6 +128,9 @@ HB_EXTERN hb_bool_t hb_set_is_equal (const hb_set_t *set, const hb_set_t *other); +HB_EXTERN unsigned int +hb_set_hash (const hb_set_t *set); + HB_EXTERN hb_bool_t hb_set_is_subset (const hb_set_t *set, const hb_set_t *larger_set); diff --git a/thirdparty/harfbuzz/src/hb-set.hh b/thirdparty/harfbuzz/src/hb-set.hh index 6025626363..7eb5e19a2a 100644 --- a/thirdparty/harfbuzz/src/hb-set.hh +++ b/thirdparty/harfbuzz/src/hb-set.hh @@ -166,7 +166,6 @@ struct hb_set_t : hb_sparseset_t<hb_bit_set_invertible_t> ~hb_set_t () = default; hb_set_t () : sparseset () {}; - hb_set_t (std::nullptr_t) : hb_set_t () {}; hb_set_t (const hb_set_t &o) : sparseset ((sparseset &) o) {}; hb_set_t (hb_set_t&& o) : sparseset (std::move ((sparseset &) o)) {} hb_set_t& operator = (const hb_set_t&) = default; diff --git a/thirdparty/harfbuzz/src/hb-shape-plan.cc b/thirdparty/harfbuzz/src/hb-shape-plan.cc index 66332165c3..0af07825fc 100644 --- a/thirdparty/harfbuzz/src/hb-shape-plan.cc +++ b/thirdparty/harfbuzz/src/hb-shape-plan.cc @@ -170,7 +170,7 @@ hb_shape_plan_key_t::equal (const hb_shape_plan_key_t *other) /** - * hb_shape_plan_create: (Xconstructor) + * hb_shape_plan_create: * @face: #hb_face_t to use * @props: The #hb_segment_properties_t of the segment * @user_features: (array length=num_user_features): The list of user-selected features @@ -198,7 +198,7 @@ hb_shape_plan_create (hb_face_t *face, } /** - * hb_shape_plan_create2: (Xconstructor) + * hb_shape_plan_create2: * @face: #hb_face_t to use * @props: The 
#hb_segment_properties_t of the segment * @user_features: (array length=num_user_features): The list of user-selected features @@ -231,7 +231,8 @@ hb_shape_plan_create2 (hb_face_t *face, num_coords, shaper_list); - assert (props->direction != HB_DIRECTION_INVALID); + if (unlikely (props->direction == HB_DIRECTION_INVALID)) + return hb_shape_plan_get_empty (); hb_shape_plan_t *shape_plan; diff --git a/thirdparty/harfbuzz/src/hb-shape.cc b/thirdparty/harfbuzz/src/hb-shape.cc index 3407e1af42..14ec92828f 100644 --- a/thirdparty/harfbuzz/src/hb-shape.cc +++ b/thirdparty/harfbuzz/src/hb-shape.cc @@ -50,7 +50,7 @@ static inline void free_static_shaper_list (); -static const char *nil_shaper_list[] = {nullptr}; +static const char * const nil_shaper_list[] = {nullptr}; static struct hb_shaper_list_lazy_loader_t : hb_lazy_loader_t<const char *, hb_shaper_list_lazy_loader_t> @@ -73,7 +73,7 @@ static struct hb_shaper_list_lazy_loader_t : hb_lazy_loader_t<const char *, } static void destroy (const char **l) { hb_free (l); } - static const char ** get_null () + static const char * const * get_null () { return nil_shaper_list; } } static_shaper_list; @@ -126,6 +126,11 @@ hb_shape_full (hb_font_t *font, unsigned int num_features, const char * const *shaper_list) { + if (unlikely (!buffer->len)) + return true; + + buffer->enter (); + hb_buffer_t *text_buffer = nullptr; if (buffer->flags & HB_BUFFER_FLAG_VERIFY) { @@ -137,12 +142,19 @@ hb_shape_full (hb_font_t *font, features, num_features, font->coords, font->num_coords, shaper_list); + hb_bool_t res = hb_shape_plan_execute (shape_plan, font, buffer, features, num_features); + + if (buffer->max_ops <= 0) + buffer->shaping_failed = true; + hb_shape_plan_destroy (shape_plan); if (text_buffer) { - if (res && !buffer->verify (text_buffer, + if (res && buffer->successful && !buffer->shaping_failed + && text_buffer->successful + && !buffer->verify (text_buffer, font, features, num_features, @@ -151,6 +163,8 @@ hb_shape_full (hb_font_t *font, hb_buffer_destroy (text_buffer); } + buffer->leave (); + return res; } diff --git a/thirdparty/harfbuzz/src/hb-shaper.cc b/thirdparty/harfbuzz/src/hb-shaper.cc index a11ed83afd..da4253ed64 100644 --- a/thirdparty/harfbuzz/src/hb-shaper.cc +++ b/thirdparty/harfbuzz/src/hb-shaper.cc @@ -29,18 +29,18 @@ #include "hb-machinery.hh" -static const hb_shaper_entry_t all_shapers[] = { +static const hb_shaper_entry_t _hb_all_shapers[] = { #define HB_SHAPER_IMPLEMENT(name) {#name, _hb_##name##_shape}, #include "hb-shaper-list.hh" #undef HB_SHAPER_IMPLEMENT }; #ifndef HB_NO_SHAPER -static_assert (0 != ARRAY_LENGTH_CONST (all_shapers), "No shaper enabled."); +static_assert (0 != ARRAY_LENGTH_CONST (_hb_all_shapers), "No shaper enabled."); #endif static inline void free_static_shapers (); -static struct hb_shapers_lazy_loader_t : hb_lazy_loader_t<const hb_shaper_entry_t, +static struct hb_shapers_lazy_loader_t : hb_lazy_loader_t<hb_shaper_entry_t, hb_shapers_lazy_loader_t> { static hb_shaper_entry_t *create () @@ -49,11 +49,11 @@ static struct hb_shapers_lazy_loader_t : hb_lazy_loader_t<const hb_shaper_entry_ if (!env || !*env) return nullptr; - hb_shaper_entry_t *shapers = (hb_shaper_entry_t *) hb_calloc (1, sizeof (all_shapers)); + hb_shaper_entry_t *shapers = (hb_shaper_entry_t *) hb_calloc (1, sizeof (_hb_all_shapers)); if (unlikely (!shapers)) return nullptr; - memcpy (shapers, all_shapers, sizeof (all_shapers)); + memcpy (shapers, _hb_all_shapers, sizeof (_hb_all_shapers)); /* Reorder shaper list to prefer requested shapers. 
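     For example, HB_SHAPER_LIST=graphite2,ot moves graphite2 and then ot
     to the front of the list, leaving the remaining shapers after them.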
*/ unsigned int i = 0; @@ -64,7 +64,7 @@ static struct hb_shapers_lazy_loader_t : hb_lazy_loader_t<const hb_shaper_entry_ if (!end) end = p + strlen (p); - for (unsigned int j = i; j < ARRAY_LENGTH (all_shapers); j++) + for (unsigned int j = i; j < ARRAY_LENGTH (_hb_all_shapers); j++) if (end - p == (int) strlen (shapers[j].name) && 0 == strncmp (shapers[j].name, p, end - p)) { @@ -85,8 +85,8 @@ static struct hb_shapers_lazy_loader_t : hb_lazy_loader_t<const hb_shaper_entry_ return shapers; } - static void destroy (const hb_shaper_entry_t *p) { hb_free ((void *) p); } - static const hb_shaper_entry_t *get_null () { return all_shapers; } + static void destroy (hb_shaper_entry_t *p) { hb_free (p); } + static const hb_shaper_entry_t *get_null () { return _hb_all_shapers; } } static_shapers; static inline diff --git a/thirdparty/harfbuzz/src/hb-static.cc b/thirdparty/harfbuzz/src/hb-static.cc index 7cc51be611..5c5ecce880 100644 --- a/thirdparty/harfbuzz/src/hb-static.cc +++ b/thirdparty/harfbuzz/src/hb-static.cc @@ -53,6 +53,9 @@ DEFINE_NULL_NAMESPACE_BYTES (AAT, SettingName) = {0xFF,0xFF, 0xFF,0xFF}; const unsigned char _hb_Null_AAT_Lookup[2] = {0xFF, 0xFF}; +/* hb_map_t */ + +const hb_codepoint_t minus_1 = -1; /* hb_face_t */ diff --git a/thirdparty/harfbuzz/src/hb-subset-cff-common.hh b/thirdparty/harfbuzz/src/hb-subset-cff-common.hh index ae155b4e3c..bb9f27eec1 100644 --- a/thirdparty/harfbuzz/src/hb-subset-cff-common.hh +++ b/thirdparty/harfbuzz/src/hb-subset-cff-common.hh @@ -44,7 +44,8 @@ struct str_encoder_t void encode_byte (unsigned char b) { - if (unlikely (buff.push (b) == &Crap (unsigned char))) + buff.push (b); + if (unlikely (buff.in_error ())) set_error (); } diff --git a/thirdparty/harfbuzz/src/hb-subset-input.cc b/thirdparty/harfbuzz/src/hb-subset-input.cc index 4885280996..028ddf9035 100644 --- a/thirdparty/harfbuzz/src/hb-subset-input.cc +++ b/thirdparty/harfbuzz/src/hb-subset-input.cc @@ -140,7 +140,20 @@ hb_subset_input_create_or_fail (void) HB_TAG ('r', 't', 'l', 'a'), HB_TAG ('r', 't', 'l', 'm'), - //Complex shapers + //random + HB_TAG ('r', 'a', 'n', 'd'), + + //justify + HB_TAG ('j', 'a', 'l', 't'), // HarfBuzz doesn't use; others might + + //private + HB_TAG ('H', 'a', 'r', 'f'), + HB_TAG ('H', 'A', 'R', 'F'), + HB_TAG ('B', 'u', 'z', 'z'), + HB_TAG ('B', 'U', 'Z', 'Z'), + + //shapers + //arabic HB_TAG ('i', 'n', 'i', 't'), HB_TAG ('m', 'e', 'd', 'i'), diff --git a/thirdparty/harfbuzz/src/hb-subset-plan.cc b/thirdparty/harfbuzz/src/hb-subset-plan.cc index a62ae8e024..4e3bb1d477 100644 --- a/thirdparty/harfbuzz/src/hb-subset-plan.cc +++ b/thirdparty/harfbuzz/src/hb-subset-plan.cc @@ -41,9 +41,9 @@ #include "hb-ot-math-table.hh" using OT::Layout::GSUB::GSUB; +using OT::Layout::GPOS; - -typedef hb_hashmap_t<unsigned, hb_set_t *> script_langsys_map; +typedef hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> script_langsys_map; #ifndef HB_NO_SUBSET_CFF static inline void _add_cff_seac_components (const OT::cff1::accelerator_t &cff, @@ -204,7 +204,7 @@ static inline void hb_map_t *layout_variation_idx_map) { hb_blob_ptr_t<OT::GDEF> gdef = hb_sanitize_context_t ().reference_table<OT::GDEF> (face); - hb_blob_ptr_t<OT::GPOS> gpos = hb_sanitize_context_t ().reference_table<OT::GPOS> (face); + hb_blob_ptr_t<GPOS> gpos = hb_sanitize_context_t ().reference_table<GPOS> (face); if (!gdef->has_data ()) { @@ -347,13 +347,42 @@ _populate_unicodes_to_retain (const hb_set_t *unicodes, } } +#ifndef HB_COMPOSITE_OPERATIONS_PER_GLYPH +#define HB_COMPOSITE_OPERATIONS_PER_GLYPH 64 +#endif + 
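+// This cap bounds the composite-glyph closure performed by
+// _glyf_add_gid_and_children () below: the call site passes a total budget of
+// glyph-set population * HB_COMPOSITE_OPERATIONS_PER_GLYPH, so fonts with
+// pathologically deep or wide composites cannot blow up subsetting time.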
+static unsigned +_glyf_add_gid_and_children (const OT::glyf_accelerator_t &glyf, + hb_codepoint_t gid, + hb_set_t *gids_to_retain, + int operation_count, + unsigned depth = 0) +{ + if (unlikely (depth++ > HB_MAX_NESTING_LEVEL)) return operation_count; + if (unlikely (--operation_count < 0)) return operation_count; + /* Check if is already visited */ + if (gids_to_retain->has (gid)) return operation_count; + + gids_to_retain->add (gid); + + for (auto item : glyf.glyph_for_gid (gid).get_composite_iterator ()) + operation_count = + _glyf_add_gid_and_children (glyf, + item.glyphIndex, + gids_to_retain, + operation_count, + depth); + + return operation_count; +} + static void _populate_gids_to_retain (hb_subset_plan_t* plan, bool close_over_gsub, bool close_over_gpos, bool close_over_gdef) { - OT::glyf::accelerator_t glyf (plan->source); + OT::glyf_accelerator_t glyf (plan->source); #ifndef HB_NO_SUBSET_CFF OT::cff1::accelerator_t cff (plan->source); #endif @@ -374,7 +403,7 @@ _populate_gids_to_retain (hb_subset_plan_t* plan, plan->gsub_langsys); if (close_over_gpos) - _closure_glyphs_lookups_features<OT::GPOS> ( + _closure_glyphs_lookups_features<GPOS> ( plan->source, plan->_glyphset_gsub, plan->layout_features, @@ -398,7 +427,8 @@ _populate_gids_to_retain (hb_subset_plan_t* plan, * composite glyphs. */ if (glyf.has_data ()) for (hb_codepoint_t gid : cur_glyphset) - glyf.add_gid_and_children (gid, plan->_glyphset); + _glyf_add_gid_and_children (glyf, gid, plan->_glyphset, + cur_glyphset.get_population () * HB_COMPOSITE_OPERATIONS_PER_GLYPH); else plan->_glyphset->union_ (cur_glyphset); #ifndef HB_NO_SUBSET_CFF @@ -630,9 +660,6 @@ hb_subset_plan_destroy (hb_subset_plan_t *plan) if (plan->gsub_langsys) { - for (auto _ : plan->gsub_langsys->iter ()) - hb_set_destroy (_.second); - hb_object_destroy (plan->gsub_langsys); plan->gsub_langsys->fini_shallow (); hb_free (plan->gsub_langsys); @@ -640,9 +667,6 @@ hb_subset_plan_destroy (hb_subset_plan_t *plan) if (plan->gpos_langsys) { - for (auto _ : plan->gpos_langsys->iter ()) - hb_set_destroy (_.second); - hb_object_destroy (plan->gpos_langsys); plan->gpos_langsys->fini_shallow (); hb_free (plan->gpos_langsys); diff --git a/thirdparty/harfbuzz/src/hb-subset-plan.hh b/thirdparty/harfbuzz/src/hb-subset-plan.hh index cb567b769e..2aaf19c61d 100644 --- a/thirdparty/harfbuzz/src/hb-subset-plan.hh +++ b/thirdparty/harfbuzz/src/hb-subset-plan.hh @@ -87,8 +87,8 @@ struct hb_subset_plan_t hb_map_t *gpos_lookups; //active langsys we'd like to retain - hb_hashmap_t<unsigned, hb_set_t *> *gsub_langsys; - hb_hashmap_t<unsigned, hb_set_t *> *gpos_langsys; + hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> *gsub_langsys; + hb_hashmap_t<unsigned, hb::unique_ptr<hb_set_t>> *gpos_langsys; //active features after removing redundant langsys and prune_features hb_map_t *gsub_features; diff --git a/thirdparty/harfbuzz/src/hb-subset.cc b/thirdparty/harfbuzz/src/hb-subset.cc index 31ddb4f894..10c572c2f7 100644 --- a/thirdparty/harfbuzz/src/hb-subset.cc +++ b/thirdparty/harfbuzz/src/hb-subset.cc @@ -56,6 +56,7 @@ #include "hb-repacker.hh" using OT::Layout::GSUB::GSUB; +using OT::Layout::GPOS; /** * SECTION:hb-subset @@ -78,6 +79,108 @@ using OT::Layout::GSUB::GSUB; * retain glyph ids option and configure the subset to pass through the layout tables untouched. */ +/* + * The list of tables in the open type spec. Used to check for tables that may need handling + * if we are unable to list the tables in a face. 
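+ * A face constructed with hb_face_create_for_tables (), i.e. backed by a
+ * table callback rather than a blob, is the typical face that cannot
+ * enumerate its own table tags.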
+ */ +static hb_tag_t known_tables[] { + HB_TAG ('a', 'v', 'a', 'r'), + HB_OT_TAG_BASE, + HB_OT_TAG_CBDT, + HB_OT_TAG_CBLC, + HB_OT_TAG_cff1, + HB_OT_TAG_cff2, + HB_OT_TAG_cmap, + HB_OT_TAG_COLR, + HB_OT_TAG_CPAL, + HB_TAG ('c', 'v', 'a', 'r'), + HB_TAG ('c', 'v', 't', ' '), + HB_TAG ('D', 'S', 'I', 'G'), + HB_TAG ('E', 'B', 'D', 'T'), + HB_TAG ('E', 'B', 'L', 'C'), + HB_TAG ('E', 'B', 'S', 'C'), + HB_TAG ('f', 'p', 'g', 'm'), + HB_TAG ('f', 'v', 'a', 'r'), + HB_TAG ('g', 'a', 's', 'p'), + HB_OT_TAG_GDEF, + HB_OT_TAG_glyf, + HB_OT_TAG_GPOS, + HB_OT_TAG_GSUB, + HB_OT_TAG_gvar, + HB_OT_TAG_hdmx, + HB_OT_TAG_head, + HB_OT_TAG_hhea, + HB_OT_TAG_hmtx, + HB_OT_TAG_HVAR, + HB_OT_TAG_JSTF, + HB_TAG ('k', 'e', 'r', 'n'), + HB_OT_TAG_loca, + HB_TAG ('L', 'T', 'S', 'H'), + HB_OT_TAG_MATH, + HB_OT_TAG_maxp, + HB_TAG ('M', 'E', 'R', 'G'), + HB_TAG ('m', 'e', 't', 'a'), + HB_TAG ('M', 'V', 'A', 'R'), + HB_TAG ('P', 'C', 'L', 'T'), + HB_OT_TAG_post, + HB_TAG ('p', 'r', 'e', 'p'), + HB_OT_TAG_sbix, + HB_TAG ('S', 'T', 'A', 'T'), + HB_TAG ('S', 'V', 'G', ' '), + HB_TAG ('V', 'D', 'M', 'X'), + HB_OT_TAG_vhea, + HB_OT_TAG_vmtx, + HB_OT_TAG_VORG, + HB_OT_TAG_VVAR, + HB_OT_TAG_name, + HB_OT_TAG_OS2 +}; + +static bool _table_is_empty (const hb_face_t *face, hb_tag_t tag) +{ + hb_blob_t* blob = hb_face_reference_table (face, tag); + bool result = (blob == hb_blob_get_empty ()); + hb_blob_destroy (blob); + return result; +} + +static unsigned int +_get_table_tags (const hb_subset_plan_t* plan, + unsigned int start_offset, + unsigned int *table_count, /* IN/OUT */ + hb_tag_t *table_tags /* OUT */) +{ + unsigned num_tables = hb_face_get_table_tags (plan->source, 0, nullptr, nullptr); + if (num_tables) + return hb_face_get_table_tags (plan->source, start_offset, table_count, table_tags); + + // If face has 0 tables associated with it, assume that it was built from + // hb_face_create_tables and thus is unable to list its tables. Fallback to + // checking each table type we can handle for existence instead. + auto it = + hb_concat ( + + hb_array (known_tables) + | hb_filter ([&] (hb_tag_t tag) { + return !_table_is_empty (plan->source, tag) && !plan->no_subset_tables->has (tag); + }) + | hb_map ([] (hb_tag_t tag) -> hb_tag_t { return tag; }), + + plan->no_subset_tables->iter () + | hb_filter([&] (hb_tag_t tag) { + return !_table_is_empty (plan->source, tag); + })); + + it += start_offset; + + unsigned num_written = 0; + while (bool (it) && num_written < *table_count) + table_tags[num_written++] = *it++; + + *table_count = num_written; + return num_written; +} + + static unsigned _plan_estimate_subset_table_size (hb_subset_plan_t *plan, unsigned table_len, @@ -224,9 +327,17 @@ _subset (hb_subset_plan_t *plan, hb_vector_t<char> &buf) static bool _is_table_present (hb_face_t *source, hb_tag_t tag) { + + if (!hb_face_get_table_tags (source, 0, nullptr, nullptr)) { + // If face has 0 tables associated with it, assume that it was built from + // hb_face_create_tables and thus is unable to list its tables. Fallback to + // checking if the blob associated with tag is empty. 
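+  // (hb_face_reference_table () hands back the singleton empty blob when a
+  // table is missing, which is exactly what _table_is_empty () tests for.)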
+ return !_table_is_empty (source, tag); + } + hb_tag_t table_tags[32]; unsigned offset = 0, num_tables = ARRAY_LENGTH (table_tags); - while ((hb_face_get_table_tags (source, offset, &num_tables, table_tags), num_tables)) + while (((void) hb_face_get_table_tags (source, offset, &num_tables, table_tags), num_tables)) { for (unsigned i = 0; i < num_tables; ++i) if (table_tags[i] == tag) @@ -322,7 +433,7 @@ _subset_table (hb_subset_plan_t *plan, #ifndef HB_NO_SUBSET_LAYOUT case HB_OT_TAG_GDEF: return _subset<const OT::GDEF> (plan, buf); case HB_OT_TAG_GSUB: return _subset<const GSUB> (plan, buf); - case HB_OT_TAG_GPOS: return _subset<const OT::GPOS> (plan, buf); + case HB_OT_TAG_GPOS: return _subset<const GPOS> (plan, buf); case HB_OT_TAG_gvar: return _subset<const OT::gvar> (plan, buf); case HB_OT_TAG_HVAR: return _subset<const OT::HVAR> (plan, buf); case HB_OT_TAG_VVAR: return _subset<const OT::VVAR> (plan, buf); @@ -388,7 +499,8 @@ hb_subset_plan_execute_or_fail (hb_subset_plan_t *plan) unsigned offset = 0, num_tables = ARRAY_LENGTH (table_tags); hb_vector_t<char> buf; buf.alloc (4096 - 16); - while ((hb_face_get_table_tags (plan->source, offset, &num_tables, table_tags), num_tables)) + + while (((void) _get_table_tags (plan, offset, &num_tables, table_tags), num_tables)) { for (unsigned i = 0; i < num_tables; ++i) { @@ -400,7 +512,7 @@ hb_subset_plan_execute_or_fail (hb_subset_plan_t *plan) } offset += num_tables; } -end: +end: return success ? hb_face_reference (plan->dest) : nullptr; } diff --git a/thirdparty/harfbuzz/src/hb-ucd-table.hh b/thirdparty/harfbuzz/src/hb-ucd-table.hh index 1a4c89c17f..9e76ca460b 100644 --- a/thirdparty/harfbuzz/src/hb-ucd-table.hh +++ b/thirdparty/harfbuzz/src/hb-ucd-table.hh @@ -13,7 +13,7 @@ #include "hb.hh" static const hb_script_t -_hb_ucd_sc_map[162] = +_hb_ucd_sc_map[163] = { HB_SCRIPT_COMMON, HB_SCRIPT_INHERITED, HB_SCRIPT_UNKNOWN, HB_SCRIPT_ARABIC, @@ -96,6 +96,7 @@ _hb_ucd_sc_map[162] = HB_SCRIPT_YEZIDI, HB_SCRIPT_CYPRO_MINOAN, HB_SCRIPT_OLD_UYGHUR, HB_SCRIPT_TANGSA, HB_SCRIPT_TOTO, HB_SCRIPT_VITHKUQI, + HB_SCRIPT_MATH, }; static const uint16_t _hb_ucd_dm1_p0_map[825] = diff --git a/thirdparty/harfbuzz/src/hb-unicode.cc b/thirdparty/harfbuzz/src/hb-unicode.cc index 83ead6398b..05f74b9237 100644 --- a/thirdparty/harfbuzz/src/hb-unicode.cc +++ b/thirdparty/harfbuzz/src/hb-unicode.cc @@ -169,7 +169,7 @@ hb_unicode_funcs_get_default () #endif /** - * hb_unicode_funcs_create: (Xconstructor) + * hb_unicode_funcs_create: * @parent: (nullable): Parent Unicode-functions structure * * Creates a new #hb_unicode_funcs_t structure of Unicode functions. diff --git a/thirdparty/harfbuzz/src/hb-unicode.hh b/thirdparty/harfbuzz/src/hb-unicode.hh index 4c28bb0cdf..39aaee5baa 100644 --- a/thirdparty/harfbuzz/src/hb-unicode.hh +++ b/thirdparty/harfbuzz/src/hb-unicode.hh @@ -105,12 +105,9 @@ HB_UNICODE_FUNCS_IMPLEMENT_CALLBACKS_SIMPLE unsigned int modified_combining_class (hb_codepoint_t u) { - /* XXX This hack belongs to the USE shaper (for Tai Tham): - * Reorder SAKOT to ensure it comes after any tone marks. */ + /* Reorder SAKOT to ensure it comes after any tone marks. */ if (unlikely (u == 0x1A60u)) return 254; - - /* XXX This hack belongs to the Tibetan shaper: - * Reorder PADMA to ensure it comes after any vowel marks. */ + /* Reorder PADMA to ensure it comes after any vowel marks. 
*/ if (unlikely (u == 0x0FC6u)) return 254; /* Reorder TSA -PHRU to reorder before U+0F74 */ if (unlikely (u == 0x0F39u)) return 127; diff --git a/thirdparty/harfbuzz/src/hb-version.h b/thirdparty/harfbuzz/src/hb-version.h index ae707cde6c..f036a12226 100644 --- a/thirdparty/harfbuzz/src/hb-version.h +++ b/thirdparty/harfbuzz/src/hb-version.h @@ -47,20 +47,20 @@ HB_BEGIN_DECLS * * The minor component of the library version available at compile-time. */ -#define HB_VERSION_MINOR 3 +#define HB_VERSION_MINOR 4 /** * HB_VERSION_MICRO: * * The micro component of the library version available at compile-time. */ -#define HB_VERSION_MICRO 0 +#define HB_VERSION_MICRO 1 /** * HB_VERSION_STRING: * * A string literal containing the library version available at compile-time. */ -#define HB_VERSION_STRING "4.3.0" +#define HB_VERSION_STRING "4.4.1" /** * HB_VERSION_ATLEAST: diff --git a/thirdparty/harfbuzz/src/hb.hh b/thirdparty/harfbuzz/src/hb.hh index b9f5f71415..8ec638a2b4 100644 --- a/thirdparty/harfbuzz/src/hb.hh +++ b/thirdparty/harfbuzz/src/hb.hh @@ -29,7 +29,6 @@ #ifndef HB_HH #define HB_HH - #ifndef HB_NO_PRAGMA_GCC_DIAGNOSTIC #ifdef _MSC_VER #pragma warning( disable: 4068 ) /* Unknown pragma */ @@ -65,6 +64,7 @@ #pragma GCC diagnostic error "-Wbitwise-instead-of-logical" #pragma GCC diagnostic error "-Wcast-align" #pragma GCC diagnostic error "-Wcast-function-type" +#pragma GCC diagnostic error "-Wcomma" #pragma GCC diagnostic error "-Wdelete-non-virtual-dtor" #pragma GCC diagnostic error "-Wembedded-directive" #pragma GCC diagnostic error "-Wextra-semi-stmt" @@ -183,7 +183,7 @@ #include <cassert> #include <cfloat> #include <climits> -#ifdef _MSC_VER +#if defined(_MSC_VER) && !defined(_USE_MATH_DEFINES) # define _USE_MATH_DEFINES #endif #include <cmath> @@ -470,6 +470,7 @@ static_assert ((sizeof (hb_var_int_t) == 4), ""); /* Headers we include for everyone. Keep topologically sorted by dependency. * They express dependency amongst themselves, but no other file should include * them directly.*/ +#include "hb-cplusplus.hh" #include "hb-meta.hh" #include "hb-mutex.hh" #include "hb-number.hh" diff --git a/thirdparty/jpeg-compressor/jpge.cpp b/thirdparty/jpeg-compressor/jpge.cpp new file mode 100644 index 0000000000..5a36c19653 --- /dev/null +++ b/thirdparty/jpeg-compressor/jpge.cpp @@ -0,0 +1,1076 @@ +// jpge.cpp - C++ class for JPEG compression. Richard Geldreich <richgel99@gmail.com> +// Supports grayscale, H1V1, H2V1, and H2V2 chroma subsampling factors, one or two pass Huffman table optimization, libjpeg-style quality 1-100 quality factors. +// Also supports using luma quantization tables for chroma. +// +// Released under two licenses. You are free to choose which license you want: +// License 1: +// Public Domain +// +// License 2: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// v1.01, Dec. 18, 2010 - Initial release +// v1.02, Apr. 6, 2011 - Removed 2x2 ordered dither in H2V1 chroma subsampling method load_block_16_8_8(). (The rounding factor was 2, when it should have been 1. 
Either way, it wasn't helping.) +// v1.03, Apr. 16, 2011 - Added support for optimized Huffman code tables, optimized dynamic memory allocation down to only 1 alloc. +// Also from Alex Evans: Added RGBA support, linear memory allocator (no longer needed in v1.03). +// v1.04, May. 19, 2012: Forgot to set m_pFile ptr to NULL in cfile_stream::close(). Thanks to Owen Kaluza for reporting this bug. +// Code tweaks to fix VS2008 static code analysis warnings (all looked harmless). +// Code review revealed method load_block_16_8_8() (used for the non-default H2V1 sampling mode to downsample chroma) somehow didn't get the rounding factor fix from v1.02. +// v1.05, March 25, 2020: Added Apache 2.0 alternate license + +#include "jpge.h" + +#include <stdlib.h> +#include <string.h> + +#define JPGE_MAX(a,b) (((a)>(b))?(a):(b)) +#define JPGE_MIN(a,b) (((a)<(b))?(a):(b)) + +namespace jpge { + + static inline void* jpge_malloc(size_t nSize) { return malloc(nSize); } + static inline void jpge_free(void* p) { free(p); } + + // Various JPEG enums and tables. + enum { M_SOF0 = 0xC0, M_DHT = 0xC4, M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_APP0 = 0xE0 }; + enum { DC_LUM_CODES = 12, AC_LUM_CODES = 256, DC_CHROMA_CODES = 12, AC_CHROMA_CODES = 256, MAX_HUFF_SYMBOLS = 257, MAX_HUFF_CODESIZE = 32 }; + + static uint8 s_zag[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 }; + static int16 s_std_lum_quant[64] = { 16,11,12,14,12,10,16,14,13,14,18,17,16,19,24,40,26,24,22,22,24,49,35,37,29,40,58,51,61,60,57,51,56,55,64,72,92,78,64,68,87,69,55,56,80,109,81,87,95,98,103,104,103,62,77,113,121,112,100,120,92,101,103,99 }; + static int16 s_std_croma_quant[64] = { 17,18,18,24,21,24,47,26,26,47,99,66,56,66,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99 }; + + // Table from http://www.imagemagick.org/discourse-server/viewtopic.php?f=22&t=20333&p=98008#p98008 + // This is mozjpeg's default table, in zag order. 
+ static int16 s_alt_quant[64] = { 16,16,16,16,17,16,18,20,20,18,25,27,24,27,25,37,34,31,31,34,37,56,40,43,40,43,40,56,85,53,62,53,53,62,53,85,75,91,74,69,74,91,75,135,106,94,94,106,135,156,131,124,131,156,189,169,169,189,238,226,238,311,311,418 }; + + static uint8 s_dc_lum_bits[17] = { 0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 }; + static uint8 s_dc_lum_val[DC_LUM_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; + static uint8 s_ac_lum_bits[17] = { 0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d }; + static uint8 s_ac_lum_val[AC_LUM_CODES] = + { + 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0, + 0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49, + 0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89, + 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5, + 0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8, + 0xf9,0xfa + }; + static uint8 s_dc_chroma_bits[17] = { 0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 }; + static uint8 s_dc_chroma_val[DC_CHROMA_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 }; + static uint8 s_ac_chroma_bits[17] = { 0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77 }; + static uint8 s_ac_chroma_val[AC_CHROMA_CODES] = + { + 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0, + 0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48, + 0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87, + 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3, + 0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8, + 0xf9,0xfa + }; + + // Low-level helper functions. 
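+	// The YR/YG/YB and CB_*/CR_* constants below are the BT.601 RGB -> YCbCr
+	// weights in 16.16 fixed point, e.g. YR = round (0.299 * 65536) = 19595;
+	// the "+ 32768" terms round the subsequent ">> 16" to nearest.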
+ template <class T> inline void clear_obj(T& obj) { memset(&obj, 0, sizeof(obj)); } + + const int YR = 19595, YG = 38470, YB = 7471, CB_R = -11059, CB_G = -21709, CB_B = 32768, CR_R = 32768, CR_G = -27439, CR_B = -5329; + static inline uint8 clamp(int i) { if (static_cast<uint>(i) > 255U) { if (i < 0) i = 0; else if (i > 255) i = 255; } return static_cast<uint8>(i); } + + static inline int left_shifti(int val, uint32 bits) + { + return static_cast<int>(static_cast<uint32>(val) << bits); + } + + static void RGB_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels) + { + for (; num_pixels; pDst += 3, pSrc += 3, num_pixels--) + { + const int r = pSrc[0], g = pSrc[1], b = pSrc[2]; + pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16); + pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16)); + pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16)); + } + } + + static void RGB_to_Y(uint8* pDst, const uint8* pSrc, int num_pixels) + { + for (; num_pixels; pDst++, pSrc += 3, num_pixels--) + pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16); + } + + static void RGBA_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels) + { + for (; num_pixels; pDst += 3, pSrc += 4, num_pixels--) + { + const int r = pSrc[0], g = pSrc[1], b = pSrc[2]; + pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16); + pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16)); + pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16)); + } + } + + static void RGBA_to_Y(uint8* pDst, const uint8* pSrc, int num_pixels) + { + for (; num_pixels; pDst++, pSrc += 4, num_pixels--) + pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16); + } + + static void Y_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels) + { + for (; num_pixels; pDst += 3, pSrc++, num_pixels--) { pDst[0] = pSrc[0]; pDst[1] = 128; pDst[2] = 128; } + } + + // Forward DCT - DCT derived from jfdctint. 
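+	// DCT_DESCALE(x, n) below is a rounding right shift: it adds 2^(n-1)
+	// before shifting right by n. The row pass keeps ROW_BITS of extra
+	// precision; the column pass removes it again, together with the
+	// 8-point DCT's 1/8 (>> 3) normalization.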
+ enum { CONST_BITS = 13, ROW_BITS = 2 }; +#define DCT_DESCALE(x, n) (((x) + (((int32)1) << ((n) - 1))) >> (n)) +#define DCT_MUL(var, c) (static_cast<int16>(var) * static_cast<int32>(c)) +#define DCT1D(s0, s1, s2, s3, s4, s5, s6, s7) \ + int32 t0 = s0 + s7, t7 = s0 - s7, t1 = s1 + s6, t6 = s1 - s6, t2 = s2 + s5, t5 = s2 - s5, t3 = s3 + s4, t4 = s3 - s4; \ + int32 t10 = t0 + t3, t13 = t0 - t3, t11 = t1 + t2, t12 = t1 - t2; \ + int32 u1 = DCT_MUL(t12 + t13, 4433); \ + s2 = u1 + DCT_MUL(t13, 6270); \ + s6 = u1 + DCT_MUL(t12, -15137); \ + u1 = t4 + t7; \ + int32 u2 = t5 + t6, u3 = t4 + t6, u4 = t5 + t7; \ + int32 z5 = DCT_MUL(u3 + u4, 9633); \ + t4 = DCT_MUL(t4, 2446); t5 = DCT_MUL(t5, 16819); \ + t6 = DCT_MUL(t6, 25172); t7 = DCT_MUL(t7, 12299); \ + u1 = DCT_MUL(u1, -7373); u2 = DCT_MUL(u2, -20995); \ + u3 = DCT_MUL(u3, -16069); u4 = DCT_MUL(u4, -3196); \ + u3 += z5; u4 += z5; \ + s0 = t10 + t11; s1 = t7 + u1 + u4; s3 = t6 + u2 + u3; s4 = t10 - t11; s5 = t5 + u2 + u4; s7 = t4 + u1 + u3; + + static void DCT2D(int32* p) + { + int32 c, * q = p; + for (c = 7; c >= 0; c--, q += 8) + { + int32 s0 = q[0], s1 = q[1], s2 = q[2], s3 = q[3], s4 = q[4], s5 = q[5], s6 = q[6], s7 = q[7]; + DCT1D(s0, s1, s2, s3, s4, s5, s6, s7); + q[0] = left_shifti(s0, ROW_BITS); q[1] = DCT_DESCALE(s1, CONST_BITS - ROW_BITS); q[2] = DCT_DESCALE(s2, CONST_BITS - ROW_BITS); q[3] = DCT_DESCALE(s3, CONST_BITS - ROW_BITS); + q[4] = left_shifti(s4, ROW_BITS); q[5] = DCT_DESCALE(s5, CONST_BITS - ROW_BITS); q[6] = DCT_DESCALE(s6, CONST_BITS - ROW_BITS); q[7] = DCT_DESCALE(s7, CONST_BITS - ROW_BITS); + } + for (q = p, c = 7; c >= 0; c--, q++) + { + int32 s0 = q[0 * 8], s1 = q[1 * 8], s2 = q[2 * 8], s3 = q[3 * 8], s4 = q[4 * 8], s5 = q[5 * 8], s6 = q[6 * 8], s7 = q[7 * 8]; + DCT1D(s0, s1, s2, s3, s4, s5, s6, s7); + q[0 * 8] = DCT_DESCALE(s0, ROW_BITS + 3); q[1 * 8] = DCT_DESCALE(s1, CONST_BITS + ROW_BITS + 3); q[2 * 8] = DCT_DESCALE(s2, CONST_BITS + ROW_BITS + 3); q[3 * 8] = DCT_DESCALE(s3, CONST_BITS + ROW_BITS + 3); + q[4 * 8] = DCT_DESCALE(s4, ROW_BITS + 3); q[5 * 8] = DCT_DESCALE(s5, CONST_BITS + ROW_BITS + 3); q[6 * 8] = DCT_DESCALE(s6, CONST_BITS + ROW_BITS + 3); q[7 * 8] = DCT_DESCALE(s7, CONST_BITS + ROW_BITS + 3); + } + } + + struct sym_freq { uint m_key, m_sym_index; }; + + // Radix sorts sym_freq[] array by 32-bit key m_key. Returns ptr to sorted values. + static inline sym_freq* radix_sort_syms(uint num_syms, sym_freq* pSyms0, sym_freq* pSyms1) + { + const uint cMaxPasses = 4; + uint32 hist[256 * cMaxPasses]; clear_obj(hist); + for (uint i = 0; i < num_syms; i++) { uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; hist[256 * 2 + ((freq >> 16) & 0xFF)]++; hist[256 * 3 + ((freq >> 24) & 0xFF)]++; } + sym_freq* pCur_syms = pSyms0, * pNew_syms = pSyms1; + uint total_passes = cMaxPasses; while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; + for (uint pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) + { + const uint32* pHist = &hist[pass << 8]; + uint offsets[256], cur_ofs = 0; + for (uint i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } + for (uint i = 0; i < num_syms; i++) + pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; + sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; + } + return pCur_syms; + } + + // calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. 
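+	// Given sym_freq entries sorted by increasing frequency, this rewrites each
+	// m_key in place to that symbol's optimal (minimum-redundancy) code length.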
+ static void calculate_minimum_redundancy(sym_freq* A, int n) + { + int root, leaf, next, avbl, used, dpth; + if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } + A[0].m_key += A[1].m_key; root = 0; leaf = 2; + for (next = 1; next < n - 1; next++) + { + if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = next; } + else A[next].m_key = A[leaf++].m_key; + if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key += A[root].m_key; A[root++].m_key = next; } + else A[next].m_key += A[leaf++].m_key; + } + A[n - 2].m_key = 0; + for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; + avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; + while (avbl > 0) + { + while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } + while (avbl > used) { A[next--].m_key = dpth; avbl--; } + avbl = 2 * used; dpth++; used = 0; + } + } + + // Limits canonical Huffman code table's max code size to max_code_size. + static void huffman_enforce_max_code_size(int* pNum_codes, int code_list_len, int max_code_size) + { + if (code_list_len <= 1) return; + + for (int i = max_code_size + 1; i <= MAX_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i]; + + uint32 total = 0; + for (int i = max_code_size; i > 0; i--) + total += (((uint32)pNum_codes[i]) << (max_code_size - i)); + + while (total != (1UL << max_code_size)) + { + pNum_codes[max_code_size]--; + for (int i = max_code_size - 1; i > 0; i--) + { + if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; } + } + total--; + } + } + + // Generates an optimized Huffman table. + void jpeg_encoder::optimize_huffman_table(int table_num, int table_len) + { + sym_freq syms0[MAX_HUFF_SYMBOLS], syms1[MAX_HUFF_SYMBOLS]; + syms0[0].m_key = 1; syms0[0].m_sym_index = 0; // dummy symbol, assures that no valid code contains all 1's + int num_used_syms = 1; + const uint32* pSym_count = &m_huff_count[table_num][0]; + for (int i = 0; i < table_len; i++) + if (pSym_count[i]) { syms0[num_used_syms].m_key = pSym_count[i]; syms0[num_used_syms++].m_sym_index = i + 1; } + sym_freq* pSyms = radix_sort_syms(num_used_syms, syms0, syms1); + calculate_minimum_redundancy(pSyms, num_used_syms); + + // Count the # of symbols of each code size. + int num_codes[1 + MAX_HUFF_CODESIZE]; clear_obj(num_codes); + for (int i = 0; i < num_used_syms; i++) + num_codes[pSyms[i].m_key]++; + + const uint JPGE_CODE_SIZE_LIMIT = 16; // the maximum possible size of a JPEG Huffman code (valid range is [9,16] - 9 vs. 8 because of the dummy symbol) + huffman_enforce_max_code_size(num_codes, num_used_syms, JPGE_CODE_SIZE_LIMIT); + + // Compute m_huff_bits array, which contains the # of symbols per code size. + clear_obj(m_huff_bits[table_num]); + for (int i = 1; i <= (int)JPGE_CODE_SIZE_LIMIT; i++) + m_huff_bits[table_num][i] = static_cast<uint8>(num_codes[i]); + + // Remove the dummy symbol added above, which must be in largest bucket. + for (int i = JPGE_CODE_SIZE_LIMIT; i >= 1; i--) + { + if (m_huff_bits[table_num][i]) { m_huff_bits[table_num][i]--; break; } + } + + // Compute the m_huff_val array, which contains the symbol indices sorted by code size (smallest to largest). + for (int i = num_used_syms - 1; i >= 1; i--) + m_huff_val[table_num][num_used_syms - 1 - i] = static_cast<uint8>(pSyms[i].m_sym_index - 1); + } + + // JPEG marker generation.
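+	// A marker is the byte 0xFF followed by a type byte; every marker emitted
+	// here except SOI and EOI is then followed by a 16-bit big-endian length
+	// that counts the length field itself plus the payload.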
+ void jpeg_encoder::emit_byte(uint8 i) + { + m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_obj(i); + } + + void jpeg_encoder::emit_word(uint i) + { + emit_byte(uint8(i >> 8)); emit_byte(uint8(i & 0xFF)); + } + + void jpeg_encoder::emit_marker(int marker) + { + emit_byte(uint8(0xFF)); emit_byte(uint8(marker)); + } + + // Emit JFIF marker + void jpeg_encoder::emit_jfif_app0() + { + emit_marker(M_APP0); + emit_word(2 + 4 + 1 + 2 + 1 + 2 + 2 + 1 + 1); + emit_byte(0x4A); emit_byte(0x46); emit_byte(0x49); emit_byte(0x46); /* Identifier: ASCII "JFIF" */ + emit_byte(0); + emit_byte(1); /* Major version */ + emit_byte(1); /* Minor version */ + emit_byte(0); /* Density unit */ + emit_word(1); + emit_word(1); + emit_byte(0); /* No thumbnail image */ + emit_byte(0); + } + + // Emit quantization tables + void jpeg_encoder::emit_dqt() + { + for (int i = 0; i < ((m_num_components == 3) ? 2 : 1); i++) + { + emit_marker(M_DQT); + emit_word(64 + 1 + 2); + emit_byte(static_cast<uint8>(i)); + for (int j = 0; j < 64; j++) + emit_byte(static_cast<uint8>(m_quantization_tables[i][j])); + } + } + + // Emit start of frame marker + void jpeg_encoder::emit_sof() + { + emit_marker(M_SOF0); /* baseline */ + emit_word(3 * m_num_components + 2 + 5 + 1); + emit_byte(8); /* precision */ + emit_word(m_image_y); + emit_word(m_image_x); + emit_byte(m_num_components); + for (int i = 0; i < m_num_components; i++) + { + emit_byte(static_cast<uint8>(i + 1)); /* component ID */ + emit_byte((m_comp_h_samp[i] << 4) + m_comp_v_samp[i]); /* h and v sampling */ + emit_byte(i > 0); /* quant. table num */ + } + } + + // Emit Huffman table. + void jpeg_encoder::emit_dht(uint8* bits, uint8* val, int index, bool ac_flag) + { + emit_marker(M_DHT); + + int length = 0; + for (int i = 1; i <= 16; i++) + length += bits[i]; + + emit_word(length + 2 + 1 + 16); + emit_byte(static_cast<uint8>(index + (ac_flag << 4))); + + for (int i = 1; i <= 16; i++) + emit_byte(bits[i]); + + for (int i = 0; i < length; i++) + emit_byte(val[i]); + } + + // Emit all Huffman tables. + void jpeg_encoder::emit_dhts() + { + emit_dht(m_huff_bits[0 + 0], m_huff_val[0 + 0], 0, false); + emit_dht(m_huff_bits[2 + 0], m_huff_val[2 + 0], 0, true); + if (m_num_components == 3) + { + emit_dht(m_huff_bits[0 + 1], m_huff_val[0 + 1], 1, false); + emit_dht(m_huff_bits[2 + 1], m_huff_val[2 + 1], 1, true); + } + } + + // emit start of scan + void jpeg_encoder::emit_sos() + { + emit_marker(M_SOS); + emit_word(2 * m_num_components + 2 + 1 + 3); + emit_byte(m_num_components); + for (int i = 0; i < m_num_components; i++) + { + emit_byte(static_cast<uint8>(i + 1)); + if (i == 0) + emit_byte((0 << 4) + 0); + else + emit_byte((1 << 4) + 1); + } + emit_byte(0); /* spectral selection */ + emit_byte(63); + emit_byte(0); + } + + // Emit all markers at beginning of image file. + void jpeg_encoder::emit_markers() + { + emit_marker(M_SOI); + emit_jfif_app0(); + emit_dqt(); + emit_sof(); + emit_dhts(); + emit_sos(); + } + + // Compute the actual canonical Huffman codes/code sizes given the JPEG huff bits and val arrays. 
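+	// Worked example: bits[] = {0,0,2,1,0,...} means two 2-bit codes and one
+	// 3-bit code. The canonical assignment below hands out 00 and 01 at size 2,
+	// then shifts (code = 0b10 -> 0b100) and assigns 100 at size 3; val[] maps
+	// those codes back to symbols in the same order.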
+ void jpeg_encoder::compute_huffman_table(uint* codes, uint8* code_sizes, uint8* bits, uint8* val) + { + int i, l, last_p, si; + uint8 huff_size[257]; + uint huff_code[257]; + uint code; + + int p = 0; + for (l = 1; l <= 16; l++) + for (i = 1; i <= bits[l]; i++) + huff_size[p++] = (char)l; + + huff_size[p] = 0; last_p = p; // write sentinel + + code = 0; si = huff_size[0]; p = 0; + + while (huff_size[p]) + { + while (huff_size[p] == si) + huff_code[p++] = code++; + code <<= 1; + si++; + } + + memset(codes, 0, sizeof(codes[0]) * 256); + memset(code_sizes, 0, sizeof(code_sizes[0]) * 256); + for (p = 0; p < last_p; p++) + { + codes[val[p]] = huff_code[p]; + code_sizes[val[p]] = huff_size[p]; + } + } + + // Quantization table generation. + void jpeg_encoder::compute_quant_table(int32* pDst, int16* pSrc) + { + int32 q; + if (m_params.m_quality < 50) + q = 5000 / m_params.m_quality; + else + q = 200 - m_params.m_quality * 2; + for (int i = 0; i < 64; i++) + { + int32 j = *pSrc++; j = (j * q + 50L) / 100L; + *pDst++ = JPGE_MIN(JPGE_MAX(j, 1), 255); + } + } + + // Higher-level methods. + void jpeg_encoder::first_pass_init() + { + m_bit_buffer = 0; m_bits_in = 0; + memset(m_last_dc_val, 0, 3 * sizeof(m_last_dc_val[0])); + m_mcu_y_ofs = 0; + m_pass_num = 1; + } + + bool jpeg_encoder::second_pass_init() + { + compute_huffman_table(&m_huff_codes[0 + 0][0], &m_huff_code_sizes[0 + 0][0], m_huff_bits[0 + 0], m_huff_val[0 + 0]); + compute_huffman_table(&m_huff_codes[2 + 0][0], &m_huff_code_sizes[2 + 0][0], m_huff_bits[2 + 0], m_huff_val[2 + 0]); + if (m_num_components > 1) + { + compute_huffman_table(&m_huff_codes[0 + 1][0], &m_huff_code_sizes[0 + 1][0], m_huff_bits[0 + 1], m_huff_val[0 + 1]); + compute_huffman_table(&m_huff_codes[2 + 1][0], &m_huff_code_sizes[2 + 1][0], m_huff_bits[2 + 1], m_huff_val[2 + 1]); + } + first_pass_init(); + emit_markers(); + m_pass_num = 2; + return true; + } + + bool jpeg_encoder::jpg_open(int p_x_res, int p_y_res, int src_channels) + { + m_num_components = 3; + switch (m_params.m_subsampling) + { + case Y_ONLY: + { + m_num_components = 1; + m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1; + m_mcu_x = 8; m_mcu_y = 8; + break; + } + case H1V1: + { + m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1; + m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1; + m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1; + m_mcu_x = 8; m_mcu_y = 8; + break; + } + case H2V1: + { + m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 1; + m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1; + m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1; + m_mcu_x = 16; m_mcu_y = 8; + break; + } + case H2V2: + { + m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 2; + m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1; + m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1; + m_mcu_x = 16; m_mcu_y = 16; + } + } + + m_image_x = p_x_res; m_image_y = p_y_res; + m_image_bpp = src_channels; + m_image_bpl = m_image_x * src_channels; + m_image_x_mcu = (m_image_x + m_mcu_x - 1) & (~(m_mcu_x - 1)); + m_image_y_mcu = (m_image_y + m_mcu_y - 1) & (~(m_mcu_y - 1)); + m_image_bpl_xlt = m_image_x * m_num_components; + m_image_bpl_mcu = m_image_x_mcu * m_num_components; + m_mcus_per_row = m_image_x_mcu / m_mcu_x; + + if ((m_mcu_lines[0] = static_cast<uint8*>(jpge_malloc(m_image_bpl_mcu * m_mcu_y))) == NULL) return false; + for (int i = 1; i < m_mcu_y; i++) + m_mcu_lines[i] = m_mcu_lines[i - 1] + m_image_bpl_mcu; + + if (m_params.m_use_std_tables) + { + compute_quant_table(m_quantization_tables[0], s_std_lum_quant); + compute_quant_table(m_quantization_tables[1], m_params.m_no_chroma_discrim_flag ? 
s_std_lum_quant : s_std_croma_quant); + } + else + { + compute_quant_table(m_quantization_tables[0], s_alt_quant); + memcpy(m_quantization_tables[1], m_quantization_tables[0], sizeof(m_quantization_tables[1])); + } + + m_out_buf_left = JPGE_OUT_BUF_SIZE; + m_pOut_buf = m_out_buf; + + if (m_params.m_two_pass_flag) + { + clear_obj(m_huff_count); + first_pass_init(); + } + else + { + memcpy(m_huff_bits[0 + 0], s_dc_lum_bits, 17); memcpy(m_huff_val[0 + 0], s_dc_lum_val, DC_LUM_CODES); + memcpy(m_huff_bits[2 + 0], s_ac_lum_bits, 17); memcpy(m_huff_val[2 + 0], s_ac_lum_val, AC_LUM_CODES); + memcpy(m_huff_bits[0 + 1], s_dc_chroma_bits, 17); memcpy(m_huff_val[0 + 1], s_dc_chroma_val, DC_CHROMA_CODES); + memcpy(m_huff_bits[2 + 1], s_ac_chroma_bits, 17); memcpy(m_huff_val[2 + 1], s_ac_chroma_val, AC_CHROMA_CODES); + if (!second_pass_init()) return false; // in effect, skip over the first pass + } + return m_all_stream_writes_succeeded; + } + + void jpeg_encoder::load_block_8_8_grey(int x) + { + uint8* pSrc; + sample_array_t* pDst = m_sample_array; + x <<= 3; + for (int i = 0; i < 8; i++, pDst += 8) + { + pSrc = m_mcu_lines[i] + x; + pDst[0] = pSrc[0] - 128; pDst[1] = pSrc[1] - 128; pDst[2] = pSrc[2] - 128; pDst[3] = pSrc[3] - 128; + pDst[4] = pSrc[4] - 128; pDst[5] = pSrc[5] - 128; pDst[6] = pSrc[6] - 128; pDst[7] = pSrc[7] - 128; + } + } + + void jpeg_encoder::load_block_8_8(int x, int y, int c) + { + uint8* pSrc; + sample_array_t* pDst = m_sample_array; + x = (x * (8 * 3)) + c; + y <<= 3; + for (int i = 0; i < 8; i++, pDst += 8) + { + pSrc = m_mcu_lines[y + i] + x; + pDst[0] = pSrc[0 * 3] - 128; pDst[1] = pSrc[1 * 3] - 128; pDst[2] = pSrc[2 * 3] - 128; pDst[3] = pSrc[3 * 3] - 128; + pDst[4] = pSrc[4 * 3] - 128; pDst[5] = pSrc[5 * 3] - 128; pDst[6] = pSrc[6 * 3] - 128; pDst[7] = pSrc[7 * 3] - 128; + } + } + + void jpeg_encoder::load_block_16_8(int x, int c) + { + uint8* pSrc1, * pSrc2; + sample_array_t* pDst = m_sample_array; + x = (x * (16 * 3)) + c; + for (int i = 0; i < 16; i += 2, pDst += 8) + { + pSrc1 = m_mcu_lines[i + 0] + x; + pSrc2 = m_mcu_lines[i + 1] + x; + pDst[0] = ((pSrc1[0 * 3] + pSrc1[1 * 3] + pSrc2[0 * 3] + pSrc2[1 * 3] + 2) >> 2) - 128; pDst[1] = ((pSrc1[2 * 3] + pSrc1[3 * 3] + pSrc2[2 * 3] + pSrc2[3 * 3] + 2) >> 2) - 128; + pDst[2] = ((pSrc1[4 * 3] + pSrc1[5 * 3] + pSrc2[4 * 3] + pSrc2[5 * 3] + 2) >> 2) - 128; pDst[3] = ((pSrc1[6 * 3] + pSrc1[7 * 3] + pSrc2[6 * 3] + pSrc2[7 * 3] + 2) >> 2) - 128; + pDst[4] = ((pSrc1[8 * 3] + pSrc1[9 * 3] + pSrc2[8 * 3] + pSrc2[9 * 3] + 2) >> 2) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3] + pSrc2[10 * 3] + pSrc2[11 * 3] + 2) >> 2) - 128; + pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3] + pSrc2[12 * 3] + pSrc2[13 * 3] + 2) >> 2) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3] + pSrc2[14 * 3] + pSrc2[15 * 3] + 2) >> 2) - 128; + } + } + + void jpeg_encoder::load_block_16_8_8(int x, int c) + { + uint8* pSrc1; + sample_array_t* pDst = m_sample_array; + x = (x * (16 * 3)) + c; + for (int i = 0; i < 8; i++, pDst += 8) + { + pSrc1 = m_mcu_lines[i + 0] + x; + pDst[0] = ((pSrc1[0 * 3] + pSrc1[1 * 3] + 1) >> 1) - 128; pDst[1] = ((pSrc1[2 * 3] + pSrc1[3 * 3] + 1) >> 1) - 128; + pDst[2] = ((pSrc1[4 * 3] + pSrc1[5 * 3] + 1) >> 1) - 128; pDst[3] = ((pSrc1[6 * 3] + pSrc1[7 * 3] + 1) >> 1) - 128; + pDst[4] = ((pSrc1[8 * 3] + pSrc1[9 * 3] + 1) >> 1) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3] + 1) >> 1) - 128; + pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3] + 1) >> 1) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3] + 1) >> 1) - 128; + } + } + + void 
jpeg_encoder::load_quantized_coefficients(int component_num) + { + int32* q = m_quantization_tables[component_num > 0]; + int16* pDst = m_coefficient_array; + for (int i = 0; i < 64; i++) + { + sample_array_t j = m_sample_array[s_zag[i]]; + if (j < 0) + { + if ((j = -j + (*q >> 1)) < *q) + *pDst++ = 0; + else + *pDst++ = static_cast<int16>(-(j / *q)); + } + else + { + if ((j = j + (*q >> 1)) < *q) + *pDst++ = 0; + else + *pDst++ = static_cast<int16>((j / *q)); + } + q++; + } + } + + void jpeg_encoder::flush_output_buffer() + { + if (m_out_buf_left != JPGE_OUT_BUF_SIZE) + m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_buf(m_out_buf, JPGE_OUT_BUF_SIZE - m_out_buf_left); + m_pOut_buf = m_out_buf; + m_out_buf_left = JPGE_OUT_BUF_SIZE; + } + + void jpeg_encoder::put_bits(uint bits, uint len) + { + m_bit_buffer |= ((uint32)bits << (24 - (m_bits_in += len))); + while (m_bits_in >= 8) + { + uint8 c; +#define JPGE_PUT_BYTE(c) { *m_pOut_buf++ = (c); if (--m_out_buf_left == 0) flush_output_buffer(); } + JPGE_PUT_BYTE(c = (uint8)((m_bit_buffer >> 16) & 0xFF)); + if (c == 0xFF) JPGE_PUT_BYTE(0); + m_bit_buffer <<= 8; + m_bits_in -= 8; + } + } + + void jpeg_encoder::code_coefficients_pass_one(int component_num) + { + if (component_num >= 3) return; // just to shut up static analysis + int i, run_len, nbits, temp1; + int16* src = m_coefficient_array; + uint32* dc_count = component_num ? m_huff_count[0 + 1] : m_huff_count[0 + 0], * ac_count = component_num ? m_huff_count[2 + 1] : m_huff_count[2 + 0]; + + temp1 = src[0] - m_last_dc_val[component_num]; + m_last_dc_val[component_num] = src[0]; + if (temp1 < 0) temp1 = -temp1; + + nbits = 0; + while (temp1) + { + nbits++; temp1 >>= 1; + } + + dc_count[nbits]++; + for (run_len = 0, i = 1; i < 64; i++) + { + if ((temp1 = m_coefficient_array[i]) == 0) + run_len++; + else + { + while (run_len >= 16) + { + ac_count[0xF0]++; + run_len -= 16; + } + if (temp1 < 0) temp1 = -temp1; + nbits = 1; + while (temp1 >>= 1) nbits++; + ac_count[(run_len << 4) + nbits]++; + run_len = 0; + } + } + if (run_len) ac_count[0]++; + } + + void jpeg_encoder::code_coefficients_pass_two(int component_num) + { + int i, j, run_len, nbits, temp1, temp2; + int16* pSrc = m_coefficient_array; + uint* codes[2]; + uint8* code_sizes[2]; + + if (component_num == 0) + { + codes[0] = m_huff_codes[0 + 0]; codes[1] = m_huff_codes[2 + 0]; + code_sizes[0] = m_huff_code_sizes[0 + 0]; code_sizes[1] = m_huff_code_sizes[2 + 0]; + } + else + { + codes[0] = m_huff_codes[0 + 1]; codes[1] = m_huff_codes[2 + 1]; + code_sizes[0] = m_huff_code_sizes[0 + 1]; code_sizes[1] = m_huff_code_sizes[2 + 1]; + } + + temp1 = temp2 = pSrc[0] - m_last_dc_val[component_num]; + m_last_dc_val[component_num] = pSrc[0]; + + if (temp1 < 0) + { + temp1 = -temp1; temp2--; + } + + nbits = 0; + while (temp1) + { + nbits++; temp1 >>= 1; + } + + put_bits(codes[0][nbits], code_sizes[0][nbits]); + if (nbits) put_bits(temp2 & ((1 << nbits) - 1), nbits); + + for (run_len = 0, i = 1; i < 64; i++) + { + if ((temp1 = m_coefficient_array[i]) == 0) + run_len++; + else + { + while (run_len >= 16) + { + put_bits(codes[1][0xF0], code_sizes[1][0xF0]); + run_len -= 16; + } + if ((temp2 = temp1) < 0) + { + temp1 = -temp1; + temp2--; + } + nbits = 1; + while (temp1 >>= 1) + nbits++; + j = (run_len << 4) + nbits; + put_bits(codes[1][j], code_sizes[1][j]); + put_bits(temp2 & ((1 << nbits) - 1), nbits); + run_len = 0; + } + } + if (run_len) + put_bits(codes[1][0], code_sizes[1][0]); + } + + void jpeg_encoder::code_block(int 
component_num) + { + DCT2D(m_sample_array); + load_quantized_coefficients(component_num); + if (m_pass_num == 1) + code_coefficients_pass_one(component_num); + else + code_coefficients_pass_two(component_num); + } + + void jpeg_encoder::process_mcu_row() + { + if (m_num_components == 1) + { + for (int i = 0; i < m_mcus_per_row; i++) + { + load_block_8_8_grey(i); code_block(0); + } + } + else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1)) + { + for (int i = 0; i < m_mcus_per_row; i++) + { + load_block_8_8(i, 0, 0); code_block(0); load_block_8_8(i, 0, 1); code_block(1); load_block_8_8(i, 0, 2); code_block(2); + } + } + else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1)) + { + for (int i = 0; i < m_mcus_per_row; i++) + { + load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0); + load_block_16_8_8(i, 1); code_block(1); load_block_16_8_8(i, 2); code_block(2); + } + } + else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2)) + { + for (int i = 0; i < m_mcus_per_row; i++) + { + load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0); + load_block_8_8(i * 2 + 0, 1, 0); code_block(0); load_block_8_8(i * 2 + 1, 1, 0); code_block(0); + load_block_16_8(i, 1); code_block(1); load_block_16_8(i, 2); code_block(2); + } + } + } + + bool jpeg_encoder::terminate_pass_one() + { + optimize_huffman_table(0 + 0, DC_LUM_CODES); optimize_huffman_table(2 + 0, AC_LUM_CODES); + if (m_num_components > 1) + { + optimize_huffman_table(0 + 1, DC_CHROMA_CODES); optimize_huffman_table(2 + 1, AC_CHROMA_CODES); + } + return second_pass_init(); + } + + bool jpeg_encoder::terminate_pass_two() + { + put_bits(0x7F, 7); + flush_output_buffer(); + emit_marker(M_EOI); + m_pass_num++; // purposely bump up m_pass_num, for debugging + return true; + } + + bool jpeg_encoder::process_end_of_image() + { + if (m_mcu_y_ofs) + { + if (m_mcu_y_ofs < 16) // check here just to shut up static analysis + { + for (int i = m_mcu_y_ofs; i < m_mcu_y; i++) + memcpy(m_mcu_lines[i], m_mcu_lines[m_mcu_y_ofs - 1], m_image_bpl_mcu); + } + + process_mcu_row(); + } + + if (m_pass_num == 1) + return terminate_pass_one(); + else + return terminate_pass_two(); + } + + void jpeg_encoder::load_mcu(const void* pSrc) + { + const uint8* Psrc = reinterpret_cast<const uint8*>(pSrc); + + uint8* pDst = m_mcu_lines[m_mcu_y_ofs]; // OK to write up to m_image_bpl_xlt bytes to pDst + + if (m_num_components == 1) + { + if (m_image_bpp == 4) + RGBA_to_Y(pDst, Psrc, m_image_x); + else if (m_image_bpp == 3) + RGB_to_Y(pDst, Psrc, m_image_x); + else + memcpy(pDst, Psrc, m_image_x); + } + else + { + if (m_image_bpp == 4) + RGBA_to_YCC(pDst, Psrc, m_image_x); + else if (m_image_bpp == 3) + RGB_to_YCC(pDst, Psrc, m_image_x); + else + Y_to_YCC(pDst, Psrc, m_image_x); + } + + // Possibly duplicate pixels at end of scanline if not a multiple of 8 or 16 + if (m_num_components == 1) + memset(m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt, pDst[m_image_bpl_xlt - 1], m_image_x_mcu - m_image_x); + else + { + const uint8 y = pDst[m_image_bpl_xlt - 3 + 0], cb = pDst[m_image_bpl_xlt - 3 + 1], cr = pDst[m_image_bpl_xlt - 3 + 2]; + uint8* q = m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt; + for (int i = m_image_x; i < m_image_x_mcu; i++) + { + *q++ = y; *q++ = cb; *q++ = cr; + } + } + + if (++m_mcu_y_ofs == m_mcu_y) + { + process_mcu_row(); + m_mcu_y_ofs = 0; + } + } + + void jpeg_encoder::clear() + { + m_mcu_lines[0] = NULL; + m_pass_num = 0; + m_all_stream_writes_succeeded = true; + } + + 
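// How the pieces above fit together: in two-pass mode, pass 1 runs the full
// pipeline but code_coefficients_pass_one() only tallies symbol frequencies
// into m_huff_count; terminate_pass_one() then derives size-optimized Huffman
// tables (optimize_huffman_table) and calls second_pass_init() so that pass 2
// can emit the actual bitstream via code_coefficients_pass_two(). A minimal
// caller sketch follows; it mirrors the compress_image_to_jpeg_file() wrapper
// defined later in this file, and `stream` / `pixels` (tightly packed RGB
// rows) are assumed inputs for illustration, not upstream code.
//
//   jpge::params comp;
//   comp.m_quality = 90;
//   comp.m_two_pass_flag = true; // enable the frequency-gathering first pass
//
//   jpge::jpeg_encoder enc;
//   if (!enc.init(&stream, width, height, 3, comp)) return false;
//   for (jpge::uint pass = 0; pass < enc.get_total_passes(); pass++)
//   {
//       for (int y = 0; y < height; y++)
//           if (!enc.process_scanline(pixels + y * width * 3)) return false;
//       if (!enc.process_scanline(NULL)) return false; // finish this pass
//   }
//   enc.deinit();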
jpeg_encoder::jpeg_encoder() + { + clear(); + } + + jpeg_encoder::~jpeg_encoder() + { + deinit(); + } + + bool jpeg_encoder::init(output_stream* pStream, int width, int height, int src_channels, const params& comp_params) + { + deinit(); + if (((!pStream) || (width < 1) || (height < 1)) || ((src_channels != 1) && (src_channels != 3) && (src_channels != 4)) || (!comp_params.check())) return false; + m_pStream = pStream; + m_params = comp_params; + return jpg_open(width, height, src_channels); + } + + void jpeg_encoder::deinit() + { + jpge_free(m_mcu_lines[0]); + clear(); + } + + bool jpeg_encoder::process_scanline(const void* pScanline) + { + if ((m_pass_num < 1) || (m_pass_num > 2)) return false; + if (m_all_stream_writes_succeeded) + { + if (!pScanline) + { + if (!process_end_of_image()) return false; + } + else + { + load_mcu(pScanline); + } + } + return m_all_stream_writes_succeeded; + } + + // Higher level wrappers/examples (optional). +#include <stdio.h> + + class cfile_stream : public output_stream + { + cfile_stream(const cfile_stream&); + cfile_stream& operator= (const cfile_stream&); + + FILE* m_pFile; + bool m_bStatus; + + public: + cfile_stream() : m_pFile(NULL), m_bStatus(false) { } + + virtual ~cfile_stream() + { + close(); + } + + bool open(const char* pFilename) + { + close(); + m_pFile = fopen(pFilename, "wb"); + m_bStatus = (m_pFile != NULL); + return m_bStatus; + } + + bool close() + { + if (m_pFile) + { + if (fclose(m_pFile) == EOF) + { + m_bStatus = false; + } + m_pFile = NULL; + } + return m_bStatus; + } + + virtual bool put_buf(const void* pBuf, int len) + { + m_bStatus = m_bStatus && (fwrite(pBuf, len, 1, m_pFile) == 1); + return m_bStatus; + } + + uint get_size() const + { + return m_pFile ? ftell(m_pFile) : 0; + } + }; + + // Writes JPEG image to file. 
+ bool compress_image_to_jpeg_file(const char* pFilename, int width, int height, int num_channels, const uint8* pImage_data, const params& comp_params) + { + cfile_stream dst_stream; + if (!dst_stream.open(pFilename)) + return false; + + jpge::jpeg_encoder dst_image; + if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params)) + return false; + + for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++) + { + for (int i = 0; i < height; i++) + { + const uint8* pBuf = pImage_data + i * width * num_channels; + if (!dst_image.process_scanline(pBuf)) + return false; + } + if (!dst_image.process_scanline(NULL)) + return false; + } + + dst_image.deinit(); + + return dst_stream.close(); + } + + class memory_stream : public output_stream + { + memory_stream(const memory_stream&); + memory_stream& operator= (const memory_stream&); + + uint8* m_pBuf; + uint m_buf_size, m_buf_ofs; + + public: + memory_stream(void* pBuf, uint buf_size) : m_pBuf(static_cast<uint8*>(pBuf)), m_buf_size(buf_size), m_buf_ofs(0) { } + + virtual ~memory_stream() { } + + virtual bool put_buf(const void* pBuf, int len) + { + uint buf_remaining = m_buf_size - m_buf_ofs; + if ((uint)len > buf_remaining) + return false; + memcpy(m_pBuf + m_buf_ofs, pBuf, len); + m_buf_ofs += len; + return true; + } + + uint get_size() const + { + return m_buf_ofs; + } + }; + + bool compress_image_to_jpeg_file_in_memory(void* pDstBuf, int& buf_size, int width, int height, int num_channels, const uint8* pImage_data, const params& comp_params) + { + if ((!pDstBuf) || (!buf_size)) + return false; + + memory_stream dst_stream(pDstBuf, buf_size); + + buf_size = 0; + + jpge::jpeg_encoder dst_image; + if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params)) + return false; + + for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++) + { + for (int i = 0; i < height; i++) + { + const uint8* pScanline = pImage_data + i * width * num_channels; + if (!dst_image.process_scanline(pScanline)) + return false; + } + if (!dst_image.process_scanline(NULL)) + return false; + } + + dst_image.deinit(); + + buf_size = dst_stream.get_size(); + return true; + } + +} // namespace jpge + diff --git a/thirdparty/jpeg-compressor/jpge.h b/thirdparty/jpeg-compressor/jpge.h new file mode 100644 index 0000000000..d10510e553 --- /dev/null +++ b/thirdparty/jpeg-compressor/jpge.h @@ -0,0 +1,174 @@ +// jpge.h - C++ class for JPEG compression. +// Public Domain or Apache 2.0, Richard Geldreich <richgel99@gmail.com> +// Alex Evans: Added RGBA support, linear memory allocator. +#ifndef JPEG_ENCODER_H +#define JPEG_ENCODER_H + +namespace jpge +{ + typedef unsigned char uint8; + typedef signed short int16; + typedef signed int int32; + typedef unsigned short uint16; + typedef unsigned int uint32; + typedef unsigned int uint; + + // JPEG chroma subsampling factors. Y_ONLY (grayscale images) and H2V2 (color images) are the most common. + enum subsampling_t { Y_ONLY = 0, H1V1 = 1, H2V1 = 2, H2V2 = 3 }; + + // JPEG compression parameters structure. + struct params + { + inline params() : m_quality(85), m_subsampling(H2V2), m_no_chroma_discrim_flag(false), m_two_pass_flag(false), m_use_std_tables(false) { } + + inline bool check() const + { + if ((m_quality < 1) || (m_quality > 100)) return false; + if ((uint)m_subsampling > (uint)H2V2) return false; + return true; + } + + // Quality: 1-100, higher is better. Typical values are around 50-95. 
+ int m_quality; + + // m_subsampling: + // 0 = Y (grayscale) only + // 1 = YCbCr, no subsampling (H1V1, YCbCr 1x1x1, 3 blocks per MCU) + // 2 = YCbCr, H2V1 subsampling (YCbCr 2x1x1, 4 blocks per MCU) + // 3 = YCbCr, H2V2 subsampling (YCbCr 4x1x1, 6 blocks per MCU-- very common) + subsampling_t m_subsampling; + + // Disables CbCr discrimination - only intended for testing. + // If true, the Y quantization table is also used for the CbCr channels. + bool m_no_chroma_discrim_flag; + + bool m_two_pass_flag; + + // By default we use the same quantization tables as mozjpeg's default. + // Set to true to use the traditional tables from JPEG Annex K. + bool m_use_std_tables; + }; + + // Writes JPEG image to a file. + // num_channels must be 1 (Y) or 3 (RGB), image pitch must be width*num_channels. + bool compress_image_to_jpeg_file(const char* pFilename, int width, int height, int num_channels, const uint8* pImage_data, const params& comp_params = params()); + + // Writes JPEG image to memory buffer. + // On entry, buf_size is the size of the output buffer pointed at by pBuf, which should be at least ~1024 bytes. + // If return value is true, buf_size will be set to the size of the compressed data. + bool compress_image_to_jpeg_file_in_memory(void* pBuf, int& buf_size, int width, int height, int num_channels, const uint8* pImage_data, const params& comp_params = params()); + + // Output stream abstract class - used by the jpeg_encoder class to write to the output stream. + // put_buf() is generally called with len==JPGE_OUT_BUF_SIZE bytes, but for headers it'll be called with smaller amounts. + class output_stream + { + public: + virtual ~output_stream() { }; + virtual bool put_buf(const void* Pbuf, int len) = 0; + template<class T> inline bool put_obj(const T& obj) { return put_buf(&obj, sizeof(T)); } + }; + + // Lower level jpeg_encoder class - useful if more control is needed than the above helper functions. + class jpeg_encoder + { + public: + jpeg_encoder(); + ~jpeg_encoder(); + + // Initializes the compressor. + // pStream: The stream object to use for writing compressed data. + // params - Compression parameters structure, defined above. + // width, height - Image dimensions. + // channels - May be 1, or 3. 1 indicates grayscale, 3 indicates RGB source data. + // Returns false on out of memory or if a stream write fails. + bool init(output_stream* pStream, int width, int height, int src_channels, const params& comp_params = params()); + + const params& get_params() const { return m_params; } + + // Deinitializes the compressor, freeing any allocated memory. May be called at any time. + void deinit(); + + uint get_total_passes() const { return m_params.m_two_pass_flag ? 2 : 1; } + inline uint get_cur_pass() { return m_pass_num; } + + // Call this method with each source scanline. + // width * src_channels bytes per scanline is expected (RGB or Y format). + // You must call with NULL after all scanlines are processed to finish compression. + // Returns false on out of memory or if a stream write fails. 
+ bool process_scanline(const void* pScanline); + + private: + jpeg_encoder(const jpeg_encoder&); + jpeg_encoder& operator =(const jpeg_encoder&); + + typedef int32 sample_array_t; + + output_stream* m_pStream; + params m_params; + uint8 m_num_components; + uint8 m_comp_h_samp[3], m_comp_v_samp[3]; + int m_image_x, m_image_y, m_image_bpp, m_image_bpl; + int m_image_x_mcu, m_image_y_mcu; + int m_image_bpl_xlt, m_image_bpl_mcu; + int m_mcus_per_row; + int m_mcu_x, m_mcu_y; + uint8* m_mcu_lines[16]; + uint8 m_mcu_y_ofs; + sample_array_t m_sample_array[64]; + int16 m_coefficient_array[64]; + int32 m_quantization_tables[2][64]; + uint m_huff_codes[4][256]; + uint8 m_huff_code_sizes[4][256]; + uint8 m_huff_bits[4][17]; + uint8 m_huff_val[4][256]; + uint32 m_huff_count[4][256]; + int m_last_dc_val[3]; + enum { JPGE_OUT_BUF_SIZE = 2048 }; + uint8 m_out_buf[JPGE_OUT_BUF_SIZE]; + uint8* m_pOut_buf; + uint m_out_buf_left; + uint32 m_bit_buffer; + uint m_bits_in; + uint8 m_pass_num; + bool m_all_stream_writes_succeeded; + + void optimize_huffman_table(int table_num, int table_len); + void emit_byte(uint8 i); + void emit_word(uint i); + void emit_marker(int marker); + void emit_jfif_app0(); + void emit_dqt(); + void emit_sof(); + void emit_dht(uint8* bits, uint8* val, int index, bool ac_flag); + void emit_dhts(); + void emit_sos(); + void emit_markers(); + void compute_huffman_table(uint* codes, uint8* code_sizes, uint8* bits, uint8* val); + void compute_quant_table(int32* dst, int16* src); + void adjust_quant_table(int32* dst, int32* src); + void first_pass_init(); + bool second_pass_init(); + bool jpg_open(int p_x_res, int p_y_res, int src_channels); + void load_block_8_8_grey(int x); + void load_block_8_8(int x, int y, int c); + void load_block_16_8(int x, int c); + void load_block_16_8_8(int x, int c); + void load_quantized_coefficients(int component_num); + void flush_output_buffer(); + void put_bits(uint bits, uint len); + void code_coefficients_pass_one(int component_num); + void code_coefficients_pass_two(int component_num); + void code_block(int component_num); + void process_mcu_row(); + bool terminate_pass_one(); + bool terminate_pass_two(); + bool process_end_of_image(); + void load_mcu(const void* src); + void clear(); + void init(); + }; + +} // namespace jpge + +#endif // JPEG_ENCODER + diff --git a/thirdparty/mbedtls/LICENSE b/thirdparty/mbedtls/LICENSE index e15ea821d2..d645695673 100644 --- a/thirdparty/mbedtls/LICENSE +++ b/thirdparty/mbedtls/LICENSE @@ -1,5 +1,202 @@ -Unless specifically indicated otherwise in a file, Mbed TLS files are provided -under the Apache License 2.0, or the GNU General Public License v2.0 or later -(SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later). -A copy of these licenses can be found in apache-2.0.txt and gpl-2.0.txt + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/thirdparty/mbedtls/apache-2.0.txt b/thirdparty/mbedtls/apache-2.0.txt deleted file mode 100644 index d645695673..0000000000 --- a/thirdparty/mbedtls/apache-2.0.txt +++ /dev/null @@ -1,202 +0,0 @@ [The 202 removed lines are the verbatim Apache License, Version 2.0 text, identical to the thirdparty/mbedtls/LICENSE content added above.] diff --git a/thirdparty/mbedtls/include/mbedtls/aes.h b/thirdparty/mbedtls/include/mbedtls/aes.h index e280dbb1c6..401ac39de8 100644 --- a/thirdparty/mbedtls/include/mbedtls/aes.h +++ b/thirdparty/mbedtls/include/mbedtls/aes.h @@ -564,7 +564,7 @@ int mbedtls_aes_crypt_ofb( mbedtls_aes_context *ctx, * for example, with 96-bit random nonces, you should not encrypt * more than 2**32 messages with the same key. * - * Note that for both stategies, sizes are measured in blocks and + * Note that for both strategies, sizes are measured in blocks and * that an AES block is 16 bytes. * * \warning Upon return, \p stream_block contains sensitive data. Its diff --git a/thirdparty/mbedtls/include/mbedtls/aria.h b/thirdparty/mbedtls/include/mbedtls/aria.h index 226e2dbf3c..d294c47f2d 100644 --- a/thirdparty/mbedtls/include/mbedtls/aria.h +++ b/thirdparty/mbedtls/include/mbedtls/aria.h @@ -44,7 +44,7 @@ #define MBEDTLS_ARIA_DECRYPT 0 /**< ARIA decryption. */ #define MBEDTLS_ARIA_BLOCKSIZE 16 /**< ARIA block size in bytes. */ -#define MBEDTLS_ARIA_MAX_ROUNDS 16 /**< Maxiumum number of rounds in ARIA. */ +#define MBEDTLS_ARIA_MAX_ROUNDS 16 /**< Maximum number of rounds in ARIA. */ #define MBEDTLS_ARIA_MAX_KEYSIZE 32 /**< Maximum size of an ARIA key in bytes. */ #if !defined(MBEDTLS_DEPRECATED_REMOVED) @@ -321,7 +321,7 @@ int mbedtls_aria_crypt_cfb128( mbedtls_aria_context *ctx, * for example, with 96-bit random nonces, you should not encrypt * more than 2**32 messages with the same key. * - * Note that for both stategies, sizes are measured in blocks and + * Note that for both strategies, sizes are measured in blocks and * that an ARIA block is 16 bytes. * * \warning Upon return, \p stream_block contains sensitive data. Its diff --git a/thirdparty/mbedtls/include/mbedtls/asn1.h b/thirdparty/mbedtls/include/mbedtls/asn1.h index 10f7905b7e..5117fc7a41 100644 --- a/thirdparty/mbedtls/include/mbedtls/asn1.h +++ b/thirdparty/mbedtls/include/mbedtls/asn1.h @@ -61,7 +61,7 @@ /** Buffer too small when writing ASN.1 data structure. */ #define MBEDTLS_ERR_ASN1_BUF_TOO_SMALL -0x006C -/* \} name */ +/** \} name ASN1 Error codes */ /** * \name DER constants @@ -121,8 +121,7 @@ #define MBEDTLS_ASN1_TAG_PC_MASK 0x20 #define MBEDTLS_ASN1_TAG_VALUE_MASK 0x1F -/* \} name */ -/* \} addtogroup asn1_module */ +/** \} name DER constants */ /** Returns the size of the binary string, without the trailing \\0 */ #define MBEDTLS_OID_SIZE(x) (sizeof(x) - 1) @@ -210,7 +209,7 @@ mbedtls_asn1_named_data; * \return 0 if successful. * \return #MBEDTLS_ERR_ASN1_OUT_OF_DATA if the ASN.1 element * would end beyond \p end. - * \return #MBEDTLS_ERR_ASN1_INVALID_LENGTH if the length is unparseable. + * \return #MBEDTLS_ERR_ASN1_INVALID_LENGTH if the length is unparsable. */ int mbedtls_asn1_get_len( unsigned char **p, const unsigned char *end, @@ -235,7 +234,7 @@ int mbedtls_asn1_get_len( unsigned char **p, * with the requested tag. * \return #MBEDTLS_ERR_ASN1_OUT_OF_DATA if the ASN.1 element * would end beyond \p end. - * \return #MBEDTLS_ERR_ASN1_INVALID_LENGTH if the length is unparseable. 
+ * \return #MBEDTLS_ERR_ASN1_INVALID_LENGTH if the length is unparsable. */ int mbedtls_asn1_get_tag( unsigned char **p, const unsigned char *end, @@ -607,6 +606,9 @@ void mbedtls_asn1_free_named_data( mbedtls_asn1_named_data *entry ); */ void mbedtls_asn1_free_named_data_list( mbedtls_asn1_named_data **head ); +/** \} name Functions to parse ASN.1 data structures */ +/** \} addtogroup asn1_module */ + #ifdef __cplusplus } #endif diff --git a/thirdparty/mbedtls/include/mbedtls/bignum.h b/thirdparty/mbedtls/include/mbedtls/bignum.h index 9d2cff3275..dd594c512d 100644 --- a/thirdparty/mbedtls/include/mbedtls/bignum.h +++ b/thirdparty/mbedtls/include/mbedtls/bignum.h @@ -989,7 +989,7 @@ MBEDTLS_DEPRECATED int mbedtls_mpi_is_prime( const mbedtls_mpi *X, * generate yourself and that are supposed to be prime, then * \p rounds should be at least the half of the security * strength of the cryptographic algorithm. On the other hand, - * if \p X is chosen uniformly or non-adversially (as is the + * if \p X is chosen uniformly or non-adversarially (as is the * case when mbedtls_mpi_gen_prime calls this function), then * \p rounds can be much lower. * diff --git a/thirdparty/mbedtls/include/mbedtls/blowfish.h b/thirdparty/mbedtls/include/mbedtls/blowfish.h index 77dca70d31..d5f809921f 100644 --- a/thirdparty/mbedtls/include/mbedtls/blowfish.h +++ b/thirdparty/mbedtls/include/mbedtls/blowfish.h @@ -185,7 +185,7 @@ int mbedtls_blowfish_crypt_cbc( mbedtls_blowfish_context *ctx, * #MBEDTLS_BLOWFISH_ENCRYPT for encryption, or * #MBEDTLS_BLOWFISH_DECRYPT for decryption. * \param length The length of the input data in Bytes. - * \param iv_off The offset in the initialiation vector. + * \param iv_off The offset in the initialization vector. * The value pointed to must be smaller than \c 8 Bytes. * It is updated by this function to support the aforementioned * streaming usage. @@ -246,7 +246,7 @@ int mbedtls_blowfish_crypt_cfb64( mbedtls_blowfish_context *ctx, * The recommended way to ensure uniqueness is to use a message * counter. * - * Note that for both stategies, sizes are measured in blocks and + * Note that for both strategies, sizes are measured in blocks and * that a Blowfish block is 8 bytes. * * \warning Upon return, \p stream_block contains sensitive data. Its diff --git a/thirdparty/mbedtls/include/mbedtls/camellia.h b/thirdparty/mbedtls/include/mbedtls/camellia.h index 925a623e47..d39d932fa2 100644 --- a/thirdparty/mbedtls/include/mbedtls/camellia.h +++ b/thirdparty/mbedtls/include/mbedtls/camellia.h @@ -273,7 +273,7 @@ int mbedtls_camellia_crypt_cfb128( mbedtls_camellia_context *ctx, * encrypted: for example, with 96-bit random nonces, you should * not encrypt more than 2**32 messages with the same key. * - * Note that for both stategies, sizes are measured in blocks and + * Note that for both strategies, sizes are measured in blocks and * that a CAMELLIA block is \c 16 Bytes. * * \warning Upon return, \p stream_block contains sensitive data. Its diff --git a/thirdparty/mbedtls/include/mbedtls/chachapoly.h b/thirdparty/mbedtls/include/mbedtls/chachapoly.h index c4ec7b5f2a..ed568bc98b 100644 --- a/thirdparty/mbedtls/include/mbedtls/chachapoly.h +++ b/thirdparty/mbedtls/include/mbedtls/chachapoly.h @@ -161,7 +161,7 @@ int mbedtls_chachapoly_setkey( mbedtls_chachapoly_context *ctx, * \param ctx The ChaCha20-Poly1305 context. This must be initialized * and bound to a key. * \param nonce The nonce/IV to use for the message. - * This must be a redable buffer of length \c 12 Bytes. 
+ * This must be a readable buffer of length \c 12 Bytes. * \param mode The operation to perform: #MBEDTLS_CHACHAPOLY_ENCRYPT or * #MBEDTLS_CHACHAPOLY_DECRYPT (discouraged, see warning). * diff --git a/thirdparty/mbedtls/include/mbedtls/check_config.h b/thirdparty/mbedtls/include/mbedtls/check_config.h index 396fe7dfc2..be5c548e56 100644 --- a/thirdparty/mbedtls/include/mbedtls/check_config.h +++ b/thirdparty/mbedtls/include/mbedtls/check_config.h @@ -173,7 +173,11 @@ #endif #if defined(MBEDTLS_PK_PARSE_C) && !defined(MBEDTLS_ASN1_PARSE_C) -#error "MBEDTLS_PK_PARSE_C defined, but not all prerequesites" +#error "MBEDTLS_PK_PARSE_C defined, but not all prerequisites" +#endif + +#if defined(MBEDTLS_PKCS5_C) && !defined(MBEDTLS_MD_C) +#error "MBEDTLS_PKCS5_C defined, but not all prerequisites" #endif #if defined(MBEDTLS_ENTROPY_C) && (!defined(MBEDTLS_SHA512_C) && \ @@ -214,11 +218,32 @@ #error "MBEDTLS_TEST_NULL_ENTROPY defined, but entropy sources too" #endif +#if defined(MBEDTLS_CCM_C) && ( \ + !defined(MBEDTLS_AES_C) && !defined(MBEDTLS_CAMELLIA_C) && !defined(MBEDTLS_ARIA_C) ) +#error "MBEDTLS_CCM_C defined, but not all prerequisites" +#endif + +#if defined(MBEDTLS_CCM_C) && !defined(MBEDTLS_CIPHER_C) +#error "MBEDTLS_CCM_C defined, but not all prerequisites" +#endif + #if defined(MBEDTLS_GCM_C) && ( \ - !defined(MBEDTLS_AES_C) && !defined(MBEDTLS_CAMELLIA_C) && !defined(MBEDTLS_ARIA_C) ) + !defined(MBEDTLS_AES_C) && !defined(MBEDTLS_CAMELLIA_C) && !defined(MBEDTLS_ARIA_C) ) +#error "MBEDTLS_GCM_C defined, but not all prerequisites" +#endif + +#if defined(MBEDTLS_GCM_C) && !defined(MBEDTLS_CIPHER_C) #error "MBEDTLS_GCM_C defined, but not all prerequisites" #endif +#if defined(MBEDTLS_CHACHAPOLY_C) && !defined(MBEDTLS_CHACHA20_C) +#error "MBEDTLS_CHACHAPOLY_C defined, but not all prerequisites" +#endif + +#if defined(MBEDTLS_CHACHAPOLY_C) && !defined(MBEDTLS_POLY1305_C) +#error "MBEDTLS_CHACHAPOLY_C defined, but not all prerequisites" +#endif + #if defined(MBEDTLS_ECP_RANDOMIZE_JAC_ALT) && !defined(MBEDTLS_ECP_INTERNAL_ALT) #error "MBEDTLS_ECP_RANDOMIZE_JAC_ALT defined, but not all prerequisites" #endif @@ -338,11 +363,11 @@ #endif #if defined(MBEDTLS_MEMORY_BACKTRACE) && !defined(MBEDTLS_MEMORY_BUFFER_ALLOC_C) -#error "MBEDTLS_MEMORY_BACKTRACE defined, but not all prerequesites" +#error "MBEDTLS_MEMORY_BACKTRACE defined, but not all prerequisites" #endif #if defined(MBEDTLS_MEMORY_DEBUG) && !defined(MBEDTLS_MEMORY_BUFFER_ALLOC_C) -#error "MBEDTLS_MEMORY_DEBUG defined, but not all prerequesites" +#error "MBEDTLS_MEMORY_DEBUG defined, but not all prerequisites" #endif #if defined(MBEDTLS_PADLOCK_C) && !defined(MBEDTLS_HAVE_ASM) @@ -619,6 +644,18 @@ #error "MBEDTLS_PSA_CRYPTO_KEY_ID_ENCODES_OWNER defined, but it cannot coexist with MBEDTLS_USE_PSA_CRYPTO." #endif +#if defined(MBEDTLS_PK_C) && defined(MBEDTLS_USE_PSA_CRYPTO) && \ + !defined(MBEDTLS_PK_WRITE_C) && defined(MBEDTLS_ECDSA_C) +#error "MBEDTLS_PK_C in configuration with MBEDTLS_USE_PSA_CRYPTO and \ + MBEDTLS_ECDSA_C requires MBEDTLS_PK_WRITE_C to be defined." 
+#endif + +#if defined(MBEDTLS_RSA_C) && defined(MBEDTLS_PKCS1_V15) && \ + !defined(MBEDTLS_PK_WRITE_C) && defined(MBEDTLS_PSA_CRYPTO_C) +#error "MBEDTLS_PSA_CRYPTO_C, MBEDTLS_RSA_C and MBEDTLS_PKCS1_V15 defined, \ + but not all prerequisites" +#endif + #if defined(MBEDTLS_RSA_C) && ( !defined(MBEDTLS_BIGNUM_C) || \ !defined(MBEDTLS_OID_C) ) #error "MBEDTLS_RSA_C defined, but not all prerequisites" @@ -761,14 +798,14 @@ !defined(MBEDTLS_SSL_PROTO_TLS1) && \ !defined(MBEDTLS_SSL_PROTO_TLS1_1) && \ !defined(MBEDTLS_SSL_PROTO_TLS1_2) -#error "MBEDTLS_SSL_ENCRYPT_THEN_MAC defined, but not all prerequsites" +#error "MBEDTLS_SSL_ENCRYPT_THEN_MAC defined, but not all prerequisites" #endif #if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET) && \ !defined(MBEDTLS_SSL_PROTO_TLS1) && \ !defined(MBEDTLS_SSL_PROTO_TLS1_1) && \ !defined(MBEDTLS_SSL_PROTO_TLS1_2) -#error "MBEDTLS_SSL_EXTENDED_MASTER_SECRET defined, but not all prerequsites" +#error "MBEDTLS_SSL_EXTENDED_MASTER_SECRET defined, but not all prerequisites" #endif #if defined(MBEDTLS_SSL_TICKET_C) && !defined(MBEDTLS_CIPHER_C) diff --git a/thirdparty/mbedtls/include/mbedtls/config.h b/thirdparty/mbedtls/include/mbedtls/config.h index 87b4e9192e..1cd6eb6634 100644 --- a/thirdparty/mbedtls/include/mbedtls/config.h +++ b/thirdparty/mbedtls/include/mbedtls/config.h @@ -128,7 +128,12 @@ * MBEDTLS_PLATFORM_TIME_MACRO, MBEDTLS_PLATFORM_TIME_TYPE_MACRO and * MBEDTLS_PLATFORM_STD_TIME. * - * Comment if your system does not support time functions + * Comment if your system does not support time functions. + * + * \note If MBEDTLS_TIMING_C is set - to enable the semi-portable timing + * interface - timing.c will include time.h on suitable platforms + * regardless of the setting of MBEDTLS_HAVE_TIME, unless + * MBEDTLS_TIMING_ALT is used. See timing.c for more information. */ #define MBEDTLS_HAVE_TIME @@ -321,7 +326,7 @@ */ //#define MBEDTLS_CHECK_PARAMS_ASSERT -/* \} name SECTION: System support */ +/** \} name SECTION: System support */ /** * \name SECTION: mbed TLS feature support @@ -395,7 +400,7 @@ //#define MBEDTLS_XTEA_ALT /* - * When replacing the elliptic curve module, pleace consider, that it is + * When replacing the elliptic curve module, please consider, that it is * implemented with two .c files: * - ecp.c * - ecp_curves.c @@ -1493,7 +1498,7 @@ * Enable an implementation of SHA-256 that has lower ROM footprint but also * lower performance. * - * The default implementation is meant to be a reasonnable compromise between + * The default implementation is meant to be a reasonable compromise between * performance and size. This version optimizes more aggressively for size at * the expense of performance. Eg on Cortex-M4 it reduces the size of * mbedtls_sha256_process() from ~2KB to ~0.5KB for a performance hit of about @@ -1658,7 +1663,7 @@ * Enable support for RFC 7627: Session Hash and Extended Master Secret * Extension. * - * This was introduced as "the proper fix" to the Triple Handshake familiy of + * This was introduced as "the proper fix" to the Triple Handshake family of * attacks, but it is recommended to always use it (even if you disable * renegotiation), since it actually fixes a more fundamental issue in the * original SSL/TLS design, and has implications beyond Triple Handshake. @@ -1704,7 +1709,7 @@ * \note This option has no influence on the protection against the * triple handshake attack. 
Even if it is disabled, Mbed TLS will * still ensure that certificates do not change during renegotiation, - * for exaple by keeping a hash of the peer's certificate. + * for example by keeping a hash of the peer's certificate. * * Comment this macro to disable storing the peer's certificate * after the handshake. @@ -1909,7 +1914,7 @@ * unless you know for sure amplification cannot be a problem in the * environment in which your server operates. * - * \warning Disabling this can ba a security risk! (see above) + * \warning Disabling this can be a security risk! (see above) * * Requires: MBEDTLS_SSL_PROTO_DTLS * @@ -2162,8 +2167,19 @@ * This setting allows support for cryptographic mechanisms through the PSA * API to be configured separately from support through the mbedtls API. * - * Uncomment this to enable use of PSA Crypto configuration settings which - * can be found in include/psa/crypto_config.h. + * When this option is disabled, the PSA API exposes the cryptographic + * mechanisms that can be implemented on top of the `mbedtls_xxx` API + * configured with `MBEDTLS_XXX` symbols. + * + * When this option is enabled, the PSA API exposes the cryptographic + * mechanisms requested by the `PSA_WANT_XXX` symbols defined in + * include/psa/crypto_config.h. The corresponding `MBEDTLS_XXX` settings are + * automatically enabled if required (i.e. if no PSA driver provides the + * mechanism). You may still freely enable additional `MBEDTLS_XXX` symbols + * in config.h. + * + * If the symbol #MBEDTLS_PSA_CRYPTO_CONFIG_FILE is defined, it specifies + * an alternative header to include instead of include/psa/crypto_config.h. * * If you enable this option and write your own configuration file, you must * include mbedtls/config_psa.h in your configuration file. The default @@ -2289,7 +2305,7 @@ * Uncomment to enable use of ZLIB */ //#define MBEDTLS_ZLIB_SUPPORT -/* \} name SECTION: mbed TLS feature support */ +/** \} name SECTION: mbed TLS feature support */ /** * \name SECTION: mbed TLS modules @@ -2902,7 +2918,7 @@ * * Requires: MBEDTLS_MD_C * - * Uncomment to enable the HMAC_DRBG random number geerator. + * Uncomment to enable the HMAC_DRBG random number generator. */ #define MBEDTLS_HMAC_DRBG_C @@ -3096,7 +3112,7 @@ /** * \def MBEDTLS_PK_C * - * Enable the generic public (asymetric) key layer. + * Enable the generic public (asymmetric) key layer. * * Module: library/pk.c * Caller: library/ssl_tls.c @@ -3112,7 +3128,7 @@ /** * \def MBEDTLS_PK_PARSE_C * - * Enable the generic public (asymetric) key parser. + * Enable the generic public (asymmetric) key parser. * * Module: library/pkparse.c * Caller: library/x509_crt.c @@ -3127,7 +3143,7 @@ /** * \def MBEDTLS_PK_WRITE_C * - * Enable the generic public (asymetric) key writer. + * Enable the generic public (asymmetric) key writer. * * Module: library/pkwrite.c * Caller: library/x509write.c @@ -3466,6 +3482,10 @@ * your own implementation of the whole module by setting * \c MBEDTLS_TIMING_ALT in the current file. * + * \note The timing module will include time.h on suitable platforms + * regardless of the setting of MBEDTLS_HAVE_TIME, unless + * MBEDTLS_TIMING_ALT is used. See timing.c for more information. 
+ * * \note See also our Knowledge Base article about porting to a new * environment: * https://tls.mbed.org/kb/how-to/how-do-i-port-mbed-tls-to-a-new-environment-OS @@ -3598,7 +3618,88 @@ */ #define MBEDTLS_XTEA_C -/* \} name SECTION: mbed TLS modules */ +/** \} name SECTION: mbed TLS modules */ + +/** + * \name SECTION: General configuration options + * + * This section contains Mbed TLS build settings that are not associated + * with a particular module. + * + * \{ + */ + +/** + * \def MBEDTLS_CONFIG_FILE + * + * If defined, this is a header which will be included instead of + * `"mbedtls/config.h"`. + * This header file specifies the compile-time configuration of Mbed TLS. + * Unlike other configuration options, this one must be defined on the + * compiler command line: a definition in `config.h` would have no effect. + * + * This macro is expanded after an <tt>\#include</tt> directive. This is a popular but + * non-standard feature of the C language, so this feature is only available + * with compilers that perform macro expansion on an <tt>\#include</tt> line. + * + * The value of this symbol is typically a path in double quotes, either + * absolute or relative to a directory on the include search path. + */ +//#define MBEDTLS_CONFIG_FILE "mbedtls/config.h" + +/** + * \def MBEDTLS_USER_CONFIG_FILE + * + * If defined, this is a header which will be included after + * `"mbedtls/config.h"` or #MBEDTLS_CONFIG_FILE. + * This allows you to modify the default configuration, including the ability + * to undefine options that are enabled by default. + * + * This macro is expanded after an <tt>\#include</tt> directive. This is a popular but + * non-standard feature of the C language, so this feature is only available + * with compilers that perform macro expansion on an <tt>\#include</tt> line. + * + * The value of this symbol is typically a path in double quotes, either + * absolute or relative to a directory on the include search path. + */ +//#define MBEDTLS_USER_CONFIG_FILE "/dev/null" + +/** + * \def MBEDTLS_PSA_CRYPTO_CONFIG_FILE + * + * If defined, this is a header which will be included instead of + * `"psa/crypto_config.h"`. + * This header file specifies which cryptographic mechanisms are available + * through the PSA API when #MBEDTLS_PSA_CRYPTO_CONFIG is enabled, and + * is not used when #MBEDTLS_PSA_CRYPTO_CONFIG is disabled. + * + * This macro is expanded after an <tt>\#include</tt> directive. This is a popular but + * non-standard feature of the C language, so this feature is only available + * with compilers that perform macro expansion on an <tt>\#include</tt> line. + * + * The value of this symbol is typically a path in double quotes, either + * absolute or relative to a directory on the include search path. + */ +//#define MBEDTLS_PSA_CRYPTO_CONFIG_FILE "psa/crypto_config.h" + +/** + * \def MBEDTLS_PSA_CRYPTO_USER_CONFIG_FILE + * + * If defined, this is a header which will be included after + * `"psa/crypto_config.h"` or #MBEDTLS_PSA_CRYPTO_CONFIG_FILE. + * This allows you to modify the default configuration, including the ability + * to undefine options that are enabled by default. + * + * This macro is expanded after an <tt>\#include</tt> directive. This is a popular but + * non-standard feature of the C language, so this feature is only available + * with compilers that perform macro expansion on an <tt>\#include</tt> line. + * + * The value of this symbol is typically a path in double quotes, either + * absolute or relative to a directory on the include search path. 
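 * For instance (an illustrative build invocation, not part of the upstream
 * header), these file overrides are typically injected on the compiler
 * command line so that every translation unit sees the same configuration:
 *
 *     cc -c library/aes.c \
 *        -DMBEDTLS_USER_CONFIG_FILE='"myapp_mbedtls_config.h"'
 *
 * The inner double quotes are part of the macro value, matching the
 * "path in double quotes" form described above.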
+ */ +//#define MBEDTLS_PSA_CRYPTO_USER_CONFIG_FILE "/dev/null" + +/** \} name SECTION: General configuration options */ /** * \name SECTION: Module configuration options @@ -3609,11 +3710,15 @@ * * Our advice is to enable options and change their values here * only if you have a good reason and know the consequences. - * - * Please check the respective header file for documentation on these - * parameters (to prevent duplicate documentation). * \{ */ +/* The Doxygen documentation here is used when a user comments out a + * setting and runs doxygen themselves. On the other hand, when we typeset + * the full documentation including disabled settings, the documentation + * in specific modules' header files is used if present. When editing this + * file, make sure that each option is documented in exactly one place, + * plus optionally a same-line Doxygen comment here if there is a Doxygen + * comment in the specific module. */ /* MPI / BIGNUM options */ //#define MBEDTLS_MPI_WINDOW_SIZE 6 /**< Maximum window size used. */ @@ -4002,7 +4107,7 @@ */ //#define MBEDTLS_ECDH_VARIANT_EVEREST_ENABLED -/* \} name SECTION: Customisation configuration options */ +/** \} name SECTION: Module configuration options */ /* Target and application specific configurations * diff --git a/thirdparty/mbedtls/include/mbedtls/ctr_drbg.h b/thirdparty/mbedtls/include/mbedtls/ctr_drbg.h index dc4adc896d..e68237a439 100644 --- a/thirdparty/mbedtls/include/mbedtls/ctr_drbg.h +++ b/thirdparty/mbedtls/include/mbedtls/ctr_drbg.h @@ -138,7 +138,7 @@ /**< The maximum size of seed or reseed buffer in bytes. */ #endif -/* \} name SECTION: Module settings */ +/** \} name SECTION: Module settings */ #define MBEDTLS_CTR_DRBG_PR_OFF 0 /**< Prediction resistance is disabled. */ diff --git a/thirdparty/mbedtls/include/mbedtls/debug.h b/thirdparty/mbedtls/include/mbedtls/debug.h index 3c08244f3d..4fc4662d9a 100644 --- a/thirdparty/mbedtls/include/mbedtls/debug.h +++ b/thirdparty/mbedtls/include/mbedtls/debug.h @@ -139,7 +139,7 @@ extern "C" { * discarded. * (Default value: 0 = No debug ) * - * \param threshold theshold level of messages to filter on. Messages at a + * \param threshold threshold level of messages to filter on. Messages at a * higher level will be discarded. * - Debug levels * - 0 No debug diff --git a/thirdparty/mbedtls/include/mbedtls/ecjpake.h b/thirdparty/mbedtls/include/mbedtls/ecjpake.h index 891705d8c4..3564ff8dd3 100644 --- a/thirdparty/mbedtls/include/mbedtls/ecjpake.h +++ b/thirdparty/mbedtls/include/mbedtls/ecjpake.h @@ -68,7 +68,7 @@ typedef enum { * (KeyExchange) as defined by the Thread spec. * * In order to benefit from this symmetry, we choose a different naming - * convetion from the Thread v1.0 spec. Correspondance is indicated in the + * convention from the Thread v1.0 spec. Correspondence is indicated in the * description as a pair C: client name, S: server name */ typedef struct mbedtls_ecjpake_context diff --git a/thirdparty/mbedtls/include/mbedtls/ecp.h b/thirdparty/mbedtls/include/mbedtls/ecp.h index 0924341e00..64a0bccda0 100644 --- a/thirdparty/mbedtls/include/mbedtls/ecp.h +++ b/thirdparty/mbedtls/include/mbedtls/ecp.h @@ -315,7 +315,7 @@ mbedtls_ecp_group; #if !defined(MBEDTLS_ECP_WINDOW_SIZE) /* * Maximum "window" size used for point multiplication. - * Default: a point where higher memory usage yields disminishing performance + * Default: a point where higher memory usage yields diminishing performance * returns. * Minimum value: 2. Maximum value: 7. 
* @@ -351,7 +351,7 @@ mbedtls_ecp_group; #define MBEDTLS_ECP_FIXED_POINT_OPTIM 1 /**< Enable fixed-point speed-up. */ #endif /* MBEDTLS_ECP_FIXED_POINT_OPTIM */ -/* \} name SECTION: Module settings */ +/** \} name SECTION: Module settings */ #else /* MBEDTLS_ECP_ALT */ #include "ecp_alt.h" diff --git a/thirdparty/mbedtls/include/mbedtls/entropy.h b/thirdparty/mbedtls/include/mbedtls/entropy.h index deb3c50300..40259ebc8a 100644 --- a/thirdparty/mbedtls/include/mbedtls/entropy.h +++ b/thirdparty/mbedtls/include/mbedtls/entropy.h @@ -75,7 +75,7 @@ #define MBEDTLS_ENTROPY_MAX_GATHER 128 /**< Maximum amount requested from entropy sources */ #endif -/* \} name SECTION: Module settings */ +/** \} name SECTION: Module settings */ #if defined(MBEDTLS_ENTROPY_SHA512_ACCUMULATOR) #define MBEDTLS_ENTROPY_BLOCK_SIZE 64 /**< Block size of entropy accumulator (SHA-512) */ diff --git a/thirdparty/mbedtls/include/mbedtls/hkdf.h b/thirdparty/mbedtls/include/mbedtls/hkdf.h index 223004b8ed..111d960e56 100644 --- a/thirdparty/mbedtls/include/mbedtls/hkdf.h +++ b/thirdparty/mbedtls/include/mbedtls/hkdf.h @@ -39,7 +39,7 @@ */ /** Bad input parameters to function. */ #define MBEDTLS_ERR_HKDF_BAD_INPUT_DATA -0x5F80 -/* \} name */ +/** \} name */ #ifdef __cplusplus extern "C" { diff --git a/thirdparty/mbedtls/include/mbedtls/hmac_drbg.h b/thirdparty/mbedtls/include/mbedtls/hmac_drbg.h index 79132d4d91..6d372b9788 100644 --- a/thirdparty/mbedtls/include/mbedtls/hmac_drbg.h +++ b/thirdparty/mbedtls/include/mbedtls/hmac_drbg.h @@ -74,7 +74,7 @@ #define MBEDTLS_HMAC_DRBG_MAX_SEED_INPUT 384 /**< Maximum size of (re)seed buffer */ #endif -/* \} name SECTION: Module settings */ +/** \} name SECTION: Module settings */ #define MBEDTLS_HMAC_DRBG_PR_OFF 0 /**< No prediction resistance */ #define MBEDTLS_HMAC_DRBG_PR_ON 1 /**< Prediction resistance enabled */ @@ -207,7 +207,7 @@ int mbedtls_hmac_drbg_seed( mbedtls_hmac_drbg_context *ctx, size_t len ); /** - * \brief Initilisation of simpified HMAC_DRBG (never reseeds). + * \brief Initialisation of simplified HMAC_DRBG (never reseeds). * * This function is meant for use in algorithms that need a pseudorandom * input such as deterministic ECDSA. 
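The hmac_drbg.h hunk above corrects the documentation of the simplified HMAC_DRBG initialisation, mbedtls_hmac_drbg_seed_buf(), which seeds once from caller-supplied data and never reseeds. A minimal sketch of that usage pattern under the mbed TLS 2.x API, with error handling trimmed; the caller-supplied seed here is a placeholder for the key-plus-message-hash input that a deterministic-ECDSA style caller would feed it:

#include <mbedtls/hmac_drbg.h>
#include <mbedtls/md.h>

/* Derive a pseudorandom stream from fixed input: seed once with
 * mbedtls_hmac_drbg_seed_buf() and never reseed, as the fixed
 * \brief above describes. */
int drbg_from_fixed_input( unsigned char *out, size_t out_len,
                           const unsigned char *seed, size_t seed_len )
{
    mbedtls_hmac_drbg_context drbg;
    int ret;

    mbedtls_hmac_drbg_init( &drbg );
    ret = mbedtls_hmac_drbg_seed_buf( &drbg,
            mbedtls_md_info_from_type( MBEDTLS_MD_SHA256 ),
            seed, seed_len );
    if( ret == 0 )
        ret = mbedtls_hmac_drbg_random( &drbg, out, out_len );

    mbedtls_hmac_drbg_free( &drbg );
    return( ret );
}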
diff --git a/thirdparty/mbedtls/include/mbedtls/memory_buffer_alloc.h b/thirdparty/mbedtls/include/mbedtls/memory_buffer_alloc.h index 233977252a..3954b36ab5 100644 --- a/thirdparty/mbedtls/include/mbedtls/memory_buffer_alloc.h +++ b/thirdparty/mbedtls/include/mbedtls/memory_buffer_alloc.h @@ -42,7 +42,7 @@ #define MBEDTLS_MEMORY_ALIGN_MULTIPLE 4 /**< Align on multiples of this value */ #endif -/* \} name SECTION: Module settings */ +/** \} name SECTION: Module settings */ #define MBEDTLS_MEMORY_VERIFY_NONE 0 #define MBEDTLS_MEMORY_VERIFY_ALLOC (1 << 0) diff --git a/thirdparty/mbedtls/include/mbedtls/oid.h b/thirdparty/mbedtls/include/mbedtls/oid.h index 1c39186a49..0186217804 100644 --- a/thirdparty/mbedtls/include/mbedtls/oid.h +++ b/thirdparty/mbedtls/include/mbedtls/oid.h @@ -143,7 +143,7 @@ #define MBEDTLS_OID_AT_GIVEN_NAME MBEDTLS_OID_AT "\x2A" /**< id-at-givenName AttributeType:= {id-at 42} */ #define MBEDTLS_OID_AT_INITIALS MBEDTLS_OID_AT "\x2B" /**< id-at-initials AttributeType:= {id-at 43} */ #define MBEDTLS_OID_AT_GENERATION_QUALIFIER MBEDTLS_OID_AT "\x2C" /**< id-at-generationQualifier AttributeType:= {id-at 44} */ -#define MBEDTLS_OID_AT_UNIQUE_IDENTIFIER MBEDTLS_OID_AT "\x2D" /**< id-at-uniqueIdentifier AttributType:= {id-at 45} */ +#define MBEDTLS_OID_AT_UNIQUE_IDENTIFIER MBEDTLS_OID_AT "\x2D" /**< id-at-uniqueIdentifier AttributeType:= {id-at 45} */ #define MBEDTLS_OID_AT_DN_QUALIFIER MBEDTLS_OID_AT "\x2E" /**< id-at-dnQualifier AttributeType:= {id-at 46} */ #define MBEDTLS_OID_AT_PSEUDONYM MBEDTLS_OID_AT "\x41" /**< id-at-pseudonym AttributeType:= {id-at 65} */ diff --git a/thirdparty/mbedtls/include/mbedtls/pem.h b/thirdparty/mbedtls/include/mbedtls/pem.h index dfb4ff218e..daa71c886b 100644 --- a/thirdparty/mbedtls/include/mbedtls/pem.h +++ b/thirdparty/mbedtls/include/mbedtls/pem.h @@ -54,7 +54,7 @@ #define MBEDTLS_ERR_PEM_FEATURE_UNAVAILABLE -0x1400 /** Bad input parameters to function. */ #define MBEDTLS_ERR_PEM_BAD_INPUT_DATA -0x1480 -/* \} name */ +/** \} name PEM Error codes */ #ifdef __cplusplus extern "C" { diff --git a/thirdparty/mbedtls/include/mbedtls/pk.h b/thirdparty/mbedtls/include/mbedtls/pk.h index 8f2abf2a60..c9a13f484e 100644 --- a/thirdparty/mbedtls/include/mbedtls/pk.h +++ b/thirdparty/mbedtls/include/mbedtls/pk.h @@ -217,32 +217,6 @@ typedef struct typedef void mbedtls_pk_restart_ctx; #endif /* MBEDTLS_ECDSA_C && MBEDTLS_ECP_RESTARTABLE */ -#if defined(MBEDTLS_RSA_C) -/** - * Quick access to an RSA context inside a PK context. - * - * \warning You must make sure the PK context actually holds an RSA context - * before using this function! - */ -static inline mbedtls_rsa_context *mbedtls_pk_rsa( const mbedtls_pk_context pk ) -{ - return( (mbedtls_rsa_context *) (pk).pk_ctx ); -} -#endif /* MBEDTLS_RSA_C */ - -#if defined(MBEDTLS_ECP_C) -/** - * Quick access to an EC context inside a PK context. - * - * \warning You must make sure the PK context actually holds an EC context - * before using this function! - */ -static inline mbedtls_ecp_keypair *mbedtls_pk_ec( const mbedtls_pk_context pk ) -{ - return( (mbedtls_ecp_keypair *) (pk).pk_ctx ); -} -#endif /* MBEDTLS_ECP_C */ - #if defined(MBEDTLS_PK_RSA_ALT_SUPPORT) /** * \brief Types for RSA-alt abstraction @@ -656,6 +630,55 @@ const char * mbedtls_pk_get_name( const mbedtls_pk_context *ctx ); */ mbedtls_pk_type_t mbedtls_pk_get_type( const mbedtls_pk_context *ctx ); +#if defined(MBEDTLS_RSA_C) +/** + * Quick access to an RSA context inside a PK context. 
+ * + * \warning This function can only be used when the type of the context, as + * returned by mbedtls_pk_get_type(), is #MBEDTLS_PK_RSA. + * Ensuring that is the caller's responsibility. + * Alternatively, you can check whether this function returns NULL. + * + * \return The internal RSA context held by the PK context, or NULL. + */ +static inline mbedtls_rsa_context *mbedtls_pk_rsa( const mbedtls_pk_context pk ) +{ + switch( mbedtls_pk_get_type( &pk ) ) + { + case MBEDTLS_PK_RSA: + return( (mbedtls_rsa_context *) (pk).pk_ctx ); + default: + return( NULL ); + } +} +#endif /* MBEDTLS_RSA_C */ + +#if defined(MBEDTLS_ECP_C) +/** + * Quick access to an EC context inside a PK context. + * + * \warning This function can only be used when the type of the context, as + * returned by mbedtls_pk_get_type(), is #MBEDTLS_PK_ECKEY, + * #MBEDTLS_PK_ECKEY_DH, or #MBEDTLS_PK_ECDSA. + * Ensuring that is the caller's responsibility. + * Alternatively, you can check whether this function returns NULL. + * + * \return The internal EC context held by the PK context, or NULL. + */ +static inline mbedtls_ecp_keypair *mbedtls_pk_ec( const mbedtls_pk_context pk ) +{ + switch( mbedtls_pk_get_type( &pk ) ) + { + case MBEDTLS_PK_ECKEY: + case MBEDTLS_PK_ECKEY_DH: + case MBEDTLS_PK_ECDSA: + return( (mbedtls_ecp_keypair *) (pk).pk_ctx ); + default: + return( NULL ); + } +} +#endif /* MBEDTLS_ECP_C */ + #if defined(MBEDTLS_PK_PARSE_C) /** \ingroup pk_module */ /** diff --git a/thirdparty/mbedtls/include/mbedtls/platform.h b/thirdparty/mbedtls/include/mbedtls/platform.h index bdef07498d..06dd192eab 100644 --- a/thirdparty/mbedtls/include/mbedtls/platform.h +++ b/thirdparty/mbedtls/include/mbedtls/platform.h @@ -70,7 +70,9 @@ extern "C" { #if !defined(MBEDTLS_PLATFORM_NO_STD_FUNCTIONS) #include <stdio.h> #include <stdlib.h> +#if defined(MBEDTLS_HAVE_TIME) #include <time.h> +#endif #if !defined(MBEDTLS_PLATFORM_STD_SNPRINTF) #if defined(MBEDTLS_PLATFORM_HAS_NON_CONFORMING_SNPRINTF) #define MBEDTLS_PLATFORM_STD_SNPRINTF mbedtls_platform_win32_snprintf /**< The default \c snprintf function to use. */ @@ -127,7 +129,7 @@ extern "C" { #endif /* MBEDTLS_PLATFORM_NO_STD_FUNCTIONS */ -/* \} name SECTION: Module settings */ +/** \} name SECTION: Module settings */ /* * The function pointers for calloc and free. diff --git a/thirdparty/mbedtls/include/mbedtls/platform_time.h b/thirdparty/mbedtls/include/mbedtls/platform_time.h index 7e7daab692..94055711b2 100644 --- a/thirdparty/mbedtls/include/mbedtls/platform_time.h +++ b/thirdparty/mbedtls/include/mbedtls/platform_time.h @@ -32,14 +32,6 @@ extern "C" { #endif -/** - * \name SECTION: Module settings - * - * The configuration options you can set for this module are in this section. - * Either change them in config.h or define them on the compiler command line. - * \{ - */ - /* * The time_t datatype */ diff --git a/thirdparty/mbedtls/include/mbedtls/platform_util.h b/thirdparty/mbedtls/include/mbedtls/platform_util.h index f982db8c01..cd112ab58e 100644 --- a/thirdparty/mbedtls/include/mbedtls/platform_util.h +++ b/thirdparty/mbedtls/include/mbedtls/platform_util.h @@ -67,7 +67,7 @@ extern "C" { * \brief User supplied callback function for parameter validation failure. * See #MBEDTLS_CHECK_PARAMS for context. * - * This function will be called unless an alternative treatement + * This function will be called unless an alternative treatment * is defined through the #MBEDTLS_PARAM_FAILED macro. 
* * This function can return, and the operation will be aborted, or @@ -198,7 +198,7 @@ MBEDTLS_DEPRECATED typedef int mbedtls_deprecated_numeric_constant_t; * * This macro has an empty expansion. It exists for documentation purposes: * a #MBEDTLS_CHECK_RETURN_OPTIONAL annotation indicates that the function - * has been analyzed for return-check usefuless, whereas the lack of + * has been analyzed for return-check usefulness, whereas the lack of * an annotation indicates that the function has not been analyzed and its * return-check usefulness is unknown. */ diff --git a/thirdparty/mbedtls/include/mbedtls/rsa.h b/thirdparty/mbedtls/include/mbedtls/rsa.h index 3c481e12a1..062df73aa0 100644 --- a/thirdparty/mbedtls/include/mbedtls/rsa.h +++ b/thirdparty/mbedtls/include/mbedtls/rsa.h @@ -88,7 +88,7 @@ /* * The above constants may be used even if the RSA module is compile out, - * eg for alternative (PKCS#11) RSA implemenations in the PK layers. + * eg for alternative (PKCS#11) RSA implementations in the PK layers. */ #ifdef __cplusplus @@ -552,7 +552,7 @@ int mbedtls_rsa_public( mbedtls_rsa_context *ctx, * * \note Blinding is used if and only if a PRNG is provided. * - * \note If blinding is used, both the base of exponentation + * \note If blinding is used, both the base of exponentiation * and the exponent are blinded, providing protection * against some side-channel attacks. * @@ -687,7 +687,7 @@ int mbedtls_rsa_rsaes_pkcs1_v15_encrypt( mbedtls_rsa_context *ctx, * mode being set to #MBEDTLS_RSA_PRIVATE and might instead * return #MBEDTLS_ERR_PLATFORM_FEATURE_UNSUPPORTED. * - * \param ctx The initnialized RSA context to use. + * \param ctx The initialized RSA context to use. * \param f_rng The RNG function to use. This is needed for padding * generation and must be provided. * \param p_rng The RNG context to be passed to \p f_rng. This may diff --git a/thirdparty/mbedtls/include/mbedtls/ssl.h b/thirdparty/mbedtls/include/mbedtls/ssl.h index 209dbf6053..5064ec5689 100644 --- a/thirdparty/mbedtls/include/mbedtls/ssl.h +++ b/thirdparty/mbedtls/include/mbedtls/ssl.h @@ -349,7 +349,7 @@ #define MBEDTLS_SSL_TLS1_3_PADDING_GRANULARITY 1 #endif -/* \} name SECTION: Module settings */ +/** \} name SECTION: Module settings */ /* * Length of the verify data for secure renegotiation @@ -1152,7 +1152,7 @@ struct mbedtls_ssl_config #endif #if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY) && defined(MBEDTLS_SSL_SRV_C) - /** Callback to create & write a cookie for ClientHello veirifcation */ + /** Callback to create & write a cookie for ClientHello verification */ int (*f_cookie_write)( void *, unsigned char **, unsigned char *, const unsigned char *, size_t ); /** Callback to verify validity of a ClientHello cookie */ @@ -1405,7 +1405,7 @@ struct mbedtls_ssl_context unsigned char *compress_buf; /*!< zlib data buffer */ #endif /* MBEDTLS_ZLIB_SUPPORT */ #if defined(MBEDTLS_SSL_CBC_RECORD_SPLITTING) - signed char split_done; /*!< current record already splitted? */ + signed char split_done; /*!< current record already split? */ #endif /* MBEDTLS_SSL_CBC_RECORD_SPLITTING */ /* @@ -1688,7 +1688,7 @@ void mbedtls_ssl_conf_dbg( mbedtls_ssl_config *conf, * * \note The two most common use cases are: * - non-blocking I/O, f_recv != NULL, f_recv_timeout == NULL - * - blocking I/O, f_recv == NULL, f_recv_timout != NULL + * - blocking I/O, f_recv == NULL, f_recv_timeout != NULL * * \note For DTLS, you need to provide either a non-NULL * f_recv_timeout callback, or a f_recv that doesn't block. 
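The pk.h hunk earlier in this diff moves mbedtls_pk_rsa() and mbedtls_pk_ec() below mbedtls_pk_get_type() and turns them from unchecked casts into type-checked accessors that return NULL when the PK context holds a different key type. A minimal sketch of the calling pattern the new documentation suggests; rsa_key_len_or_zero is an illustrative helper, not part of the library:

#include <mbedtls/pk.h>
#include <mbedtls/rsa.h>

/* Under the 2.28.1 semantics, checking mbedtls_pk_rsa() for NULL
 * replaces a manual mbedtls_pk_get_type() comparison. */
size_t rsa_key_len_or_zero( const mbedtls_pk_context *pk )
{
    mbedtls_rsa_context *rsa = mbedtls_pk_rsa( *pk );

    if( rsa == NULL )               /* not an RSA key */
        return( 0 );

    return( mbedtls_rsa_get_len( rsa ) );
}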
@@ -1846,7 +1846,7 @@ int mbedtls_ssl_get_peer_cid( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */ /** - * \brief Set the Maximum Tranport Unit (MTU). + * \brief Set the Maximum Transport Unit (MTU). * Special value: 0 means unset (no limit). * This represents the maximum size of a datagram payload * handled by the transport layer (usually UDP) as determined @@ -2387,7 +2387,7 @@ void mbedtls_ssl_conf_dtls_anti_replay( mbedtls_ssl_config *conf, char mode ); * ones going through the authentication-decryption phase. * * \note This is a security trade-off related to the fact that it's - * often relatively easy for an active attacker ot inject UDP + * often relatively easy for an active attacker to inject UDP * datagrams. On one hand, setting a low limit here makes it * easier for such an attacker to forcibly terminated a * connection. On the other hand, a high limit or no limit @@ -2498,7 +2498,7 @@ void mbedtls_ssl_conf_handshake_timeout( mbedtls_ssl_config *conf, uint32_t min, * successfully cached, return 1 otherwise. * * \param conf SSL configuration - * \param p_cache parmater (context) for both callbacks + * \param p_cache parameter (context) for both callbacks * \param f_get_cache session get callback * \param f_set_cache session set callback */ @@ -2529,7 +2529,7 @@ int mbedtls_ssl_set_session( mbedtls_ssl_context *ssl, const mbedtls_ssl_session /** * \brief Load serialized session data into a session structure. * On client, this can be used for loading saved sessions - * before resuming them with mbedstls_ssl_set_session(). + * before resuming them with mbedtls_ssl_set_session(). * On server, this can be used for alternative implementations * of session cache or session tickets. * @@ -2793,7 +2793,7 @@ void mbedtls_ssl_conf_ca_cb( mbedtls_ssl_config *conf, * * \note On client, only the first call has any effect. That is, * only one client certificate can be provisioned. The - * server's preferences in its CertficateRequest message will + * server's preferences in its CertificateRequest message will * be ignored and our only cert will be sent regardless of * whether it matches those preferences - the server can then * decide what it wants to do with it. @@ -3241,7 +3241,7 @@ int mbedtls_ssl_set_hs_ecjpake_password( mbedtls_ssl_context *ssl, * \param protos Pointer to a NULL-terminated list of supported protocols, * in decreasing preference order. The pointer to the list is * recorded by the library for later reference as required, so - * the lifetime of the table must be atleast as long as the + * the lifetime of the table must be at least as long as the * lifetime of the SSL configuration structure. * * \return 0 on success, or MBEDTLS_ERR_SSL_BAD_INPUT_DATA. @@ -3255,7 +3255,7 @@ int mbedtls_ssl_conf_alpn_protocols( mbedtls_ssl_config *conf, const char **prot * * \param ssl SSL context * - * \return Protcol name, or NULL if no protocol was negotiated. + * \return Protocol name, or NULL if no protocol was negotiated. */ const char *mbedtls_ssl_get_alpn_protocol( const mbedtls_ssl_context *ssl ); #endif /* MBEDTLS_SSL_ALPN */ @@ -3338,7 +3338,7 @@ int mbedtls_ssl_dtls_srtp_set_mki_value( mbedtls_ssl_context *ssl, unsigned char *mki_value, uint16_t mki_len ); /** - * \brief Get the negotiated DTLS-SRTP informations: + * \brief Get the negotiated DTLS-SRTP information: * Protection profile and MKI value. 
* * \warning This function must be called after the handshake is @@ -3346,7 +3346,7 @@ int mbedtls_ssl_dtls_srtp_set_mki_value( mbedtls_ssl_context *ssl, * not be trusted or acted upon before the handshake completes. * * \param ssl The SSL context to query. - * \param dtls_srtp_info The negotiated DTLS-SRTP informations: + * \param dtls_srtp_info The negotiated DTLS-SRTP information: * - Protection profile in use. * A direct mapping of the iana defined value for protection * profile on an uint16_t. @@ -3508,7 +3508,7 @@ void mbedtls_ssl_conf_cert_req_ca_list( mbedtls_ssl_config *conf, * \c mbedtls_ssl_get_record_expansion(). * * \note For DTLS, it is also possible to set a limit for the total - * size of daragrams passed to the transport layer, including + * size of datagrams passed to the transport layer, including * record overhead, see \c mbedtls_ssl_set_mtu(). * * \param conf SSL configuration @@ -3568,7 +3568,7 @@ void mbedtls_ssl_conf_session_tickets( mbedtls_ssl_config *conf, int use_tickets * initiated by peer * (Default: MBEDTLS_SSL_RENEGOTIATION_DISABLED) * - * \warning It is recommended to always disable renegotation unless you + * \warning It is recommended to always disable renegotiation unless you * know you need it and you know what you're doing. In the * past, there have been several issues associated with * renegotiation or a poor understanding of its properties. @@ -3631,7 +3631,7 @@ void mbedtls_ssl_conf_legacy_renegotiation( mbedtls_ssl_config *conf, int allow_ * scenario. * * \note With DTLS and server-initiated renegotiation, the - * HelloRequest is retransmited every time mbedtls_ssl_read() times + * HelloRequest is retransmitted every time mbedtls_ssl_read() times * out or receives Application Data, until: * - max_records records have beens seen, if it is >= 0, or * - the number of retransmits that would happen during an @@ -4263,7 +4263,7 @@ void mbedtls_ssl_free( mbedtls_ssl_context *ssl ); * \return \c 0 if successful. * \return #MBEDTLS_ERR_SSL_BUFFER_TOO_SMALL if \p buf is too small. * \return #MBEDTLS_ERR_SSL_ALLOC_FAILED if memory allocation failed - * while reseting the context. + * while resetting the context. * \return #MBEDTLS_ERR_SSL_BAD_INPUT_DATA if a handshake is in * progress, or there is pending data for reading or sending, * or the connection does not use DTLS 1.2 with an AEAD @@ -4357,7 +4357,7 @@ int mbedtls_ssl_context_load( mbedtls_ssl_context *ssl, void mbedtls_ssl_config_init( mbedtls_ssl_config *conf ); /** - * \brief Load reasonnable default SSL configuration values. + * \brief Load reasonable default SSL configuration values. * (You need to call mbedtls_ssl_config_init() first.) 
* * \param conf SSL configuration context diff --git a/thirdparty/mbedtls/include/mbedtls/ssl_cache.h b/thirdparty/mbedtls/include/mbedtls/ssl_cache.h index c6ef2960f4..02eab96d45 100644 --- a/thirdparty/mbedtls/include/mbedtls/ssl_cache.h +++ b/thirdparty/mbedtls/include/mbedtls/ssl_cache.h @@ -50,7 +50,7 @@ #define MBEDTLS_SSL_CACHE_DEFAULT_MAX_ENTRIES 50 /*!< Maximum entries in cache */ #endif -/* \} name SECTION: Module settings */ +/** \} name SECTION: Module settings */ #ifdef __cplusplus extern "C" { diff --git a/thirdparty/mbedtls/include/mbedtls/ssl_cookie.h b/thirdparty/mbedtls/include/mbedtls/ssl_cookie.h index 0a238708e5..2aa373177b 100644 --- a/thirdparty/mbedtls/include/mbedtls/ssl_cookie.h +++ b/thirdparty/mbedtls/include/mbedtls/ssl_cookie.h @@ -45,7 +45,7 @@ #define MBEDTLS_SSL_COOKIE_TIMEOUT 60 /**< Default expiration delay of DTLS cookies, in seconds if HAVE_TIME, or in number of cookies issued */ #endif -/* \} name SECTION: Module settings */ +/** \} name SECTION: Module settings */ #ifdef __cplusplus extern "C" { @@ -84,7 +84,7 @@ int mbedtls_ssl_cookie_setup( mbedtls_ssl_cookie_ctx *ctx, * \brief Set expiration delay for cookies * (Default MBEDTLS_SSL_COOKIE_TIMEOUT) * - * \param ctx Cookie contex + * \param ctx Cookie context * \param delay Delay, in seconds if HAVE_TIME, or in number of cookies * issued in the meantime. * 0 to disable expiration (NOT recommended) diff --git a/thirdparty/mbedtls/include/mbedtls/ssl_internal.h b/thirdparty/mbedtls/include/mbedtls/ssl_internal.h index 6913dc0f66..46ade67b9c 100644 --- a/thirdparty/mbedtls/include/mbedtls/ssl_internal.h +++ b/thirdparty/mbedtls/include/mbedtls/ssl_internal.h @@ -934,16 +934,22 @@ void mbedtls_ssl_transform_free( mbedtls_ssl_transform *transform ); */ void mbedtls_ssl_handshake_free( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_handshake_client_step( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_handshake_server_step( mbedtls_ssl_context *ssl ); void mbedtls_ssl_handshake_wrapup( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_send_fatal_handshake_failure( mbedtls_ssl_context *ssl ); void mbedtls_ssl_reset_checksum( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_derive_keys( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_handle_message_type( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_prepare_handshake_record( mbedtls_ssl_context *ssl ); void mbedtls_ssl_update_handshake_status( mbedtls_ssl_context *ssl ); @@ -1023,27 +1029,39 @@ void mbedtls_ssl_update_handshake_status( mbedtls_ssl_context *ssl ); * following the above definition. 
* */ +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_read_record( mbedtls_ssl_context *ssl, unsigned update_hs_digest ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_fetch_input( mbedtls_ssl_context *ssl, size_t nb_want ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_write_handshake_msg( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_write_record( mbedtls_ssl_context *ssl, uint8_t force_flush ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_flush_output( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_parse_certificate( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_write_certificate( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_parse_change_cipher_spec( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_write_change_cipher_spec( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_parse_finished( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_write_finished( mbedtls_ssl_context *ssl ); void mbedtls_ssl_optimize_checksum( mbedtls_ssl_context *ssl, const mbedtls_ssl_ciphersuite_t *ciphersuite_info ); #if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_psk_derive_premaster( mbedtls_ssl_context *ssl, mbedtls_key_exchange_type_t key_ex ); /** @@ -1108,13 +1126,18 @@ mbedtls_pk_type_t mbedtls_ssl_pk_alg_from_sig( unsigned char sig ); mbedtls_md_type_t mbedtls_ssl_md_alg_from_hash( unsigned char hash ); unsigned char mbedtls_ssl_hash_from_md_alg( int md ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_set_calc_verify_md( mbedtls_ssl_context *ssl, int md ); #if defined(MBEDTLS_ECP_C) +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_check_curve( const mbedtls_ssl_context *ssl, mbedtls_ecp_group_id grp_id ); +MBEDTLS_CHECK_RETURN_CRITICAL +int mbedtls_ssl_check_curve_tls_id( const mbedtls_ssl_context *ssl, uint16_t tls_id ); #endif #if defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_check_sig_hash( const mbedtls_ssl_context *ssl, mbedtls_md_type_t md ); #endif @@ -1170,6 +1193,7 @@ static inline mbedtls_x509_crt *mbedtls_ssl_own_cert( mbedtls_ssl_context *ssl ) * * Return 0 if everything is OK, -1 if not. 
*/ +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_check_cert_usage( const mbedtls_x509_crt *cert, const mbedtls_ssl_ciphersuite_t *ciphersuite, int cert_endpoint, @@ -1218,21 +1242,26 @@ static inline size_t mbedtls_ssl_hs_hdr_len( const mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_SSL_PROTO_DTLS) void mbedtls_ssl_send_flight_completed( mbedtls_ssl_context *ssl ); void mbedtls_ssl_recv_flight_completed( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_resend( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_flight_transmit( mbedtls_ssl_context *ssl ); #endif /* Visible for testing purposes only */ #if defined(MBEDTLS_SSL_DTLS_ANTI_REPLAY) +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_dtls_replay_check( mbedtls_ssl_context const *ssl ); void mbedtls_ssl_dtls_replay_update( mbedtls_ssl_context *ssl ); #endif +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_session_copy( mbedtls_ssl_session *dst, const mbedtls_ssl_session *src ); #if defined(MBEDTLS_SSL_PROTO_SSL3) || defined(MBEDTLS_SSL_PROTO_TLS1) || \ defined(MBEDTLS_SSL_PROTO_TLS1_1) +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_get_key_exchange_md_ssl_tls( mbedtls_ssl_context *ssl, unsigned char *output, unsigned char *data, size_t data_len ); @@ -1242,6 +1271,7 @@ int mbedtls_ssl_get_key_exchange_md_ssl_tls( mbedtls_ssl_context *ssl, #if defined(MBEDTLS_SSL_PROTO_TLS1) || defined(MBEDTLS_SSL_PROTO_TLS1_1) || \ defined(MBEDTLS_SSL_PROTO_TLS1_2) /* The hash buffer must have at least MBEDTLS_MD_MAX_SIZE bytes of length. */ +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_get_key_exchange_md_tls1_2( mbedtls_ssl_context *ssl, unsigned char *hash, size_t *hashlen, unsigned char *data, size_t data_len, @@ -1254,11 +1284,13 @@ int mbedtls_ssl_get_key_exchange_md_tls1_2( mbedtls_ssl_context *ssl, #endif void mbedtls_ssl_transform_init( mbedtls_ssl_transform *transform ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_encrypt_buf( mbedtls_ssl_context *ssl, mbedtls_ssl_transform *transform, mbedtls_record *rec, int (*f_rng)(void *, unsigned char *, size_t), void *p_rng ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_decrypt_buf( mbedtls_ssl_context const *ssl, mbedtls_ssl_transform *transform, mbedtls_record *rec ); @@ -1276,10 +1308,12 @@ static inline size_t mbedtls_ssl_ep_len( const mbedtls_ssl_context *ssl ) } #if defined(MBEDTLS_SSL_PROTO_DTLS) +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_resend_hello_request( mbedtls_ssl_context *ssl ); #endif /* MBEDTLS_SSL_PROTO_DTLS */ void mbedtls_ssl_set_timer( mbedtls_ssl_context *ssl, uint32_t millisecs ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_check_timer( mbedtls_ssl_context *ssl ); void mbedtls_ssl_reset_in_out_pointers( mbedtls_ssl_context *ssl ); @@ -1287,6 +1321,7 @@ void mbedtls_ssl_update_out_pointers( mbedtls_ssl_context *ssl, mbedtls_ssl_transform *transform ); void mbedtls_ssl_update_in_pointers( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_session_reset_int( mbedtls_ssl_context *ssl, int partial ); #if defined(MBEDTLS_SSL_DTLS_ANTI_REPLAY) @@ -1296,6 +1331,7 @@ void mbedtls_ssl_dtls_replay_reset( mbedtls_ssl_context *ssl ); void mbedtls_ssl_handshake_wrapup_free_hs_transform( mbedtls_ssl_context *ssl ); #if defined(MBEDTLS_SSL_RENEGOTIATION) +MBEDTLS_CHECK_RETURN_CRITICAL int mbedtls_ssl_start_renegotiation( mbedtls_ssl_context *ssl ); #endif /* MBEDTLS_SSL_RENEGOTIATION */ @@ -1305,4 +1341,12 @@ void mbedtls_ssl_buffering_free( mbedtls_ssl_context *ssl ); void mbedtls_ssl_flight_free( 
mbedtls_ssl_flight_item *flight ); #endif /* MBEDTLS_SSL_PROTO_DTLS */ +#if defined(MBEDTLS_TEST_HOOKS) +int mbedtls_ssl_check_dtls_clihlo_cookie( + mbedtls_ssl_context *ssl, + const unsigned char *cli_id, size_t cli_id_len, + const unsigned char *in, size_t in_len, + unsigned char *obuf, size_t buf_len, size_t *olen ); +#endif + #endif /* ssl_internal.h */ diff --git a/thirdparty/mbedtls/include/mbedtls/ssl_ticket.h b/thirdparty/mbedtls/include/mbedtls/ssl_ticket.h index a882eed23b..8221051b24 100644 --- a/thirdparty/mbedtls/include/mbedtls/ssl_ticket.h +++ b/thirdparty/mbedtls/include/mbedtls/ssl_ticket.h @@ -101,7 +101,7 @@ void mbedtls_ssl_ticket_init( mbedtls_ssl_ticket_context *ctx ); * supported. Usually that means a 256-bit key. * * \note The lifetime of the keys is twice the lifetime of tickets. - * It is recommended to pick a reasonnable lifetime so as not + * It is recommended to pick a reasonable lifetime so as not * to negate the benefits of forward secrecy. * * \return 0 if successful, diff --git a/thirdparty/mbedtls/include/mbedtls/version.h b/thirdparty/mbedtls/include/mbedtls/version.h index b1a92b2bcf..44adcbfe03 100644 --- a/thirdparty/mbedtls/include/mbedtls/version.h +++ b/thirdparty/mbedtls/include/mbedtls/version.h @@ -38,16 +38,16 @@ */ #define MBEDTLS_VERSION_MAJOR 2 #define MBEDTLS_VERSION_MINOR 28 -#define MBEDTLS_VERSION_PATCH 0 +#define MBEDTLS_VERSION_PATCH 1 /** * The single version number has the following structure: * MMNNPP00 * Major version | Minor version | Patch version */ -#define MBEDTLS_VERSION_NUMBER 0x021C0000 -#define MBEDTLS_VERSION_STRING "2.28.0" -#define MBEDTLS_VERSION_STRING_FULL "mbed TLS 2.28.0" +#define MBEDTLS_VERSION_NUMBER 0x021C0100 +#define MBEDTLS_VERSION_STRING "2.28.1" +#define MBEDTLS_VERSION_STRING_FULL "mbed TLS 2.28.1" #if defined(MBEDTLS_VERSION_C) diff --git a/thirdparty/mbedtls/include/mbedtls/x509.h b/thirdparty/mbedtls/include/mbedtls/x509.h index c177501430..31b78df32f 100644 --- a/thirdparty/mbedtls/include/mbedtls/x509.h +++ b/thirdparty/mbedtls/include/mbedtls/x509.h @@ -96,7 +96,7 @@ #define MBEDTLS_ERR_X509_BUFFER_TOO_SMALL -0x2980 /** A fatal error occurred, eg the chain is too long or the vrfy callback failed. */ #define MBEDTLS_ERR_X509_FATAL_ERROR -0x3000 -/* \} name */ +/** \} name X509 Error codes */ /** * \name X509 Verify codes @@ -124,8 +124,8 @@ #define MBEDTLS_X509_BADCRL_BAD_PK 0x040000 /**< The CRL is signed with an unacceptable PK alg (eg RSA vs ECDSA). */ #define MBEDTLS_X509_BADCRL_BAD_KEY 0x080000 /**< The CRL is signed with an unacceptable key (eg bad curve, RSA too short). */ -/* \} name */ -/* \} addtogroup x509_module */ +/** \} name X509 Verify codes */ +/** \} addtogroup x509_module */ /* * X.509 v3 Subject Alternative Name types. 
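The version.h hunk above bumps the packed MBEDTLS_VERSION_NUMBER from 0x021C0000 to 0x021C0100, following the MMNNPP00 layout it documents: 0x02 is major 2, 0x1C is minor 28, 0x01 is patch 1, and the final byte is always 00. A short sketch of how an embedder can gate on that value, assuming MBEDTLS_VERSION_C is enabled so the runtime getter is available:

#include <mbedtls/version.h>
#include <stdio.h>

/* MMNNPP00: (major << 24) | (minor << 16) | (patch << 8). */
#if MBEDTLS_VERSION_NUMBER < 0x021C0100
#error "mbed TLS 2.28.1 or later required"
#endif

void print_mbedtls_version( void )
{
    unsigned int v = mbedtls_version_get_number();

    printf( "mbed TLS %u.%u.%u\n",
            ( v >> 24 ) & 0xFF,     /* 0x02 -> 2  */
            ( v >> 16 ) & 0xFF,     /* 0x1C -> 28 */
            ( v >>  8 ) & 0xFF );   /* 0x01 -> 1  */
}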
@@ -255,7 +255,6 @@ typedef struct mbedtls_x509_time mbedtls_x509_time; /** \} name Structures for parsing X.509 certificates, CRLs and CSRs */ -/** \} addtogroup x509_module */ /** * \brief Store the certificate DN in printable form into buf; @@ -311,6 +310,8 @@ int mbedtls_x509_time_is_past( const mbedtls_x509_time *to ); */ int mbedtls_x509_time_is_future( const mbedtls_x509_time *from ); +/** \} addtogroup x509_module */ + #if defined(MBEDTLS_SELF_TEST) /** diff --git a/thirdparty/mbedtls/include/mbedtls/x509_crl.h b/thirdparty/mbedtls/include/mbedtls/x509_crl.h index 7e9e8885f4..9222009019 100644 --- a/thirdparty/mbedtls/include/mbedtls/x509_crl.h +++ b/thirdparty/mbedtls/include/mbedtls/x509_crl.h @@ -162,8 +162,8 @@ void mbedtls_x509_crl_init( mbedtls_x509_crl *crl ); */ void mbedtls_x509_crl_free( mbedtls_x509_crl *crl ); -/* \} name */ -/* \} addtogroup x509_module */ +/** \} name Structures and functions for parsing CRLs */ +/** \} addtogroup x509_module */ #ifdef __cplusplus } diff --git a/thirdparty/mbedtls/include/mbedtls/x509_crt.h b/thirdparty/mbedtls/include/mbedtls/x509_crt.h index 64ccb433ba..0f2885a7ee 100644 --- a/thirdparty/mbedtls/include/mbedtls/x509_crt.h +++ b/thirdparty/mbedtls/include/mbedtls/x509_crt.h @@ -107,7 +107,7 @@ mbedtls_x509_crt; typedef struct mbedtls_x509_san_other_name { /** - * The type_id is an OID as deifned in RFC 5280. + * The type_id is an OID as defined in RFC 5280. * To check the value of the type id, you should use * \p MBEDTLS_OID_CMP with a known OID mbedtls_x509_buf. */ @@ -159,7 +159,9 @@ mbedtls_x509_subject_alternative_name; typedef struct mbedtls_x509_crt_profile { uint32_t allowed_mds; /**< MDs for signatures */ - uint32_t allowed_pks; /**< PK algs for signatures */ + uint32_t allowed_pks; /**< PK algs for public keys; + * this applies to all certificates + * in the provided chain. 
*/ uint32_t allowed_curves; /**< Elliptic curves for ECDSA */ uint32_t rsa_min_bitlen; /**< Minimum size for RSA keys */ } @@ -850,8 +852,7 @@ void mbedtls_x509_crt_restart_free( mbedtls_x509_crt_restart_ctx *ctx ); #endif /* MBEDTLS_ECDSA_C && MBEDTLS_ECP_RESTARTABLE */ #endif /* MBEDTLS_X509_CRT_PARSE_C */ -/* \} name */ -/* \} addtogroup x509_module */ +/** \} name Structures and functions for parsing and writing X.509 certificates */ #if defined(MBEDTLS_X509_CRT_WRITE_C) /** @@ -862,7 +863,7 @@ void mbedtls_x509_crt_restart_free( mbedtls_x509_crt_restart_ctx *ctx ); void mbedtls_x509write_crt_init( mbedtls_x509write_cert *ctx ); /** - * \brief Set the verion for a Certificate + * \brief Set the version for a Certificate * Default: MBEDTLS_X509_CRT_VERSION_3 * * \param ctx CRT context to use @@ -978,7 +979,7 @@ int mbedtls_x509write_crt_set_extension( mbedtls_x509write_cert *ctx, * \param is_ca is this a CA certificate * \param max_pathlen maximum length of certificate chains below this * certificate (only for CA certificates, -1 is - * inlimited) + * unlimited) * * \return 0 if successful, or a MBEDTLS_ERR_X509_ALLOC_FAILED */ @@ -1087,6 +1088,8 @@ int mbedtls_x509write_crt_pem( mbedtls_x509write_cert *ctx, unsigned char *buf, #endif /* MBEDTLS_PEM_WRITE_C */ #endif /* MBEDTLS_X509_CRT_WRITE_C */ +/** \} addtogroup x509_module */ + #ifdef __cplusplus } #endif diff --git a/thirdparty/mbedtls/include/mbedtls/x509_csr.h b/thirdparty/mbedtls/include/mbedtls/x509_csr.h index b1dfc21f1f..2a1c046131 100644 --- a/thirdparty/mbedtls/include/mbedtls/x509_csr.h +++ b/thirdparty/mbedtls/include/mbedtls/x509_csr.h @@ -151,8 +151,7 @@ void mbedtls_x509_csr_init( mbedtls_x509_csr *csr ); void mbedtls_x509_csr_free( mbedtls_x509_csr *csr ); #endif /* MBEDTLS_X509_CSR_PARSE_C */ -/* \} name */ -/* \} addtogroup x509_module */ +/** \} name Structures and functions for X.509 Certificate Signing Requests (CSR) */ #if defined(MBEDTLS_X509_CSR_WRITE_C) /** @@ -182,7 +181,7 @@ int mbedtls_x509write_csr_set_subject_name( mbedtls_x509write_csr *ctx, * private key used to sign the CSR when writing it) * * \param ctx CSR context to use - * \param key Asymetric key to include + * \param key Asymmetric key to include */ void mbedtls_x509write_csr_set_key( mbedtls_x509write_csr *ctx, mbedtls_pk_context *key ); @@ -298,6 +297,8 @@ int mbedtls_x509write_csr_pem( mbedtls_x509write_csr *ctx, unsigned char *buf, s #endif /* MBEDTLS_PEM_WRITE_C */ #endif /* MBEDTLS_X509_CSR_WRITE_C */ +/** \} addtogroup x509_module */ + #ifdef __cplusplus } #endif diff --git a/thirdparty/mbedtls/library/aes.c b/thirdparty/mbedtls/library/aes.c index 31824e75cf..03d8b7ea61 100644 --- a/thirdparty/mbedtls/library/aes.c +++ b/thirdparty/mbedtls/library/aes.c @@ -1106,7 +1106,7 @@ typedef unsigned char mbedtls_be128[16]; * * This function multiplies a field element by x in the polynomial field * representation. It uses 64-bit word operations to gain speed but compensates - * for machine endianess and hence works correctly on both big and little + * for machine endianness and hence works correctly on both big and little * endian machines. */ static void mbedtls_gf128mul_x_ble( unsigned char r[16], @@ -1206,7 +1206,7 @@ int mbedtls_aes_crypt_xts( mbedtls_aes_xts_context *ctx, unsigned char *prev_output = output - 16; /* Copy ciphertext bytes from the previous block to our output for each - * byte of cyphertext we won't steal. At the same time, copy the + * byte of ciphertext we won't steal. 
At the same time, copy the * remainder of the input for this final round (since the loop bounds * are the same). */ for( i = 0; i < leftover; i++ ) diff --git a/thirdparty/mbedtls/library/asn1write.c b/thirdparty/mbedtls/library/asn1write.c index 3811ef27a3..afa26a6be9 100644 --- a/thirdparty/mbedtls/library/asn1write.c +++ b/thirdparty/mbedtls/library/asn1write.c @@ -133,6 +133,11 @@ int mbedtls_asn1_write_mpi( unsigned char **p, unsigned char *start, const mbedt // len = mbedtls_mpi_size( X ); + /* DER represents 0 with a sign bit (0=nonnegative) and 7 value bits, not + * as 0 digits. We need to end up with 020100, not with 0200. */ + if( len == 0 ) + len = 1; + if( *p < start || (size_t)( *p - start ) < len ) return( MBEDTLS_ERR_ASN1_BUF_TOO_SMALL ); @@ -472,7 +477,7 @@ mbedtls_asn1_named_data *mbedtls_asn1_store_named_data( cur->val.len = val_len; } - if( val != NULL ) + if( val != NULL && val_len != 0 ) memcpy( cur->val.p, val, val_len ); return( cur ); diff --git a/thirdparty/mbedtls/library/bignum.c b/thirdparty/mbedtls/library/bignum.c index 62e7f76727..32578e2c68 100644 --- a/thirdparty/mbedtls/library/bignum.c +++ b/thirdparty/mbedtls/library/bignum.c @@ -1829,7 +1829,7 @@ int mbedtls_mpi_mod_int( mbedtls_mpi_uint *r, const mbedtls_mpi *A, mbedtls_mpi_ /* * handle trivial cases */ - if( b == 1 ) + if( b == 1 || A->n == 0 ) { *r = 0; return( 0 ); @@ -2317,7 +2317,7 @@ int mbedtls_mpi_gcd( mbedtls_mpi *G, const mbedtls_mpi *A, const mbedtls_mpi *B * TA-TB is even so the division by 2 has an integer result. * Invariant (I) is preserved since any odd divisor of both TA and TB * also divides |TA-TB|/2, and any odd divisor of both TA and |TA-TB|/2 - * also divides TB, and any odd divisior of both TB and |TA-TB|/2 also + * also divides TB, and any odd divisor of both TB and |TA-TB|/2 also * divides TA. */ if( mbedtls_mpi_cmp_mpi( &TA, &TB ) >= 0 ) diff --git a/thirdparty/mbedtls/library/cipher.c b/thirdparty/mbedtls/library/cipher.c index 4ec40d2cac..f3b4bd29ce 100644 --- a/thirdparty/mbedtls/library/cipher.c +++ b/thirdparty/mbedtls/library/cipher.c @@ -386,6 +386,12 @@ int mbedtls_cipher_set_iv( mbedtls_cipher_context_t *ctx, #if defined(MBEDTLS_CHACHA20_C) if ( ctx->cipher_info->type == MBEDTLS_CIPHER_CHACHA20 ) { + /* Even though the actual_iv_size is overwritten with a correct value + * of 12 from the cipher info, return an error to indicate that + * the input iv_len is wrong. 
*/ + if( iv_len != 12 ) + return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA ); + if ( 0 != mbedtls_chacha20_starts( (mbedtls_chacha20_context*)ctx->cipher_ctx, iv, 0U ) ) /* Initial counter value */ @@ -393,6 +399,11 @@ int mbedtls_cipher_set_iv( mbedtls_cipher_context_t *ctx, return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA ); } } +#if defined(MBEDTLS_CHACHAPOLY_C) + if ( ctx->cipher_info->type == MBEDTLS_CIPHER_CHACHA20_POLY1305 && + iv_len != 12 ) + return( MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA ); +#endif #endif if ( actual_iv_size != 0 ) diff --git a/thirdparty/mbedtls/library/constant_time.c b/thirdparty/mbedtls/library/constant_time.c index 18f1b20daa..e276d23ca0 100644 --- a/thirdparty/mbedtls/library/constant_time.c +++ b/thirdparty/mbedtls/library/constant_time.c @@ -489,6 +489,12 @@ int mbedtls_ct_hmac( mbedtls_md_context_t *ctx, MD_CHK( mbedtls_md_update( ctx, add_data, add_data_len ) ); MD_CHK( mbedtls_md_update( ctx, data, min_data_len ) ); + /* Fill the hash buffer in advance with something that is + * not a valid hash (barring an attack on the hash and + * deliberately-crafted input), in case the caller doesn't + * check the return status properly. */ + memset( output, '!', hash_size ); + /* For each possible length, compute the hash up to that point */ for( offset = min_data_len; offset <= max_data_len; offset++ ) { @@ -533,6 +539,13 @@ cleanup: * about whether the assignment was made or not. * (Leaking information about the respective sizes of X and Y is ok however.) */ +#if defined(_MSC_VER) && defined(_M_ARM64) && (_MSC_FULL_VER < 193131103) +/* + * MSVC miscompiles this function if it's inlined prior to Visual Studio 2022 version 17.1. See: + * https://developercommunity.visualstudio.com/t/c-compiler-miscompiles-part-of-mbedtls-library-on/1646989 + */ +__declspec(noinline) +#endif int mbedtls_mpi_safe_cond_assign( mbedtls_mpi *X, const mbedtls_mpi *Y, unsigned char assign ) @@ -562,7 +575,7 @@ cleanup: /* * Conditionally swap X and Y, without leaking information * about whether the swap was made or not. - * Here it is not ok to simply swap the pointers, which whould lead to + * Here it is not ok to simply swap the pointers, which would lead to * different memory access patterns when X and Y are used afterwards. */ int mbedtls_mpi_safe_cond_swap( mbedtls_mpi *X, diff --git a/thirdparty/mbedtls/library/constant_time_internal.h b/thirdparty/mbedtls/library/constant_time_internal.h index bbb3a90670..a550b38fa5 100644 --- a/thirdparty/mbedtls/library/constant_time_internal.h +++ b/thirdparty/mbedtls/library/constant_time_internal.h @@ -221,6 +221,13 @@ void mbedtls_ct_memcpy_if_eq( unsigned char *dest, * offset_secret, but only on \p offset_min, \p offset_max and \p len. * Functionally equivalent to `memcpy(dst, src + offset_secret, len)`. * + * \note This function reads from \p dest, but the value that + * is read does not influence the result and this + * function's behavior is well-defined regardless of the + * contents of the buffers. This may result in false + * positives from static or dynamic analyzers, especially + * if \p dest is not initialized. + * * \param dest The destination buffer. This must point to a writable * buffer of at least \p len bytes. * \param src The base of the source buffer. 
This must point to a diff --git a/thirdparty/mbedtls/library/ctr_drbg.c b/thirdparty/mbedtls/library/ctr_drbg.c index a604ec0761..a00d66ce87 100644 --- a/thirdparty/mbedtls/library/ctr_drbg.c +++ b/thirdparty/mbedtls/library/ctr_drbg.c @@ -828,7 +828,7 @@ static int ctr_drbg_self_test_entropy( void *data, unsigned char *buf, return( 1 ); \ } -#define SELF_TEST_OUPUT_DISCARD_LENGTH 64 +#define SELF_TEST_OUTPUT_DISCARD_LENGTH 64 /* * Checkup routine @@ -854,7 +854,7 @@ int mbedtls_ctr_drbg_self_test( int verbose ) (void *) entropy_source_pr, pers_pr, MBEDTLS_CTR_DRBG_KEYSIZE ) ); mbedtls_ctr_drbg_set_prediction_resistance( &ctx, MBEDTLS_CTR_DRBG_PR_ON ); - CHK( mbedtls_ctr_drbg_random( &ctx, buf, SELF_TEST_OUPUT_DISCARD_LENGTH ) ); + CHK( mbedtls_ctr_drbg_random( &ctx, buf, SELF_TEST_OUTPUT_DISCARD_LENGTH ) ); CHK( mbedtls_ctr_drbg_random( &ctx, buf, sizeof( result_pr ) ) ); CHK( memcmp( buf, result_pr, sizeof( result_pr ) ) ); @@ -879,7 +879,7 @@ int mbedtls_ctr_drbg_self_test( int verbose ) (void *) entropy_source_nopr, pers_nopr, MBEDTLS_CTR_DRBG_KEYSIZE ) ); CHK( mbedtls_ctr_drbg_reseed( &ctx, NULL, 0 ) ); - CHK( mbedtls_ctr_drbg_random( &ctx, buf, SELF_TEST_OUPUT_DISCARD_LENGTH ) ); + CHK( mbedtls_ctr_drbg_random( &ctx, buf, SELF_TEST_OUTPUT_DISCARD_LENGTH ) ); CHK( mbedtls_ctr_drbg_random( &ctx, buf, sizeof( result_nopr ) ) ); CHK( memcmp( buf, result_nopr, sizeof( result_nopr ) ) ); diff --git a/thirdparty/mbedtls/library/ecdh.c b/thirdparty/mbedtls/library/ecdh.c index 9dfa868063..60c6e429de 100644 --- a/thirdparty/mbedtls/library/ecdh.c +++ b/thirdparty/mbedtls/library/ecdh.c @@ -399,7 +399,7 @@ static int ecdh_read_params_internal( mbedtls_ecdh_context_mbed *ctx, } /* - * Read the ServerKeyExhange parameters (RFC 4492) + * Read the ServerKeyExchange parameters (RFC 4492) * struct { * ECParameters curve_params; * ECPoint public; diff --git a/thirdparty/mbedtls/library/ecjpake.c b/thirdparty/mbedtls/library/ecjpake.c index 368b6c7124..0b9bffb93e 100644 --- a/thirdparty/mbedtls/library/ecjpake.c +++ b/thirdparty/mbedtls/library/ecjpake.c @@ -435,7 +435,7 @@ cleanup: /* * Read a ECJPAKEKeyKPPairList (7.4.2.3) and check proofs - * Ouputs: verified peer public keys Xa, Xb + * Outputs: verified peer public keys Xa, Xb */ static int ecjpake_kkpp_read( const mbedtls_md_info_t *md_info, const mbedtls_ecp_group *grp, diff --git a/thirdparty/mbedtls/library/ecp.c b/thirdparty/mbedtls/library/ecp.c index 7f9e1045d4..890f364a08 100644 --- a/thirdparty/mbedtls/library/ecp.c +++ b/thirdparty/mbedtls/library/ecp.c @@ -1307,7 +1307,7 @@ cleanup: * For curves in short Weierstrass form, we do all the internal operations in * Jacobian coordinates. * - * For multiplication, we'll use a comb method with coutermeasueres against + * For multiplication, we'll use a comb method with countermeasures against * SPA, hence timing attacks. */ @@ -2251,7 +2251,7 @@ static unsigned char ecp_pick_window_size( const mbedtls_ecp_group *grp, * This function is mainly responsible for administrative work: * - managing the restart context if enabled * - managing the table of precomputed points (passed between the below two - * functions): allocation, computation, ownership tranfer, freeing. + * functions): allocation, computation, ownership transfer, freeing. * * It delegates the actual arithmetic work to: * ecp_precompute_comb() and ecp_mul_comb_with_precomp() @@ -2422,7 +2422,7 @@ cleanup: /* * For Montgomery curves, we do all the internal arithmetic in projective * coordinates. 
Import/export of points uses only the x coordinates, which is - * internaly represented as X / Z. + * internally represented as X / Z. * * For scalar multiplication, we'll use a Montgomery ladder. */ @@ -2592,7 +2592,7 @@ static int ecp_mul_mxz( mbedtls_ecp_group *grp, mbedtls_ecp_point *R, MBEDTLS_MPI_CHK( mbedtls_mpi_lset( &R->Z, 0 ) ); mbedtls_mpi_free( &R->Y ); - /* RP.X might be sligtly larger than P, so reduce it */ + /* RP.X might be slightly larger than P, so reduce it */ MOD_ADD( RP.X ); /* Randomize coordinates of the starting point */ diff --git a/thirdparty/mbedtls/library/ecp_curves.c b/thirdparty/mbedtls/library/ecp_curves.c index ff26a18e8f..2199be6461 100644 --- a/thirdparty/mbedtls/library/ecp_curves.c +++ b/thirdparty/mbedtls/library/ecp_curves.c @@ -755,6 +755,8 @@ int mbedtls_ecp_group_load( mbedtls_ecp_group *grp, mbedtls_ecp_group_id id ) ECP_VALIDATE_RET( grp != NULL ); mbedtls_ecp_group_free( grp ); + mbedtls_ecp_group_init( grp ); + grp->id = id; switch( id ) diff --git a/thirdparty/mbedtls/library/memory_buffer_alloc.c b/thirdparty/mbedtls/library/memory_buffer_alloc.c index 0d5d27d3de..cc62324bdc 100644 --- a/thirdparty/mbedtls/library/memory_buffer_alloc.c +++ b/thirdparty/mbedtls/library/memory_buffer_alloc.c @@ -555,8 +555,8 @@ static void *buffer_alloc_calloc_mutexed( size_t n, size_t size ) static void buffer_alloc_free_mutexed( void *ptr ) { - /* We have to good option here, but corrupting the heap seems - * worse than loosing memory. */ + /* We have no good option here, but corrupting the heap seems + * worse than losing memory. */ if( mbedtls_mutex_lock( &heap.mutex ) ) return; buffer_alloc_free( ptr ); diff --git a/thirdparty/mbedtls/library/mps_common.h b/thirdparty/mbedtls/library/mps_common.h index d20776f159..668876ccfc 100644 --- a/thirdparty/mbedtls/library/mps_common.h +++ b/thirdparty/mbedtls/library/mps_common.h @@ -51,7 +51,7 @@ * the function's behavior is entirely undefined. * In addition to state integrity, all MPS structures have a more refined * notion of abstract state that the API operates on. For example, all layers - * have a notion of 'abtract read state' which indicates if incoming data has + * have a notion of 'abstract read state' which indicates if incoming data has * been passed to the user, e.g. through mps_l2_read_start() for Layer 2 * or mps_l3_read() in Layer 3. 
After such a call, it doesn't make sense to * call these reading functions again until the incoming data has been diff --git a/thirdparty/mbedtls/library/net_sockets.c b/thirdparty/mbedtls/library/net_sockets.c index 5fbe1f764a..8c765e1c8c 100644 --- a/thirdparty/mbedtls/library/net_sockets.c +++ b/thirdparty/mbedtls/library/net_sockets.c @@ -107,7 +107,9 @@ static int wsa_init_done = 0; #include <stdio.h> +#if defined(MBEDTLS_HAVE_TIME) #include <time.h> +#endif #include <stdint.h> diff --git a/thirdparty/mbedtls/library/pkparse.c b/thirdparty/mbedtls/library/pkparse.c index 535ed70eb1..ea5c6b69cb 100644 --- a/thirdparty/mbedtls/library/pkparse.c +++ b/thirdparty/mbedtls/library/pkparse.c @@ -474,7 +474,7 @@ static int pk_use_ecparams( const mbedtls_asn1_buf *params, mbedtls_ecp_group *g } /* - * grp may already be initilialized; if so, make sure IDs match + * grp may already be initialized; if so, make sure IDs match */ if( grp->id != MBEDTLS_ECP_DP_NONE && grp->id != grp_id ) return( MBEDTLS_ERR_PK_KEY_INVALID_FORMAT ); @@ -807,7 +807,7 @@ static int pk_parse_key_pkcs1_der( mbedtls_rsa_context *rsa, goto cleanup; #else - /* Verify existance of the CRT params */ + /* Verify existence of the CRT params */ if( ( ret = asn1_get_nonzero_mpi( &p, end, &T ) ) != 0 || ( ret = asn1_get_nonzero_mpi( &p, end, &T ) ) != 0 || ( ret = asn1_get_nonzero_mpi( &p, end, &T ) ) != 0 ) @@ -1463,10 +1463,16 @@ int mbedtls_pk_parse_public_key( mbedtls_pk_context *ctx, { p = pem.buf; if( ( pk_info = mbedtls_pk_info_from_type( MBEDTLS_PK_RSA ) ) == NULL ) + { + mbedtls_pem_free( &pem ); return( MBEDTLS_ERR_PK_UNKNOWN_PK_ALG ); + } if( ( ret = mbedtls_pk_setup( ctx, pk_info ) ) != 0 ) + { + mbedtls_pem_free( &pem ); return( ret ); + } if ( ( ret = pk_get_rsapubkey( &p, p + pem.buflen, mbedtls_pk_rsa( *ctx ) ) ) != 0 ) mbedtls_pk_free( ctx ); diff --git a/thirdparty/mbedtls/library/rsa.c b/thirdparty/mbedtls/library/rsa.c index 8a5d40ff1e..d1f6ddb177 100644 --- a/thirdparty/mbedtls/library/rsa.c +++ b/thirdparty/mbedtls/library/rsa.c @@ -832,10 +832,10 @@ cleanup: * the more bits of the key can be recovered. See [3]. * * Collecting n collisions with m bit long blinding value requires 2^(m-m/n) - * observations on avarage. + * observations on average. * * For example with 28 byte blinding to achieve 2 collisions the adversary has - * to make 2^112 observations on avarage. + * to make 2^112 observations on average. * * (With the currently (as of 2017 April) known best algorithms breaking 2048 * bit RSA requires approximately as much time as trying out 2^112 random keys. 
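The asn1write.c hunk earlier in this diff fixes DER encoding of a zero bignum: INTEGER 0 must serialise as the three bytes 02 01 00 (tag, length 1, a single zero content octet), not as the invalid zero-length form 02 00. A minimal sketch exercising that path; note that mbedtls_asn1_write_mpi() writes backwards from the end of the buffer, and error handling is trimmed:

#include <mbedtls/asn1write.h>
#include <mbedtls/bignum.h>

/* Encode the MPI value 0. With the fix, the return value is 3 and
 * p points at { 0x02, 0x01, 0x00 } rather than { 0x02, 0x00 }. */
int write_zero_integer( void )
{
    unsigned char buf[8];
    unsigned char *p = buf + sizeof( buf );   /* ASN.1 writers move p down */
    mbedtls_mpi X;
    int len;

    mbedtls_mpi_init( &X );
    (void) mbedtls_mpi_lset( &X, 0 );

    len = mbedtls_asn1_write_mpi( &p, buf, &X );   /* expect 3 */

    mbedtls_mpi_free( &X );
    return( len );
}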
diff --git a/thirdparty/mbedtls/library/ssl_ciphersuites.c b/thirdparty/mbedtls/library/ssl_ciphersuites.c index 3826ad27fa..ceec77efb0 100644 --- a/thirdparty/mbedtls/library/ssl_ciphersuites.c +++ b/thirdparty/mbedtls/library/ssl_ciphersuites.c @@ -2181,6 +2181,7 @@ const int *mbedtls_ssl_list_ciphersuites( void ) static int supported_ciphersuites[MAX_CIPHERSUITES]; static int supported_init = 0; +MBEDTLS_CHECK_RETURN_CRITICAL static int ciphersuite_is_removed( const mbedtls_ssl_ciphersuite_t *cs_info ) { (void)cs_info; diff --git a/thirdparty/mbedtls/library/ssl_cli.c b/thirdparty/mbedtls/library/ssl_cli.c index b87879ce6a..72351c9757 100644 --- a/thirdparty/mbedtls/library/ssl_cli.c +++ b/thirdparty/mbedtls/library/ssl_cli.c @@ -53,6 +53,7 @@ #endif #if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_conf_has_static_psk( mbedtls_ssl_config const *conf ) { if( conf->psk_identity == NULL || @@ -73,6 +74,7 @@ static int ssl_conf_has_static_psk( mbedtls_ssl_config const *conf ) } #if defined(MBEDTLS_USE_PSA_CRYPTO) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_conf_has_static_raw_psk( mbedtls_ssl_config const *conf ) { if( conf->psk_identity == NULL || @@ -91,6 +93,7 @@ static int ssl_conf_has_static_raw_psk( mbedtls_ssl_config const *conf ) #endif /* MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED */ #if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_hostname_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -161,6 +164,7 @@ static int ssl_write_hostname_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_SERVER_NAME_INDICATION */ #if defined(MBEDTLS_SSL_RENEGOTIATION) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_renegotiation_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -204,6 +208,7 @@ static int ssl_write_renegotiation_ext( mbedtls_ssl_context *ssl, */ #if defined(MBEDTLS_SSL_PROTO_TLS1_2) && \ defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_signature_algorithms_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -302,6 +307,7 @@ static int ssl_write_signature_algorithms_ext( mbedtls_ssl_context *ssl, #if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \ defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_supported_elliptic_curves_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -373,6 +379,7 @@ static int ssl_write_supported_elliptic_curves_ext( mbedtls_ssl_context *ssl, return( 0 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_supported_point_formats_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -404,6 +411,7 @@ static int ssl_write_supported_point_formats_ext( mbedtls_ssl_context *ssl, MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */ #if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_ecjpake_kkpp_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -477,6 +485,7 @@ static int ssl_write_ecjpake_kkpp_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */ #if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_cid_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -523,6 +532,7 @@ static int ssl_write_cid_ext( mbedtls_ssl_context *ssl, #endif /* 
MBEDTLS_SSL_DTLS_CONNECTION_ID */ #if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_max_fragment_length_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -555,6 +565,7 @@ static int ssl_write_max_fragment_length_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_MAX_FRAGMENT_LENGTH */ #if defined(MBEDTLS_SSL_TRUNCATED_HMAC) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_truncated_hmac_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -585,6 +596,7 @@ static int ssl_write_truncated_hmac_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_TRUNCATED_HMAC */ #if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_encrypt_then_mac_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -616,6 +628,7 @@ static int ssl_write_encrypt_then_mac_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_ENCRYPT_THEN_MAC */ #if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_extended_ms_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -647,6 +660,7 @@ static int ssl_write_extended_ms_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_EXTENDED_MASTER_SECRET */ #if defined(MBEDTLS_SSL_SESSION_TICKETS) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_session_ticket_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -689,6 +703,7 @@ static int ssl_write_session_ticket_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_SESSION_TICKETS */ #if defined(MBEDTLS_SSL_ALPN) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_alpn_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -748,6 +763,7 @@ static int ssl_write_alpn_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_ALPN */ #if defined(MBEDTLS_SSL_DTLS_SRTP) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_use_srtp_ext( mbedtls_ssl_context *ssl, unsigned char *buf, const unsigned char *end, @@ -868,6 +884,7 @@ static int ssl_write_use_srtp_ext( mbedtls_ssl_context *ssl, /* * Generate random bytes for ClientHello */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_generate_random( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -917,6 +934,7 @@ static int ssl_generate_random( mbedtls_ssl_context *ssl ) * * \return 0 if valid, else 1 */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_validate_ciphersuite( const mbedtls_ssl_ciphersuite_t * suite_info, const mbedtls_ssl_context * ssl, @@ -960,6 +978,7 @@ static int ssl_validate_ciphersuite( return( 0 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_client_hello( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -1450,6 +1469,7 @@ static int ssl_write_client_hello( mbedtls_ssl_context *ssl ) return( 0 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_renegotiation_info( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -1494,6 +1514,7 @@ static int ssl_parse_renegotiation_info( mbedtls_ssl_context *ssl, } #if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_max_fragment_length_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -1520,6 +1541,7 @@ static int ssl_parse_max_fragment_length_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_MAX_FRAGMENT_LENGTH */ #if defined(MBEDTLS_SSL_TRUNCATED_HMAC) +MBEDTLS_CHECK_RETURN_CRITICAL static int 
ssl_parse_truncated_hmac_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -1545,6 +1567,7 @@ static int ssl_parse_truncated_hmac_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_TRUNCATED_HMAC */ #if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_cid_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -1601,6 +1624,7 @@ static int ssl_parse_cid_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */ #if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_encrypt_then_mac_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -1627,6 +1651,7 @@ static int ssl_parse_encrypt_then_mac_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_ENCRYPT_THEN_MAC */ #if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_extended_ms_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -1653,6 +1678,7 @@ static int ssl_parse_extended_ms_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_EXTENDED_MASTER_SECRET */ #if defined(MBEDTLS_SSL_SESSION_TICKETS) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_session_ticket_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -1679,6 +1705,7 @@ static int ssl_parse_session_ticket_ext( mbedtls_ssl_context *ssl, #if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \ defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_supported_point_formats_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -1724,6 +1751,7 @@ static int ssl_parse_supported_point_formats_ext( mbedtls_ssl_context *ssl, MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */ #if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_ecjpake_kkpp( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -1758,6 +1786,7 @@ static int ssl_parse_ecjpake_kkpp( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */ #if defined(MBEDTLS_SSL_ALPN) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_alpn_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) { @@ -1828,6 +1857,7 @@ static int ssl_parse_alpn_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_ALPN */ #if defined(MBEDTLS_SSL_DTLS_SRTP) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_use_srtp_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -1948,6 +1978,7 @@ static int ssl_parse_use_srtp_ext( mbedtls_ssl_context *ssl, * Parse HelloVerifyRequest. Only called after verifying the HS type. 
*/ #if defined(MBEDTLS_SSL_PROTO_DTLS) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_hello_verify_request( mbedtls_ssl_context *ssl ) { const unsigned char *p = ssl->in_msg + mbedtls_ssl_hs_hdr_len( ssl ); @@ -2031,6 +2062,7 @@ static int ssl_parse_hello_verify_request( mbedtls_ssl_context *ssl ) } #endif /* MBEDTLS_SSL_PROTO_DTLS */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_server_hello( mbedtls_ssl_context *ssl ) { int ret, i; @@ -2276,16 +2308,6 @@ static int ssl_parse_server_hello( mbedtls_ssl_context *ssl ) else { ssl->state = MBEDTLS_SSL_SERVER_CHANGE_CIPHER_SPEC; - - if( ( ret = mbedtls_ssl_derive_keys( ssl ) ) != 0 ) - { - MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_derive_keys", ret ); - mbedtls_ssl_send_alert_message( - ssl, - MBEDTLS_SSL_ALERT_LEVEL_FATAL, - MBEDTLS_SSL_ALERT_MSG_INTERNAL_ERROR ); - return( ret ); - } } MBEDTLS_SSL_DEBUG_MSG( 3, ( "%s session has been resumed", @@ -2538,6 +2560,24 @@ static int ssl_parse_server_hello( mbedtls_ssl_context *ssl ) } /* + * mbedtls_ssl_derive_keys() has to be called after the parsing of the + * extensions. It sets the transform data for the resumed session which in + * case of DTLS includes the server CID extracted from the CID extension. + */ + if( ssl->handshake->resume ) + { + if( ( ret = mbedtls_ssl_derive_keys( ssl ) ) != 0 ) + { + MBEDTLS_SSL_DEBUG_RET( 1, "mbedtls_ssl_derive_keys", ret ); + mbedtls_ssl_send_alert_message( + ssl, + MBEDTLS_SSL_ALERT_LEVEL_FATAL, + MBEDTLS_SSL_ALERT_MSG_INTERNAL_ERROR ); + return( ret ); + } + } + + /* * Renegotiation security checks */ if( ssl->secure_renegotiation == MBEDTLS_SSL_LEGACY_RENEGOTIATION && @@ -2591,6 +2631,7 @@ static int ssl_parse_server_hello( mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_server_dh_params( mbedtls_ssl_context *ssl, unsigned char **p, unsigned char *end ) @@ -2637,6 +2678,7 @@ static int ssl_parse_server_dh_params( mbedtls_ssl_context *ssl, defined(MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_check_server_ecdh_params( const mbedtls_ssl_context *ssl ) { const mbedtls_ecp_curve_info *curve_info; @@ -2678,6 +2720,7 @@ static int ssl_check_server_ecdh_params( const mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_USE_PSA_CRYPTO) && \ ( defined(MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED) ) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_server_ecdh_params_psa( mbedtls_ssl_context *ssl, unsigned char **p, unsigned char *end ) @@ -2703,6 +2746,10 @@ static int ssl_parse_server_ecdh_params_psa( mbedtls_ssl_context *ssl, tls_id <<= 8; tls_id |= *(*p)++; + /* Check it's a curve we offered */ + if( mbedtls_ssl_check_curve_tls_id( ssl, tls_id ) != 0 ) + return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); + /* Convert EC group to PSA key type. 
*/ if( ( handshake->ecdh_psa_type = mbedtls_psa_parse_tls_ecc_group( tls_id, &ecdh_bits ) ) == 0 ) @@ -2740,6 +2787,7 @@ static int ssl_parse_server_ecdh_params_psa( mbedtls_ssl_context *ssl, #if defined(MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_server_ecdh_params( mbedtls_ssl_context *ssl, unsigned char **p, unsigned char *end ) @@ -2779,6 +2827,7 @@ static int ssl_parse_server_ecdh_params( mbedtls_ssl_context *ssl, MBEDTLS_KEY_EXCHANGE_ECDHE_PSK_ENABLED */ #if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_server_psk_hint( mbedtls_ssl_context *ssl, unsigned char **p, unsigned char *end ) @@ -2825,6 +2874,7 @@ static int ssl_parse_server_psk_hint( mbedtls_ssl_context *ssl, /* * Generate a pre-master secret and encrypt it with the server's RSA key */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_encrypted_pms( mbedtls_ssl_context *ssl, size_t offset, size_t *olen, size_t pms_offset ) @@ -2912,6 +2962,7 @@ static int ssl_write_encrypted_pms( mbedtls_ssl_context *ssl, #if defined(MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDHE_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_signature_algorithm( mbedtls_ssl_context *ssl, unsigned char **p, unsigned char *end, @@ -2978,6 +3029,7 @@ static int ssl_parse_signature_algorithm( mbedtls_ssl_context *ssl, #if defined(MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_get_ecdh_params_from_cert( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -2996,6 +3048,8 @@ static int ssl_get_ecdh_params_from_cert( mbedtls_ssl_context *ssl ) peer_pk = &ssl->session_negotiate->peer_cert->pk; #endif /* MBEDTLS_SSL_KEEP_PEER_CERTIFICATE */ + /* This is a public key, so it can't be opaque, so can_do() is a good + * enough check to ensure pk_ec() is safe to use below. */ if( ! mbedtls_pk_can_do( peer_pk, MBEDTLS_PK_ECKEY ) ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "server key not ECDH capable" ) ); @@ -3029,6 +3083,7 @@ static int ssl_get_ecdh_params_from_cert( mbedtls_ssl_context *ssl ) #endif /* MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) || MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_server_key_exchange( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -3147,7 +3202,7 @@ start_processing: MBEDTLS_SSL_ALERT_MSG_ILLEGAL_PARAMETER ); return( MBEDTLS_ERR_SSL_BAD_HS_SERVER_KEY_EXCHANGE ); } - } /* FALLTROUGH */ + } /* FALLTHROUGH */ #endif /* MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED */ #if defined(MBEDTLS_KEY_EXCHANGE_PSK_ENABLED) || \ @@ -3435,6 +3490,7 @@ exit: } #if ! 
defined(MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_certificate_request( mbedtls_ssl_context *ssl ) { const mbedtls_ssl_ciphersuite_t *ciphersuite_info = @@ -3453,6 +3509,7 @@ static int ssl_parse_certificate_request( mbedtls_ssl_context *ssl ) return( MBEDTLS_ERR_SSL_INTERNAL_ERROR ); } #else /* MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_certificate_request( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -3624,6 +3681,7 @@ exit: } #endif /* MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_server_hello_done( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -3663,6 +3721,7 @@ static int ssl_parse_server_hello_done( mbedtls_ssl_context *ssl ) return( 0 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_client_key_exchange( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -3718,7 +3777,8 @@ static int ssl_write_client_key_exchange( mbedtls_ssl_context *ssl ) if( ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_RSA || ciphersuite_info->key_exchange == MBEDTLS_KEY_EXCHANGE_ECDHE_ECDSA ) { - psa_status_t status; + psa_status_t status = PSA_ERROR_CORRUPTION_DETECTED; + psa_status_t destruction_status = PSA_ERROR_CORRUPTION_DETECTED; psa_key_attributes_t key_attributes; mbedtls_ssl_handshake_params *handshake = ssl->handshake; @@ -3761,13 +3821,19 @@ static int ssl_write_client_key_exchange( mbedtls_ssl_context *ssl ) own_pubkey, sizeof( own_pubkey ), &own_pubkey_len ); if( status != PSA_SUCCESS ) + { + psa_destroy_key( handshake->ecdh_psa_privkey ); + handshake->ecdh_psa_privkey = MBEDTLS_SVC_KEY_ID_INIT; return( MBEDTLS_ERR_SSL_HW_ACCEL_FAILED ); + } if( mbedtls_psa_tls_psa_ec_to_ecpoint( own_pubkey, own_pubkey_len, &own_pubkey_ecpoint, &own_pubkey_ecpoint_len ) != 0 ) { + psa_destroy_key( handshake->ecdh_psa_privkey ); + handshake->ecdh_psa_privkey = MBEDTLS_SVC_KEY_ID_INIT; return( MBEDTLS_ERR_SSL_HW_ACCEL_FAILED ); } @@ -3787,13 +3853,12 @@ static int ssl_write_client_key_exchange( mbedtls_ssl_context *ssl ) ssl->handshake->premaster, sizeof( ssl->handshake->premaster ), &ssl->handshake->pmslen ); - if( status != PSA_SUCCESS ) - return( MBEDTLS_ERR_SSL_HW_ACCEL_FAILED ); - status = psa_destroy_key( handshake->ecdh_psa_privkey ); - if( status != PSA_SUCCESS ) - return( MBEDTLS_ERR_SSL_HW_ACCEL_FAILED ); + destruction_status = psa_destroy_key( handshake->ecdh_psa_privkey ); handshake->ecdh_psa_privkey = MBEDTLS_SVC_KEY_ID_INIT; + + if( status != PSA_SUCCESS || destruction_status != PSA_SUCCESS ) + return( MBEDTLS_ERR_SSL_HW_ACCEL_FAILED ); } else #endif /* MBEDTLS_USE_PSA_CRYPTO && @@ -3918,7 +3983,10 @@ ecdh_calc_secret: #if defined(MBEDTLS_USE_PSA_CRYPTO) /* Opaque PSKs are currently only supported for PSK-only suites. */ if( ssl_conf_has_static_raw_psk( ssl->conf ) == 0 ) + { + MBEDTLS_SSL_DEBUG_MSG( 1, ( "opaque PSK not supported with RSA-PSK" ) ); return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE ); + } #endif /* MBEDTLS_USE_PSA_CRYPTO */ if( ( ret = ssl_write_encrypted_pms( ssl, header_len, @@ -3933,7 +4001,10 @@ ecdh_calc_secret: #if defined(MBEDTLS_USE_PSA_CRYPTO) /* Opaque PSKs are currently only supported for PSK-only suites. 
*/ if( ssl_conf_has_static_raw_psk( ssl->conf ) == 0 ) + { + MBEDTLS_SSL_DEBUG_MSG( 1, ( "opaque PSK not supported with DHE-PSK" ) ); return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE ); + } #endif /* MBEDTLS_USE_PSA_CRYPTO */ /* @@ -3970,7 +4041,10 @@ ecdh_calc_secret: #if defined(MBEDTLS_USE_PSA_CRYPTO) /* Opaque PSKs are currently only supported for PSK-only suites. */ if( ssl_conf_has_static_raw_psk( ssl->conf ) == 0 ) + { + MBEDTLS_SSL_DEBUG_MSG( 1, ( "opaque PSK not supported with ECDHE-PSK" ) ); return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE ); + } #endif /* MBEDTLS_USE_PSA_CRYPTO */ /* @@ -4080,6 +4154,7 @@ ecdh_calc_secret: } #if !defined(MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_certificate_verify( mbedtls_ssl_context *ssl ) { const mbedtls_ssl_ciphersuite_t *ciphersuite_info = @@ -4105,6 +4180,7 @@ static int ssl_write_certificate_verify( mbedtls_ssl_context *ssl ) return( MBEDTLS_ERR_SSL_INTERNAL_ERROR ); } #else /* !MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_certificate_verify( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE; @@ -4277,6 +4353,7 @@ sign: #endif /* MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */ #if defined(MBEDTLS_SSL_SESSION_TICKETS) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_new_session_ticket( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; diff --git a/thirdparty/mbedtls/library/ssl_cookie.c b/thirdparty/mbedtls/library/ssl_cookie.c index abf29ae717..3781796b72 100644 --- a/thirdparty/mbedtls/library/ssl_cookie.c +++ b/thirdparty/mbedtls/library/ssl_cookie.c @@ -63,7 +63,7 @@ /* * Cookies are formed of a 4-bytes timestamp (or serial number) and - * an HMAC of timestemp and client ID. + * an HMAC of timestamp and client ID. 
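Editor's note: the `ssl_write_client_key_exchange` hunks above close a key-leak window: previously, an error from `psa_raw_key_agreement` returned before `psa_destroy_key` ran, leaving the ephemeral ECDH key alive. The new shape destroys the key on every path and only then combines both statuses. A minimal self-contained sketch of that pattern, with illustrative parameter names:

```c
#include <psa/crypto.h>

/* Sketch of the cleanup pattern above: derive the shared secret, destroy
 * the ephemeral key unconditionally, and fail if either step failed, so
 * the key cannot outlive the handshake on any path. */
static int derive_pms(mbedtls_svc_key_id_t privkey,
                      const uint8_t *peer, size_t peer_len,
                      uint8_t *pms, size_t pms_size, size_t *pms_len)
{
    psa_status_t status = PSA_ERROR_CORRUPTION_DETECTED;
    psa_status_t destruction_status = PSA_ERROR_CORRUPTION_DETECTED;

    status = psa_raw_key_agreement(PSA_ALG_ECDH, privkey,
                                   peer, peer_len,
                                   pms, pms_size, pms_len);

    /* Destroy the ephemeral key no matter what happened above. */
    destruction_status = psa_destroy_key(privkey);

    if (status != PSA_SUCCESS || destruction_status != PSA_SUCCESS)
        return -1;  /* mbedtls maps this to an SSL-layer error code */
    return 0;
}
```

Initializing both statuses to `PSA_ERROR_CORRUPTION_DETECTED` mirrors the diff's defensive style: an accidentally unset status reads as failure, never as success.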
*/ #define COOKIE_LEN ( 4 + COOKIE_HMAC_LEN ) @@ -122,6 +122,7 @@ int mbedtls_ssl_cookie_setup( mbedtls_ssl_cookie_ctx *ctx, /* * Generate the HMAC part of a cookie */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_cookie_hmac( mbedtls_md_context_t *hmac_ctx, const unsigned char time[4], unsigned char **p, unsigned char *end, diff --git a/thirdparty/mbedtls/library/ssl_msg.c b/thirdparty/mbedtls/library/ssl_msg.c index 0b696dd561..e47c538888 100644 --- a/thirdparty/mbedtls/library/ssl_msg.c +++ b/thirdparty/mbedtls/library/ssl_msg.c @@ -91,6 +91,7 @@ int mbedtls_ssl_check_timer( mbedtls_ssl_context *ssl ) } #if defined(MBEDTLS_SSL_RECORD_CHECKING) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_record_header( mbedtls_ssl_context const *ssl, unsigned char *buf, size_t len, @@ -165,11 +166,16 @@ exit: static void ssl_buffering_free_slot( mbedtls_ssl_context *ssl, uint8_t slot ); static void ssl_free_buffered_record( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_load_buffered_message( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_load_buffered_record( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_buffer_message( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_buffer_future_record( mbedtls_ssl_context *ssl, mbedtls_record const *rec ); +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_next_record_is_in_datagram( mbedtls_ssl_context *ssl ); static size_t ssl_get_maximum_datagram_size( mbedtls_ssl_context const *ssl ) @@ -187,6 +193,7 @@ static size_t ssl_get_maximum_datagram_size( mbedtls_ssl_context const *ssl ) return( out_buf_len ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_get_remaining_space_in_datagram( mbedtls_ssl_context const *ssl ) { size_t const bytes_written = ssl->out_left; @@ -203,6 +210,7 @@ static int ssl_get_remaining_space_in_datagram( mbedtls_ssl_context const *ssl ) return( (int) ( mtu - bytes_written ) ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_get_remaining_payload_in_datagram( mbedtls_ssl_context const *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -254,6 +262,7 @@ static int ssl_get_remaining_payload_in_datagram( mbedtls_ssl_context const *ssl * Double the retransmit timeout value, within the allowed range, * returning -1 if the maximum value has already been reached. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_double_retransmit_timeout( mbedtls_ssl_context *ssl ) { uint32_t new_timeout; @@ -353,6 +362,7 @@ static size_t ssl_compute_padding_length( size_t len, * - A negative error code if `max_len` didn't offer enough space * for the expansion. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_build_inner_plaintext( unsigned char *content, size_t *content_size, size_t remaining, @@ -380,6 +390,7 @@ static int ssl_build_inner_plaintext( unsigned char *content, /* This function parses a (D)TLSInnerPlaintext structure. * See ssl_build_inner_plaintext() for details. 
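Editor's note: several annotated functions in `ssl_msg.c` handle the (D)TLSInnerPlaintext structure referenced just above, where the true record type is hidden inside the encrypted payload. A minimal sketch of the encode/parse pair, assuming the layout `content || real_type (1 byte) || 0x00 padding`:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Append the real record type and zero padding; fail if they don't fit. */
static int build_inner_plaintext(unsigned char *content, size_t *len,
                                 size_t max_len, uint8_t rec_type,
                                 size_t pad)
{
    if (*len + 1 + pad > max_len)
        return -1;
    content[(*len)++] = rec_type;     /* true record type moves inside */
    memset(content + *len, 0, pad);   /* zero padding hides the length */
    *len += pad;
    return 0;
}

/* Parsing reverses this: strip trailing zeros, then the last
 * non-zero byte is the real record type. All-zero input is malformed. */
static int parse_inner_plaintext(const unsigned char *content, size_t *len,
                                 uint8_t *rec_type)
{
    size_t remaining = *len;
    do {
        if (remaining == 0)
            return -1;
        remaining--;
    } while (content[remaining] == 0);
    *rec_type = content[remaining];
    *len = remaining;
    return 0;
}
```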
*/ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_inner_plaintext( unsigned char const *content, size_t *content_size, uint8_t *rec_type ) @@ -474,6 +485,7 @@ static void ssl_extract_add_data_from_record( unsigned char* add_data, /* * SSLv3.0 MAC functions */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_mac( mbedtls_md_context_t *md_ctx, const unsigned char *secret, const unsigned char *buf, size_t len, @@ -541,6 +553,7 @@ static int ssl_mac( mbedtls_md_context_t *md_ctx, #if defined(MBEDTLS_GCM_C) || \ defined(MBEDTLS_CCM_C) || \ defined(MBEDTLS_CHACHAPOLY_C) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_transform_aead_dynamic_iv_is_explicit( mbedtls_ssl_transform const *transform ) { @@ -1245,7 +1258,7 @@ int mbedtls_ssl_decrypt_buf( mbedtls_ssl_context const *ssl, add_data, add_data_len ); /* Because of the check above, we know that there are - * explicit_iv_len Bytes preceeding data, and taglen + * explicit_iv_len Bytes preceding data, and taglen * bytes following data + data_len. This justifies * the debug message and the invocation of * mbedtls_cipher_auth_decrypt() below. */ @@ -1590,8 +1603,8 @@ int mbedtls_ssl_decrypt_buf( mbedtls_ssl_context const *ssl, #if defined(MBEDTLS_SSL_SOME_MODES_USE_MAC) if( auth_done == 0 ) { - unsigned char mac_expect[MBEDTLS_SSL_MAC_ADD]; - unsigned char mac_peer[MBEDTLS_SSL_MAC_ADD]; + unsigned char mac_expect[MBEDTLS_SSL_MAC_ADD] = { 0 }; + unsigned char mac_peer[MBEDTLS_SSL_MAC_ADD] = { 0 }; /* If the initial value of padlen was such that * data_len < maclen + padlen + 1, then padlen @@ -1738,6 +1751,7 @@ int mbedtls_ssl_decrypt_buf( mbedtls_ssl_context const *ssl, /* * Compression/decompression functions */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_compress_buf( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -1790,6 +1804,7 @@ static int ssl_compress_buf( mbedtls_ssl_context *ssl ) return( 0 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_decompress_buf( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -2149,6 +2164,7 @@ int mbedtls_ssl_flush_output( mbedtls_ssl_context *ssl ) /* * Append current handshake message to current outgoing flight */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_flight_append( mbedtls_ssl_context *ssl ) { mbedtls_ssl_flight_item *msg; @@ -2215,6 +2231,7 @@ void mbedtls_ssl_flight_free( mbedtls_ssl_flight_item *flight ) /* * Swap transform_out and out_ctr with the alternative ones */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_swap_epochs( mbedtls_ssl_context *ssl ) { mbedtls_ssl_transform *tmp_transform; @@ -2857,6 +2874,7 @@ int mbedtls_ssl_write_record( mbedtls_ssl_context *ssl, uint8_t force_flush ) #if defined(MBEDTLS_SSL_PROTO_DTLS) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_hs_is_proper_fragment( mbedtls_ssl_context *ssl ) { if( ssl->in_msglen < ssl->in_hslen || @@ -2882,6 +2900,7 @@ static uint32_t ssl_get_hs_frag_off( mbedtls_ssl_context const *ssl ) ssl->in_msg[8] ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_check_hs_header( mbedtls_ssl_context const *ssl ) { uint32_t msg_len, frag_off, frag_len; @@ -2948,6 +2967,7 @@ static void ssl_bitmask_set( unsigned char *mask, size_t offset, size_t len ) /* * Check that bitmask is full */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_bitmask_check( unsigned char *mask, size_t len ) { size_t i; @@ -3147,6 +3167,7 @@ static inline uint64_t ssl_load_six_bytes( unsigned char *buf ) ( (uint64_t) buf[5] ) ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int mbedtls_ssl_dtls_record_replay_check( 
mbedtls_ssl_context *ssl, uint8_t *record_in_ctr ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -3229,8 +3250,8 @@ void mbedtls_ssl_dtls_replay_update( mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_SSL_DTLS_CLIENT_PORT_REUSE) && defined(MBEDTLS_SSL_SRV_C) /* - * Without any SSL context, check if a datagram looks like a ClientHello with - * a valid cookie, and if it doesn't, generate a HelloVerifyRequest message. + * Check if a datagram looks like a ClientHello with a valid cookie, + * and if it doesn't, generate a HelloVerifyRequest message. * Both input and output include full DTLS headers. * * - if cookie is valid, return 0 @@ -3239,10 +3260,10 @@ void mbedtls_ssl_dtls_replay_update( mbedtls_ssl_context *ssl ) * return MBEDTLS_ERR_SSL_HELLO_VERIFY_REQUIRED * - otherwise return a specific error code */ -static int ssl_check_dtls_clihlo_cookie( - mbedtls_ssl_cookie_write_t *f_cookie_write, - mbedtls_ssl_cookie_check_t *f_cookie_check, - void *p_cookie, +MBEDTLS_CHECK_RETURN_CRITICAL +MBEDTLS_STATIC_TESTABLE +int mbedtls_ssl_check_dtls_clihlo_cookie( + mbedtls_ssl_context *ssl, const unsigned char *cli_id, size_t cli_id_len, const unsigned char *in, size_t in_len, unsigned char *obuf, size_t buf_len, size_t *olen ) @@ -3276,26 +3297,53 @@ static int ssl_check_dtls_clihlo_cookie( * * Minimum length is 61 bytes. */ - if( in_len < 61 || - in[0] != MBEDTLS_SSL_MSG_HANDSHAKE || + MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: in_len=%u", + (unsigned) in_len ) ); + MBEDTLS_SSL_DEBUG_BUF( 4, "cli_id", cli_id, cli_id_len ); + if( in_len < 61 ) + { + MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: record too short" ) ); + return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); + } + if( in[0] != MBEDTLS_SSL_MSG_HANDSHAKE || in[3] != 0 || in[4] != 0 || in[19] != 0 || in[20] != 0 || in[21] != 0 ) { + MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: not a good ClientHello" ) ); + MBEDTLS_SSL_DEBUG_MSG( 4, ( " type=%u epoch=%u fragment_offset=%u", + in[0], + (unsigned) in[3] << 8 | in[4], + (unsigned) in[19] << 16 | in[20] << 8 | in[21] ) ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); } sid_len = in[59]; - if( sid_len > in_len - 61 ) + if( 59 + 1 + sid_len + 1 > in_len ) + { + MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: sid_len=%u > %u", + (unsigned) sid_len, + (unsigned) in_len - 61 ) ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); + } + MBEDTLS_SSL_DEBUG_BUF( 4, "sid received from network", + in + 60, sid_len ); cookie_len = in[60 + sid_len]; - if( cookie_len > in_len - 60 ) + if( 59 + 1 + sid_len + 1 + cookie_len > in_len ) + { + MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: cookie_len=%u > %u", + (unsigned) cookie_len, + (unsigned) ( in_len - sid_len - 61 ) ) ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); + } - if( f_cookie_check( p_cookie, in + sid_len + 61, cookie_len, - cli_id, cli_id_len ) == 0 ) + MBEDTLS_SSL_DEBUG_BUF( 4, "cookie received from network", + in + sid_len + 61, cookie_len ); + if( ssl->conf->f_cookie_check( ssl->conf->p_cookie, + in + sid_len + 61, cookie_len, + cli_id, cli_id_len ) == 0 ) { - /* Valid cookie */ + MBEDTLS_SSL_DEBUG_MSG( 4, ( "check cookie: valid" ) ); return( 0 ); } @@ -3330,8 +3378,9 @@ static int ssl_check_dtls_clihlo_cookie( /* Generate and write actual cookie */ p = obuf + 28; - if( f_cookie_write( p_cookie, - &p, obuf + buf_len, cli_id, cli_id_len ) != 0 ) + if( ssl->conf->f_cookie_write( ssl->conf->p_cookie, + &p, obuf + buf_len, + cli_id, cli_id_len ) != 0 ) { return( MBEDTLS_ERR_SSL_INTERNAL_ERROR ); } @@ -3370,6 +3419,7 @@ static int ssl_check_dtls_clihlo_cookie( * 
includes the case of MBEDTLS_ERR_SSL_CLIENT_RECONNECT and of unexpected * errors, and is the right thing to do in both cases). */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_handle_possible_reconnect( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -3385,15 +3435,13 @@ static int ssl_handle_possible_reconnect( mbedtls_ssl_context *ssl ) return( 0 ); } - ret = ssl_check_dtls_clihlo_cookie( - ssl->conf->f_cookie_write, - ssl->conf->f_cookie_check, - ssl->conf->p_cookie, + ret = mbedtls_ssl_check_dtls_clihlo_cookie( + ssl, ssl->cli_id, ssl->cli_id_len, ssl->in_buf, ssl->in_left, ssl->out_buf, MBEDTLS_SSL_OUT_CONTENT_LEN, &len ); - MBEDTLS_SSL_DEBUG_RET( 2, "ssl_check_dtls_clihlo_cookie", ret ); + MBEDTLS_SSL_DEBUG_RET( 2, "mbedtls_ssl_check_dtls_clihlo_cookie", ret ); if( ret == MBEDTLS_ERR_SSL_HELLO_VERIFY_REQUIRED ) { @@ -3427,6 +3475,7 @@ static int ssl_handle_possible_reconnect( mbedtls_ssl_context *ssl ) } #endif /* MBEDTLS_SSL_DTLS_CLIENT_PORT_REUSE && MBEDTLS_SSL_SRV_C */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_check_record_type( uint8_t record_type ) { if( record_type != MBEDTLS_SSL_MSG_HANDSHAKE && @@ -3459,6 +3508,7 @@ static int ssl_check_record_type( uint8_t record_type ) * Point 2 is needed when the peer is resending, and we have already received * the first record from a datagram but are still waiting for the others. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_record_header( mbedtls_ssl_context const *ssl, unsigned char *buf, size_t len, @@ -3571,7 +3621,6 @@ static int ssl_parse_record_header( mbedtls_ssl_context const *ssl, /* * Parse and validate record version */ - rec->ver[0] = buf[ rec_hdr_version_offset + 0 ]; rec->ver[1] = buf[ rec_hdr_version_offset + 1 ]; mbedtls_ssl_read_version( &major_ver, &minor_ver, @@ -3580,16 +3629,19 @@ static int ssl_parse_record_header( mbedtls_ssl_context const *ssl, if( major_ver != ssl->major_ver ) { - MBEDTLS_SSL_DEBUG_MSG( 1, ( "major version mismatch" ) ); + MBEDTLS_SSL_DEBUG_MSG( 1, ( "major version mismatch: got %u, expected %u", + (unsigned) major_ver, + (unsigned) ssl->major_ver ) ); return( MBEDTLS_ERR_SSL_INVALID_RECORD ); } if( minor_ver > ssl->conf->max_minor_ver ) { - MBEDTLS_SSL_DEBUG_MSG( 1, ( "minor version mismatch" ) ); + MBEDTLS_SSL_DEBUG_MSG( 1, ( "minor version mismatch: got %u, expected max %u", + (unsigned) minor_ver, + (unsigned) ssl->conf->max_minor_ver ) ); return( MBEDTLS_ERR_SSL_INVALID_RECORD ); } - /* * Parse/Copy record sequence number. */ @@ -3692,6 +3744,7 @@ static int ssl_parse_record_header( mbedtls_ssl_context const *ssl, #if defined(MBEDTLS_SSL_DTLS_CLIENT_PORT_REUSE) && defined(MBEDTLS_SSL_SRV_C) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_check_client_reconnect( mbedtls_ssl_context *ssl ) { unsigned int rec_epoch = ( ssl->in_ctr[0] << 8 ) | ssl->in_ctr[1]; @@ -3721,6 +3774,7 @@ static int ssl_check_client_reconnect( mbedtls_ssl_context *ssl ) /* * If applicable, decrypt record content */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_prepare_record_content( mbedtls_ssl_context *ssl, mbedtls_record *rec ) { @@ -3854,7 +3908,7 @@ static int ssl_prepare_record_content( mbedtls_ssl_context *ssl, /* Check actual (decrypted) record content length against * configured maximum. 
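Editor's note: the `mbedtls_ssl_check_dtls_clihlo_cookie` rewrite a little above is more than a refactor. The old `cookie_len > in_len - 60` test ignored `sid_len` entirely, allowing an out-of-bounds read; the new checks accumulate every field's offset on the left-hand side of the comparison, which both accounts for each variable-length field and avoids unsigned subtraction. A compact sketch of the idiom (offsets mirror the ClientHello layout: 61-byte fixed prefix, then session ID, then cookie):

```c
#include <stddef.h>

static int check_clienthello_lengths(size_t in_len,
                                     size_t sid_len, size_t cookie_len)
{
    if (in_len < 61)
        return -1;                               /* fixed header missing */
    if (59 + 1 + sid_len + 1 > in_len)
        return -1;                               /* sid + cookie_len byte */
    if (59 + 1 + sid_len + 1 + cookie_len > in_len)
        return -1;                               /* cookie itself */
    return 0;
}
```

Since `sid_len` and `cookie_len` each come from a single byte (at most 255), the left-hand sums cannot overflow `size_t`.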
*/ - if( ssl->in_msglen > MBEDTLS_SSL_IN_CONTENT_LEN ) + if( rec->data_len > MBEDTLS_SSL_IN_CONTENT_LEN ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad message length" ) ); return( MBEDTLS_ERR_SSL_INVALID_RECORD ); @@ -3872,8 +3926,11 @@ static int ssl_prepare_record_content( mbedtls_ssl_context *ssl, */ /* Helper functions for mbedtls_ssl_read_record(). */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_consume_current_message( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_get_next_record( mbedtls_ssl_context *ssl ); +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_record_is_in_progress( mbedtls_ssl_context *ssl ); int mbedtls_ssl_read_record( mbedtls_ssl_context *ssl, @@ -3961,6 +4018,7 @@ int mbedtls_ssl_read_record( mbedtls_ssl_context *ssl, } #if defined(MBEDTLS_SSL_PROTO_DTLS) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_next_record_is_in_datagram( mbedtls_ssl_context *ssl ) { if( ssl->in_left > ssl->next_record_offset ) @@ -3969,6 +4027,7 @@ static int ssl_next_record_is_in_datagram( mbedtls_ssl_context *ssl ) return( 0 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_load_buffered_message( mbedtls_ssl_context *ssl ) { mbedtls_ssl_handshake_params * const hs = ssl->handshake; @@ -4066,6 +4125,7 @@ exit: return( ret ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_buffer_make_space( mbedtls_ssl_context *ssl, size_t desired ) { @@ -4108,6 +4168,7 @@ static int ssl_buffer_make_space( mbedtls_ssl_context *ssl, return( -1 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_buffer_message( mbedtls_ssl_context *ssl ) { int ret = 0; @@ -4312,6 +4373,7 @@ exit: } #endif /* MBEDTLS_SSL_PROTO_DTLS */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_consume_current_message( mbedtls_ssl_context *ssl ) { /* @@ -4399,6 +4461,7 @@ static int ssl_consume_current_message( mbedtls_ssl_context *ssl ) return( 0 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_record_is_in_progress( mbedtls_ssl_context *ssl ) { if( ssl->in_msglen > 0 ) @@ -4425,6 +4488,7 @@ static void ssl_free_buffered_record( mbedtls_ssl_context *ssl ) } } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_load_buffered_record( mbedtls_ssl_context *ssl ) { mbedtls_ssl_handshake_params * const hs = ssl->handshake; @@ -4482,6 +4546,7 @@ exit: return( 0 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_buffer_future_record( mbedtls_ssl_context *ssl, mbedtls_record const *rec ) { @@ -4540,6 +4605,7 @@ static int ssl_buffer_future_record( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_PROTO_DTLS */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_get_next_record( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -4918,6 +4984,9 @@ int mbedtls_ssl_send_alert_message( mbedtls_ssl_context *ssl, if( ssl == NULL || ssl->conf == NULL ) return( MBEDTLS_ERR_SSL_BAD_INPUT_DATA ); + if( ssl->out_left != 0 ) + return( mbedtls_ssl_flush_output( ssl ) ); + MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> send alert message" ) ); MBEDTLS_SSL_DEBUG_MSG( 3, ( "send alert level=%u message=%u", level, message )); @@ -5287,6 +5356,7 @@ int mbedtls_ssl_get_record_expansion( const mbedtls_ssl_context *ssl ) /* * Check record counters and renegotiate if they're above the limit. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_check_ctr_renegotiate( mbedtls_ssl_context *ssl ) { size_t ep_len = mbedtls_ssl_ep_len( ssl ); @@ -5637,6 +5707,7 @@ int mbedtls_ssl_read( mbedtls_ssl_context *ssl, unsigned char *buf, size_t len ) * Therefore, it is possible that the input message length is 0 and the * corresponding return code is 0 on success. 
*/ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_real( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) { @@ -5708,6 +5779,7 @@ static int ssl_write_real( mbedtls_ssl_context *ssl, * remember whether we already did the split or not. */ #if defined(MBEDTLS_SSL_CBC_RECORD_SPLITTING) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_split( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) { @@ -5790,9 +5862,6 @@ int mbedtls_ssl_close_notify( mbedtls_ssl_context *ssl ) MBEDTLS_SSL_DEBUG_MSG( 2, ( "=> write close notify" ) ); - if( ssl->out_left != 0 ) - return( mbedtls_ssl_flush_output( ssl ) ); - if( ssl->state == MBEDTLS_SSL_HANDSHAKE_OVER ) { if( ( ret = mbedtls_ssl_send_alert_message( ssl, diff --git a/thirdparty/mbedtls/library/ssl_srv.c b/thirdparty/mbedtls/library/ssl_srv.c index 1a63173204..2efb13cc33 100644 --- a/thirdparty/mbedtls/library/ssl_srv.c +++ b/thirdparty/mbedtls/library/ssl_srv.c @@ -78,6 +78,7 @@ void mbedtls_ssl_conf_dtls_cookies( mbedtls_ssl_config *conf, #endif /* MBEDTLS_SSL_DTLS_HELLO_VERIFY */ #if defined(MBEDTLS_SSL_SERVER_NAME_INDICATION) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_servername_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -147,6 +148,7 @@ static int ssl_parse_servername_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_SERVER_NAME_INDICATION */ #if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_conf_has_psk_or_cb( mbedtls_ssl_config const *conf ) { if( conf->f_psk != NULL ) @@ -167,6 +169,7 @@ static int ssl_conf_has_psk_or_cb( mbedtls_ssl_config const *conf ) } #if defined(MBEDTLS_USE_PSA_CRYPTO) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_use_opaque_psk( mbedtls_ssl_context const *ssl ) { if( ssl->conf->f_psk != NULL ) @@ -188,6 +191,7 @@ static int ssl_use_opaque_psk( mbedtls_ssl_context const *ssl ) #endif /* MBEDTLS_USE_PSA_CRYPTO */ #endif /* MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_renegotiation_info( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -239,6 +243,7 @@ static int ssl_parse_renegotiation_info( mbedtls_ssl_context *ssl, * This needs to be done at a later stage. 
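Editor's note: the `ssl_msg.c` hunk above adds a pending-output flush at the top of `mbedtls_ssl_send_alert_message` (the matching check is dropped from `mbedtls_ssl_close_notify` just below), so every alert path finishes any half-written record before composing a new one. A hedged sketch of the guard's shape, reusing the real `out_left` bookkeeping:

```c
#include "mbedtls/ssl.h"
#include "mbedtls/ssl_internal.h"  /* mbedtls_ssl_flush_output() */

/* Sketch: before composing a new alert record, drain whatever part of
 * the previous record is still queued. A non-blocking caller sees
 * MBEDTLS_ERR_SSL_WANT_WRITE and simply calls again later. */
static int write_alert_guarded(mbedtls_ssl_context *ssl,
                               unsigned char level, unsigned char message)
{
    int ret;

    if (ssl->out_left != 0) {                    /* half-written record? */
        if ((ret = mbedtls_ssl_flush_output(ssl)) != 0)
            return ret;                          /* e.g. WANT_WRITE: retry */
    }
    return mbedtls_ssl_send_alert_message(ssl, level, message);
}
```

Without this ordering, two records could interleave on the wire whenever an alert was raised while a previous write was still partially flushed.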
* */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_signature_algorithms_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -317,6 +322,7 @@ static int ssl_parse_signature_algorithms_ext( mbedtls_ssl_context *ssl, #if defined(MBEDTLS_ECDH_C) || defined(MBEDTLS_ECDSA_C) || \ defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_supported_elliptic_curves( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -383,6 +389,7 @@ static int ssl_parse_supported_elliptic_curves( mbedtls_ssl_context *ssl, return( 0 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_supported_point_formats( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -425,6 +432,7 @@ static int ssl_parse_supported_point_formats( mbedtls_ssl_context *ssl, MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */ #if defined(MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_ecjpake_kkpp( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -454,6 +462,7 @@ static int ssl_parse_ecjpake_kkpp( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_KEY_EXCHANGE_ECJPAKE_ENABLED */ #if defined(MBEDTLS_SSL_MAX_FRAGMENT_LENGTH) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_max_fragment_length_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -473,6 +482,7 @@ static int ssl_parse_max_fragment_length_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_MAX_FRAGMENT_LENGTH */ #if defined(MBEDTLS_SSL_DTLS_CONNECTION_ID) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_cid_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -545,6 +555,7 @@ static int ssl_parse_cid_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_DTLS_CONNECTION_ID */ #if defined(MBEDTLS_SSL_TRUNCATED_HMAC) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_truncated_hmac_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -567,6 +578,7 @@ static int ssl_parse_truncated_hmac_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_TRUNCATED_HMAC */ #if defined(MBEDTLS_SSL_ENCRYPT_THEN_MAC) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_encrypt_then_mac_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -592,6 +604,7 @@ static int ssl_parse_encrypt_then_mac_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_ENCRYPT_THEN_MAC */ #if defined(MBEDTLS_SSL_EXTENDED_MASTER_SECRET) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_extended_ms_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -617,6 +630,7 @@ static int ssl_parse_extended_ms_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_EXTENDED_MASTER_SECRET */ #if defined(MBEDTLS_SSL_SESSION_TICKETS) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_session_ticket_ext( mbedtls_ssl_context *ssl, unsigned char *buf, size_t len ) @@ -691,6 +705,7 @@ static int ssl_parse_session_ticket_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_SESSION_TICKETS */ #if defined(MBEDTLS_SSL_ALPN) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_alpn_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) { @@ -779,6 +794,7 @@ static int ssl_parse_alpn_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_ALPN */ #if defined(MBEDTLS_SSL_DTLS_SRTP) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_use_srtp_ext( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -907,6 +923,7 @@ static int ssl_parse_use_srtp_ext( mbedtls_ssl_context *ssl, * Return 0 
if the given key uses one of the acceptable curves, -1 otherwise */ #if defined(MBEDTLS_ECDSA_C) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_check_key_curve( mbedtls_pk_context *pk, const mbedtls_ecp_curve_info **curves ) { @@ -928,6 +945,7 @@ static int ssl_check_key_curve( mbedtls_pk_context *pk, * Try picking a certificate for this ciphersuite, * return 0 on success and -1 on failure. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_pick_cert( mbedtls_ssl_context *ssl, const mbedtls_ssl_ciphersuite_t * ciphersuite_info ) { @@ -1032,6 +1050,7 @@ static int ssl_pick_cert( mbedtls_ssl_context *ssl, * Check if a given ciphersuite is suitable for use with our config/keys/etc * Sets ciphersuite_info only if the suite matches. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_ciphersuite_match( mbedtls_ssl_context *ssl, int suite_id, const mbedtls_ssl_ciphersuite_t **ciphersuite_info ) { @@ -1147,6 +1166,7 @@ static int ssl_ciphersuite_match( mbedtls_ssl_context *ssl, int suite_id, } #if defined(MBEDTLS_SSL_SRV_SUPPORT_SSLV2_CLIENT_HELLO) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_client_hello_v2( mbedtls_ssl_context *ssl ) { int ret, got_common_suite; @@ -1410,6 +1430,7 @@ have_ciphersuite_v2: /* This function doesn't alert on errors that happen early during ClientHello parsing because they might indicate that the client is not talking SSL/TLS at all and would not understand our alert. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_client_hello( mbedtls_ssl_context *ssl ) { int ret, got_common_suite; @@ -1583,7 +1604,7 @@ read_record_header: * Handshake layer: * 0 . 0 handshake type * 1 . 3 handshake length - * 4 . 5 DTLS only: message seqence number + * 4 . 5 DTLS only: message sequence number * 6 . 8 DTLS only: fragment offset * 9 . 11 DTLS only: fragment length */ @@ -1604,11 +1625,19 @@ read_record_header: MBEDTLS_SSL_DEBUG_MSG( 3, ( "client hello v3, handshake len.: %d", ( buf[1] << 16 ) | ( buf[2] << 8 ) | buf[3] ) ); + if( buf[1] != 0 ) + { + MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message: %u != 0", + (unsigned) buf[1] ) ); + return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); + } /* We don't support fragmentation of ClientHello (yet?) 
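Editor's note: the ClientHello hunks below split the old combined length test apart and log each 24-bit big-endian field before rejecting. A small helper sketch for those fields (handshake header layout per the comment above: byte 0 type, 1..3 length, 4..5 sequence, 6..8 fragment offset, 9..11 fragment length):

```c
#include <stdint.h>

/* Read a 24-bit big-endian integer, as used for handshake length,
 * fragment offset and fragment length. */
static inline uint32_t get_uint24(const unsigned char *p)
{
    return ((uint32_t) p[0] << 16) | ((uint32_t) p[1] << 8) | p[2];
}

/* The no-fragmentation rule enforced below then reads:
 * fragment_offset == 0 and fragment_length == length. */
static int is_unfragmented(const unsigned char *hs)
{
    return get_uint24(hs + 6) == 0 &&                /* fragment_offset */
           get_uint24(hs + 9) == get_uint24(hs + 1); /* frag_len == len */
}
```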
*/ - if( buf[1] != 0 || - msg_len != mbedtls_ssl_hs_hdr_len( ssl ) + ( ( buf[2] << 8 ) | buf[3] ) ) + if( msg_len != mbedtls_ssl_hs_hdr_len( ssl ) + ( ( buf[2] << 8 ) | buf[3] ) ) { - MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message" ) ); + MBEDTLS_SSL_DEBUG_MSG( 1, ( "bad client hello message: %u != %u + %u", + (unsigned) msg_len, + (unsigned) mbedtls_ssl_hs_hdr_len( ssl ), + (unsigned) ( buf[2] << 8 ) | buf[3] ) ); return( MBEDTLS_ERR_SSL_BAD_HS_CLIENT_HELLO ); } @@ -1649,6 +1678,11 @@ read_record_header: * For now we don't support fragmentation, so make sure * fragment_offset == 0 and fragment_length == length */ + MBEDTLS_SSL_DEBUG_MSG( + 4, ( "fragment_offset=%u fragment_length=%u length=%u", + (unsigned) ( ssl->in_msg[6] << 16 | ssl->in_msg[7] << 8 | ssl->in_msg[8] ), + (unsigned) ( ssl->in_msg[9] << 16 | ssl->in_msg[10] << 8 | ssl->in_msg[11] ), + (unsigned) ( ssl->in_msg[1] << 16 | ssl->in_msg[2] << 8 | ssl->in_msg[3] ) ) ); if( ssl->in_msg[6] != 0 || ssl->in_msg[7] != 0 || ssl->in_msg[8] != 0 || memcmp( ssl->in_msg + 1, ssl->in_msg + 9, 3 ) != 0 ) { @@ -2354,12 +2388,8 @@ static void ssl_write_encrypt_then_mac_ext( mbedtls_ssl_context *ssl, const mbedtls_ssl_ciphersuite_t *suite = NULL; const mbedtls_cipher_info_t *cipher = NULL; - if( ssl->session_negotiate->encrypt_then_mac == MBEDTLS_SSL_ETM_DISABLED || - ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_0 ) - { - *olen = 0; - return; - } + if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_0 ) + ssl->session_negotiate->encrypt_then_mac = MBEDTLS_SSL_ETM_DISABLED; /* * RFC 7366: "If a server receives an encrypt-then-MAC request extension @@ -2372,6 +2402,11 @@ static void ssl_write_encrypt_then_mac_ext( mbedtls_ssl_context *ssl, ( cipher = mbedtls_cipher_info_from_type( suite->cipher ) ) == NULL || cipher->mode != MBEDTLS_MODE_CBC ) { + ssl->session_negotiate->encrypt_then_mac = MBEDTLS_SSL_ETM_DISABLED; + } + + if( ssl->session_negotiate->encrypt_then_mac == MBEDTLS_SSL_ETM_DISABLED ) + { *olen = 0; return; } @@ -2685,6 +2720,7 @@ static void ssl_write_use_srtp_ext( mbedtls_ssl_context *ssl, #endif /* MBEDTLS_SSL_DTLS_SRTP */ #if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_hello_verify_request( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -2805,6 +2841,7 @@ exit: mbedtls_ssl_session_free( &session_tmp ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_server_hello( mbedtls_ssl_context *ssl ) { #if defined(MBEDTLS_HAVE_TIME) @@ -3035,6 +3072,7 @@ static int ssl_write_server_hello( mbedtls_ssl_context *ssl ) } #if !defined(MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_certificate_request( mbedtls_ssl_context *ssl ) { const mbedtls_ssl_ciphersuite_t *ciphersuite_info = @@ -3053,6 +3091,7 @@ static int ssl_write_certificate_request( mbedtls_ssl_context *ssl ) return( MBEDTLS_ERR_SSL_INTERNAL_ERROR ); } #else /* !MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_certificate_request( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE; @@ -3222,18 +3261,23 @@ static int ssl_write_certificate_request( mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_KEY_EXCHANGE_ECDH_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_ECDH_ECDSA_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_get_ecdh_params_from_cert( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; + mbedtls_pk_context *own_key = 
mbedtls_ssl_own_key( ssl ); - if( ! mbedtls_pk_can_do( mbedtls_ssl_own_key( ssl ), MBEDTLS_PK_ECKEY ) ) + /* Check if the key is a transparent ECDH key. + * This also ensures that it is safe to call mbedtls_pk_ec(). */ + if( mbedtls_pk_get_type( own_key ) != MBEDTLS_PK_ECKEY && + mbedtls_pk_get_type( own_key ) != MBEDTLS_PK_ECKEY_DH ) { MBEDTLS_SSL_DEBUG_MSG( 1, ( "server key not ECDH capable" ) ); return( MBEDTLS_ERR_SSL_PK_TYPE_MISMATCH ); } if( ( ret = mbedtls_ecdh_get_params( &ssl->handshake->ecdh_ctx, - mbedtls_pk_ec( *mbedtls_ssl_own_key( ssl ) ), + mbedtls_pk_ec( *own_key ), MBEDTLS_ECDH_OURS ) ) != 0 ) { MBEDTLS_SSL_DEBUG_RET( 1, ( "mbedtls_ecdh_get_params" ), ret ); @@ -3247,6 +3291,7 @@ static int ssl_get_ecdh_params_from_cert( mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_KEY_EXCHANGE_WITH_SERVER_SIGNATURE_ENABLED) && \ defined(MBEDTLS_SSL_ASYNC_PRIVATE) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_resume_server_key_exchange( mbedtls_ssl_context *ssl, size_t *signature_len ) { @@ -3274,6 +3319,7 @@ static int ssl_resume_server_key_exchange( mbedtls_ssl_context *ssl, /* Prepare the ServerKeyExchange message, up to and including * calculating the signature if any, but excluding formatting the * signature and sending the message. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_prepare_server_key_exchange( mbedtls_ssl_context *ssl, size_t *signature_len ) { @@ -3643,6 +3689,7 @@ curve_matching_done: * that do not include a ServerKeyExchange message, do nothing. Either * way, if successful, move on to the next step in the SSL state * machine. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_server_key_exchange( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -3664,7 +3711,12 @@ static int ssl_write_server_key_exchange( mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_KEY_EXCHANGE_SOME_ECDH_ENABLED) if( mbedtls_ssl_ciphersuite_uses_ecdh( ciphersuite_info ) ) { - ssl_get_ecdh_params_from_cert( ssl ); + ret = ssl_get_ecdh_params_from_cert( ssl ); + if( ret != 0 ) + { + MBEDTLS_SSL_DEBUG_RET( 1, "ssl_get_ecdh_params_from_cert", ret ); + return( ret ); + } } #endif /* MBEDTLS_KEY_EXCHANGE_SOME_ECDH_ENABLED */ @@ -3740,6 +3792,7 @@ static int ssl_write_server_key_exchange( mbedtls_ssl_context *ssl ) return( 0 ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_server_hello_done( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -3779,6 +3832,7 @@ static int ssl_write_server_hello_done( mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_KEY_EXCHANGE_DHE_RSA_ENABLED) || \ defined(MBEDTLS_KEY_EXCHANGE_DHE_PSK_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_client_dh_public( mbedtls_ssl_context *ssl, unsigned char **p, const unsigned char *end ) { @@ -3822,6 +3876,7 @@ static int ssl_parse_client_dh_public( mbedtls_ssl_context *ssl, unsigned char * defined(MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED) #if defined(MBEDTLS_SSL_ASYNC_PRIVATE) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_resume_decrypt_pms( mbedtls_ssl_context *ssl, unsigned char *peer_pms, size_t *peer_pmslen, @@ -3839,6 +3894,7 @@ static int ssl_resume_decrypt_pms( mbedtls_ssl_context *ssl, } #endif /* MBEDTLS_SSL_ASYNC_PRIVATE */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_decrypt_encrypted_pms( mbedtls_ssl_context *ssl, const unsigned char *p, const unsigned char *end, @@ -3931,6 +3987,7 @@ static int ssl_decrypt_encrypted_pms( mbedtls_ssl_context *ssl, return( ret ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_encrypted_pms( 
mbedtls_ssl_context *ssl, const unsigned char *p, const unsigned char *end, @@ -4020,6 +4077,7 @@ static int ssl_parse_encrypted_pms( mbedtls_ssl_context *ssl, MBEDTLS_KEY_EXCHANGE_RSA_PSK_ENABLED */ #if defined(MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_client_psk_identity( mbedtls_ssl_context *ssl, unsigned char **p, const unsigned char *end ) { @@ -4080,6 +4138,7 @@ static int ssl_parse_client_psk_identity( mbedtls_ssl_context *ssl, unsigned cha } #endif /* MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_client_key_exchange( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -4207,7 +4266,7 @@ static int ssl_parse_client_key_exchange( mbedtls_ssl_context *ssl ) } #if defined(MBEDTLS_USE_PSA_CRYPTO) - /* For opaque PSKs, we perform the PSK-to-MS derivation atomatically + /* For opaque PSKs, we perform the PSK-to-MS derivation automatically * and skip the intermediate PMS. */ if( ssl_use_opaque_psk( ssl ) == 1 ) MBEDTLS_SSL_DEBUG_MSG( 1, ( "skip PMS generation for opaque PSK" ) ); @@ -4247,7 +4306,10 @@ static int ssl_parse_client_key_exchange( mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_USE_PSA_CRYPTO) /* Opaque PSKs are currently only supported for PSK-only. */ if( ssl_use_opaque_psk( ssl ) == 1 ) + { + MBEDTLS_SSL_DEBUG_MSG( 1, ( "opaque PSK not supported with RSA-PSK" ) ); return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE ); + } #endif if( ( ret = ssl_parse_encrypted_pms( ssl, p, end, 2 ) ) != 0 ) @@ -4282,7 +4344,10 @@ static int ssl_parse_client_key_exchange( mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_USE_PSA_CRYPTO) /* Opaque PSKs are currently only supported for PSK-only. */ if( ssl_use_opaque_psk( ssl ) == 1 ) + { + MBEDTLS_SSL_DEBUG_MSG( 1, ( "opaque PSK not supported with DHE-PSK" ) ); return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE ); + } #endif if( p != end ) @@ -4319,7 +4384,10 @@ static int ssl_parse_client_key_exchange( mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_USE_PSA_CRYPTO) /* Opaque PSKs are currently only supported for PSK-only. 
*/ if( ssl_use_opaque_psk( ssl ) == 1 ) + { + MBEDTLS_SSL_DEBUG_MSG( 1, ( "opaque PSK not supported with ECDHE-PSK" ) ); return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE ); + } #endif MBEDTLS_SSL_DEBUG_ECDH( 3, &ssl->handshake->ecdh_ctx, @@ -4386,6 +4454,7 @@ static int ssl_parse_client_key_exchange( mbedtls_ssl_context *ssl ) } #if !defined(MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl ) { const mbedtls_ssl_ciphersuite_t *ciphersuite_info = @@ -4404,6 +4473,7 @@ static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl ) return( MBEDTLS_ERR_SSL_INTERNAL_ERROR ); } #else /* !MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE; @@ -4597,6 +4667,7 @@ static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl ) #endif /* MBEDTLS_KEY_EXCHANGE_CERT_REQ_ALLOWED_ENABLED */ #if defined(MBEDTLS_SSL_SESSION_TICKETS) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_new_session_ticket( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; diff --git a/thirdparty/mbedtls/library/ssl_ticket.c b/thirdparty/mbedtls/library/ssl_ticket.c index 046ed1b2ff..e0126cc9d1 100644 --- a/thirdparty/mbedtls/library/ssl_ticket.c +++ b/thirdparty/mbedtls/library/ssl_ticket.c @@ -37,7 +37,7 @@ #include <string.h> /* - * Initialze context + * Initialize context */ void mbedtls_ssl_ticket_init( mbedtls_ssl_ticket_context *ctx ) { @@ -66,6 +66,7 @@ void mbedtls_ssl_ticket_init( mbedtls_ssl_ticket_context *ctx ) /* * Generate/update a key */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_ticket_gen_key( mbedtls_ssl_ticket_context *ctx, unsigned char index ) { @@ -96,6 +97,7 @@ static int ssl_ticket_gen_key( mbedtls_ssl_ticket_context *ctx, /* * Rotate/generate keys if necessary */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_ticket_update_keys( mbedtls_ssl_ticket_context *ctx ) { #if !defined(MBEDTLS_HAVE_TIME) diff --git a/thirdparty/mbedtls/library/ssl_tls.c b/thirdparty/mbedtls/library/ssl_tls.c index 2e6469de83..7badec51ae 100644 --- a/thirdparty/mbedtls/library/ssl_tls.c +++ b/thirdparty/mbedtls/library/ssl_tls.c @@ -245,6 +245,7 @@ int mbedtls_ssl_session_copy( mbedtls_ssl_session *dst, } #if defined(MBEDTLS_SSL_VARIABLE_BUFFER_LENGTH) +MBEDTLS_CHECK_RETURN_CRITICAL static int resize_buffer( unsigned char **buffer, size_t len_new, size_t *len_old ) { unsigned char* resized_buffer = mbedtls_calloc( 1, len_new ); @@ -337,6 +338,7 @@ static void handle_buffer_resizing( mbedtls_ssl_context *ssl, int downsizing, * Key material generation */ #if defined(MBEDTLS_SSL_PROTO_SSL3) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl3_prf( const unsigned char *secret, size_t slen, const char *label, const unsigned char *random, size_t rlen, @@ -398,6 +400,7 @@ exit: #endif /* MBEDTLS_SSL_PROTO_SSL3 */ #if defined(MBEDTLS_SSL_PROTO_TLS1) || defined(MBEDTLS_SSL_PROTO_TLS1_1) +MBEDTLS_CHECK_RETURN_CRITICAL static int tls1_prf( const unsigned char *secret, size_t slen, const char *label, const unsigned char *random, size_t rlen, @@ -605,6 +608,7 @@ static psa_status_t setup_psa_key_derivation( psa_key_derivation_operation_t* de return( PSA_SUCCESS ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int tls_prf_generic( mbedtls_md_type_t md_type, const unsigned char *secret, size_t slen, const char *label, @@ -679,6 +683,7 @@ static int tls_prf_generic( mbedtls_md_type_t 
md_type, #else /* MBEDTLS_USE_PSA_CRYPTO */ +MBEDTLS_CHECK_RETURN_CRITICAL static int tls_prf_generic( mbedtls_md_type_t md_type, const unsigned char *secret, size_t slen, const char *label, @@ -770,6 +775,7 @@ exit: } #endif /* MBEDTLS_USE_PSA_CRYPTO */ #if defined(MBEDTLS_SHA256_C) +MBEDTLS_CHECK_RETURN_CRITICAL static int tls_prf_sha256( const unsigned char *secret, size_t slen, const char *label, const unsigned char *random, size_t rlen, @@ -781,6 +787,7 @@ static int tls_prf_sha256( const unsigned char *secret, size_t slen, #endif /* MBEDTLS_SHA256_C */ #if defined(MBEDTLS_SHA512_C) && !defined(MBEDTLS_SHA512_NO_SHA384) +MBEDTLS_CHECK_RETURN_CRITICAL static int tls_prf_sha384( const unsigned char *secret, size_t slen, const char *label, const unsigned char *random, size_t rlen, @@ -825,6 +832,7 @@ static void ssl_calc_finished_tls_sha384( mbedtls_ssl_context *, unsigned char * #if defined(MBEDTLS_KEY_EXCHANGE_PSK_ENABLED) && \ defined(MBEDTLS_USE_PSA_CRYPTO) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_use_opaque_psk( mbedtls_ssl_context const *ssl ) { if( ssl->conf->f_psk != NULL ) @@ -949,6 +957,7 @@ typedef int ssl_tls_prf_t(const unsigned char *, size_t, const char *, * - MBEDTLS_SSL_EXPORT_KEYS: ssl->conf->{f,p}_export_keys * - MBEDTLS_DEBUG_C: ssl->conf->{f,p}_dbg */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_populate_transform( mbedtls_ssl_transform *transform, int ciphersuite, const unsigned char master[48], @@ -990,6 +999,7 @@ static int ssl_populate_transform( mbedtls_ssl_transform *transform, #if !defined(MBEDTLS_SSL_HW_RECORD_ACCEL) && \ !defined(MBEDTLS_SSL_EXPORT_KEYS) && \ + !defined(MBEDTLS_SSL_DTLS_CONNECTION_ID) && \ !defined(MBEDTLS_DEBUG_C) ssl = NULL; /* make sure we don't use it except for those cases */ (void) ssl; @@ -1361,7 +1371,7 @@ static int ssl_populate_transform( mbedtls_ssl_transform *transform, * the structure field for the IV, which the PSA-based * implementation currently doesn't. */ #if defined(MBEDTLS_SSL_PROTO_TLS1_2) - if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 ) + if( minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 ) { ret = mbedtls_cipher_setup_psa( &transform->cipher_ctx_enc, cipher_info, transform->taglen ); @@ -1404,7 +1414,7 @@ static int ssl_populate_transform( mbedtls_ssl_transform *transform, * the structure field for the IV, which the PSA-based * implementation currently doesn't. 
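Editor's note: the `tls_prf_generic` / `tls_prf_sha256` / `tls_prf_sha384` functions annotated above all compute the RFC 5246 PRF. For orientation, a minimal sketch of that expansion; `hmac_sha256_2` is an assumed primitive for this sketch (HMAC-SHA-256 over the concatenation of two inputs), not a real mbedtls API:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define HASH_LEN 32  /* SHA-256 output size for this sketch */

/* Assumed primitive (hypothetical): out = HMAC-SHA-256(key, msg1 || msg2). */
void hmac_sha256_2(const uint8_t *key, size_t keylen,
                   const uint8_t *msg1, size_t len1,
                   const uint8_t *msg2, size_t len2,
                   uint8_t out[HASH_LEN]);

/* TLS 1.2 PRF per RFC 5246, where ls = label || seed:
 *   A(1) = HMAC(secret, ls), A(i+1) = HMAC(secret, A(i)),
 *   output = HMAC(secret, A(1) || ls) || HMAC(secret, A(2) || ls) || ... */
void tls12_prf(const uint8_t *secret, size_t slen,
               const uint8_t *ls, size_t lslen,
               uint8_t *out, size_t outlen)
{
    uint8_t a[HASH_LEN], block[HASH_LEN];

    hmac_sha256_2(secret, slen, ls, lslen, NULL, 0, a);        /* A(1) */
    while (outlen > 0) {
        size_t n = outlen < HASH_LEN ? outlen : HASH_LEN;
        hmac_sha256_2(secret, slen, a, HASH_LEN, ls, lslen, block);
        memcpy(out, block, n);
        out += n; outlen -= n;
        hmac_sha256_2(secret, slen, a, HASH_LEN, NULL, 0, a);  /* A(i+1) */
    }
}
```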
*/ #if defined(MBEDTLS_SSL_PROTO_TLS1_2) - if( ssl->minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 ) + if( minor_ver == MBEDTLS_SSL_MINOR_VERSION_3 ) { ret = mbedtls_cipher_setup_psa( &transform->cipher_ctx_dec, cipher_info, transform->taglen ); @@ -1511,6 +1521,7 @@ end: * Outputs: * - the tls_prf, calc_verify and calc_finished members of handshake structure */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_set_handshake_prfs( mbedtls_ssl_handshake_params *handshake, int minor_ver, mbedtls_md_type_t hash ) @@ -1580,6 +1591,7 @@ static int ssl_set_handshake_prfs( mbedtls_ssl_handshake_params *handshake, * EMS: passed to calc_verify (debug + (SSL3) session_negotiate) * PSA-PSA: minor_ver, conf */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_compute_master( mbedtls_ssl_handshake_params *handshake, unsigned char *master, const mbedtls_ssl_context *ssl ) @@ -2108,6 +2120,7 @@ int mbedtls_ssl_psk_derive_premaster( mbedtls_ssl_context *ssl, mbedtls_key_exch #endif /* MBEDTLS_KEY_EXCHANGE_SOME_PSK_ENABLED */ #if defined(MBEDTLS_SSL_SRV_C) && defined(MBEDTLS_SSL_RENEGOTIATION) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_hello_request( mbedtls_ssl_context *ssl ); #if defined(MBEDTLS_SSL_PROTO_DTLS) @@ -2323,6 +2336,7 @@ write_msg: #if defined(MBEDTLS_SSL_RENEGOTIATION) && defined(MBEDTLS_SSL_CLI_C) #if defined(MBEDTLS_SSL_KEEP_PEER_CERTIFICATE) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_check_peer_crt_unchanged( mbedtls_ssl_context *ssl, unsigned char *crt_buf, size_t crt_buf_len ) @@ -2338,6 +2352,7 @@ static int ssl_check_peer_crt_unchanged( mbedtls_ssl_context *ssl, return( memcmp( peer_crt->raw.p, crt_buf, peer_crt->raw.len ) ); } #else /* MBEDTLS_SSL_KEEP_PEER_CERTIFICATE */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_check_peer_crt_unchanged( mbedtls_ssl_context *ssl, unsigned char *crt_buf, size_t crt_buf_len ) @@ -2372,6 +2387,7 @@ static int ssl_check_peer_crt_unchanged( mbedtls_ssl_context *ssl, * Once the certificate message is read, parse it into a cert chain and * perform basic checks, but leave actual verification to the caller */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_certificate_chain( mbedtls_ssl_context *ssl, mbedtls_x509_crt *chain ) { @@ -2521,6 +2537,7 @@ static int ssl_parse_certificate_chain( mbedtls_ssl_context *ssl, } #if defined(MBEDTLS_SSL_SRV_C) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_srv_check_client_no_crt_notification( mbedtls_ssl_context *ssl ) { if( ssl->conf->endpoint == MBEDTLS_SSL_IS_CLIENT ) @@ -2570,6 +2587,7 @@ static int ssl_srv_check_client_no_crt_notification( mbedtls_ssl_context *ssl ) */ #define SSL_CERTIFICATE_EXPECTED 0 #define SSL_CERTIFICATE_SKIP 1 +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_certificate_coordinate( mbedtls_ssl_context *ssl, int authmode ) { @@ -2599,6 +2617,7 @@ static int ssl_parse_certificate_coordinate( mbedtls_ssl_context *ssl, return( SSL_CERTIFICATE_EXPECTED ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl, int authmode, mbedtls_x509_crt *chain, @@ -2696,7 +2715,9 @@ static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl, { const mbedtls_pk_context *pk = &chain->pk; - /* If certificate uses an EC key, make sure the curve is OK */ + /* If certificate uses an EC key, make sure the curve is OK. + * This is a public key, so it can't be opaque, so can_do() is a good + * enough check to ensure pk_ec() is safe to use here. 
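Editor's note: the comment just above and the server-side `ssl_get_ecdh_params_from_cert` change in `ssl_srv.c` earlier are two halves of one rule: for a peer's *public* key, `mbedtls_pk_can_do()` suffices because public keys cannot be opaque, but for our own *private* key, which may be opaque (e.g. PSA-held), the code must require a transparent EC type before calling `mbedtls_pk_ec()`. A sketch contrasting the two checks:

```c
#include "mbedtls/pk.h"

/* Peer public key: capability is enough (it cannot be opaque). */
static int public_key_is_ec(const mbedtls_pk_context *pk)
{
    return mbedtls_pk_can_do(pk, MBEDTLS_PK_ECKEY);
}

/* Own private key: require a transparent EC type, so that pk_ec()
 * is guaranteed to return a usable mbedtls_ecp_keypair. */
static int own_key_is_transparent_ec(const mbedtls_pk_context *pk)
{
    mbedtls_pk_type_t t = mbedtls_pk_get_type(pk);
    return t == MBEDTLS_PK_ECKEY || t == MBEDTLS_PK_ECKEY_DH;
}
```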
*/ if( mbedtls_pk_can_do( pk, MBEDTLS_PK_ECKEY ) && mbedtls_ssl_check_curve( ssl, mbedtls_pk_ec( *pk )->grp.id ) != 0 ) { @@ -2787,6 +2808,7 @@ static int ssl_parse_certificate_verify( mbedtls_ssl_context *ssl, } #if !defined(MBEDTLS_SSL_KEEP_PEER_CERTIFICATE) +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_remember_peer_crt_digest( mbedtls_ssl_context *ssl, unsigned char *start, size_t len ) { @@ -2818,6 +2840,7 @@ static int ssl_remember_peer_crt_digest( mbedtls_ssl_context *ssl, return( ret ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_remember_peer_pubkey( mbedtls_ssl_context *ssl, unsigned char *start, size_t len ) { @@ -3428,7 +3451,7 @@ void mbedtls_ssl_handshake_wrapup_free_hs_transform( mbedtls_ssl_context *ssl ) ssl->handshake = NULL; /* - * Free the previous transform and swith in the current one + * Free the previous transform and switch in the current one */ if( ssl->transform ) { @@ -3796,6 +3819,7 @@ void mbedtls_ssl_session_init( mbedtls_ssl_session *session ) memset( session, 0, sizeof(mbedtls_ssl_session) ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_handshake_init( mbedtls_ssl_context *ssl ) { /* Clear old handshake information if present */ @@ -3873,6 +3897,7 @@ static int ssl_handshake_init( mbedtls_ssl_context *ssl ) #if defined(MBEDTLS_SSL_DTLS_HELLO_VERIFY) && defined(MBEDTLS_SSL_SRV_C) /* Dummy cookie callbacks for defaults */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_cookie_write_dummy( void *ctx, unsigned char **p, unsigned char *end, const unsigned char *cli_id, size_t cli_id_len ) @@ -3886,6 +3911,7 @@ static int ssl_cookie_write_dummy( void *ctx, return( MBEDTLS_ERR_SSL_FEATURE_UNAVAILABLE ); } +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_cookie_check_dummy( void *ctx, const unsigned char *cookie, size_t cookie_len, const unsigned char *cli_id, size_t cli_id_len ) @@ -4303,6 +4329,7 @@ void mbedtls_ssl_conf_cert_profile( mbedtls_ssl_config *conf, } /* Append a new keycert entry to a (possibly empty) list */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_append_key_cert( mbedtls_ssl_key_cert **head, mbedtls_x509_crt *cert, mbedtls_pk_context *key ) @@ -4471,6 +4498,7 @@ static void ssl_conf_remove_psk( mbedtls_ssl_config *conf ) * It checks that the provided identity is well-formed and attempts * to make a copy of it in the SSL config. * On failure, the PSK identity in the config remains unset. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_conf_set_psk_identity( mbedtls_ssl_config *conf, unsigned char const *psk_identity, size_t psk_identity_len ) @@ -4632,6 +4660,9 @@ int mbedtls_ssl_conf_dh_param_bin( mbedtls_ssl_config *conf, { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; + mbedtls_mpi_free( &conf->dhm_P ); + mbedtls_mpi_free( &conf->dhm_G ); + if( ( ret = mbedtls_mpi_read_binary( &conf->dhm_P, dhm_P, P_len ) ) != 0 || ( ret = mbedtls_mpi_read_binary( &conf->dhm_G, dhm_G, G_len ) ) != 0 ) { @@ -4647,6 +4678,9 @@ int mbedtls_ssl_conf_dh_param_ctx( mbedtls_ssl_config *conf, mbedtls_dhm_context { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; + mbedtls_mpi_free( &conf->dhm_P ); + mbedtls_mpi_free( &conf->dhm_G ); + if( ( ret = mbedtls_mpi_copy( &conf->dhm_P, &dhm_ctx->P ) ) != 0 || ( ret = mbedtls_mpi_copy( &conf->dhm_G, &dhm_ctx->G ) ) != 0 ) { @@ -5384,6 +5418,7 @@ static unsigned char ssl_serialized_session_header[] = { * verify_result is put before peer_cert so that all mandatory fields come * together in one block. 
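Editor's note: the `mbedtls_ssl_conf_dh_param_bin` / `_ctx` hunks above fix a memory leak by freeing the previously stored MPIs before overwriting them, making the setters safe to call repeatedly. A self-contained sketch of the pattern; `struct dh_cfg` is an illustrative stand-in for `mbedtls_ssl_config`, with its MPI fields assumed already initialized (`mbedtls_mpi_init`) by the caller:

```c
#include "mbedtls/bignum.h"

struct dh_cfg { mbedtls_mpi P, G; };

int set_dh_params(struct dh_cfg *cfg,
                  const unsigned char *p, size_t plen,
                  const unsigned char *g, size_t glen)
{
    int ret;

    mbedtls_mpi_free(&cfg->P);   /* no-op on a fresh MPI, frees an old one */
    mbedtls_mpi_free(&cfg->G);

    if ((ret = mbedtls_mpi_read_binary(&cfg->P, p, plen)) != 0 ||
        (ret = mbedtls_mpi_read_binary(&cfg->G, g, glen)) != 0) {
        mbedtls_mpi_free(&cfg->P);  /* leave the config in a clean state */
        mbedtls_mpi_free(&cfg->G);
        return ret;
    }
    return 0;
}
```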
*/ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_session_save( const mbedtls_ssl_session *session, unsigned char omit_header, unsigned char *buf, @@ -5583,6 +5618,7 @@ int mbedtls_ssl_session_save( const mbedtls_ssl_session *session, * This internal version is wrapped by a public function that cleans up in * case of error, and has an extra option omit_header. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_session_load( mbedtls_ssl_session *session, unsigned char omit_header, const unsigned char *buf, @@ -5886,6 +5922,7 @@ int mbedtls_ssl_handshake( mbedtls_ssl_context *ssl ) /* * Write HelloRequest to request renegotiation on server */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_write_hello_request( mbedtls_ssl_context *ssl ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; @@ -6497,6 +6534,7 @@ static tls_prf_fn ssl_tls12prf_from_cs( int ciphersuite_id ) * This internal version is wrapped by a public function that cleans up in * case of error. */ +MBEDTLS_CHECK_RETURN_CRITICAL static int ssl_context_load( mbedtls_ssl_context *ssl, const unsigned char *buf, size_t len ) @@ -7320,6 +7358,18 @@ int mbedtls_ssl_check_curve( const mbedtls_ssl_context *ssl, mbedtls_ecp_group_i return( -1 ); } + +/* + * Same as mbedtls_ssl_check_curve() but takes a TLS ID for the curve. + */ +int mbedtls_ssl_check_curve_tls_id( const mbedtls_ssl_context *ssl, uint16_t tls_id ) +{ + const mbedtls_ecp_curve_info *curve_info = + mbedtls_ecp_curve_info_from_tls_id( tls_id ); + if( curve_info == NULL ) + return( -1 ); + return( mbedtls_ssl_check_curve( ssl, curve_info->grp_id ) ); +} #endif /* MBEDTLS_ECP_C */ #if defined(MBEDTLS_KEY_EXCHANGE_WITH_CERT_ENABLED) diff --git a/thirdparty/mbedtls/library/threading.c b/thirdparty/mbedtls/library/threading.c index 2de117f52a..5e0aaa4f21 100644 --- a/thirdparty/mbedtls/library/threading.c +++ b/thirdparty/mbedtls/library/threading.c @@ -113,7 +113,7 @@ int (*mbedtls_mutex_lock)( mbedtls_threading_mutex_t * ) = threading_mutex_lock_ int (*mbedtls_mutex_unlock)( mbedtls_threading_mutex_t * ) = threading_mutex_unlock_pthread; /* - * With phtreads we can statically initialize mutexes + * With pthreads we can statically initialize mutexes */ #define MUTEX_INIT = { PTHREAD_MUTEX_INITIALIZER, 1 } diff --git a/thirdparty/mbedtls/library/timing.c b/thirdparty/mbedtls/library/timing.c index eb41461320..57bc9bcc12 100644 --- a/thirdparty/mbedtls/library/timing.c +++ b/thirdparty/mbedtls/library/timing.c @@ -56,15 +56,15 @@ struct _hr_time #include <unistd.h> #include <sys/types.h> -#include <sys/time.h> #include <signal.h> +/* time.h should be included independently of MBEDTLS_HAVE_TIME. If the + * platform matches the ifdefs above, it will be used. 
*/ #include <time.h> - +#include <sys/time.h> struct _hr_time { struct timeval start; }; - #endif /* _WIN32 && !EFIX64 && !EFI32 */ #if !defined(HAVE_HARDCLOCK) && defined(MBEDTLS_HAVE_ASM) && \ @@ -364,7 +364,6 @@ int mbedtls_timing_get_delay( void *data ) return( 0 ); } -#endif /* !MBEDTLS_TIMING_ALT */ #if defined(MBEDTLS_SELF_TEST) @@ -526,5 +525,5 @@ hard_test_done: } #endif /* MBEDTLS_SELF_TEST */ - +#endif /* !MBEDTLS_TIMING_ALT */ #endif /* MBEDTLS_TIMING_C */ diff --git a/thirdparty/mbedtls/library/x509.c b/thirdparty/mbedtls/library/x509.c index f21e9e6944..3997ebd1f3 100644 --- a/thirdparty/mbedtls/library/x509.c +++ b/thirdparty/mbedtls/library/x509.c @@ -741,7 +741,7 @@ int mbedtls_x509_get_ext( unsigned char **p, const unsigned char *end, int mbedtls_x509_dn_gets( char *buf, size_t size, const mbedtls_x509_name *dn ) { int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED; - size_t i, n; + size_t i, j, n; unsigned char c, merge = 0; const mbedtls_x509_name *name; const char *short_name = NULL; @@ -775,17 +775,24 @@ int mbedtls_x509_dn_gets( char *buf, size_t size, const mbedtls_x509_name *dn ) ret = mbedtls_snprintf( p, n, "\?\?=" ); MBEDTLS_X509_SAFE_SNPRINTF; - for( i = 0; i < name->val.len; i++ ) + for( i = 0, j = 0; i < name->val.len; i++, j++ ) { - if( i >= sizeof( s ) - 1 ) - break; + if( j >= sizeof( s ) - 1 ) + return( MBEDTLS_ERR_X509_BUFFER_TOO_SMALL ); c = name->val.p[i]; + // Special characters requiring escaping, RFC 1779 + if( c && strchr( ",=+<>#;\"\\", c ) ) + { + if( j + 1 >= sizeof( s ) - 1 ) + return( MBEDTLS_ERR_X509_BUFFER_TOO_SMALL ); + s[j++] = '\\'; + } if( c < 32 || c >= 127 ) - s[i] = '?'; - else s[i] = c; + s[j] = '?'; + else s[j] = c; } - s[i] = '\0'; + s[j] = '\0'; ret = mbedtls_snprintf( p, n, "%s", s ); MBEDTLS_X509_SAFE_SNPRINTF; diff --git a/thirdparty/mbedtls/library/x509_crl.c b/thirdparty/mbedtls/library/x509_crl.c index ac4fc75de3..d2d8042029 100644 --- a/thirdparty/mbedtls/library/x509_crl.c +++ b/thirdparty/mbedtls/library/x509_crl.c @@ -52,11 +52,13 @@ #define mbedtls_snprintf snprintf #endif +#if defined(MBEDTLS_HAVE_TIME) #if defined(_WIN32) && !defined(EFIX64) && !defined(EFI32) #include <windows.h> #else #include <time.h> #endif +#endif #if defined(MBEDTLS_FS_IO) || defined(EFIX64) || defined(EFI32) #include <stdio.h> diff --git a/thirdparty/mbedtls/library/x509_crt.c b/thirdparty/mbedtls/library/x509_crt.c index 60312bf2f5..96477e4c9d 100644 --- a/thirdparty/mbedtls/library/x509_crt.c +++ b/thirdparty/mbedtls/library/x509_crt.c @@ -63,6 +63,7 @@ #include "mbedtls/threading.h" #endif +#if defined(MBEDTLS_HAVE_TIME) #if defined(_WIN32) && !defined(EFIX64) && !defined(EFI32) #include <windows.h> #if defined(_MSC_VER) && _MSC_VER <= 1600 @@ -81,6 +82,7 @@ #else #include <time.h> #endif +#endif #if defined(MBEDTLS_FS_IO) #include <stdio.h> diff --git a/thirdparty/mbedtls/library/x509write_crt.c b/thirdparty/mbedtls/library/x509write_crt.c index 184c90cd33..0c5e991834 100644 --- a/thirdparty/mbedtls/library/x509write_crt.c +++ b/thirdparty/mbedtls/library/x509write_crt.c @@ -299,7 +299,7 @@ static int x509_write_time( unsigned char **p, unsigned char *start, /* * write MBEDTLS_ASN1_UTC_TIME if year < 2050 (2 bytes shorter) */ - if( t[0] == '2' && t[1] == '0' && t[2] < '5' ) + if( t[0] < '2' || ( t[0] == '2' && t[1] == '0' && t[2] < '5' ) ) { MBEDTLS_ASN1_CHK_ADD( len, mbedtls_asn1_write_raw_buffer( p, start, (const unsigned char *) t + 2, diff --git a/thirdparty/meshoptimizer/patches/attribute-aware-simplify-distance-only-metric.patch 
b/thirdparty/meshoptimizer/patches/attribute-aware-simplify-distance-only-metric.patch index 213f35dd69..21daac6eec 100644 --- a/thirdparty/meshoptimizer/patches/attribute-aware-simplify-distance-only-metric.patch +++ b/thirdparty/meshoptimizer/patches/attribute-aware-simplify-distance-only-metric.patch @@ -1,5 +1,5 @@ diff --git a/thirdparty/meshoptimizer/simplifier.cpp b/thirdparty/meshoptimizer/simplifier.cpp -index e384046ffe..ccc99edb1a 100644 +index 5e92e2dc73..e40c141e76 100644 --- a/thirdparty/meshoptimizer/simplifier.cpp +++ b/thirdparty/meshoptimizer/simplifier.cpp @@ -20,7 +20,7 @@ @@ -11,7 +11,7 @@ index e384046ffe..ccc99edb1a 100644 // This work is based on: // Michael Garland and Paul S. Heckbert. Surface simplification using quadric error metrics. 1997 -@@ -445,6 +445,7 @@ struct Collapse +@@ -453,6 +453,7 @@ struct Collapse float error; unsigned int errorui; }; @@ -19,7 +19,7 @@ index e384046ffe..ccc99edb1a 100644 }; static float normalize(Vector3& v) -@@ -525,6 +526,34 @@ static float quadricError(const Quadric& Q, const Vector3& v) +@@ -533,6 +534,34 @@ static float quadricError(const Quadric& Q, const Vector3& v) return fabsf(r) * s; } @@ -54,7 +54,7 @@ index e384046ffe..ccc99edb1a 100644 static void quadricFromPlane(Quadric& Q, float a, float b, float c, float d, float w) { float aw = a * w; -@@ -680,7 +709,7 @@ static void quadricUpdateAttributes(Quadric& Q, const Vector3& p0, const Vector3 +@@ -688,7 +717,7 @@ static void quadricUpdateAttributes(Quadric& Q, const Vector3& p0, const Vector3 } #endif @@ -63,7 +63,7 @@ index e384046ffe..ccc99edb1a 100644 { for (size_t i = 0; i < index_count; i += 3) { -@@ -690,6 +719,9 @@ static void fillFaceQuadrics(Quadric* vertex_quadrics, const unsigned int* indic +@@ -698,6 +727,9 @@ static void fillFaceQuadrics(Quadric* vertex_quadrics, const unsigned int* indic Quadric Q; quadricFromTriangle(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], 1.f); @@ -73,7 +73,7 @@ index e384046ffe..ccc99edb1a 100644 #if ATTRIBUTES quadricUpdateAttributes(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], Q.w); -@@ -700,7 +732,7 @@ static void fillFaceQuadrics(Quadric* vertex_quadrics, const unsigned int* indic +@@ -708,7 +740,7 @@ static void fillFaceQuadrics(Quadric* vertex_quadrics, const unsigned int* indic } } @@ -82,7 +82,7 @@ index e384046ffe..ccc99edb1a 100644 { for (size_t i = 0; i < index_count; i += 3) { -@@ -744,6 +776,9 @@ static void fillEdgeQuadrics(Quadric* vertex_quadrics, const unsigned int* indic +@@ -752,6 +784,9 @@ static void fillEdgeQuadrics(Quadric* vertex_quadrics, const unsigned int* indic quadricAdd(vertex_quadrics[remap[i0]], Q); quadricAdd(vertex_quadrics[remap[i1]], Q); @@ -92,7 +92,7 @@ index e384046ffe..ccc99edb1a 100644 } } } -@@ -848,7 +883,7 @@ static size_t pickEdgeCollapses(Collapse* collapses, const unsigned int* indices +@@ -856,7 +891,7 @@ static size_t pickEdgeCollapses(Collapse* collapses, const unsigned int* indices return collapse_count; } @@ -101,7 +101,7 @@ index e384046ffe..ccc99edb1a 100644 { for (size_t i = 0; i < collapse_count; ++i) { -@@ -868,10 +903,14 @@ static void rankEdgeCollapses(Collapse* collapses, size_t collapse_count, const +@@ -876,10 +911,14 @@ static void rankEdgeCollapses(Collapse* collapses, size_t collapse_count, const float ei = quadricError(qi, vertex_positions[i1]); float ej = quadricError(qj, vertex_positions[j1]); @@ -116,7 +116,7 @@ index e384046ffe..ccc99edb1a 100644 } } -@@ -968,7 +1007,7 @@ static void sortEdgeCollapses(unsigned int* 
sort_order, const Collapse* collapse +@@ -976,7 +1015,7 @@ static void sortEdgeCollapses(unsigned int* sort_order, const Collapse* collapse } } @@ -125,7 +125,7 @@ index e384046ffe..ccc99edb1a 100644 { size_t edge_collapses = 0; size_t triangle_collapses = 0; -@@ -1030,6 +1069,7 @@ static size_t performEdgeCollapses(unsigned int* collapse_remap, unsigned char* +@@ -1038,6 +1077,7 @@ static size_t performEdgeCollapses(unsigned int* collapse_remap, unsigned char* assert(collapse_remap[r1] == r1); quadricAdd(vertex_quadrics[r1], vertex_quadrics[r0]); @@ -133,7 +133,7 @@ index e384046ffe..ccc99edb1a 100644 if (vertex_kind[i0] == Kind_Complex) { -@@ -1067,7 +1107,7 @@ static size_t performEdgeCollapses(unsigned int* collapse_remap, unsigned char* +@@ -1075,7 +1115,7 @@ static size_t performEdgeCollapses(unsigned int* collapse_remap, unsigned char* triangle_collapses += (vertex_kind[i0] == Kind_Border) ? 1 : 2; edge_collapses++; @@ -142,7 +142,7 @@ index e384046ffe..ccc99edb1a 100644 } #if TRACE -@@ -1455,9 +1495,11 @@ size_t meshopt_simplifyWithAttributes(unsigned int* destination, const unsigned +@@ -1463,9 +1503,11 @@ size_t meshopt_simplifyWithAttributes(unsigned int* destination, const unsigned Quadric* vertex_quadrics = allocator.allocate<Quadric>(vertex_count); memset(vertex_quadrics, 0, vertex_count * sizeof(Quadric)); @@ -156,7 +156,7 @@ index e384046ffe..ccc99edb1a 100644 if (result != indices) memcpy(result, indices, index_count * sizeof(unsigned int)); -@@ -1488,7 +1530,7 @@ size_t meshopt_simplifyWithAttributes(unsigned int* destination, const unsigned +@@ -1496,7 +1538,7 @@ size_t meshopt_simplifyWithAttributes(unsigned int* destination, const unsigned if (edge_collapse_count == 0) break; @@ -165,7 +165,7 @@ index e384046ffe..ccc99edb1a 100644 #if TRACE > 1 dumpEdgeCollapses(edge_collapses, edge_collapse_count, vertex_kind); -@@ -1507,7 +1549,7 @@ size_t meshopt_simplifyWithAttributes(unsigned int* destination, const unsigned +@@ -1515,7 +1557,7 @@ size_t meshopt_simplifyWithAttributes(unsigned int* destination, const unsigned printf("pass %d: ", int(pass_count++)); #endif diff --git a/thirdparty/meshoptimizer/patches/attribute-aware-simplify.patch b/thirdparty/meshoptimizer/patches/attribute-aware-simplify.patch index 51a424765e..33a17fe9fa 100644 --- a/thirdparty/meshoptimizer/patches/attribute-aware-simplify.patch +++ b/thirdparty/meshoptimizer/patches/attribute-aware-simplify.patch @@ -15,7 +15,7 @@ index be4b765d97..463fad29da 100644 * Experimental: Mesh simplifier (sloppy) * Reduces the number of triangles in the mesh, sacrificing mesh appearance for simplification performance diff --git a/thirdparty/meshoptimizer/simplifier.cpp b/thirdparty/meshoptimizer/simplifier.cpp -index bf1431269d..e384046ffe 100644 +index a74b08a97d..5e92e2dc73 100644 --- a/thirdparty/meshoptimizer/simplifier.cpp +++ b/thirdparty/meshoptimizer/simplifier.cpp @@ -20,6 +20,8 @@ @@ -27,7 +27,7 @@ index bf1431269d..e384046ffe 100644 // This work is based on: // Michael Garland and Paul S. Heckbert. Surface simplification using quadric error metrics. 1997 // Michael Garland. Quadric-based polygonal surface simplification. 
1999 -@@ -363,6 +365,10 @@ static void classifyVertices(unsigned char* result, unsigned int* loop, unsigned +@@ -371,6 +373,10 @@ static void classifyVertices(unsigned char* result, unsigned int* loop, unsigned struct Vector3 { float x, y, z; @@ -38,7 +38,7 @@ index bf1431269d..e384046ffe 100644 }; static float rescalePositions(Vector3* result, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride) -@@ -419,6 +425,13 @@ struct Quadric +@@ -427,6 +433,13 @@ struct Quadric float a10, a20, a21; float b0, b1, b2, c; float w; @@ -52,7 +52,7 @@ index bf1431269d..e384046ffe 100644 }; struct Collapse -@@ -461,6 +474,16 @@ static void quadricAdd(Quadric& Q, const Quadric& R) +@@ -469,6 +482,16 @@ static void quadricAdd(Quadric& Q, const Quadric& R) Q.b2 += R.b2; Q.c += R.c; Q.w += R.w; @@ -69,7 +69,7 @@ index bf1431269d..e384046ffe 100644 } static float quadricError(const Quadric& Q, const Vector3& v) -@@ -486,6 +509,17 @@ static float quadricError(const Quadric& Q, const Vector3& v) +@@ -494,6 +517,17 @@ static float quadricError(const Quadric& Q, const Vector3& v) r += ry * v.y; r += rz * v.z; @@ -87,7 +87,7 @@ index bf1431269d..e384046ffe 100644 float s = Q.w == 0.f ? 0.f : 1.f / Q.w; return fabsf(r) * s; -@@ -509,6 +543,13 @@ static void quadricFromPlane(Quadric& Q, float a, float b, float c, float d, flo +@@ -517,6 +551,13 @@ static void quadricFromPlane(Quadric& Q, float a, float b, float c, float d, flo Q.b2 = c * dw; Q.c = d * dw; Q.w = w; @@ -101,7 +101,7 @@ index bf1431269d..e384046ffe 100644 } static void quadricFromPoint(Quadric& Q, float x, float y, float z, float w) -@@ -561,6 +602,84 @@ static void quadricFromTriangleEdge(Quadric& Q, const Vector3& p0, const Vector3 +@@ -569,6 +610,84 @@ static void quadricFromTriangleEdge(Quadric& Q, const Vector3& p0, const Vector3 quadricFromPlane(Q, normal.x, normal.y, normal.z, -distance, length * weight); } @@ -186,7 +186,7 @@ index bf1431269d..e384046ffe 100644 static void fillFaceQuadrics(Quadric* vertex_quadrics, const unsigned int* indices, size_t index_count, const Vector3* vertex_positions, const unsigned int* remap) { for (size_t i = 0; i < index_count; i += 3) -@@ -572,6 +691,9 @@ static void fillFaceQuadrics(Quadric* vertex_quadrics, const unsigned int* indic +@@ -580,6 +699,9 @@ static void fillFaceQuadrics(Quadric* vertex_quadrics, const unsigned int* indic Quadric Q; quadricFromTriangle(Q, vertex_positions[i0], vertex_positions[i1], vertex_positions[i2], 1.f); @@ -196,7 +196,7 @@ index bf1431269d..e384046ffe 100644 quadricAdd(vertex_quadrics[remap[i0]], Q); quadricAdd(vertex_quadrics[remap[i1]], Q); quadricAdd(vertex_quadrics[remap[i2]], Q); -@@ -1265,13 +1387,19 @@ MESHOPTIMIZER_API unsigned int* meshopt_simplifyDebugLoopBack = 0; +@@ -1273,13 +1395,19 @@ MESHOPTIMIZER_API unsigned int* meshopt_simplifyDebugLoopBack = 0; #endif size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions_data, size_t vertex_count, size_t vertex_positions_stride, size_t target_index_count, float target_error, float* out_result_error) @@ -218,7 +218,7 @@ index bf1431269d..e384046ffe 100644 meshopt_Allocator allocator; -@@ -1285,7 +1413,7 @@ size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, +@@ -1293,7 +1421,7 @@ size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, // build position remap that maps each vertex to the one with identical position unsigned int* remap = allocator.allocate<unsigned 
int>(vertex_count); unsigned int* wedge = allocator.allocate<unsigned int>(vertex_count); @@ -227,7 +227,7 @@ index bf1431269d..e384046ffe 100644 // classify vertices; vertex kind determines collapse rules, see kCanCollapse unsigned char* vertex_kind = allocator.allocate<unsigned char>(vertex_count); -@@ -1309,7 +1437,21 @@ size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, +@@ -1317,7 +1445,21 @@ size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, #endif Vector3* vertex_positions = allocator.allocate<Vector3>(vertex_count); @@ -250,7 +250,7 @@ index bf1431269d..e384046ffe 100644 Quadric* vertex_quadrics = allocator.allocate<Quadric>(vertex_count); memset(vertex_quadrics, 0, vertex_count * sizeof(Quadric)); -@@ -1401,7 +1543,9 @@ size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, +@@ -1409,7 +1551,9 @@ size_t meshopt_simplify(unsigned int* destination, const unsigned int* indices, // result_error is quadratic; we need to remap it back to linear if (out_result_error) diff --git a/thirdparty/meshoptimizer/simplifier.cpp b/thirdparty/meshoptimizer/simplifier.cpp index ccc99edb1a..e40c141e76 100644 --- a/thirdparty/meshoptimizer/simplifier.cpp +++ b/thirdparty/meshoptimizer/simplifier.cpp @@ -276,7 +276,15 @@ static void classifyVertices(unsigned char* result, unsigned int* loop, unsigned { unsigned int target = edges[j].next; - if (!hasEdge(adjacency, target, vertex)) + if (target == vertex) + { + // degenerate triangles have two distinct edges instead of three, and the self edge + // is bi-directional by definition; this can break border/seam classification by "closing" + // the open edge from another triangle and falsely marking the vertex as manifold + // instead we mark the vertex as having >1 open edges which turns it into locked/complex + openinc[vertex] = openout[vertex] = vertex; + } + else if (!hasEdge(adjacency, target, vertex)) { openinc[target] = (openinc[target] == ~0u) ? vertex : target; openout[vertex] = (openout[vertex] == ~0u) ? target : vertex; diff --git a/thirdparty/misc/ok_color.h b/thirdparty/misc/ok_color.h new file mode 100644 index 0000000000..dbc7dafc36 --- /dev/null +++ b/thirdparty/misc/ok_color.h @@ -0,0 +1,688 @@ +// Copyright(c) 2021 Björn Ottosson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files(the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and /or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions : +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
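The new `thirdparty/misc/ok_color.h` header vendors Björn Ottosson's single-file OKLab implementation: linear sRGB <-> OKLab conversions plus the OKHSL/OKHSV mappings and several gamut-clipping strategies built on them. As a minimal usage sketch for the API the header declares (hypothetical caller code under an assumed include path, not part of the vendored file):

#include "thirdparty/misc/ok_color.h"

int main()
{
    // Round-trip an sRGB color through OKHSL; all channels are in [0, 1].
    ok_color ok; // the conversion helpers are non-static members
    ok_color::RGB srgb = { 0.25f, 0.5f, 0.75f };
    ok_color::HSL hsl = ok.srgb_to_okhsl(srgb); // h in [0, 1), perceptual s/l
    ok_color::RGB back = ok.okhsl_to_srgb(hsl); // matches srgb up to rounding
    (void)back;
    return 0;
}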
+ +#ifndef OK_COLOR_H +#define OK_COLOR_H + +#include <cmath> +#include <cfloat> + +class ok_color +{ +public: + +struct Lab { float L; float a; float b; }; +struct RGB { float r; float g; float b; }; +struct HSV { float h; float s; float v; }; +struct HSL { float h; float s; float l; }; +struct LC { float L; float C; }; + +// Alternative representation of (L_cusp, C_cusp) +// Encoded so S = C_cusp/L_cusp and T = C_cusp/(1-L_cusp) +// The maximum value for C in the triangle is then found as fmin(S*L, T*(1-L)), for a given L +struct ST { float S; float T; }; + +static constexpr float pi = 3.1415926535897932384626433832795028841971693993751058209749445923078164062f; + +float clamp(float x, float min, float max) +{ + if (x < min) + return min; + if (x > max) + return max; + + return x; +} + +float sgn(float x) +{ + return (float)(0.f < x) - (float)(x < 0.f); +} + +float srgb_transfer_function(float a) +{ + return .0031308f >= a ? 12.92f * a : 1.055f * powf(a, .4166666666666667f) - .055f; +} + +float srgb_transfer_function_inv(float a) +{ + return .04045f < a ? powf((a + .055f) / 1.055f, 2.4f) : a / 12.92f; +} + +Lab linear_srgb_to_oklab(RGB c) +{ + float l = 0.4122214708f * c.r + 0.5363325363f * c.g + 0.0514459929f * c.b; + float m = 0.2119034982f * c.r + 0.6806995451f * c.g + 0.1073969566f * c.b; + float s = 0.0883024619f * c.r + 0.2817188376f * c.g + 0.6299787005f * c.b; + + float l_ = cbrtf(l); + float m_ = cbrtf(m); + float s_ = cbrtf(s); + + return { + 0.2104542553f * l_ + 0.7936177850f * m_ - 0.0040720468f * s_, + 1.9779984951f * l_ - 2.4285922050f * m_ + 0.4505937099f * s_, + 0.0259040371f * l_ + 0.7827717662f * m_ - 0.8086757660f * s_, + }; +} + +RGB oklab_to_linear_srgb(Lab c) +{ + float l_ = c.L + 0.3963377774f * c.a + 0.2158037573f * c.b; + float m_ = c.L - 0.1055613458f * c.a - 0.0638541728f * c.b; + float s_ = c.L - 0.0894841775f * c.a - 1.2914855480f * c.b; + + float l = l_ * l_ * l_; + float m = m_ * m_ * m_; + float s = s_ * s_ * s_; + + return { + +4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s, + -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s, + -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s, + }; +} + +// Finds the maximum saturation possible for a given hue that fits in sRGB +// Saturation here is defined as S = C/L +// a and b must be normalized so a^2 + b^2 == 1 +float compute_max_saturation(float a, float b) +{ + // Max saturation will be when one of r, g or b goes below zero. 
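+ // Illustrative aside (not part of the vendored file): callers obtain the
+ // unit hue vector from an OKLab color as a_ = lab.a / C, b_ = lab.b / C
+ // with C = sqrtf(lab.a * lab.a + lab.b * lab.b), as the gamut_clip_*
+ // helpers further down do, which guarantees the a^2 + b^2 == 1 precondition.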
+ + // Select different coefficients depending on which component goes below zero first + float k0, k1, k2, k3, k4, wl, wm, ws; + + if (-1.88170328f * a - 0.80936493f * b > 1) + { + // Red component + k0 = +1.19086277f; k1 = +1.76576728f; k2 = +0.59662641f; k3 = +0.75515197f; k4 = +0.56771245f; + wl = +4.0767416621f; wm = -3.3077115913f; ws = +0.2309699292f; + } + else if (1.81444104f * a - 1.19445276f * b > 1) + { + // Green component + k0 = +0.73956515f; k1 = -0.45954404f; k2 = +0.08285427f; k3 = +0.12541070f; k4 = +0.14503204f; + wl = -1.2684380046f; wm = +2.6097574011f; ws = -0.3413193965f; + } + else + { + // Blue component + k0 = +1.35733652f; k1 = -0.00915799f; k2 = -1.15130210f; k3 = -0.50559606f; k4 = +0.00692167f; + wl = -0.0041960863f; wm = -0.7034186147f; ws = +1.7076147010f; + } + + // Approximate max saturation using a polynomial: + float S = k0 + k1 * a + k2 * b + k3 * a * a + k4 * a * b; + + // Do one step Halley's method to get closer + // this gives an error less than 10e6, except for some blue hues where the dS/dh is close to infinite + // this should be sufficient for most applications, otherwise do two/three steps + + float k_l = +0.3963377774f * a + 0.2158037573f * b; + float k_m = -0.1055613458f * a - 0.0638541728f * b; + float k_s = -0.0894841775f * a - 1.2914855480f * b; + + { + float l_ = 1.f + S * k_l; + float m_ = 1.f + S * k_m; + float s_ = 1.f + S * k_s; + + float l = l_ * l_ * l_; + float m = m_ * m_ * m_; + float s = s_ * s_ * s_; + + float l_dS = 3.f * k_l * l_ * l_; + float m_dS = 3.f * k_m * m_ * m_; + float s_dS = 3.f * k_s * s_ * s_; + + float l_dS2 = 6.f * k_l * k_l * l_; + float m_dS2 = 6.f * k_m * k_m * m_; + float s_dS2 = 6.f * k_s * k_s * s_; + + float f = wl * l + wm * m + ws * s; + float f1 = wl * l_dS + wm * m_dS + ws * s_dS; + float f2 = wl * l_dS2 + wm * m_dS2 + ws * s_dS2; + + S = S - f * f1 / (f1 * f1 - 0.5f * f * f2); + } + + return S; +} + +// finds L_cusp and C_cusp for a given hue +// a and b must be normalized so a^2 + b^2 == 1 +LC find_cusp(float a, float b) +{ + // First, find the maximum saturation (saturation S = C/L) + float S_cusp = compute_max_saturation(a, b); + + // Convert to linear sRGB to find the first point where at least one of r,g or b >= 1: + RGB rgb_at_max = oklab_to_linear_srgb({ 1, S_cusp * a, S_cusp * b }); + float L_cusp = cbrtf(1.f / fmax(fmax(rgb_at_max.r, rgb_at_max.g), rgb_at_max.b)); + float C_cusp = L_cusp * S_cusp; + + return { L_cusp , C_cusp }; +} + +// Finds intersection of the line defined by +// L = L0 * (1 - t) + t * L1; +// C = t * C1; +// a and b must be normalized so a^2 + b^2 == 1 +float find_gamut_intersection(float a, float b, float L1, float C1, float L0, LC cusp) +{ + // Find the intersection for upper and lower half separately + float t; + if (((L1 - L0) * cusp.C - (cusp.L - L0) * C1) <= 0.f) + { + // Lower half + + t = cusp.C * L0 / (C1 * cusp.L + cusp.C * (L0 - L1)); + } + else + { + // Upper half + + // First intersect with triangle + t = cusp.C * (L0 - 1.f) / (C1 * (cusp.L - 1.f) + cusp.C * (L0 - L1)); + + // Then one step Halley's method + { + float dL = L1 - L0; + float dC = C1; + + float k_l = +0.3963377774f * a + 0.2158037573f * b; + float k_m = -0.1055613458f * a - 0.0638541728f * b; + float k_s = -0.0894841775f * a - 1.2914855480f * b; + + float l_dt = dL + dC * k_l; + float m_dt = dL + dC * k_m; + float s_dt = dL + dC * k_s; + + + // If higher accuracy is required, 2 or 3 iterations of the following block can be used: + { + float L = L0 * (1.f - t) + t * L1; + float C = t * C1; + +
float l_ = L + C * k_l; + float m_ = L + C * k_m; + float s_ = L + C * k_s; + + float l = l_ * l_ * l_; + float m = m_ * m_ * m_; + float s = s_ * s_ * s_; + + float ldt = 3 * l_dt * l_ * l_; + float mdt = 3 * m_dt * m_ * m_; + float sdt = 3 * s_dt * s_ * s_; + + float ldt2 = 6 * l_dt * l_dt * l_; + float mdt2 = 6 * m_dt * m_dt * m_; + float sdt2 = 6 * s_dt * s_dt * s_; + + float r = 4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s - 1; + float r1 = 4.0767416621f * ldt - 3.3077115913f * mdt + 0.2309699292f * sdt; + float r2 = 4.0767416621f * ldt2 - 3.3077115913f * mdt2 + 0.2309699292f * sdt2; + + float u_r = r1 / (r1 * r1 - 0.5f * r * r2); + float t_r = -r * u_r; + + float g = -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s - 1; + float g1 = -1.2684380046f * ldt + 2.6097574011f * mdt - 0.3413193965f * sdt; + float g2 = -1.2684380046f * ldt2 + 2.6097574011f * mdt2 - 0.3413193965f * sdt2; + + float u_g = g1 / (g1 * g1 - 0.5f * g * g2); + float t_g = -g * u_g; + + b = -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s - 1; + float b1 = -0.0041960863f * ldt - 0.7034186147f * mdt + 1.7076147010f * sdt; + float b2 = -0.0041960863f * ldt2 - 0.7034186147f * mdt2 + 1.7076147010f * sdt2; + + float u_b = b1 / (b1 * b1 - 0.5f * b * b2); + float t_b = -b * u_b; + + t_r = u_r >= 0.f ? t_r : FLT_MAX; + t_g = u_g >= 0.f ? t_g : FLT_MAX; + t_b = u_b >= 0.f ? t_b : FLT_MAX; + + t += fmin(t_r, fmin(t_g, t_b)); + } + } + } + + return t; +} + +float find_gamut_intersection(float a, float b, float L1, float C1, float L0) +{ + // Find the cusp of the gamut triangle + LC cusp = find_cusp(a, b); + + return find_gamut_intersection(a, b, L1, C1, L0, cusp); +} + +RGB gamut_clip_preserve_chroma(RGB rgb) +{ + if (rgb.r < 1 && rgb.g < 1 && rgb.b < 1 && rgb.r > 0 && rgb.g > 0 && rgb.b > 0) + return rgb; + + Lab lab = linear_srgb_to_oklab(rgb); + + float L = lab.L; + float eps = 0.00001f; + float C = fmax(eps, sqrtf(lab.a * lab.a + lab.b * lab.b)); + float a_ = lab.a / C; + float b_ = lab.b / C; + + float L0 = clamp(L, 0, 1); + + float t = find_gamut_intersection(a_, b_, L, C, L0); + float L_clipped = L0 * (1 - t) + t * L; + float C_clipped = t * C; + + return oklab_to_linear_srgb({ L_clipped, C_clipped * a_, C_clipped * b_ }); +} + +RGB gamut_clip_project_to_0_5(RGB rgb) +{ + if (rgb.r < 1 && rgb.g < 1 && rgb.b < 1 && rgb.r > 0 && rgb.g > 0 && rgb.b > 0) + return rgb; + + Lab lab = linear_srgb_to_oklab(rgb); + + float L = lab.L; + float eps = 0.00001f; + float C = fmax(eps, sqrtf(lab.a * lab.a + lab.b * lab.b)); + float a_ = lab.a / C; + float b_ = lab.b / C; + + float L0 = 0.5; + + float t = find_gamut_intersection(a_, b_, L, C, L0); + float L_clipped = L0 * (1 - t) + t * L; + float C_clipped = t * C; + + return oklab_to_linear_srgb({ L_clipped, C_clipped * a_, C_clipped * b_ }); +} + +RGB gamut_clip_project_to_L_cusp(RGB rgb) +{ + if (rgb.r < 1 && rgb.g < 1 && rgb.b < 1 && rgb.r > 0 && rgb.g > 0 && rgb.b > 0) + return rgb; + + Lab lab = linear_srgb_to_oklab(rgb); + + float L = lab.L; + float eps = 0.00001f; + float C = fmax(eps, sqrtf(lab.a * lab.a + lab.b * lab.b)); + float a_ = lab.a / C; + float b_ = lab.b / C; + + // The cusp is computed here and in find_gamut_intersection, an optimized solution would only compute it once. 
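+ // Illustrative aside (not part of the vendored file): the six-argument
+ // find_gamut_intersection() overload above already accepts a precomputed
+ // cusp, so the call below could pass it explicitly,
+ //     float t = find_gamut_intersection(a_, b_, L, C, L0, cusp);
+ // to avoid the duplicate find_cusp() evaluation.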
+ LC cusp = find_cusp(a_, b_); + + float L0 = cusp.L; + + float t = find_gamut_intersection(a_, b_, L, C, L0); + + float L_clipped = L0 * (1 - t) + t * L; + float C_clipped = t * C; + + return oklab_to_linear_srgb({ L_clipped, C_clipped * a_, C_clipped * b_ }); +} + +RGB gamut_clip_adaptive_L0_0_5(RGB rgb, float alpha = 0.05f) +{ + if (rgb.r < 1 && rgb.g < 1 && rgb.b < 1 && rgb.r > 0 && rgb.g > 0 && rgb.b > 0) + return rgb; + + Lab lab = linear_srgb_to_oklab(rgb); + + float L = lab.L; + float eps = 0.00001f; + float C = fmax(eps, sqrtf(lab.a * lab.a + lab.b * lab.b)); + float a_ = lab.a / C; + float b_ = lab.b / C; + + float Ld = L - 0.5f; + float e1 = 0.5f + fabs(Ld) + alpha * C; + float L0 = 0.5f * (1.f + sgn(Ld) * (e1 - sqrtf(e1 * e1 - 2.f * fabs(Ld)))); + + float t = find_gamut_intersection(a_, b_, L, C, L0); + float L_clipped = L0 * (1.f - t) + t * L; + float C_clipped = t * C; + + return oklab_to_linear_srgb({ L_clipped, C_clipped * a_, C_clipped * b_ }); +} + +RGB gamut_clip_adaptive_L0_L_cusp(RGB rgb, float alpha = 0.05f) +{ + if (rgb.r < 1 && rgb.g < 1 && rgb.b < 1 && rgb.r > 0 && rgb.g > 0 && rgb.b > 0) + return rgb; + + Lab lab = linear_srgb_to_oklab(rgb); + + float L = lab.L; + float eps = 0.00001f; + float C = fmax(eps, sqrtf(lab.a * lab.a + lab.b * lab.b)); + float a_ = lab.a / C; + float b_ = lab.b / C; + + // The cusp is computed here and in find_gamut_intersection, an optimized solution would only compute it once. + LC cusp = find_cusp(a_, b_); + + float Ld = L - cusp.L; + float k = 2.f * (Ld > 0 ? 1.f - cusp.L : cusp.L); + + float e1 = 0.5f * k + fabs(Ld) + alpha * C / k; + float L0 = cusp.L + 0.5f * (sgn(Ld) * (e1 - sqrtf(e1 * e1 - 2.f * k * fabs(Ld)))); + + float t = find_gamut_intersection(a_, b_, L, C, L0); + float L_clipped = L0 * (1.f - t) + t * L; + float C_clipped = t * C; + + return oklab_to_linear_srgb({ L_clipped, C_clipped * a_, C_clipped * b_ }); +} + +float toe(float x) +{ + constexpr float k_1 = 0.206f; + constexpr float k_2 = 0.03f; + constexpr float k_3 = (1.f + k_1) / (1.f + k_2); + return 0.5f * (k_3 * x - k_1 + sqrtf((k_3 * x - k_1) * (k_3 * x - k_1) + 4 * k_2 * k_3 * x)); +} + +float toe_inv(float x) +{ + constexpr float k_1 = 0.206f; + constexpr float k_2 = 0.03f; + constexpr float k_3 = (1.f + k_1) / (1.f + k_2); + return (x * x + k_1 * x) / (k_3 * (x + k_2)); +} + +ST to_ST(LC cusp) +{ + float L = cusp.L; + float C = cusp.C; + return { C / L, C / (1 - L) }; +} + +// Returns a smooth approximation of the location of the cusp +// This polynomial was created by an optimization process +// It has been designed so that S_mid < S_max and T_mid < T_max +ST get_ST_mid(float a_, float b_) +{ + float S = 0.11516993f + 1.f / ( + +7.44778970f + 4.15901240f * b_ + + a_ * (-2.19557347f + 1.75198401f * b_ + + a_ * (-2.13704948f - 10.02301043f * b_ + + a_ * (-4.24894561f + 5.38770819f * b_ + 4.69891013f * a_ + ))) + ); + + float T = 0.11239642f + 1.f / ( + +1.61320320f - 0.68124379f * b_ + + a_ * (+0.40370612f + 0.90148123f * b_ + + a_ * (-0.27087943f + 0.61223990f * b_ + + a_ * (+0.00299215f - 0.45399568f * b_ - 0.14661872f * a_ + ))) + ); + + return { S, T }; +} + +struct Cs { float C_0; float C_mid; float C_max; }; +Cs get_Cs(float L, float a_, float b_) +{ + LC cusp = find_cusp(a_, b_); + + float C_max = find_gamut_intersection(a_, b_, L, 1, L, cusp); + ST ST_max = to_ST(cusp); + + // Scale factor to compensate for the curved part of gamut shape: + float k = C_max / fmin((L * ST_max.S), (1 - L) * ST_max.T); + + float C_mid; + { + ST ST_mid = get_ST_mid(a_, 
b_); + + // Use a soft minimum function, instead of a sharp triangle shape to get a smooth value for chroma. + float C_a = L * ST_mid.S; + float C_b = (1.f - L) * ST_mid.T; + C_mid = 0.9f * k * sqrtf(sqrtf(1.f / (1.f / (C_a * C_a * C_a * C_a) + 1.f / (C_b * C_b * C_b * C_b)))); + } + + float C_0; + { + // for C_0, the shape is independent of hue, so ST are constant. Values picked to roughly be the average values of ST. + float C_a = L * 0.4f; + float C_b = (1.f - L) * 0.8f; + + // Use a soft minimum function, instead of a sharp triangle shape to get a smooth value for chroma. + C_0 = sqrtf(1.f / (1.f / (C_a * C_a) + 1.f / (C_b * C_b))); + } + + return { C_0, C_mid, C_max }; +} + +RGB okhsl_to_srgb(HSL hsl) +{ + float h = hsl.h; + float s = hsl.s; + float l = hsl.l; + + if (l == 1.0f) + { + return { 1.f, 1.f, 1.f }; + } + + else if (l == 0.f) + { + return { 0.f, 0.f, 0.f }; + } + + float a_ = cosf(2.f * pi * h); + float b_ = sinf(2.f * pi * h); + float L = toe_inv(l); + + Cs cs = get_Cs(L, a_, b_); + float C_0 = cs.C_0; + float C_mid = cs.C_mid; + float C_max = cs.C_max; + + float mid = 0.8f; + float mid_inv = 1.25f; + + float C, t, k_0, k_1, k_2; + + if (s < mid) + { + t = mid_inv * s; + + k_1 = mid * C_0; + k_2 = (1.f - k_1 / C_mid); + + C = t * k_1 / (1.f - k_2 * t); + } + else + { + t = (s - mid)/ (1 - mid); + + k_0 = C_mid; + k_1 = (1.f - mid) * C_mid * C_mid * mid_inv * mid_inv / C_0; + k_2 = (1.f - (k_1) / (C_max - C_mid)); + + C = k_0 + t * k_1 / (1.f - k_2 * t); + } + + RGB rgb = oklab_to_linear_srgb({ L, C * a_, C * b_ }); + return { + srgb_transfer_function(rgb.r), + srgb_transfer_function(rgb.g), + srgb_transfer_function(rgb.b), + }; +} + +HSL srgb_to_okhsl(RGB rgb) +{ + Lab lab = linear_srgb_to_oklab({ + srgb_transfer_function_inv(rgb.r), + srgb_transfer_function_inv(rgb.g), + srgb_transfer_function_inv(rgb.b) + }); + + float C = sqrtf(lab.a * lab.a + lab.b * lab.b); + float a_ = lab.a / C; + float b_ = lab.b / C; + + float L = lab.L; + float h = 0.5f + 0.5f * atan2f(-lab.b, -lab.a) / pi; + + Cs cs = get_Cs(L, a_, b_); + float C_0 = cs.C_0; + float C_mid = cs.C_mid; + float C_max = cs.C_max; + + // Inverse of the interpolation in okhsl_to_srgb: + + float mid = 0.8f; + float mid_inv = 1.25f; + + float s; + if (C < C_mid) + { + float k_1 = mid * C_0; + float k_2 = (1.f - k_1 / C_mid); + + float t = C / (k_1 + k_2 * C); + s = t * mid; + } + else + { + float k_0 = C_mid; + float k_1 = (1.f - mid) * C_mid * C_mid * mid_inv * mid_inv / C_0; + float k_2 = (1.f - (k_1) / (C_max - C_mid)); + + float t = (C - k_0) / (k_1 + k_2 * (C - k_0)); + s = mid + (1.f - mid) * t; + } + + float l = toe(L); + return { h, s, l }; +} + + +RGB okhsv_to_srgb(HSV hsv) +{ + float h = hsv.h; + float s = hsv.s; + float v = hsv.v; + + float a_ = cosf(2.f * pi * h); + float b_ = sinf(2.f * pi * h); + + LC cusp = find_cusp(a_, b_); + ST ST_max = to_ST(cusp); + float S_max = ST_max.S; + float T_max = ST_max.T; + float S_0 = 0.5f; + float k = 1 - S_0 / S_max; + + // first we compute L and V as if the gamut is a perfect triangle: + + // L, C when v==1: + float L_v = 1 - s * S_0 / (S_0 + T_max - T_max * k * s); + float C_v = s * T_max * S_0 / (S_0 + T_max - T_max * k * s); + + float L = v * L_v; + float C = v * C_v; + + // then we compensate for both toe and the curved top part of the triangle: + float L_vt = toe_inv(L_v); + float C_vt = C_v * L_vt / L_v; + + float L_new = toe_inv(L); + C = C * L_new / L; + L = L_new; + + RGB rgb_scale = oklab_to_linear_srgb({ L_vt, a_ * C_vt, b_ * C_vt }); + float scale_L = 
cbrtf(1.f / fmax(fmax(rgb_scale.r, rgb_scale.g), fmax(rgb_scale.b, 0.f))); + + L = L * scale_L; + C = C * scale_L; + + RGB rgb = oklab_to_linear_srgb({ L, C * a_, C * b_ }); + return { + srgb_transfer_function(rgb.r), + srgb_transfer_function(rgb.g), + srgb_transfer_function(rgb.b), + }; +} + +HSV srgb_to_okhsv(RGB rgb) +{ + Lab lab = linear_srgb_to_oklab({ + srgb_transfer_function_inv(rgb.r), + srgb_transfer_function_inv(rgb.g), + srgb_transfer_function_inv(rgb.b) + }); + + float C = sqrtf(lab.a * lab.a + lab.b * lab.b); + float a_ = lab.a / C; + float b_ = lab.b / C; + + float L = lab.L; + float h = 0.5f + 0.5f * atan2f(-lab.b, -lab.a) / pi; + + LC cusp = find_cusp(a_, b_); + ST ST_max = to_ST(cusp); + float S_max = ST_max.S; + float T_max = ST_max.T; + float S_0 = 0.5f; + float k = 1 - S_0 / S_max; + + // first we find L_v, C_v, L_vt and C_vt + + float t = T_max / (C + L * T_max); + float L_v = t * L; + float C_v = t * C; + + float L_vt = toe_inv(L_v); + float C_vt = C_v * L_vt / L_v; + + // we can then use these to invert the step that compensates for the toe and the curved top part of the triangle: + RGB rgb_scale = oklab_to_linear_srgb({ L_vt, a_ * C_vt, b_ * C_vt }); + float scale_L = cbrtf(1.f / fmax(fmax(rgb_scale.r, rgb_scale.g), fmax(rgb_scale.b, 0.f))); + + L = L / scale_L; + C = C / scale_L; + + C = C * toe(L) / L; + L = toe(L); + + // we can now compute v and s: + + float v = L / L_v; + float s = (S_0 + T_max) * C_v / ((T_max * S_0) + T_max * k * C_v); + + return { h, s, v }; +} + +}; +#endif // OK_COLOR_H diff --git a/thirdparty/misc/ok_color_shader.h b/thirdparty/misc/ok_color_shader.h new file mode 100644 index 0000000000..40d83366ee --- /dev/null +++ b/thirdparty/misc/ok_color_shader.h @@ -0,0 +1,663 @@ +// Copyright(c) 2021 Björn Ottosson +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files(the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and /or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions : +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +#ifndef OK_COLOR_SHADER_H +#define OK_COLOR_SHADER_H + +#include "core/string/ustring.h" + +static String OK_COLOR_SHADER = R"(shader_type canvas_item; + +const float M_PI = 3.1415926535897932384626433832795; + +float cbrt( float x ) +{ + return sign(x)*pow(abs(x),1.0f/3.0f); +} + +float srgb_transfer_function(float a) +{ + return .0031308f >= a ? 12.92f * a : 1.055f * pow(a, .4166666666666667f) - .055f; +} + +float srgb_transfer_function_inv(float a) +{ + return .04045f < a ? 
pow((a + .055f) / 1.055f, 2.4f) : a / 12.92f; +} + +vec3 linear_srgb_to_oklab(vec3 c) +{ + float l = 0.4122214708f * c.r + 0.5363325363f * c.g + 0.0514459929f * c.b; + float m = 0.2119034982f * c.r + 0.6806995451f * c.g + 0.1073969566f * c.b; + float s = 0.0883024619f * c.r + 0.2817188376f * c.g + 0.6299787005f * c.b; + + float l_ = cbrt(l); + float m_ = cbrt(m); + float s_ = cbrt(s); + + return vec3( + 0.2104542553f * l_ + 0.7936177850f * m_ - 0.0040720468f * s_, + 1.9779984951f * l_ - 2.4285922050f * m_ + 0.4505937099f * s_, + 0.0259040371f * l_ + 0.7827717662f * m_ - 0.8086757660f * s_ + ); +} + +vec3 oklab_to_linear_srgb(vec3 c) +{ + float l_ = c.x + 0.3963377774f * c.y + 0.2158037573f * c.z; + float m_ = c.x - 0.1055613458f * c.y - 0.0638541728f * c.z; + float s_ = c.x - 0.0894841775f * c.y - 1.2914855480f * c.z; + + float l = l_ * l_ * l_; + float m = m_ * m_ * m_; + float s = s_ * s_ * s_; + + return vec3( + +4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s, + -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s, + -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s + ); +} + +// Finds the maximum saturation possible for a given hue that fits in sRGB +// Saturation here is defined as S = C/L +// a and b must be normalized so a^2 + b^2 == 1 +float compute_max_saturation(float a, float b) +{ + // Max saturation will be when one of r, g or b goes below zero. + + // Select different coefficients depending on which component goes below zero first + float k0, k1, k2, k3, k4, wl, wm, ws; + + if (-1.88170328f * a - 0.80936493f * b > 1.f) + { + // Red component + k0 = +1.19086277f; k1 = +1.76576728f; k2 = +0.59662641f; k3 = +0.75515197f; k4 = +0.56771245f; + wl = +4.0767416621f; wm = -3.3077115913f; ws = +0.2309699292f; + } + else if (1.81444104f * a - 1.19445276f * b > 1.f) + { + // Green component + k0 = +0.73956515f; k1 = -0.45954404f; k2 = +0.08285427f; k3 = +0.12541070f; k4 = +0.14503204f; + wl = -1.2684380046f; wm = +2.6097574011f; ws = -0.3413193965f; + } + else + { + // Blue component + k0 = +1.35733652f; k1 = -0.00915799f; k2 = -1.15130210f; k3 = -0.50559606f; k4 = +0.00692167f; + wl = -0.0041960863f; wm = -0.7034186147f; ws = +1.7076147010f; + } + + // Approximate max saturation using a polynomial: + float S = k0 + k1 * a + k2 * b + k3 * a * a + k4 * a * b; + + // Do one step Halley's method to get closer + // this gives an error less than 10e6, except for some blue hues where the dS/dh is close to infinite + // this should be sufficient for most applications, otherwise do two/three steps + + float k_l = +0.3963377774f * a + 0.2158037573f * b; + float k_m = -0.1055613458f * a - 0.0638541728f * b; + float k_s = -0.0894841775f * a - 1.2914855480f * b; + + { + float l_ = 1.f + S * k_l; + float m_ = 1.f + S * k_m; + float s_ = 1.f + S * k_s; + + float l = l_ * l_ * l_; + float m = m_ * m_ * m_; + float s = s_ * s_ * s_; + + float l_dS = 3.f * k_l * l_ * l_; + float m_dS = 3.f * k_m * m_ * m_; + float s_dS = 3.f * k_s * s_ * s_; + + float l_dS2 = 6.f * k_l * k_l * l_; + float m_dS2 = 6.f * k_m * k_m * m_; + float s_dS2 = 6.f * k_s * k_s * s_; + + float f = wl * l + wm * m + ws * s; + float f1 = wl * l_dS + wm * m_dS + ws * s_dS; + float f2 = wl * l_dS2 + wm * m_dS2 + ws * s_dS2; + + S = S - f * f1 / (f1 * f1 - 0.5f * f * f2); + } + + return S; +} + +// finds L_cusp and C_cusp for a given hue +// a and b must be normalized so a^2 + b^2 == 1 +vec2 find_cusp(float a, float b) +{ + // First, find the maximum saturation (saturation S = C/L) + float S_cusp = 
compute_max_saturation(a, b); + + // Convert to linear sRGB to find the first point where at least one of r,g or b >= 1: + vec3 rgb_at_max = oklab_to_linear_srgb(vec3( 1, S_cusp * a, S_cusp * b )); + float L_cusp = cbrt(1.f / max(max(rgb_at_max.r, rgb_at_max.g), rgb_at_max.b)); + float C_cusp = L_cusp * S_cusp; + + return vec2( L_cusp , C_cusp ); +} )" +R"(// Finds intersection of the line defined by +// L = L0 * (1 - t) + t * L1; +// C = t * C1; +// a and b must be normalized so a^2 + b^2 == 1 +float find_gamut_intersection(float a, float b, float L1, float C1, float L0, vec2 cusp) +{ + // Find the intersection for upper and lower half separately + float t; + if (((L1 - L0) * cusp.y - (cusp.x - L0) * C1) <= 0.f) + { + // Lower half + + t = cusp.y * L0 / (C1 * cusp.x + cusp.y * (L0 - L1)); + } + else + { + // Upper half + + // First intersect with triangle + t = cusp.y * (L0 - 1.f) / (C1 * (cusp.x - 1.f) + cusp.y * (L0 - L1)); + + // Then one step Halley's method + { + float dL = L1 - L0; + float dC = C1; + + float k_l = +0.3963377774f * a + 0.2158037573f * b; + float k_m = -0.1055613458f * a - 0.0638541728f * b; + float k_s = -0.0894841775f * a - 1.2914855480f * b; + + float l_dt = dL + dC * k_l; + float m_dt = dL + dC * k_m; + float s_dt = dL + dC * k_s; + + + // If higher accuracy is required, 2 or 3 iterations of the following block can be used: + { + float L = L0 * (1.f - t) + t * L1; + float C = t * C1; + + float l_ = L + C * k_l; + float m_ = L + C * k_m; + float s_ = L + C * k_s; + + float l = l_ * l_ * l_; + float m = m_ * m_ * m_; + float s = s_ * s_ * s_; + + float ldt = 3.f * l_dt * l_ * l_; + float mdt = 3.f * m_dt * m_ * m_; + float sdt = 3.f * s_dt * s_ * s_; + + float ldt2 = 6.f * l_dt * l_dt * l_; + float mdt2 = 6.f * m_dt * m_dt * m_; + float sdt2 = 6.f * s_dt * s_dt * s_; + + float r = 4.0767416621f * l - 3.3077115913f * m + 0.2309699292f * s - 1.f; + float r1 = 4.0767416621f * ldt - 3.3077115913f * mdt + 0.2309699292f * sdt; + float r2 = 4.0767416621f * ldt2 - 3.3077115913f * mdt2 + 0.2309699292f * sdt2; + + float u_r = r1 / (r1 * r1 - 0.5f * r * r2); + float t_r = -r * u_r; + + float g = -1.2684380046f * l + 2.6097574011f * m - 0.3413193965f * s - 1.f; + float g1 = -1.2684380046f * ldt + 2.6097574011f * mdt - 0.3413193965f * sdt; + float g2 = -1.2684380046f * ldt2 + 2.6097574011f * mdt2 - 0.3413193965f * sdt2; + + float u_g = g1 / (g1 * g1 - 0.5f * g * g2); + float t_g = -g * u_g; + + float b = -0.0041960863f * l - 0.7034186147f * m + 1.7076147010f * s - 1.f; + float b1 = -0.0041960863f * ldt - 0.7034186147f * mdt + 1.7076147010f * sdt; + float b2 = -0.0041960863f * ldt2 - 0.7034186147f * mdt2 + 1.7076147010f * sdt2; + + float u_b = b1 / (b1 * b1 - 0.5f * b * b2); + float t_b = -b * u_b; + + t_r = u_r >= 0.f ? t_r : 10000.f; + t_g = u_g >= 0.f ? t_g : 10000.f; + t_b = u_b >= 0.f ?
t_b : 10000.f; + + t += min(t_r, min(t_g, t_b)); + } + } + } + + return t; +} + +float find_gamut_intersection_5(float a, float b, float L1, float C1, float L0) +{ + // Find the cusp of the gamut triangle + vec2 cusp = find_cusp(a, b); + + return find_gamut_intersection(a, b, L1, C1, L0, cusp); +})" +R"( + +vec3 gamut_clip_preserve_chroma(vec3 rgb) +{ + if (rgb.r < 1.f && rgb.g < 1.f && rgb.b < 1.f && rgb.r > 0.f && rgb.g > 0.f && rgb.b > 0.f) + return rgb; + + vec3 lab = linear_srgb_to_oklab(rgb); + + float L = lab.x; + float eps = 0.00001f; + float C = max(eps, sqrt(lab.y * lab.y + lab.z * lab.z)); + float a_ = lab.y / C; + float b_ = lab.z / C; + + float L0 = clamp(L, 0.f, 1.f); + + float t = find_gamut_intersection_5(a_, b_, L, C, L0); + float L_clipped = L0 * (1.f - t) + t * L; + float C_clipped = t * C; + + return oklab_to_linear_srgb(vec3( L_clipped, C_clipped * a_, C_clipped * b_ )); +} + +vec3 gamut_clip_project_to_0_5(vec3 rgb) +{ + if (rgb.r < 1.f && rgb.g < 1.f && rgb.b < 1.f && rgb.r > 0.f && rgb.g > 0.f && rgb.b > 0.f) + return rgb; + + vec3 lab = linear_srgb_to_oklab(rgb); + + float L = lab.x; + float eps = 0.00001f; + float C = max(eps, sqrt(lab.y * lab.y + lab.z * lab.z)); + float a_ = lab.y / C; + float b_ = lab.z / C; + + float L0 = 0.5; + + float t = find_gamut_intersection_5(a_, b_, L, C, L0); + float L_clipped = L0 * (1.f - t) + t * L; + float C_clipped = t * C; + + return oklab_to_linear_srgb(vec3( L_clipped, C_clipped * a_, C_clipped * b_ )); +} + +vec3 gamut_clip_project_to_L_cusp(vec3 rgb) +{ + if (rgb.r < 1.f && rgb.g < 1.f && rgb.b < 1.f && rgb.r > 0.f && rgb.g > 0.f && rgb.b > 0.f) + return rgb; + + vec3 lab = linear_srgb_to_oklab(rgb); + + float L = lab.x; + float eps = 0.00001f; + float C = max(eps, sqrt(lab.y * lab.y + lab.z * lab.z)); + float a_ = lab.y / C; + float b_ = lab.z / C; + + // The cusp is computed here and in find_gamut_intersection, an optimized solution would only compute it once. + vec2 cusp = find_cusp(a_, b_); + + float L0 = cusp.x; + + float t = find_gamut_intersection_5(a_, b_, L, C, L0); + + float L_clipped = L0 * (1.f - t) + t * L; + float C_clipped = t * C; + + return oklab_to_linear_srgb(vec3( L_clipped, C_clipped * a_, C_clipped * b_ )); +} + +vec3 gamut_clip_adaptive_L0_0_5(vec3 rgb, float alpha) +{ + if (rgb.r < 1.f && rgb.g < 1.f && rgb.b < 1.f && rgb.r > 0.f && rgb.g > 0.f && rgb.b > 0.f) + return rgb; + + vec3 lab = linear_srgb_to_oklab(rgb); + + float L = lab.x; + float eps = 0.00001f; + float C = max(eps, sqrt(lab.y * lab.y + lab.z * lab.z)); + float a_ = lab.y / C; + float b_ = lab.z / C; + + float Ld = L - 0.5f; + float e1 = 0.5f + abs(Ld) + alpha * C; + float L0 = 0.5f * (1.f + sign(Ld) * (e1 - sqrt(e1 * e1 - 2.f * abs(Ld)))); + + float t = find_gamut_intersection_5(a_, b_, L, C, L0); + float L_clipped = L0 * (1.f - t) + t * L; + float C_clipped = t * C; + + return oklab_to_linear_srgb(vec3( L_clipped, C_clipped * a_, C_clipped * b_ )); +} + +vec3 gamut_clip_adaptive_L0_L_cusp(vec3 rgb, float alpha) +{ + if (rgb.r < 1.f && rgb.g < 1.f && rgb.b < 1.f && rgb.r > 0.f && rgb.g > 0.f && rgb.b > 0.f) + return rgb; + + vec3 lab = linear_srgb_to_oklab(rgb); + + float L = lab.x; + float eps = 0.00001f; + float C = max(eps, sqrt(lab.y * lab.y + lab.z * lab.z)); + float a_ = lab.y / C; + float b_ = lab.z / C; + + // The cusp is computed here and in find_gamut_intersection, an optimized solution would only compute it once. + vec2 cusp = find_cusp(a_, b_); + + float Ld = L - cusp.x; + float k = 2.f * (Ld > 0.f ? 
1.f - cusp.x : cusp.x); + + float e1 = 0.5f * k + abs(Ld) + alpha * C / k; + float L0 = cusp.x + 0.5f * (sign(Ld) * (e1 - sqrt(e1 * e1 - 2.f * k * abs(Ld)))); + + float t = find_gamut_intersection_5(a_, b_, L, C, L0); + float L_clipped = L0 * (1.f - t) + t * L; + float C_clipped = t * C; + + return oklab_to_linear_srgb(vec3( L_clipped, C_clipped * a_, C_clipped * b_ )); +} + +float toe(float x) +{ + float k_1 = 0.206f; + float k_2 = 0.03f; + float k_3 = (1.f + k_1) / (1.f + k_2); + return 0.5f * (k_3 * x - k_1 + sqrt((k_3 * x - k_1) * (k_3 * x - k_1) + 4.f * k_2 * k_3 * x)); +} + +float toe_inv(float x) +{ + float k_1 = 0.206f; + float k_2 = 0.03f; + float k_3 = (1.f + k_1) / (1.f + k_2); + return (x * x + k_1 * x) / (k_3 * (x + k_2)); +} +)" +R"(vec2 to_ST(vec2 cusp) +{ + float L = cusp.x; + float C = cusp.y; + return vec2( C / L, C / (1.f - L) ); +} + +// Returns a smooth approximation of the location of the cusp +// This polynomial was created by an optimization process +// It has been designed so that S_mid < S_max and T_mid < T_max +vec2 get_ST_mid(float a_, float b_) +{ + float S = 0.11516993f + 1.f / ( + +7.44778970f + 4.15901240f * b_ + + a_ * (-2.19557347f + 1.75198401f * b_ + + a_ * (-2.13704948f - 10.02301043f * b_ + + a_ * (-4.24894561f + 5.38770819f * b_ + 4.69891013f * a_ + ))) + ); + + float T = 0.11239642f + 1.f / ( + +1.61320320f - 0.68124379f * b_ + + a_ * (+0.40370612f + 0.90148123f * b_ + + a_ * (-0.27087943f + 0.61223990f * b_ + + a_ * (+0.00299215f - 0.45399568f * b_ - 0.14661872f * a_ + ))) + ); + + return vec2( S, T ); +} + +vec3 get_Cs(float L, float a_, float b_) +{ + vec2 cusp = find_cusp(a_, b_); + + float C_max = find_gamut_intersection(a_, b_, L, 1.f, L, cusp); + vec2 ST_max = to_ST(cusp); + + // Scale factor to compensate for the curved part of gamut shape: + float k = C_max / min((L * ST_max.x), (1.f - L) * ST_max.y); + + float C_mid; + { + vec2 ST_mid = get_ST_mid(a_, b_); + + // Use a soft minimum function, instead of a sharp triangle shape to get a smooth value for chroma. + float C_a = L * ST_mid.x; + float C_b = (1.f - L) * ST_mid.y; + C_mid = 0.9f * k * sqrt(sqrt(1.f / (1.f / (C_a * C_a * C_a * C_a) + 1.f / (C_b * C_b * C_b * C_b)))); + } + + float C_0; + { + // for C_0, the shape is independent of hue, so S and T are constant. Values picked to roughly be the average values of S and T. + float C_a = L * 0.4f; + float C_b = (1.f - L) * 0.8f; + + // Use a soft minimum function, instead of a sharp triangle shape to get a smooth value for chroma.
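+ // Illustrative aside (not part of the vendored file): the expression below,
+ // sqrt(1.0 / (1.0 / (x * x) + 1.0 / (y * y))), equals x * y / sqrt(x * x + y * y),
+ // which is always <= min(x, y) and approaches the smaller argument as the two
+ // diverge: soft_min(1.0, 1.0) ~= 0.707, while soft_min(0.1, 1.0) ~= 0.0995.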
+ C_0 = sqrt(1.f / (1.f / (C_a * C_a) + 1.f / (C_b * C_b))); + } + + return vec3( C_0, C_mid, C_max ); +} + +vec3 okhsl_to_srgb(vec3 hsl) +{ + float h = hsl.x; + float s = hsl.y; + float l = hsl.z; + + if (l == 1.0f) + { + return vec3( 1.f, 1.f, 1.f ); + } + + else if (l == 0.f) + { + return vec3( 0.f, 0.f, 0.f ); + } + + float a_ = cos(2.f * M_PI * h); + float b_ = sin(2.f * M_PI * h); + float L = toe_inv(l); + + vec3 cs = get_Cs(L, a_, b_); + float C_0 = cs.x; + float C_mid = cs.y; + float C_max = cs.z; + + float mid = 0.8f; + float mid_inv = 1.25f; + + float C, t, k_0, k_1, k_2; + + if (s < mid) + { + t = mid_inv * s; + + k_1 = mid * C_0; + k_2 = (1.f - k_1 / C_mid); + + C = t * k_1 / (1.f - k_2 * t); + } + else + { + t = (s - mid)/ (1.f - mid); + + k_0 = C_mid; + k_1 = (1.f - mid) * C_mid * C_mid * mid_inv * mid_inv / C_0; + k_2 = (1.f - (k_1) / (C_max - C_mid)); + + C = k_0 + t * k_1 / (1.f - k_2 * t); + } + + vec3 rgb = oklab_to_linear_srgb(vec3( L, C * a_, C * b_ )); + return vec3( + srgb_transfer_function(rgb.r), + srgb_transfer_function(rgb.g), + srgb_transfer_function(rgb.b) + ); +} + +vec3 srgb_to_okhsl(vec3 rgb) +{ + vec3 lab = linear_srgb_to_oklab(vec3( + srgb_transfer_function_inv(rgb.r), + srgb_transfer_function_inv(rgb.g), + srgb_transfer_function_inv(rgb.b) + )); + + float C = sqrt(lab.y * lab.y + lab.z * lab.z); + float a_ = lab.y / C; + float b_ = lab.z / C; + + float L = lab.x; + float h = 0.5f + 0.5f * atan(-lab.z, -lab.y) / M_PI; + + vec3 cs = get_Cs(L, a_, b_); + float C_0 = cs.x; + float C_mid = cs.y; + float C_max = cs.z; + + // Inverse of the interpolation in okhsl_to_srgb: + + float mid = 0.8f; + float mid_inv = 1.25f; + + float s; + if (C < C_mid) + { + float k_1 = mid * C_0; + float k_2 = (1.f - k_1 / C_mid); + + float t = C / (k_1 + k_2 * C); + s = t * mid; + } + else + { + float k_0 = C_mid; + float k_1 = (1.f - mid) * C_mid * C_mid * mid_inv * mid_inv / C_0; + float k_2 = (1.f - (k_1) / (C_max - C_mid)); + + float t = (C - k_0) / (k_1 + k_2 * (C - k_0)); + s = mid + (1.f - mid) * t; + } + + float l = toe(L); + return vec3( h, s, l ); +} + + +vec3 okhsv_to_srgb(vec3 hsv) +{ + float h = hsv.x; + float s = hsv.y; + float v = hsv.z; + + float a_ = cos(2.f * M_PI * h); + float b_ = sin(2.f * M_PI * h); + + vec2 cusp = find_cusp(a_, b_); + vec2 ST_max = to_ST(cusp); + float S_max = ST_max.x; + float T_max = ST_max.y; + float S_0 = 0.5f; + float k = 1.f- S_0 / S_max; + + // first we compute L and V as if the gamut is a perfect triangle: + + // L, C when v==1: + float L_v = 1.f - s * S_0 / (S_0 + T_max - T_max * k * s); + float C_v = s * T_max * S_0 / (S_0 + T_max - T_max * k * s); + + float L = v * L_v; + float C = v * C_v; + + // then we compensate for both toe and the curved top part of the triangle: + float L_vt = toe_inv(L_v); + float C_vt = C_v * L_vt / L_v; + + float L_new = toe_inv(L); + C = C * L_new / L; + L = L_new; + + vec3 rgb_scale = oklab_to_linear_srgb(vec3( L_vt, a_ * C_vt, b_ * C_vt )); + float scale_L = cbrt(1.f / max(max(rgb_scale.r, rgb_scale.g), max(rgb_scale.b, 0.f))); + + L = L * scale_L; + C = C * scale_L; + + vec3 rgb = oklab_to_linear_srgb(vec3( L, C * a_, C * b_ )); + return vec3( + srgb_transfer_function(rgb.r), + srgb_transfer_function(rgb.g), + srgb_transfer_function(rgb.b) + ); +} +)" +R"( +vec3 srgb_to_okhsv(vec3 rgb) +{ + vec3 lab = linear_srgb_to_oklab(vec3( + srgb_transfer_function_inv(rgb.r), + srgb_transfer_function_inv(rgb.g), + srgb_transfer_function_inv(rgb.b) + )); + + float C = sqrt(lab.y * lab.y + lab.z * lab.z); + float 
a_ = lab.y / C; + float b_ = lab.z / C; + + float L = lab.x; + float h = 0.5f + 0.5f * atan(-lab.z, -lab.y) / M_PI; + + vec2 cusp = find_cusp(a_, b_); + vec2 ST_max = to_ST(cusp); + float S_max = ST_max.x; + float T_max = ST_max.y; + float S_0 = 0.5f; + float k = 1.f - S_0 / S_max; + + // first we find L_v, C_v, L_vt and C_vt + + float t = T_max / (C + L * T_max); + float L_v = t * L; + float C_v = t * C; + + float L_vt = toe_inv(L_v); + float C_vt = C_v * L_vt / L_v; + + // we can then use these to invert the step that compensates for the toe and the curved top part of the triangle: + vec3 rgb_scale = oklab_to_linear_srgb(vec3( L_vt, a_ * C_vt, b_ * C_vt )); + float scale_L = cbrt(1.f / max(max(rgb_scale.r, rgb_scale.g), max(rgb_scale.b, 0.f))); + + L = L / scale_L; + C = C / scale_L; + + C = C * toe(L) / L; + L = toe(L); + + // we can now compute v and s: + + float v = L / L_v; + float s = (S_0 + T_max) * C_v / ((T_max * S_0) + T_max * k * C_v); + + return vec3 (h, s, v ); +})"; + +#endif diff --git a/thirdparty/openxr/include/openxr/openxr.h b/thirdparty/openxr/include/openxr/openxr.h index 8798e5a6e0..6c6a52d27e 100644 --- a/thirdparty/openxr/include/openxr/openxr.h +++ b/thirdparty/openxr/include/openxr/openxr.h @@ -25,12 +25,15 @@ extern "C" { ((((major) & 0xffffULL) << 48) | (((minor) & 0xffffULL) << 32) | ((patch) & 0xffffffffULL)) // OpenXR current version number. -#define XR_CURRENT_API_VERSION XR_MAKE_VERSION(1, 0, 22) +#define XR_CURRENT_API_VERSION XR_MAKE_VERSION(1, 0, 23) #define XR_VERSION_MAJOR(version) (uint16_t)(((uint64_t)(version) >> 48)& 0xffffULL) #define XR_VERSION_MINOR(version) (uint16_t)(((uint64_t)(version) >> 32) & 0xffffULL) #define XR_VERSION_PATCH(version) (uint32_t)((uint64_t)(version) & 0xffffffffULL) +#define XR_MIN_COMPOSITION_LAYERS_SUPPORTED 16 + + #if !defined(XR_NULL_HANDLE) #if (XR_PTR_SIZE == 8) && XR_CPP_NULLPTR_SUPPORTED #define XR_NULL_HANDLE nullptr @@ -120,7 +123,6 @@ XR_DEFINE_HANDLE(XrActionSet) #define XR_MAX_PATH_LENGTH 256 #define XR_MAX_STRUCTURE_NAME_SIZE 64 #define XR_MAX_RESULT_STRING_SIZE 64 -#define XR_MIN_COMPOSITION_LAYERS_SUPPORTED 16 #define XR_MAX_ACTION_SET_NAME_SIZE 64 #define XR_MAX_LOCALIZED_ACTION_SET_NAME_SIZE 128 #define XR_MAX_ACTION_NAME_SIZE 64 @@ -196,6 +198,10 @@ typedef enum XrResult { XR_ERROR_SCENE_COMPUTE_CONSISTENCY_MISMATCH_MSFT = -1000097005, XR_ERROR_DISPLAY_REFRESH_RATE_UNSUPPORTED_FB = -1000101000, XR_ERROR_COLOR_SPACE_UNSUPPORTED_FB = -1000108000, + XR_ERROR_SPACE_COMPONENT_NOT_SUPPORTED_FB = -1000113000, + XR_ERROR_SPACE_COMPONENT_NOT_ENABLED_FB = -1000113001, + XR_ERROR_SPACE_COMPONENT_STATUS_PENDING_FB = -1000113002, + XR_ERROR_SPACE_COMPONENT_STATUS_ALREADY_SET_FB = -1000113003, XR_ERROR_UNEXPECTED_STATE_PASSTHROUGH_FB = -1000118000, XR_ERROR_FEATURE_ALREADY_CREATED_PASSTHROUGH_FB = -1000118001, XR_ERROR_FEATURE_REQUIRED_PASSTHROUGH_FB = -1000118002, @@ -305,6 +311,9 @@ typedef enum XrStructureType { XR_TYPE_VIEW_CONFIGURATION_DEPTH_RANGE_EXT = 1000046000, XR_TYPE_GRAPHICS_BINDING_EGL_MNDX = 1000048004, XR_TYPE_SPATIAL_GRAPH_NODE_SPACE_CREATE_INFO_MSFT = 1000049000, + XR_TYPE_SPATIAL_GRAPH_STATIC_NODE_BINDING_CREATE_INFO_MSFT = 1000049001, + XR_TYPE_SPATIAL_GRAPH_NODE_BINDING_PROPERTIES_GET_INFO_MSFT = 1000049002, + XR_TYPE_SPATIAL_GRAPH_NODE_BINDING_PROPERTIES_MSFT = 1000049003, XR_TYPE_SYSTEM_HAND_TRACKING_PROPERTIES_EXT = 1000051000, XR_TYPE_HAND_TRACKER_CREATE_INFO_EXT = 1000051001, XR_TYPE_HAND_JOINTS_LOCATE_INFO_EXT = 1000051002, @@ -332,6 +341,7 @@ typedef enum XrStructureType { 
diff --git a/thirdparty/openxr/include/openxr/openxr.h b/thirdparty/openxr/include/openxr/openxr.h
index 8798e5a6e0..6c6a52d27e 100644
--- a/thirdparty/openxr/include/openxr/openxr.h
+++ b/thirdparty/openxr/include/openxr/openxr.h
@@ -25,12 +25,15 @@ extern "C" {
     ((((major) & 0xffffULL) << 48) | (((minor) & 0xffffULL) << 32) | ((patch) & 0xffffffffULL))
 
 // OpenXR current version number.
-#define XR_CURRENT_API_VERSION XR_MAKE_VERSION(1, 0, 22)
+#define XR_CURRENT_API_VERSION XR_MAKE_VERSION(1, 0, 23)
 
 #define XR_VERSION_MAJOR(version) (uint16_t)(((uint64_t)(version) >> 48)& 0xffffULL)
 #define XR_VERSION_MINOR(version) (uint16_t)(((uint64_t)(version) >> 32) & 0xffffULL)
 #define XR_VERSION_PATCH(version) (uint32_t)((uint64_t)(version) & 0xffffffffULL)
 
+#define XR_MIN_COMPOSITION_LAYERS_SUPPORTED 16
+
+
 #if !defined(XR_NULL_HANDLE)
 #if (XR_PTR_SIZE == 8) && XR_CPP_NULLPTR_SUPPORTED
 #define XR_NULL_HANDLE nullptr
@@ -120,7 +123,6 @@ XR_DEFINE_HANDLE(XrActionSet)
 #define XR_MAX_PATH_LENGTH 256
 #define XR_MAX_STRUCTURE_NAME_SIZE 64
 #define XR_MAX_RESULT_STRING_SIZE 64
-#define XR_MIN_COMPOSITION_LAYERS_SUPPORTED 16
 #define XR_MAX_ACTION_SET_NAME_SIZE 64
 #define XR_MAX_LOCALIZED_ACTION_SET_NAME_SIZE 128
 #define XR_MAX_ACTION_NAME_SIZE 64
@@ -196,6 +198,10 @@ typedef enum XrResult {
     XR_ERROR_SCENE_COMPUTE_CONSISTENCY_MISMATCH_MSFT = -1000097005,
     XR_ERROR_DISPLAY_REFRESH_RATE_UNSUPPORTED_FB = -1000101000,
     XR_ERROR_COLOR_SPACE_UNSUPPORTED_FB = -1000108000,
+    XR_ERROR_SPACE_COMPONENT_NOT_SUPPORTED_FB = -1000113000,
+    XR_ERROR_SPACE_COMPONENT_NOT_ENABLED_FB = -1000113001,
+    XR_ERROR_SPACE_COMPONENT_STATUS_PENDING_FB = -1000113002,
+    XR_ERROR_SPACE_COMPONENT_STATUS_ALREADY_SET_FB = -1000113003,
     XR_ERROR_UNEXPECTED_STATE_PASSTHROUGH_FB = -1000118000,
     XR_ERROR_FEATURE_ALREADY_CREATED_PASSTHROUGH_FB = -1000118001,
     XR_ERROR_FEATURE_REQUIRED_PASSTHROUGH_FB = -1000118002,
@@ -305,6 +311,9 @@ typedef enum XrStructureType {
     XR_TYPE_VIEW_CONFIGURATION_DEPTH_RANGE_EXT = 1000046000,
     XR_TYPE_GRAPHICS_BINDING_EGL_MNDX = 1000048004,
     XR_TYPE_SPATIAL_GRAPH_NODE_SPACE_CREATE_INFO_MSFT = 1000049000,
+    XR_TYPE_SPATIAL_GRAPH_STATIC_NODE_BINDING_CREATE_INFO_MSFT = 1000049001,
+    XR_TYPE_SPATIAL_GRAPH_NODE_BINDING_PROPERTIES_GET_INFO_MSFT = 1000049002,
+    XR_TYPE_SPATIAL_GRAPH_NODE_BINDING_PROPERTIES_MSFT = 1000049003,
     XR_TYPE_SYSTEM_HAND_TRACKING_PROPERTIES_EXT = 1000051000,
     XR_TYPE_HAND_TRACKER_CREATE_INFO_EXT = 1000051001,
     XR_TYPE_HAND_JOINTS_LOCATE_INFO_EXT = 1000051002,
@@ -332,6 +341,7 @@ typedef enum XrStructureType {
     XR_TYPE_COMPOSITION_LAYER_REPROJECTION_PLANE_OVERRIDE_MSFT = 1000066001,
     XR_TYPE_ANDROID_SURFACE_SWAPCHAIN_CREATE_INFO_FB = 1000070000,
     XR_TYPE_COMPOSITION_LAYER_SECURE_CONTENT_FB = 1000072000,
+    XR_TYPE_INTERACTION_PROFILE_DPAD_BINDING_EXT = 1000078000,
     XR_TYPE_INTERACTION_PROFILE_ANALOG_THRESHOLD_VALVE = 1000079000,
     XR_TYPE_HAND_JOINTS_MOTION_RANGE_INFO_EXT = 1000080000,
     XR_TYPE_LOADER_INIT_INFO_ANDROID_KHR = 1000089000,
@@ -371,6 +381,12 @@ typedef enum XrStructureType {
     XR_TYPE_HAND_TRACKING_SCALE_FB = 1000110003,
     XR_TYPE_HAND_TRACKING_AIM_STATE_FB = 1000111001,
     XR_TYPE_HAND_TRACKING_CAPSULES_STATE_FB = 1000112000,
+    XR_TYPE_SYSTEM_SPATIAL_ENTITY_PROPERTIES_FB = 1000113004,
+    XR_TYPE_SPATIAL_ANCHOR_CREATE_INFO_FB = 1000113003,
+    XR_TYPE_SPACE_COMPONENT_STATUS_SET_INFO_FB = 1000113007,
+    XR_TYPE_SPACE_COMPONENT_STATUS_FB = 1000113001,
+    XR_TYPE_EVENT_DATA_SPATIAL_ANCHOR_CREATE_COMPLETE_FB = 1000113005,
+    XR_TYPE_EVENT_DATA_SPACE_SET_STATUS_COMPLETE_FB = 1000113006,
     XR_TYPE_FOVEATION_PROFILE_CREATE_INFO_FB = 1000114000,
     XR_TYPE_SWAPCHAIN_CREATE_INFO_FOVEATION_FB = 1000114001,
     XR_TYPE_SWAPCHAIN_STATE_FOVEATION_FB = 1000114002,
@@ -388,12 +404,14 @@ typedef enum XrStructureType {
     XR_TYPE_PASSTHROUGH_STYLE_FB = 1000118020,
     XR_TYPE_PASSTHROUGH_COLOR_MAP_MONO_TO_RGBA_FB = 1000118021,
     XR_TYPE_PASSTHROUGH_COLOR_MAP_MONO_TO_MONO_FB = 1000118022,
+    XR_TYPE_PASSTHROUGH_BRIGHTNESS_CONTRAST_SATURATION_FB = 1000118023,
     XR_TYPE_EVENT_DATA_PASSTHROUGH_STATE_CHANGED_FB = 1000118030,
     XR_TYPE_RENDER_MODEL_PATH_INFO_FB = 1000119000,
     XR_TYPE_RENDER_MODEL_PROPERTIES_FB = 1000119001,
     XR_TYPE_RENDER_MODEL_BUFFER_FB = 1000119002,
     XR_TYPE_RENDER_MODEL_LOAD_INFO_FB = 1000119003,
     XR_TYPE_SYSTEM_RENDER_MODEL_PROPERTIES_FB = 1000119004,
+    XR_TYPE_RENDER_MODEL_CAPABILITIES_REQUEST_FB = 1000119005,
     XR_TYPE_BINDING_MODIFICATIONS_KHR = 1000120000,
     XR_TYPE_VIEW_LOCATE_FOVEATED_RENDERING_VARJO = 1000121000,
     XR_TYPE_FOVEATED_VIEW_CONFIGURATION_VIEW_VARJO = 1000121001,
@@ -404,6 +422,17 @@ typedef enum XrStructureType {
     XR_TYPE_MARKER_SPACE_CREATE_INFO_VARJO = 1000124002,
     XR_TYPE_SPATIAL_ANCHOR_PERSISTENCE_INFO_MSFT = 1000142000,
     XR_TYPE_SPATIAL_ANCHOR_FROM_PERSISTED_ANCHOR_CREATE_INFO_MSFT = 1000142001,
+    XR_TYPE_SPACE_QUERY_INFO_FB = 1000156001,
+    XR_TYPE_SPACE_QUERY_RESULTS_FB = 1000156002,
+    XR_TYPE_SPACE_STORAGE_LOCATION_FILTER_INFO_FB = 1000156003,
+    XR_TYPE_SPACE_UUID_FILTER_INFO_FB = 1000156054,
+    XR_TYPE_SPACE_COMPONENT_FILTER_INFO_FB = 1000156052,
+    XR_TYPE_EVENT_DATA_SPACE_QUERY_RESULTS_AVAILABLE_FB = 1000156103,
+    XR_TYPE_EVENT_DATA_SPACE_QUERY_COMPLETE_FB = 1000156104,
+    XR_TYPE_SPACE_SAVE_INFO_FB = 1000158000,
+    XR_TYPE_SPACE_ERASE_INFO_FB = 1000158001,
+    XR_TYPE_EVENT_DATA_SPACE_SAVE_COMPLETE_FB = 1000158106,
+    XR_TYPE_EVENT_DATA_SPACE_ERASE_COMPLETE_FB = 1000158107,
     XR_TYPE_SWAPCHAIN_IMAGE_FOVEATION_VULKAN_FB = 1000160000,
     XR_TYPE_SWAPCHAIN_STATE_ANDROID_SURFACE_DIMENSIONS_FB = 1000161000,
     XR_TYPE_SWAPCHAIN_STATE_SAMPLER_OPENGL_ES_FB = 1000162000,
@@ -411,7 +440,12 @@ typedef enum XrStructureType {
     XR_TYPE_COMPOSITION_LAYER_SPACE_WARP_INFO_FB = 1000171000,
     XR_TYPE_SYSTEM_SPACE_WARP_PROPERTIES_FB = 1000171001,
     XR_TYPE_DIGITAL_LENS_CONTROL_ALMALENCE = 1000196000,
+    XR_TYPE_SPACE_CONTAINER_FB = 1000199000,
     XR_TYPE_PASSTHROUGH_KEYBOARD_HANDS_INTENSITY_FB = 1000203002,
+    XR_TYPE_COMPOSITION_LAYER_SETTINGS_FB = 1000204000,
+    XR_TYPE_VULKAN_SWAPCHAIN_CREATE_INFO_META = 1000227000,
+    XR_TYPE_PERFORMANCE_METRICS_STATE_META = 1000232001,
+    XR_TYPE_PERFORMANCE_METRICS_COUNTER_META = 1000232002,
     XR_TYPE_GRAPHICS_BINDING_VULKAN2_KHR = XR_TYPE_GRAPHICS_BINDING_VULKAN_KHR,
     XR_TYPE_SWAPCHAIN_IMAGE_VULKAN2_KHR = XR_TYPE_SWAPCHAIN_IMAGE_VULKAN_KHR,
     XR_TYPE_GRAPHICS_REQUIREMENTS_VULKAN2_KHR = XR_TYPE_GRAPHICS_REQUIREMENTS_VULKAN_KHR,
@@ -487,6 +521,7 @@ typedef enum XrObjectType {
     XR_OBJECT_TYPE_ACTION = 6,
     XR_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT = 1000019000,
     XR_OBJECT_TYPE_SPATIAL_ANCHOR_MSFT = 1000039000,
+    XR_OBJECT_TYPE_SPATIAL_GRAPH_NODE_BINDING_MSFT = 1000049000,
    XR_OBJECT_TYPE_HAND_TRACKER_EXT = 1000051000,
     XR_OBJECT_TYPE_SCENE_OBSERVER_MSFT = 1000097000,
     XR_OBJECT_TYPE_SCENE_MSFT = 1000097001,
@@ -1424,7 +1459,7 @@ typedef struct XrCompositionLayerCubeKHR {
 
 #define XR_KHR_composition_layer_depth 1
-#define XR_KHR_composition_layer_depth_SPEC_VERSION 5
+#define XR_KHR_composition_layer_depth_SPEC_VERSION 6
 #define XR_KHR_COMPOSITION_LAYER_DEPTH_EXTENSION_NAME "XR_KHR_composition_layer_depth"
 // XrCompositionLayerDepthInfoKHR extends XrCompositionLayerProjectionView
 typedef struct XrCompositionLayerDepthInfoKHR {
@@ -1766,7 +1801,7 @@ XRAPI_ATTR XrResult XRAPI_CALL xrSessionInsertDebugUtilsLabelEXT(
 
 #define XR_EXT_eye_gaze_interaction 1
-#define XR_EXT_eye_gaze_interaction_SPEC_VERSION 1
+#define XR_EXT_eye_gaze_interaction_SPEC_VERSION 2
 #define XR_EXT_EYE_GAZE_INTERACTION_EXTENSION_NAME "XR_EXT_eye_gaze_interaction"
 // XrSystemEyeGazeInteractionPropertiesEXT extends XrSystemProperties
 typedef struct XrSystemEyeGazeInteractionPropertiesEXT {
@@ -1977,8 +2012,10 @@ XRAPI_ATTR XrResult XRAPI_CALL xrSetInputDeviceLocationEXT(
 
 
 #define XR_MSFT_spatial_graph_bridge 1
-#define XR_MSFT_spatial_graph_bridge_SPEC_VERSION 1
+XR_DEFINE_HANDLE(XrSpatialGraphNodeBindingMSFT)
+#define XR_MSFT_spatial_graph_bridge_SPEC_VERSION 2
 #define XR_MSFT_SPATIAL_GRAPH_BRIDGE_EXTENSION_NAME "XR_MSFT_spatial_graph_bridge"
+#define XR_GUID_SIZE_MSFT 16
 
 typedef enum XrSpatialGraphNodeTypeMSFT {
     XR_SPATIAL_GRAPH_NODE_TYPE_STATIC_MSFT = 1,
@@ -1989,11 +2026,34 @@ typedef struct XrSpatialGraphNodeSpaceCreateInfoMSFT {
     XrStructureType type;
     const void* XR_MAY_ALIAS next;
     XrSpatialGraphNodeTypeMSFT nodeType;
-    uint8_t nodeId[16];
+    uint8_t nodeId[XR_GUID_SIZE_MSFT];
     XrPosef pose;
 } XrSpatialGraphNodeSpaceCreateInfoMSFT;
 
+typedef struct XrSpatialGraphStaticNodeBindingCreateInfoMSFT {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrSpace space;
+    XrPosef poseInSpace;
+    XrTime time;
+} XrSpatialGraphStaticNodeBindingCreateInfoMSFT;
+
+typedef struct XrSpatialGraphNodeBindingPropertiesGetInfoMSFT {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+} XrSpatialGraphNodeBindingPropertiesGetInfoMSFT;
+
+typedef struct XrSpatialGraphNodeBindingPropertiesMSFT {
+    XrStructureType type;
+    void* XR_MAY_ALIAS next;
+    uint8_t nodeId[XR_GUID_SIZE_MSFT];
+    XrPosef poseInNodeSpace;
+} XrSpatialGraphNodeBindingPropertiesMSFT;
+
 typedef XrResult (XRAPI_PTR *PFN_xrCreateSpatialGraphNodeSpaceMSFT)(XrSession session, const XrSpatialGraphNodeSpaceCreateInfoMSFT* createInfo, XrSpace* space);
+typedef XrResult (XRAPI_PTR *PFN_xrTryCreateSpatialGraphStaticNodeBindingMSFT)(XrSession session, const XrSpatialGraphStaticNodeBindingCreateInfoMSFT* createInfo, XrSpatialGraphNodeBindingMSFT* nodeBinding);
+typedef XrResult (XRAPI_PTR *PFN_xrDestroySpatialGraphNodeBindingMSFT)(XrSpatialGraphNodeBindingMSFT nodeBinding);
+typedef XrResult (XRAPI_PTR *PFN_xrGetSpatialGraphNodeBindingPropertiesMSFT)(XrSpatialGraphNodeBindingMSFT nodeBinding, const XrSpatialGraphNodeBindingPropertiesGetInfoMSFT* getInfo, XrSpatialGraphNodeBindingPropertiesMSFT* properties);
 
 #ifndef XR_NO_PROTOTYPES
 #ifdef XR_EXTENSION_PROTOTYPES
@@ -2001,6 +2061,19 @@ XRAPI_ATTR XrResult XRAPI_CALL xrCreateSpatialGraphNodeSpaceMSFT(
     XrSession session,
     const XrSpatialGraphNodeSpaceCreateInfoMSFT* createInfo,
     XrSpace* space);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrTryCreateSpatialGraphStaticNodeBindingMSFT(
+    XrSession session,
+    const XrSpatialGraphStaticNodeBindingCreateInfoMSFT* createInfo,
+    XrSpatialGraphNodeBindingMSFT* nodeBinding);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrDestroySpatialGraphNodeBindingMSFT(
+    XrSpatialGraphNodeBindingMSFT nodeBinding);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrGetSpatialGraphNodeBindingPropertiesMSFT(
+    XrSpatialGraphNodeBindingMSFT nodeBinding,
+    const XrSpatialGraphNodeBindingPropertiesGetInfoMSFT* getInfo,
+    XrSpatialGraphNodeBindingPropertiesMSFT* properties);
 #endif /* XR_EXTENSION_PROTOTYPES */
 #endif /* !XR_NO_PROTOTYPES */
 
@@ -2056,6 +2129,7 @@ typedef enum XrHandJointEXT {
 
 typedef enum XrHandJointSetEXT {
     XR_HAND_JOINT_SET_DEFAULT_EXT = 0,
+    XR_HAND_JOINT_SET_HAND_WITH_FOREARM_ULTRALEAP = 1000149000,
     XR_HAND_JOINT_SET_MAX_ENUM_EXT = 0x7FFFFFFF
 } XrHandJointSetEXT;
 // XrSystemHandTrackingPropertiesEXT extends XrSystemProperties
@@ -2454,6 +2528,25 @@ typedef struct XrCompositionLayerSecureContentFB {
 
 
 
+#define XR_EXT_dpad_binding 1
+#define XR_EXT_dpad_binding_SPEC_VERSION 1
+#define XR_EXT_DPAD_BINDING_EXTENSION_NAME "XR_EXT_dpad_binding"
+typedef struct XrInteractionProfileDpadBindingEXT {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrPath binding;
+    XrActionSet actionSet;
+    float forceThreshold;
+    float forceThresholdReleased;
+    float centerRegion;
+    float wedgeAngle;
+    XrBool32 isSticky;
+    const XrHapticBaseHeader* onHaptic;
+    const XrHapticBaseHeader* offHaptic;
+} XrInteractionProfileDpadBindingEXT;
+
+
 #define XR_VALVE_analog_threshold 1
 #define XR_VALVE_analog_threshold_SPEC_VERSION 2
 #define XR_VALVE_ANALOG_THRESHOLD_EXTENSION_NAME "XR_VALVE_analog_threshold"
@@ -3058,10 +3151,20 @@ XRAPI_ATTR XrResult XRAPI_CALL xrGetFacialExpressionsHTC(
 
 
 #define XR_HTC_vive_focus3_controller_interaction 1
-#define XR_HTC_vive_focus3_controller_interaction_SPEC_VERSION 1
+#define XR_HTC_vive_focus3_controller_interaction_SPEC_VERSION 2
 #define XR_HTC_VIVE_FOCUS3_CONTROLLER_INTERACTION_EXTENSION_NAME "XR_HTC_vive_focus3_controller_interaction"
 
 
+#define XR_HTC_hand_interaction 1
+#define XR_HTC_hand_interaction_SPEC_VERSION 1
+#define XR_HTC_HAND_INTERACTION_EXTENSION_NAME "XR_HTC_hand_interaction"
+
+
+#define XR_HTC_vive_wrist_tracker_interaction 1
+#define XR_HTC_vive_wrist_tracker_interaction_SPEC_VERSION 1
+#define XR_HTC_VIVE_WRIST_TRACKER_INTERACTION_EXTENSION_NAME "XR_HTC_vive_wrist_tracker_interaction"
+
+
 #define XR_FB_color_space 1
 #define XR_FB_color_space_SPEC_VERSION 2
 #define XR_FB_COLOR_SPACE_EXTENSION_NAME "XR_FB_color_space"
@@ -3103,7 +3206,7 @@ XRAPI_ATTR XrResult XRAPI_CALL xrSetColorSpaceFB(
 
 
 #define XR_FB_hand_tracking_mesh 1
-#define XR_FB_hand_tracking_mesh_SPEC_VERSION 1
+#define XR_FB_hand_tracking_mesh_SPEC_VERSION 2
 #define XR_FB_HAND_TRACKING_MESH_EXTENSION_NAME "XR_FB_hand_tracking_mesh"
 typedef struct XrVector4sFB {
     int16_t x;
@@ -3132,7 +3235,7 @@ typedef struct XrHandTrackingMeshFB {
     int16_t* indices;
 } XrHandTrackingMeshFB;
 
-// XrHandTrackingScaleFB extends XrHandJointsLocateInfoEXT
+// XrHandTrackingScaleFB extends XrHandJointLocationsEXT
 typedef struct XrHandTrackingScaleFB {
     XrStructureType type;
     void* XR_MAY_ALIAS next;
@@ -3154,7 +3257,7 @@ XRAPI_ATTR XrResult XRAPI_CALL xrGetHandMeshFB(
 
 
 #define XR_FB_hand_tracking_aim 1
-#define XR_FB_hand_tracking_aim_SPEC_VERSION 1
+#define XR_FB_hand_tracking_aim_SPEC_VERSION 2
 #define XR_FB_HAND_TRACKING_AIM_EXTENSION_NAME "XR_FB_hand_tracking_aim"
 typedef XrFlags64 XrHandTrackingAimFlagsFB;
 
@@ -3169,7 +3272,7 @@ static const XrHandTrackingAimFlagsFB XR_HAND_TRACKING_AIM_SYSTEM_GESTURE_BIT_FB
 static const XrHandTrackingAimFlagsFB XR_HAND_TRACKING_AIM_DOMINANT_HAND_BIT_FB = 0x00000080;
 static const XrHandTrackingAimFlagsFB XR_HAND_TRACKING_AIM_MENU_PRESSED_BIT_FB = 0x00000100;
 
-// XrHandTrackingAimStateFB extends XrHandJointsLocateInfoEXT
+// XrHandTrackingAimStateFB extends XrHandJointLocationsEXT
 typedef struct XrHandTrackingAimStateFB {
     XrStructureType type;
     void* XR_MAY_ALIAS next;
@@ -3185,26 +3288,128 @@ typedef struct XrHandTrackingAimStateFB {
 
 #define XR_FB_hand_tracking_capsules 1
 #define XR_HAND_TRACKING_CAPSULE_POINT_COUNT_FB 2
-#define XR_FB_HAND_TRACKING_CAPSULE_POINT_COUNT XR_HAND_TRACKING_CAPSULE_POINT_COUNT_FB
 #define XR_HAND_TRACKING_CAPSULE_COUNT_FB 19
-#define XR_FB_HAND_TRACKING_CAPSULE_COUNT XR_HAND_TRACKING_CAPSULE_COUNT_FB
-#define XR_FB_hand_tracking_capsules_SPEC_VERSION 2
+#define XR_FB_hand_tracking_capsules_SPEC_VERSION 3
 #define XR_FB_HAND_TRACKING_CAPSULES_EXTENSION_NAME "XR_FB_hand_tracking_capsules"
+#define XR_FB_HAND_TRACKING_CAPSULE_POINT_COUNT XR_HAND_TRACKING_CAPSULE_POINT_COUNT_FB
+#define XR_FB_HAND_TRACKING_CAPSULE_COUNT XR_HAND_TRACKING_CAPSULE_COUNT_FB
 typedef struct XrHandCapsuleFB {
-    XrVector3f points[XR_FB_HAND_TRACKING_CAPSULE_POINT_COUNT];
+    XrVector3f points[XR_HAND_TRACKING_CAPSULE_POINT_COUNT_FB];
     float radius;
     XrHandJointEXT joint;
 } XrHandCapsuleFB;
 
-// XrHandTrackingCapsulesStateFB extends XrHandJointsLocateInfoEXT
+// XrHandTrackingCapsulesStateFB extends XrHandJointLocationsEXT
 typedef struct XrHandTrackingCapsulesStateFB {
     XrStructureType type;
     void* XR_MAY_ALIAS next;
-    XrHandCapsuleFB capsules[XR_FB_HAND_TRACKING_CAPSULE_COUNT];
+    XrHandCapsuleFB capsules[XR_HAND_TRACKING_CAPSULE_COUNT_FB];
 } XrHandTrackingCapsulesStateFB;
 
+
+#define XR_FB_spatial_entity 1
+XR_DEFINE_ATOM(XrAsyncRequestIdFB)
+#define XR_UUID_SIZE_EXT 16
+#define XR_FB_spatial_entity_SPEC_VERSION 1
+#define XR_FB_SPATIAL_ENTITY_EXTENSION_NAME "XR_FB_spatial_entity"
+
+typedef enum XrSpaceComponentTypeFB {
+    XR_SPACE_COMPONENT_TYPE_LOCATABLE_FB = 0,
+    XR_SPACE_COMPONENT_TYPE_STORABLE_FB = 1,
+    XR_SPACE_COMPONENT_TYPE_SPACE_CONTAINER_FB = 7,
+    XR_SPACE_COMPONENT_TYPE_MAX_ENUM_FB = 0x7FFFFFFF
+} XrSpaceComponentTypeFB;
+// XrSystemSpatialEntityPropertiesFB extends XrSystemProperties
+typedef struct XrSystemSpatialEntityPropertiesFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrBool32 supportsSpatialEntity;
+} XrSystemSpatialEntityPropertiesFB;
+
+typedef struct XrSpatialAnchorCreateInfoFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrSpace space;
+    XrPosef poseInSpace;
+    XrTime time;
+} XrSpatialAnchorCreateInfoFB;
+
+typedef struct XrSpaceComponentStatusSetInfoFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrSpaceComponentTypeFB componentType;
+    XrBool32 enabled;
+    XrDuration timeout;
+} XrSpaceComponentStatusSetInfoFB;
+
+typedef struct XrSpaceComponentStatusFB {
+    XrStructureType type;
+    void* XR_MAY_ALIAS next;
+    XrBool32 enabled;
+    XrBool32 changePending;
+} XrSpaceComponentStatusFB;
+
+typedef struct XrUuidEXT {
+    uint8_t data[XR_UUID_SIZE_EXT];
+} XrUuidEXT;
+
+typedef struct XrEventDataSpatialAnchorCreateCompleteFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrAsyncRequestIdFB requestId;
+    XrResult result;
+    XrSpace space;
+    XrUuidEXT uuid;
+} XrEventDataSpatialAnchorCreateCompleteFB;
+
+typedef struct XrEventDataSpaceSetStatusCompleteFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrAsyncRequestIdFB requestId;
+    XrResult result;
+    XrSpace space;
+    XrUuidEXT uuid;
+    XrSpaceComponentTypeFB componentType;
+    XrBool32 enabled;
+} XrEventDataSpaceSetStatusCompleteFB;
+
+typedef XrResult (XRAPI_PTR *PFN_xrCreateSpatialAnchorFB)(XrSession session, const XrSpatialAnchorCreateInfoFB* info, XrAsyncRequestIdFB* requestId);
+typedef XrResult (XRAPI_PTR *PFN_xrGetSpaceUuidFB)(XrSpace space, XrUuidEXT* uuid);
+typedef XrResult (XRAPI_PTR *PFN_xrEnumerateSpaceSupportedComponentsFB)(XrSpace space, uint32_t componentTypeCapacityInput, uint32_t* componentTypeCountOutput, XrSpaceComponentTypeFB* componentTypes);
+typedef XrResult (XRAPI_PTR *PFN_xrSetSpaceComponentStatusFB)(XrSpace space, const XrSpaceComponentStatusSetInfoFB* info, XrAsyncRequestIdFB* requestId);
+typedef XrResult (XRAPI_PTR *PFN_xrGetSpaceComponentStatusFB)(XrSpace space, XrSpaceComponentTypeFB componentType, XrSpaceComponentStatusFB* status);
+
+#ifndef XR_NO_PROTOTYPES
+#ifdef XR_EXTENSION_PROTOTYPES
+XRAPI_ATTR XrResult XRAPI_CALL xrCreateSpatialAnchorFB(
+    XrSession session,
+    const XrSpatialAnchorCreateInfoFB* info,
+    XrAsyncRequestIdFB* requestId);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrGetSpaceUuidFB(
+    XrSpace space,
+    XrUuidEXT* uuid);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrEnumerateSpaceSupportedComponentsFB(
+    XrSpace space,
+    uint32_t componentTypeCapacityInput,
+    uint32_t* componentTypeCountOutput,
+    XrSpaceComponentTypeFB* componentTypes);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrSetSpaceComponentStatusFB(
+    XrSpace space,
+    const XrSpaceComponentStatusSetInfoFB* info,
+    XrAsyncRequestIdFB* requestId);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrGetSpaceComponentStatusFB(
+    XrSpace space,
+    XrSpaceComponentTypeFB componentType,
+    XrSpaceComponentStatusFB* status);
+#endif /* XR_EXTENSION_PROTOTYPES */
+#endif /* !XR_NO_PROTOTYPES */
+
+
 #define XR_FB_foveation 1
 XR_DEFINE_HANDLE(XrFoveationProfileFB)
 #define XR_FB_foveation_SPEC_VERSION 1
@@ -3346,7 +3551,7 @@ XRAPI_ATTR XrResult XRAPI_CALL xrCreateKeyboardSpaceFB(
 
 #define XR_FB_triangle_mesh 1
 XR_DEFINE_HANDLE(XrTriangleMeshFB)
-#define XR_FB_triangle_mesh_SPEC_VERSION 1
+#define XR_FB_triangle_mesh_SPEC_VERSION 2
 #define XR_FB_TRIANGLE_MESH_EXTENSION_NAME "XR_FB_triangle_mesh"
 
 typedef enum XrWindingOrderFB {
@@ -3420,7 +3625,7 @@ XRAPI_ATTR XrResult XRAPI_CALL xrTriangleMeshEndVertexBufferUpdateFB(
 XR_DEFINE_HANDLE(XrPassthroughFB)
 XR_DEFINE_HANDLE(XrPassthroughLayerFB)
 XR_DEFINE_HANDLE(XrGeometryInstanceFB)
-#define XR_FB_passthrough_SPEC_VERSION 1
+#define XR_FB_passthrough_SPEC_VERSION 2
 #define XR_FB_PASSTHROUGH_EXTENSION_NAME "XR_FB_passthrough"
 #define XR_PASSTHROUGH_COLOR_MAP_MONO_SIZE_FB 256
 
@@ -3428,6 +3633,7 @@ typedef enum XrPassthroughLayerPurposeFB {
     XR_PASSTHROUGH_LAYER_PURPOSE_RECONSTRUCTION_FB = 0,
     XR_PASSTHROUGH_LAYER_PURPOSE_PROJECTED_FB = 1,
     XR_PASSTHROUGH_LAYER_PURPOSE_TRACKED_KEYBOARD_HANDS_FB = 1000203001,
+    XR_PASSTHROUGH_LAYER_PURPOSE_TRACKED_KEYBOARD_MASKED_HANDS_FB = 1000203002,
     XR_PASSTHROUGH_LAYER_PURPOSE_MAX_ENUM_FB = 0x7FFFFFFF
 } XrPassthroughLayerPurposeFB;
 typedef XrFlags64 XrPassthroughFlagsFB;
@@ -3499,18 +3705,29 @@ typedef struct XrPassthroughStyleFB {
     XrColor4f edgeColor;
 } XrPassthroughStyleFB;
 
+// XrPassthroughColorMapMonoToRgbaFB extends XrPassthroughStyleFB
 typedef struct XrPassthroughColorMapMonoToRgbaFB {
     XrStructureType type;
     const void* XR_MAY_ALIAS next;
     XrColor4f textureColorMap[XR_PASSTHROUGH_COLOR_MAP_MONO_SIZE_FB];
 } XrPassthroughColorMapMonoToRgbaFB;
 
+// XrPassthroughColorMapMonoToMonoFB extends XrPassthroughStyleFB
 typedef struct XrPassthroughColorMapMonoToMonoFB {
     XrStructureType type;
     const void* XR_MAY_ALIAS next;
     uint8_t textureColorMap[XR_PASSTHROUGH_COLOR_MAP_MONO_SIZE_FB];
 } XrPassthroughColorMapMonoToMonoFB;
 
+// XrPassthroughBrightnessContrastSaturationFB extends XrPassthroughStyleFB
+typedef struct XrPassthroughBrightnessContrastSaturationFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    float brightness;
+    float contrast;
+    float saturation;
+} XrPassthroughBrightnessContrastSaturationFB;
+
 typedef struct XrEventDataPassthroughStateChangedFB {
     XrStructureType type;
     const void* XR_MAY_ALIAS next;
@@ -3584,12 +3801,14 @@ XRAPI_ATTR XrResult XRAPI_CALL xrGeometryInstanceSetTransformFB(
 
 #define XR_NULL_RENDER_MODEL_KEY_FB 0
 XR_DEFINE_ATOM(XrRenderModelKeyFB)
-#define XR_FB_render_model_SPEC_VERSION 1
+#define XR_FB_render_model_SPEC_VERSION 2
 #define XR_FB_RENDER_MODEL_EXTENSION_NAME "XR_FB_render_model"
 #define XR_MAX_RENDER_MODEL_NAME_SIZE_FB 64
 typedef XrFlags64 XrRenderModelFlagsFB;
 
 // Flag bits for XrRenderModelFlagsFB
+static const XrRenderModelFlagsFB XR_RENDER_MODEL_SUPPORTS_GLTF_2_0_SUBSET_1_BIT_FB = 0x00000001;
+static const XrRenderModelFlagsFB XR_RENDER_MODEL_SUPPORTS_GLTF_2_0_SUBSET_2_BIT_FB = 0x00000002;
 
 typedef struct XrRenderModelPathInfoFB {
     XrStructureType type;
@@ -3628,6 +3847,13 @@ typedef struct XrSystemRenderModelPropertiesFB {
     XrBool32 supportsRenderModelLoading;
 } XrSystemRenderModelPropertiesFB;
 
+// XrRenderModelCapabilitiesRequestFB extends XrSystemProperties
+typedef struct XrRenderModelCapabilitiesRequestFB {
+    XrStructureType type;
+    void* XR_MAY_ALIAS next;
+    XrRenderModelFlagsFB flags;
+} XrRenderModelCapabilitiesRequestFB;
+
 typedef XrResult (XRAPI_PTR *PFN_xrEnumerateRenderModelPathsFB)(XrSession session, uint32_t pathCapacityInput, uint32_t* pathCountOutput, XrRenderModelPathInfoFB* paths);
 typedef XrResult (XRAPI_PTR *PFN_xrGetRenderModelPropertiesFB)(XrSession session, XrPath path, XrRenderModelPropertiesFB* properties);
 typedef XrResult (XRAPI_PTR *PFN_xrLoadRenderModelFB)(XrSession session, const XrRenderModelLoadInfoFB* info, XrRenderModelBufferFB* buffer);
@@ -3767,6 +3993,20 @@ XRAPI_ATTR XrResult XRAPI_CALL xrCreateMarkerSpaceVARJO(
 #endif /* !XR_NO_PROTOTYPES */
 
 
+#define XR_VARJO_view_offset 1
+#define XR_VARJO_view_offset_SPEC_VERSION 1
+#define XR_VARJO_VIEW_OFFSET_EXTENSION_NAME "XR_VARJO_view_offset"
+typedef XrResult (XRAPI_PTR *PFN_xrSetViewOffsetVARJO)(XrSession session, float offset);
+
+#ifndef XR_NO_PROTOTYPES
+#ifdef XR_EXTENSION_PROTOTYPES
+XRAPI_ATTR XrResult XRAPI_CALL xrSetViewOffsetVARJO(
+    XrSession session,
+    float offset);
+#endif /* XR_EXTENSION_PROTOTYPES */
+#endif /* !XR_NO_PROTOTYPES */
+
+
 #define XR_MSFT_spatial_anchor_persistence 1
 XR_DEFINE_HANDLE(XrSpatialAnchorStoreConnectionMSFT)
 #define XR_MAX_SPATIAL_ANCHOR_NAME_SIZE_MSFT 256
@@ -3832,12 +4072,212 @@ XRAPI_ATTR XrResult XRAPI_CALL xrClearSpatialAnchorStoreMSFT(
 #endif /* !XR_NO_PROTOTYPES */
 
 
+#define XR_ULTRALEAP_hand_tracking_forearm 1
+
+#define XR_HAND_FOREARM_JOINT_COUNT_ULTRALEAP 27
+
+#define XR_ULTRALEAP_hand_tracking_forearm_SPEC_VERSION 1
+#define XR_ULTRALEAP_HAND_TRACKING_FOREARM_EXTENSION_NAME "XR_ULTRALEAP_hand_tracking_forearm"
+
+typedef enum XrHandForearmJointULTRALEAP {
+    XR_HAND_FOREARM_JOINT_PALM_ULTRALEAP = 0,
+    XR_HAND_FOREARM_JOINT_WRIST_ULTRALEAP = 1,
+    XR_HAND_FOREARM_JOINT_THUMB_METACARPAL_ULTRALEAP = 2,
+    XR_HAND_FOREARM_JOINT_THUMB_PROXIMAL_ULTRALEAP = 3,
+    XR_HAND_FOREARM_JOINT_THUMB_DISTAL_ULTRALEAP = 4,
+    XR_HAND_FOREARM_JOINT_THUMB_TIP_ULTRALEAP = 5,
+    XR_HAND_FOREARM_JOINT_INDEX_METACARPAL_ULTRALEAP = 6,
+    XR_HAND_FOREARM_JOINT_INDEX_PROXIMAL_ULTRALEAP = 7,
+    XR_HAND_FOREARM_JOINT_INDEX_INTERMEDIATE_ULTRALEAP = 8,
+    XR_HAND_FOREARM_JOINT_INDEX_DISTAL_ULTRALEAP = 9,
+    XR_HAND_FOREARM_JOINT_INDEX_TIP_ULTRALEAP = 10,
+    XR_HAND_FOREARM_JOINT_MIDDLE_METACARPAL_ULTRALEAP = 11,
+    XR_HAND_FOREARM_JOINT_MIDDLE_PROXIMAL_ULTRALEAP = 12,
+    XR_HAND_FOREARM_JOINT_MIDDLE_INTERMEDIATE_ULTRALEAP = 13,
+    XR_HAND_FOREARM_JOINT_MIDDLE_DISTAL_ULTRALEAP = 14,
+    XR_HAND_FOREARM_JOINT_MIDDLE_TIP_ULTRALEAP = 15,
+    XR_HAND_FOREARM_JOINT_RING_METACARPAL_ULTRALEAP = 16,
+    XR_HAND_FOREARM_JOINT_RING_PROXIMAL_ULTRALEAP = 17,
+    XR_HAND_FOREARM_JOINT_RING_INTERMEDIATE_ULTRALEAP = 18,
+    XR_HAND_FOREARM_JOINT_RING_DISTAL_ULTRALEAP = 19,
+    XR_HAND_FOREARM_JOINT_RING_TIP_ULTRALEAP = 20,
+    XR_HAND_FOREARM_JOINT_LITTLE_METACARPAL_ULTRALEAP = 21,
+    XR_HAND_FOREARM_JOINT_LITTLE_PROXIMAL_ULTRALEAP = 22,
+    XR_HAND_FOREARM_JOINT_LITTLE_INTERMEDIATE_ULTRALEAP = 23,
+    XR_HAND_FOREARM_JOINT_LITTLE_DISTAL_ULTRALEAP = 24,
+    XR_HAND_FOREARM_JOINT_LITTLE_TIP_ULTRALEAP = 25,
+    XR_HAND_FOREARM_JOINT_ELBOW_ULTRALEAP = 26,
+    XR_HAND_FOREARM_JOINT_MAX_ENUM_ULTRALEAP = 0x7FFFFFFF
+} XrHandForearmJointULTRALEAP;
+
+
+#define XR_FB_spatial_entity_query 1
+#define XR_FB_spatial_entity_query_SPEC_VERSION 1
+#define XR_FB_SPATIAL_ENTITY_QUERY_EXTENSION_NAME "XR_FB_spatial_entity_query"
+
+typedef enum XrSpaceQueryActionFB {
+    XR_SPACE_QUERY_ACTION_LOAD_FB = 0,
+    XR_SPACE_QUERY_ACTION_MAX_ENUM_FB = 0x7FFFFFFF
+} XrSpaceQueryActionFB;
+
+typedef enum XrSpaceStorageLocationFB {
+    XR_SPACE_STORAGE_LOCATION_INVALID_FB = 0,
+    XR_SPACE_STORAGE_LOCATION_LOCAL_FB = 1,
+    XR_SPACE_STORAGE_LOCATION_MAX_ENUM_FB = 0x7FFFFFFF
+} XrSpaceStorageLocationFB;
+typedef struct XR_MAY_ALIAS XrSpaceQueryInfoBaseHeaderFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+} XrSpaceQueryInfoBaseHeaderFB;
+
+typedef struct XR_MAY_ALIAS XrSpaceFilterInfoBaseHeaderFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+} XrSpaceFilterInfoBaseHeaderFB;
+
+typedef struct XrSpaceQueryInfoFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrSpaceQueryActionFB queryAction;
+    uint32_t maxResultCount;
+    XrDuration timeout;
+    const XrSpaceFilterInfoBaseHeaderFB* filter;
+    const XrSpaceFilterInfoBaseHeaderFB* excludeFilter;
+} XrSpaceQueryInfoFB;
+
+// XrSpaceStorageLocationFilterInfoFB extends XrSpaceFilterInfoBaseHeaderFB
+typedef struct XrSpaceStorageLocationFilterInfoFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrSpaceStorageLocationFB location;
+} XrSpaceStorageLocationFilterInfoFB;
+
+typedef struct XrSpaceUuidFilterInfoFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    uint32_t uuidCount;
+    XrUuidEXT* uuids;
+} XrSpaceUuidFilterInfoFB;
+
+typedef struct XrSpaceComponentFilterInfoFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrSpaceComponentTypeFB componentType;
+} XrSpaceComponentFilterInfoFB;
+
+typedef struct XrSpaceQueryResultFB {
+    XrSpace space;
+    XrUuidEXT uuid;
+} XrSpaceQueryResultFB;
+
+typedef struct XrSpaceQueryResultsFB {
+    XrStructureType type;
+    void* XR_MAY_ALIAS next;
+    uint32_t resultCapacityInput;
+    uint32_t resultCountOutput;
+    XrSpaceQueryResultFB* results;
+} XrSpaceQueryResultsFB;
+
+typedef struct XrEventDataSpaceQueryResultsAvailableFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrAsyncRequestIdFB requestId;
+} XrEventDataSpaceQueryResultsAvailableFB;
+
+typedef struct XrEventDataSpaceQueryCompleteFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrAsyncRequestIdFB requestId;
+    XrResult result;
+} XrEventDataSpaceQueryCompleteFB;
+
+typedef XrResult (XRAPI_PTR *PFN_xrQuerySpacesFB)(XrSession session, const XrSpaceQueryInfoBaseHeaderFB* info, XrAsyncRequestIdFB* requestId);
+typedef XrResult (XRAPI_PTR *PFN_xrRetrieveSpaceQueryResultsFB)(XrSession session, XrAsyncRequestIdFB requestId, XrSpaceQueryResultsFB* results);
+
+#ifndef XR_NO_PROTOTYPES
+#ifdef XR_EXTENSION_PROTOTYPES
+XRAPI_ATTR XrResult XRAPI_CALL xrQuerySpacesFB(
+    XrSession session,
+    const XrSpaceQueryInfoBaseHeaderFB* info,
+    XrAsyncRequestIdFB* requestId);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrRetrieveSpaceQueryResultsFB(
+    XrSession session,
+    XrAsyncRequestIdFB requestId,
+    XrSpaceQueryResultsFB* results);
+#endif /* XR_EXTENSION_PROTOTYPES */
+#endif /* !XR_NO_PROTOTYPES */
+
+
+#define XR_FB_spatial_entity_storage 1
+#define XR_FB_spatial_entity_storage_SPEC_VERSION 1
+#define XR_FB_SPATIAL_ENTITY_STORAGE_EXTENSION_NAME "XR_FB_spatial_entity_storage"
+
+typedef enum XrSpacePersistenceModeFB {
+    XR_SPACE_PERSISTENCE_MODE_INVALID_FB = 0,
+    XR_SPACE_PERSISTENCE_MODE_INDEFINITE_FB = 1,
+    XR_SPACE_PERSISTENCE_MODE_MAX_ENUM_FB = 0x7FFFFFFF
+} XrSpacePersistenceModeFB;
+typedef struct XrSpaceSaveInfoFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrSpace space;
+    XrSpaceStorageLocationFB location;
+    XrSpacePersistenceModeFB persistenceMode;
+} XrSpaceSaveInfoFB;
+
+typedef struct XrSpaceEraseInfoFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrSpace space;
+    XrSpaceStorageLocationFB location;
+} XrSpaceEraseInfoFB;
+
+typedef struct XrEventDataSpaceSaveCompleteFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrAsyncRequestIdFB requestId;
+    XrResult result;
+    XrSpace space;
+    XrUuidEXT uuid;
+    XrSpaceStorageLocationFB location;
+} XrEventDataSpaceSaveCompleteFB;
+
+typedef struct XrEventDataSpaceEraseCompleteFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrAsyncRequestIdFB requestId;
+    XrResult result;
+    XrSpace space;
+    XrUuidEXT uuid;
+    XrSpaceStorageLocationFB location;
+} XrEventDataSpaceEraseCompleteFB;
+
+typedef XrResult (XRAPI_PTR *PFN_xrSaveSpaceFB)(XrSession session, const XrSpaceSaveInfoFB* info, XrAsyncRequestIdFB* requestId);
+typedef XrResult (XRAPI_PTR *PFN_xrEraseSpaceFB)(XrSession session, const XrSpaceEraseInfoFB* info, XrAsyncRequestIdFB* requestId);
+
+#ifndef XR_NO_PROTOTYPES
+#ifdef XR_EXTENSION_PROTOTYPES
+XRAPI_ATTR XrResult XRAPI_CALL xrSaveSpaceFB(
+    XrSession session,
+    const XrSpaceSaveInfoFB* info,
+    XrAsyncRequestIdFB* requestId);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrEraseSpaceFB(
+    XrSession session,
+    const XrSpaceEraseInfoFB* info,
+    XrAsyncRequestIdFB* requestId);
+#endif /* XR_EXTENSION_PROTOTYPES */
+#endif /* !XR_NO_PROTOTYPES */
+
+
 #define XR_FB_space_warp 1
-#define XR_FB_space_warp_SPEC_VERSION 1
+#define XR_FB_space_warp_SPEC_VERSION 2
 #define XR_FB_SPACE_WARP_EXTENSION_NAME "XR_FB_space_warp"
 typedef XrFlags64 XrCompositionLayerSpaceWarpInfoFlagsFB;
 
 // Flag bits for XrCompositionLayerSpaceWarpInfoFlagsFB
+static const XrCompositionLayerSpaceWarpInfoFlagsFB XR_COMPOSITION_LAYER_SPACE_WARP_INFO_FRAME_SKIP_BIT_FB = 0x00000001;
 
 // XrCompositionLayerSpaceWarpInfoFB extends XrCompositionLayerProjectionView
 typedef struct XrCompositionLayerSpaceWarpInfoFB {
@@ -3888,8 +4328,31 @@ XRAPI_ATTR XrResult XRAPI_CALL xrSetDigitalLensControlALMALENCE(
 #endif /* !XR_NO_PROTOTYPES */
 
 
+#define XR_FB_spatial_entity_container 1
+#define XR_FB_spatial_entity_container_SPEC_VERSION 1
+#define XR_FB_SPATIAL_ENTITY_CONTAINER_EXTENSION_NAME "XR_FB_spatial_entity_container"
+typedef struct XrSpaceContainerFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    uint32_t uuidCapacityInput;
+    uint32_t* uuidCountOutput;
+    XrUuidEXT* uuids;
+} XrSpaceContainerFB;
+
+typedef XrResult (XRAPI_PTR *PFN_xrGetSpaceContainerFB)(XrSession session, XrSpace space, XrSpaceContainerFB* spaceContainerOutput);
+
+#ifndef XR_NO_PROTOTYPES
+#ifdef XR_EXTENSION_PROTOTYPES
+XRAPI_ATTR XrResult XRAPI_CALL xrGetSpaceContainerFB(
+    XrSession session,
+    XrSpace space,
+    XrSpaceContainerFB* spaceContainerOutput);
+#endif /* XR_EXTENSION_PROTOTYPES */
+#endif /* !XR_NO_PROTOTYPES */
+
+
 #define XR_FB_passthrough_keyboard_hands 1
-#define XR_FB_passthrough_keyboard_hands_SPEC_VERSION 1
+#define XR_FB_passthrough_keyboard_hands_SPEC_VERSION 2
 #define XR_FB_PASSTHROUGH_KEYBOARD_HANDS_EXTENSION_NAME "XR_FB_passthrough_keyboard_hands"
 typedef struct XrPassthroughKeyboardHandsIntensityFB {
     XrStructureType type;
@@ -3909,14 +4372,92 @@ XRAPI_ATTR XrResult XRAPI_CALL xrPassthroughLayerSetKeyboardHandsIntensityFB(
 #endif /* !XR_NO_PROTOTYPES */
 
 
+#define XR_FB_composition_layer_settings 1
+#define XR_FB_composition_layer_settings_SPEC_VERSION 1
+#define XR_FB_COMPOSITION_LAYER_SETTINGS_EXTENSION_NAME "XR_FB_composition_layer_settings"
+typedef XrFlags64 XrCompositionLayerSettingsFlagsFB;
+
+// Flag bits for XrCompositionLayerSettingsFlagsFB
+static const XrCompositionLayerSettingsFlagsFB XR_COMPOSITION_LAYER_SETTINGS_NORMAL_SUPER_SAMPLING_BIT_FB = 0x00000001;
+static const XrCompositionLayerSettingsFlagsFB XR_COMPOSITION_LAYER_SETTINGS_QUALITY_SUPER_SAMPLING_BIT_FB = 0x00000002;
+static const XrCompositionLayerSettingsFlagsFB XR_COMPOSITION_LAYER_SETTINGS_NORMAL_SHARPENING_BIT_FB = 0x00000004;
+static const XrCompositionLayerSettingsFlagsFB XR_COMPOSITION_LAYER_SETTINGS_QUALITY_SHARPENING_BIT_FB = 0x00000008;
+
+// XrCompositionLayerSettingsFB extends XrCompositionLayerBaseHeader
+typedef struct XrCompositionLayerSettingsFB {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrCompositionLayerSettingsFlagsFB layerFlags;
+} XrCompositionLayerSettingsFB;
+
+
+
+#define XR_META_performance_metrics 1
+#define XR_META_performance_metrics_SPEC_VERSION 1
+#define XR_META_PERFORMANCE_METRICS_EXTENSION_NAME "XR_META_performance_metrics"
+
+typedef enum XrPerformanceMetricsCounterUnitMETA {
+    XR_PERFORMANCE_METRICS_COUNTER_UNIT_GENERIC_META = 0,
+    XR_PERFORMANCE_METRICS_COUNTER_UNIT_PERCENTAGE_META = 1,
+    XR_PERFORMANCE_METRICS_COUNTER_UNIT_MILLISECONDS_META = 2,
+    XR_PERFORMANCE_METRICS_COUNTER_UNIT_BYTES_META = 3,
+    XR_PERFORMANCE_METRICS_COUNTER_UNIT_HERTZ_META = 4,
+    XR_PERFORMANCE_METRICS_COUNTER_UNIT_MAX_ENUM_META = 0x7FFFFFFF
+} XrPerformanceMetricsCounterUnitMETA;
+typedef XrFlags64 XrPerformanceMetricsCounterFlagsMETA;
+
+// Flag bits for XrPerformanceMetricsCounterFlagsMETA
+static const XrPerformanceMetricsCounterFlagsMETA XR_PERFORMANCE_METRICS_COUNTER_ANY_VALUE_VALID_BIT_META = 0x00000001;
+static const XrPerformanceMetricsCounterFlagsMETA XR_PERFORMANCE_METRICS_COUNTER_UINT_VALUE_VALID_BIT_META = 0x00000002;
+static const XrPerformanceMetricsCounterFlagsMETA XR_PERFORMANCE_METRICS_COUNTER_FLOAT_VALUE_VALID_BIT_META = 0x00000004;
+
+typedef struct XrPerformanceMetricsStateMETA {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrBool32 enabled;
+} XrPerformanceMetricsStateMETA;
+
+typedef struct XrPerformanceMetricsCounterMETA {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    XrPerformanceMetricsCounterFlagsMETA counterFlags;
+    XrPerformanceMetricsCounterUnitMETA counterUnit;
+    uint32_t uintValue;
+    float floatValue;
+} XrPerformanceMetricsCounterMETA;
+
+typedef XrResult (XRAPI_PTR *PFN_xrEnumeratePerformanceMetricsCounterPathsMETA)(XrInstance instance, uint32_t counterPathCapacityInput, uint32_t* counterPathCountOutput, XrPath* counterPaths);
+typedef XrResult (XRAPI_PTR *PFN_xrSetPerformanceMetricsStateMETA)(XrSession session, const XrPerformanceMetricsStateMETA* state);
+typedef XrResult (XRAPI_PTR *PFN_xrGetPerformanceMetricsStateMETA)(XrSession session, XrPerformanceMetricsStateMETA* state);
+typedef XrResult (XRAPI_PTR *PFN_xrQueryPerformanceMetricsCounterMETA)(XrSession session, XrPath counterPath, XrPerformanceMetricsCounterMETA* counter);
+
+#ifndef XR_NO_PROTOTYPES
+#ifdef XR_EXTENSION_PROTOTYPES
+XRAPI_ATTR XrResult XRAPI_CALL xrEnumeratePerformanceMetricsCounterPathsMETA(
+    XrInstance instance,
+    uint32_t counterPathCapacityInput,
+    uint32_t* counterPathCountOutput,
+    XrPath* counterPaths);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrSetPerformanceMetricsStateMETA(
+    XrSession session,
+    const XrPerformanceMetricsStateMETA* state);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrGetPerformanceMetricsStateMETA(
+    XrSession session,
+    XrPerformanceMetricsStateMETA* state);
+
+XRAPI_ATTR XrResult XRAPI_CALL xrQueryPerformanceMetricsCounterMETA(
+    XrSession session,
+    XrPath counterPath,
+    XrPerformanceMetricsCounterMETA* counter);
+#endif /* XR_EXTENSION_PROTOTYPES */
+#endif /* !XR_NO_PROTOTYPES */
+
+
 #define XR_EXT_uuid 1
 #define XR_EXT_uuid_SPEC_VERSION 1
 #define XR_EXT_UUID_EXTENSION_NAME "XR_EXT_uuid"
-#define XR_UUID_SIZE_EXT 16
-typedef struct XrUuidEXT {
-    uint8_t data[XR_UUID_SIZE_EXT];
-} XrUuidEXT;
-
 #ifdef __cplusplus
 }
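The new XR_FB_spatial_entity commands follow OpenXR's usual two-call enumeration idiom (first call with capacity 0 to get the count, second call to fill the array). A minimal sketch, not part of the diff, assuming a valid XrInstance and XrSpace obtained elsewhere; the extension function is fetched through xrGetInstanceProcAddr, since extension entry points are not exported by the loader directly:

#include <openxr/openxr.h>
#include <stdlib.h>

XrResult list_space_components(XrInstance instance, XrSpace space) {
    PFN_xrEnumerateSpaceSupportedComponentsFB enumerate = NULL;
    XrResult res = xrGetInstanceProcAddr(instance, "xrEnumerateSpaceSupportedComponentsFB",
                                         (PFN_xrVoidFunction *)&enumerate);
    if (XR_FAILED(res)) {
        return res;
    }

    uint32_t count = 0;
    res = enumerate(space, 0, &count, NULL); /* first call: query the count */
    if (XR_FAILED(res) || count == 0) {
        return res;
    }

    XrSpaceComponentTypeFB *types = malloc(count * sizeof *types);
    res = enumerate(space, count, &count, types); /* second call: fill the array */
    /* ... inspect types[i], e.g. for XR_SPACE_COMPONENT_TYPE_STORABLE_FB ... */
    free(types);
    return res;
}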
diff --git a/thirdparty/openxr/include/openxr/openxr_platform.h b/thirdparty/openxr/include/openxr/openxr_platform.h
index eb5e5f8c4b..f0fbf6955a 100644
--- a/thirdparty/openxr/include/openxr/openxr_platform.h
+++ b/thirdparty/openxr/include/openxr/openxr_platform.h
@@ -275,7 +275,7 @@ XRAPI_ATTR XrResult XRAPI_CALL xrGetVulkanGraphicsRequirementsKHR(
 #ifdef XR_USE_GRAPHICS_API_D3D11
 
 #define XR_KHR_D3D11_enable 1
-#define XR_KHR_D3D11_enable_SPEC_VERSION 8
+#define XR_KHR_D3D11_enable_SPEC_VERSION 9
 #define XR_KHR_D3D11_ENABLE_EXTENSION_NAME "XR_KHR_D3D11_enable"
 // XrGraphicsBindingD3D11KHR extends XrSessionCreateInfo
 typedef struct XrGraphicsBindingD3D11KHR {
@@ -312,7 +312,7 @@ XRAPI_ATTR XrResult XRAPI_CALL xrGetD3D11GraphicsRequirementsKHR(
 #ifdef XR_USE_GRAPHICS_API_D3D12
 
 #define XR_KHR_D3D12_enable 1
-#define XR_KHR_D3D12_enable_SPEC_VERSION 8
+#define XR_KHR_D3D12_enable_SPEC_VERSION 9
 #define XR_KHR_D3D12_ENABLE_EXTENSION_NAME "XR_KHR_D3D12_enable"
 // XrGraphicsBindingD3D12KHR extends XrSessionCreateInfo
 typedef struct XrGraphicsBindingD3D12KHR {
@@ -668,6 +668,21 @@ typedef struct XrSwapchainStateSamplerVulkanFB {
 
 #endif /* XR_USE_GRAPHICS_API_VULKAN */
 
+#ifdef XR_USE_GRAPHICS_API_VULKAN
+
+#define XR_META_vulkan_swapchain_create_info 1
+#define XR_META_vulkan_swapchain_create_info_SPEC_VERSION 1
+#define XR_META_VULKAN_SWAPCHAIN_CREATE_INFO_EXTENSION_NAME "XR_META_vulkan_swapchain_create_info"
+// XrVulkanSwapchainCreateInfoMETA extends XrSwapchainCreateInfo
+typedef struct XrVulkanSwapchainCreateInfoMETA {
+    XrStructureType type;
+    const void* XR_MAY_ALIAS next;
+    VkImageCreateFlags additionalCreateFlags;
+    VkImageUsageFlags additionalUsageFlags;
+} XrVulkanSwapchainCreateInfoMETA;
+
+#endif /* XR_USE_GRAPHICS_API_VULKAN */
+
 #ifdef __cplusplus
 }
 #endif
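Per the "extends XrSwapchainCreateInfo" comment above, XrVulkanSwapchainCreateInfoMETA is chained into the create-info's next pointer. A hedged sketch of that chaining; the extra usage flag here is an arbitrary example, and the runtime must have the XR_META_vulkan_swapchain_create_info extension enabled for the struct to be honored:

#include <vulkan/vulkan.h>
#define XR_USE_GRAPHICS_API_VULKAN
#include <openxr/openxr.h>
#include <openxr/openxr_platform.h>

XrResult create_storage_swapchain(XrSession session, XrSwapchainCreateInfo info, XrSwapchain *out) {
    XrVulkanSwapchainCreateInfoMETA vk_info = {
        .type = XR_TYPE_VULKAN_SWAPCHAIN_CREATE_INFO_META,
        .next = NULL,
        .additionalCreateFlags = 0,
        .additionalUsageFlags = VK_IMAGE_USAGE_STORAGE_BIT, /* example extra usage */
    };
    info.next = &vk_info; /* chain the META struct into the create info */
    return xrCreateSwapchain(session, &info, out);
}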
diff --git a/thirdparty/openxr/include/openxr/openxr_reflection.h b/thirdparty/openxr/include/openxr/openxr_reflection.h
index 2bc14be600..163b54e4e4 100644
--- a/thirdparty/openxr/include/openxr/openxr_reflection.h
+++ b/thirdparty/openxr/include/openxr/openxr_reflection.h
@@ -100,6 +100,10 @@ XR_ENUM_STR(XrResult);
     _(XR_ERROR_SCENE_COMPUTE_CONSISTENCY_MISMATCH_MSFT, -1000097005) \
     _(XR_ERROR_DISPLAY_REFRESH_RATE_UNSUPPORTED_FB, -1000101000) \
     _(XR_ERROR_COLOR_SPACE_UNSUPPORTED_FB, -1000108000) \
+    _(XR_ERROR_SPACE_COMPONENT_NOT_SUPPORTED_FB, -1000113000) \
+    _(XR_ERROR_SPACE_COMPONENT_NOT_ENABLED_FB, -1000113001) \
+    _(XR_ERROR_SPACE_COMPONENT_STATUS_PENDING_FB, -1000113002) \
+    _(XR_ERROR_SPACE_COMPONENT_STATUS_ALREADY_SET_FB, -1000113003) \
     _(XR_ERROR_UNEXPECTED_STATE_PASSTHROUGH_FB, -1000118000) \
     _(XR_ERROR_FEATURE_ALREADY_CREATED_PASSTHROUGH_FB, -1000118001) \
     _(XR_ERROR_FEATURE_REQUIRED_PASSTHROUGH_FB, -1000118002) \
@@ -208,6 +212,9 @@ XR_ENUM_STR(XrResult);
     _(XR_TYPE_VIEW_CONFIGURATION_DEPTH_RANGE_EXT, 1000046000) \
     _(XR_TYPE_GRAPHICS_BINDING_EGL_MNDX, 1000048004) \
     _(XR_TYPE_SPATIAL_GRAPH_NODE_SPACE_CREATE_INFO_MSFT, 1000049000) \
+    _(XR_TYPE_SPATIAL_GRAPH_STATIC_NODE_BINDING_CREATE_INFO_MSFT, 1000049001) \
+    _(XR_TYPE_SPATIAL_GRAPH_NODE_BINDING_PROPERTIES_GET_INFO_MSFT, 1000049002) \
+    _(XR_TYPE_SPATIAL_GRAPH_NODE_BINDING_PROPERTIES_MSFT, 1000049003) \
     _(XR_TYPE_SYSTEM_HAND_TRACKING_PROPERTIES_EXT, 1000051000) \
     _(XR_TYPE_HAND_TRACKER_CREATE_INFO_EXT, 1000051001) \
     _(XR_TYPE_HAND_JOINTS_LOCATE_INFO_EXT, 1000051002) \
@@ -235,6 +242,7 @@ XR_ENUM_STR(XrResult);
     _(XR_TYPE_COMPOSITION_LAYER_REPROJECTION_PLANE_OVERRIDE_MSFT, 1000066001) \
     _(XR_TYPE_ANDROID_SURFACE_SWAPCHAIN_CREATE_INFO_FB, 1000070000) \
     _(XR_TYPE_COMPOSITION_LAYER_SECURE_CONTENT_FB, 1000072000) \
+    _(XR_TYPE_INTERACTION_PROFILE_DPAD_BINDING_EXT, 1000078000) \
     _(XR_TYPE_INTERACTION_PROFILE_ANALOG_THRESHOLD_VALVE, 1000079000) \
     _(XR_TYPE_HAND_JOINTS_MOTION_RANGE_INFO_EXT, 1000080000) \
     _(XR_TYPE_LOADER_INIT_INFO_ANDROID_KHR, 1000089000) \
@@ -274,6 +282,12 @@ XR_ENUM_STR(XrResult);
     _(XR_TYPE_HAND_TRACKING_SCALE_FB, 1000110003) \
     _(XR_TYPE_HAND_TRACKING_AIM_STATE_FB, 1000111001) \
     _(XR_TYPE_HAND_TRACKING_CAPSULES_STATE_FB, 1000112000) \
+    _(XR_TYPE_SYSTEM_SPATIAL_ENTITY_PROPERTIES_FB, 1000113004) \
+    _(XR_TYPE_SPATIAL_ANCHOR_CREATE_INFO_FB, 1000113003) \
+    _(XR_TYPE_SPACE_COMPONENT_STATUS_SET_INFO_FB, 1000113007) \
+    _(XR_TYPE_SPACE_COMPONENT_STATUS_FB, 1000113001) \
+    _(XR_TYPE_EVENT_DATA_SPATIAL_ANCHOR_CREATE_COMPLETE_FB, 1000113005) \
+    _(XR_TYPE_EVENT_DATA_SPACE_SET_STATUS_COMPLETE_FB, 1000113006) \
     _(XR_TYPE_FOVEATION_PROFILE_CREATE_INFO_FB, 1000114000) \
     _(XR_TYPE_SWAPCHAIN_CREATE_INFO_FOVEATION_FB, 1000114001) \
     _(XR_TYPE_SWAPCHAIN_STATE_FOVEATION_FB, 1000114002) \
@@ -291,12 +305,14 @@ XR_ENUM_STR(XrResult);
     _(XR_TYPE_PASSTHROUGH_STYLE_FB, 1000118020) \
     _(XR_TYPE_PASSTHROUGH_COLOR_MAP_MONO_TO_RGBA_FB, 1000118021) \
     _(XR_TYPE_PASSTHROUGH_COLOR_MAP_MONO_TO_MONO_FB, 1000118022) \
+    _(XR_TYPE_PASSTHROUGH_BRIGHTNESS_CONTRAST_SATURATION_FB, 1000118023) \
     _(XR_TYPE_EVENT_DATA_PASSTHROUGH_STATE_CHANGED_FB, 1000118030) \
     _(XR_TYPE_RENDER_MODEL_PATH_INFO_FB, 1000119000) \
     _(XR_TYPE_RENDER_MODEL_PROPERTIES_FB, 1000119001) \
     _(XR_TYPE_RENDER_MODEL_BUFFER_FB, 1000119002) \
     _(XR_TYPE_RENDER_MODEL_LOAD_INFO_FB, 1000119003) \
     _(XR_TYPE_SYSTEM_RENDER_MODEL_PROPERTIES_FB, 1000119004) \
+    _(XR_TYPE_RENDER_MODEL_CAPABILITIES_REQUEST_FB, 1000119005) \
     _(XR_TYPE_BINDING_MODIFICATIONS_KHR, 1000120000) \
     _(XR_TYPE_VIEW_LOCATE_FOVEATED_RENDERING_VARJO, 1000121000) \
     _(XR_TYPE_FOVEATED_VIEW_CONFIGURATION_VIEW_VARJO, 1000121001) \
@@ -307,6 +323,17 @@ XR_ENUM_STR(XrResult);
     _(XR_TYPE_MARKER_SPACE_CREATE_INFO_VARJO, 1000124002) \
     _(XR_TYPE_SPATIAL_ANCHOR_PERSISTENCE_INFO_MSFT, 1000142000) \
     _(XR_TYPE_SPATIAL_ANCHOR_FROM_PERSISTED_ANCHOR_CREATE_INFO_MSFT, 1000142001) \
+    _(XR_TYPE_SPACE_QUERY_INFO_FB, 1000156001) \
+    _(XR_TYPE_SPACE_QUERY_RESULTS_FB, 1000156002) \
+    _(XR_TYPE_SPACE_STORAGE_LOCATION_FILTER_INFO_FB, 1000156003) \
+    _(XR_TYPE_SPACE_UUID_FILTER_INFO_FB, 1000156054) \
+    _(XR_TYPE_SPACE_COMPONENT_FILTER_INFO_FB, 1000156052) \
+    _(XR_TYPE_EVENT_DATA_SPACE_QUERY_RESULTS_AVAILABLE_FB, 1000156103) \
+    _(XR_TYPE_EVENT_DATA_SPACE_QUERY_COMPLETE_FB, 1000156104) \
+    _(XR_TYPE_SPACE_SAVE_INFO_FB, 1000158000) \
+    _(XR_TYPE_SPACE_ERASE_INFO_FB, 1000158001) \
+    _(XR_TYPE_EVENT_DATA_SPACE_SAVE_COMPLETE_FB, 1000158106) \
+    _(XR_TYPE_EVENT_DATA_SPACE_ERASE_COMPLETE_FB, 1000158107) \
     _(XR_TYPE_SWAPCHAIN_IMAGE_FOVEATION_VULKAN_FB, 1000160000) \
     _(XR_TYPE_SWAPCHAIN_STATE_ANDROID_SURFACE_DIMENSIONS_FB, 1000161000) \
     _(XR_TYPE_SWAPCHAIN_STATE_SAMPLER_OPENGL_ES_FB, 1000162000) \
@@ -314,7 +341,12 @@ XR_ENUM_STR(XrResult);
     _(XR_TYPE_COMPOSITION_LAYER_SPACE_WARP_INFO_FB, 1000171000) \
     _(XR_TYPE_SYSTEM_SPACE_WARP_PROPERTIES_FB, 1000171001) \
     _(XR_TYPE_DIGITAL_LENS_CONTROL_ALMALENCE, 1000196000) \
+    _(XR_TYPE_SPACE_CONTAINER_FB, 1000199000) \
     _(XR_TYPE_PASSTHROUGH_KEYBOARD_HANDS_INTENSITY_FB, 1000203002) \
+    _(XR_TYPE_COMPOSITION_LAYER_SETTINGS_FB, 1000204000) \
+    _(XR_TYPE_VULKAN_SWAPCHAIN_CREATE_INFO_META, 1000227000) \
+    _(XR_TYPE_PERFORMANCE_METRICS_STATE_META, 1000232001) \
+    _(XR_TYPE_PERFORMANCE_METRICS_COUNTER_META, 1000232002) \
     _(XR_STRUCTURE_TYPE_MAX_ENUM, 0x7FFFFFFF)
 
 #define XR_LIST_ENUM_XrFormFactor(_) \
@@ -379,6 +411,7 @@ XR_ENUM_STR(XrResult);
     _(XR_OBJECT_TYPE_ACTION, 6) \
     _(XR_OBJECT_TYPE_DEBUG_UTILS_MESSENGER_EXT, 1000019000) \
     _(XR_OBJECT_TYPE_SPATIAL_ANCHOR_MSFT, 1000039000) \
+    _(XR_OBJECT_TYPE_SPATIAL_GRAPH_NODE_BINDING_MSFT, 1000049000) \
     _(XR_OBJECT_TYPE_HAND_TRACKER_EXT, 1000051000) \
     _(XR_OBJECT_TYPE_SCENE_OBSERVER_MSFT, 1000097000) \
     _(XR_OBJECT_TYPE_SCENE_MSFT, 1000097001) \
@@ -478,6 +511,7 @@ XR_ENUM_STR(XrResult);
 
 #define XR_LIST_ENUM_XrHandJointSetEXT(_) \
     _(XR_HAND_JOINT_SET_DEFAULT_EXT, 0) \
+    _(XR_HAND_JOINT_SET_HAND_WITH_FOREARM_ULTRALEAP, 1000149000) \
     _(XR_HAND_JOINT_SET_MAX_ENUM_EXT, 0x7FFFFFFF)
 
 #define XR_LIST_ENUM_XrHandPoseTypeMSFT(_) \
@@ -623,6 +657,12 @@ XR_ENUM_STR(XrResult);
     _(XR_COLOR_SPACE_ADOBE_RGB_FB, 7) \
     _(XR_COLOR_SPACE_MAX_ENUM_FB, 0x7FFFFFFF)
 
+#define XR_LIST_ENUM_XrSpaceComponentTypeFB(_) \
+    _(XR_SPACE_COMPONENT_TYPE_LOCATABLE_FB, 0) \
+    _(XR_SPACE_COMPONENT_TYPE_STORABLE_FB, 1) \
+    _(XR_SPACE_COMPONENT_TYPE_SPACE_CONTAINER_FB, 7) \
+    _(XR_SPACE_COMPONENT_TYPE_MAX_ENUM_FB, 0x7FFFFFFF)
+
 #define XR_LIST_ENUM_XrFoveationLevelFB(_) \
     _(XR_FOVEATION_LEVEL_NONE_FB, 0) \
     _(XR_FOVEATION_LEVEL_LOW_FB, 1) \
@@ -645,8 +685,61 @@ XR_ENUM_STR(XrResult);
     _(XR_PASSTHROUGH_LAYER_PURPOSE_RECONSTRUCTION_FB, 0) \
     _(XR_PASSTHROUGH_LAYER_PURPOSE_PROJECTED_FB, 1) \
     _(XR_PASSTHROUGH_LAYER_PURPOSE_TRACKED_KEYBOARD_HANDS_FB, 1000203001) \
+    _(XR_PASSTHROUGH_LAYER_PURPOSE_TRACKED_KEYBOARD_MASKED_HANDS_FB, 1000203002) \
     _(XR_PASSTHROUGH_LAYER_PURPOSE_MAX_ENUM_FB, 0x7FFFFFFF)
 
+#define XR_LIST_ENUM_XrHandForearmJointULTRALEAP(_) \
+    _(XR_HAND_FOREARM_JOINT_PALM_ULTRALEAP, 0) \
+    _(XR_HAND_FOREARM_JOINT_WRIST_ULTRALEAP, 1) \
+    _(XR_HAND_FOREARM_JOINT_THUMB_METACARPAL_ULTRALEAP, 2) \
+    _(XR_HAND_FOREARM_JOINT_THUMB_PROXIMAL_ULTRALEAP, 3) \
+    _(XR_HAND_FOREARM_JOINT_THUMB_DISTAL_ULTRALEAP, 4) \
+    _(XR_HAND_FOREARM_JOINT_THUMB_TIP_ULTRALEAP, 5) \
+    _(XR_HAND_FOREARM_JOINT_INDEX_METACARPAL_ULTRALEAP, 6) \
+    _(XR_HAND_FOREARM_JOINT_INDEX_PROXIMAL_ULTRALEAP, 7) \
+    _(XR_HAND_FOREARM_JOINT_INDEX_INTERMEDIATE_ULTRALEAP, 8) \
+    _(XR_HAND_FOREARM_JOINT_INDEX_DISTAL_ULTRALEAP, 9) \
+    _(XR_HAND_FOREARM_JOINT_INDEX_TIP_ULTRALEAP, 10) \
+    _(XR_HAND_FOREARM_JOINT_MIDDLE_METACARPAL_ULTRALEAP, 11) \
+    _(XR_HAND_FOREARM_JOINT_MIDDLE_PROXIMAL_ULTRALEAP, 12) \
+    _(XR_HAND_FOREARM_JOINT_MIDDLE_INTERMEDIATE_ULTRALEAP, 13) \
+    _(XR_HAND_FOREARM_JOINT_MIDDLE_DISTAL_ULTRALEAP, 14) \
+    _(XR_HAND_FOREARM_JOINT_MIDDLE_TIP_ULTRALEAP, 15) \
+    _(XR_HAND_FOREARM_JOINT_RING_METACARPAL_ULTRALEAP, 16) \
+    _(XR_HAND_FOREARM_JOINT_RING_PROXIMAL_ULTRALEAP, 17) \
+    _(XR_HAND_FOREARM_JOINT_RING_INTERMEDIATE_ULTRALEAP, 18) \
+    _(XR_HAND_FOREARM_JOINT_RING_DISTAL_ULTRALEAP, 19) \
+    _(XR_HAND_FOREARM_JOINT_RING_TIP_ULTRALEAP, 20) \
+    _(XR_HAND_FOREARM_JOINT_LITTLE_METACARPAL_ULTRALEAP, 21) \
+    _(XR_HAND_FOREARM_JOINT_LITTLE_PROXIMAL_ULTRALEAP, 22) \
+    _(XR_HAND_FOREARM_JOINT_LITTLE_INTERMEDIATE_ULTRALEAP, 23) \
+    _(XR_HAND_FOREARM_JOINT_LITTLE_DISTAL_ULTRALEAP, 24) \
+    _(XR_HAND_FOREARM_JOINT_LITTLE_TIP_ULTRALEAP, 25) \
+    _(XR_HAND_FOREARM_JOINT_ELBOW_ULTRALEAP, 26) \
+    _(XR_HAND_FOREARM_JOINT_MAX_ENUM_ULTRALEAP, 0x7FFFFFFF)
+
+#define XR_LIST_ENUM_XrSpaceQueryActionFB(_) \
+    _(XR_SPACE_QUERY_ACTION_LOAD_FB, 0) \
+    _(XR_SPACE_QUERY_ACTION_MAX_ENUM_FB, 0x7FFFFFFF)
+
+#define XR_LIST_ENUM_XrSpaceStorageLocationFB(_) \
+    _(XR_SPACE_STORAGE_LOCATION_INVALID_FB, 0) \
+    _(XR_SPACE_STORAGE_LOCATION_LOCAL_FB, 1) \
+    _(XR_SPACE_STORAGE_LOCATION_MAX_ENUM_FB, 0x7FFFFFFF)
+
+#define XR_LIST_ENUM_XrSpacePersistenceModeFB(_) \
+    _(XR_SPACE_PERSISTENCE_MODE_INVALID_FB, 0) \
+    _(XR_SPACE_PERSISTENCE_MODE_INDEFINITE_FB, 1) \
+    _(XR_SPACE_PERSISTENCE_MODE_MAX_ENUM_FB, 0x7FFFFFFF)
+
+#define XR_LIST_ENUM_XrPerformanceMetricsCounterUnitMETA(_) \
+    _(XR_PERFORMANCE_METRICS_COUNTER_UNIT_GENERIC_META, 0) \
+    _(XR_PERFORMANCE_METRICS_COUNTER_UNIT_PERCENTAGE_META, 1) \
+    _(XR_PERFORMANCE_METRICS_COUNTER_UNIT_MILLISECONDS_META, 2) \
+    _(XR_PERFORMANCE_METRICS_COUNTER_UNIT_BYTES_META, 3) \
+    _(XR_PERFORMANCE_METRICS_COUNTER_UNIT_HERTZ_META, 4) \
+    _(XR_PERFORMANCE_METRICS_COUNTER_UNIT_MAX_ENUM_META, 0x7FFFFFFF)
+
 #define XR_LIST_BITS_XrInstanceCreateFlags(_)
 
 #define XR_LIST_BITS_XrSessionCreateFlags(_)
@@ -763,13 +856,27 @@ XR_ENUM_STR(XrResult);
     _(XR_PASSTHROUGH_STATE_CHANGED_RECOVERABLE_ERROR_BIT_FB, 0x00000004) \
     _(XR_PASSTHROUGH_STATE_CHANGED_RESTORED_ERROR_BIT_FB, 0x00000008) \
 
-#define XR_LIST_BITS_XrRenderModelFlagsFB(_)
+#define XR_LIST_BITS_XrRenderModelFlagsFB(_) \
+    _(XR_RENDER_MODEL_SUPPORTS_GLTF_2_0_SUBSET_1_BIT_FB, 0x00000001) \
+    _(XR_RENDER_MODEL_SUPPORTS_GLTF_2_0_SUBSET_2_BIT_FB, 0x00000002) \
 
-#define XR_LIST_BITS_XrCompositionLayerSpaceWarpInfoFlagsFB(_)
+#define XR_LIST_BITS_XrCompositionLayerSpaceWarpInfoFlagsFB(_) \
+    _(XR_COMPOSITION_LAYER_SPACE_WARP_INFO_FRAME_SKIP_BIT_FB, 0x00000001) \
 
 #define XR_LIST_BITS_XrDigitalLensControlFlagsALMALENCE(_) \
     _(XR_DIGITAL_LENS_CONTROL_PROCESSING_DISABLE_BIT_ALMALENCE, 0x00000001) \
 
+#define XR_LIST_BITS_XrCompositionLayerSettingsFlagsFB(_) \
+    _(XR_COMPOSITION_LAYER_SETTINGS_NORMAL_SUPER_SAMPLING_BIT_FB, 0x00000001) \
+    _(XR_COMPOSITION_LAYER_SETTINGS_QUALITY_SUPER_SAMPLING_BIT_FB, 0x00000002) \
+    _(XR_COMPOSITION_LAYER_SETTINGS_NORMAL_SHARPENING_BIT_FB, 0x00000004) \
+    _(XR_COMPOSITION_LAYER_SETTINGS_QUALITY_SHARPENING_BIT_FB, 0x00000008) \
+
+#define XR_LIST_BITS_XrPerformanceMetricsCounterFlagsMETA(_) \
+    _(XR_PERFORMANCE_METRICS_COUNTER_ANY_VALUE_VALID_BIT_META, 0x00000001) \
+    _(XR_PERFORMANCE_METRICS_COUNTER_UINT_VALUE_VALID_BIT_META, 0x00000002) \
+    _(XR_PERFORMANCE_METRICS_COUNTER_FLOAT_VALUE_VALID_BIT_META, 0x00000004) \
+
 #define XR_LIST_STRUCT_XrApiLayerProperties(_) \
     _(type) \
     _(next) \
@@ -1568,6 +1675,23 @@ XR_ENUM_STR(XrResult);
     _(nodeId) \
     _(pose) \
 
+#define XR_LIST_STRUCT_XrSpatialGraphStaticNodeBindingCreateInfoMSFT(_) \
+    _(type) \
+    _(next) \
+    _(space) \
+    _(poseInSpace) \
+    _(time) \
+
+#define XR_LIST_STRUCT_XrSpatialGraphNodeBindingPropertiesGetInfoMSFT(_) \
+    _(type) \
+    _(next) \
+
+#define XR_LIST_STRUCT_XrSpatialGraphNodeBindingPropertiesMSFT(_) \
+    _(type) \
+    _(next) \
+    _(nodeId) \
+    _(poseInNodeSpace) \
+
 #define XR_LIST_STRUCT_XrSystemHandTrackingPropertiesEXT(_) \
     _(type) \
     _(next) \
@@ -1762,6 +1886,19 @@ XR_ENUM_STR(XrResult);
     _(next) \
     _(flags) \
 
+#define XR_LIST_STRUCT_XrInteractionProfileDpadBindingEXT(_) \
+    _(type) \
+    _(next) \
+    _(binding) \
+    _(actionSet) \
+    _(forceThreshold) \
+    _(forceThresholdReleased) \
+    _(centerRegion) \
+    _(wedgeAngle) \
+    _(isSticky) \
+    _(onHaptic) \
+    _(offHaptic) \
+
 #define XR_LIST_STRUCT_XrInteractionProfileAnalogThresholdVALVE(_) \
     _(type) \
     _(next) \
@@ -2047,6 +2184,52 @@ XR_ENUM_STR(XrResult);
     _(next) \
     _(capsules) \
 
+#define XR_LIST_STRUCT_XrSystemSpatialEntityPropertiesFB(_) \
+    _(type) \
+    _(next) \
+    _(supportsSpatialEntity) \
+
+#define XR_LIST_STRUCT_XrSpatialAnchorCreateInfoFB(_) \
+    _(type) \
+    _(next) \
+    _(space) \
+    _(poseInSpace) \
+    _(time) \
+
+#define XR_LIST_STRUCT_XrSpaceComponentStatusSetInfoFB(_) \
+    _(type) \
+    _(next) \
+    _(componentType) \
+    _(enabled) \
+    _(timeout) \
+
+#define XR_LIST_STRUCT_XrSpaceComponentStatusFB(_) \
+    _(type) \
+    _(next) \
+    _(enabled) \
+    _(changePending) \
+
+#define XR_LIST_STRUCT_XrUuidEXT(_) \
+    _(data) \
+
+#define XR_LIST_STRUCT_XrEventDataSpatialAnchorCreateCompleteFB(_) \
+    _(type) \
+    _(next) \
+    _(requestId) \
+    _(result) \
+    _(space) \
+    _(uuid) \
+
+#define XR_LIST_STRUCT_XrEventDataSpaceSetStatusCompleteFB(_) \
+    _(type) \
+    _(next) \
+    _(requestId) \
+    _(result) \
+    _(space) \
+    _(uuid) \
+    _(componentType) \
+    _(enabled) \
+
 #define XR_LIST_STRUCT_XrFoveationProfileCreateInfoFB(_) \
     _(type) \
     _(next) \
@@ -2157,6 +2340,13 @@ XR_ENUM_STR(XrResult);
     _(next) \
     _(textureColorMap) \
 
+#define XR_LIST_STRUCT_XrPassthroughBrightnessContrastSaturationFB(_) \
+    _(type) \
+    _(next) \
+    _(brightness) \
+    _(contrast) \
+    _(saturation) \
+
 #define XR_LIST_STRUCT_XrEventDataPassthroughStateChangedFB(_) \
     _(type) \
     _(next) \
@@ -2193,6 +2383,11 @@ XR_ENUM_STR(XrResult);
     _(next) \
     _(supportsRenderModelLoading) \
 
+#define XR_LIST_STRUCT_XrRenderModelCapabilitiesRequestFB(_) \
+    _(type) \
+    _(next) \
+    _(flags) \
+
 #define XR_LIST_STRUCT_XrViewLocateFoveatedRenderingVARJO(_) \
     _(type) \
     _(next) \
@@ -2248,6 +2443,92 @@ XR_ENUM_STR(XrResult);
     _(spatialAnchorStore) \
     _(spatialAnchorPersistenceName) \
 
+#define XR_LIST_STRUCT_XrSpaceQueryInfoBaseHeaderFB(_) \
+    _(type) \
+    _(next) \
+
+#define XR_LIST_STRUCT_XrSpaceFilterInfoBaseHeaderFB(_) \
+    _(type) \
+    _(next) \
+
+#define XR_LIST_STRUCT_XrSpaceQueryInfoFB(_) \
+    _(type) \
+    _(next) \
+    _(queryAction) \
+    _(maxResultCount) \
+    _(timeout) \
+    _(filter) \
+    _(excludeFilter) \
+
+#define XR_LIST_STRUCT_XrSpaceStorageLocationFilterInfoFB(_) \
+    _(type) \
+    _(next) \
+    _(location) \
+
+#define XR_LIST_STRUCT_XrSpaceUuidFilterInfoFB(_) \
+    _(type) \
+    _(next) \
+    _(uuidCount) \
+    _(uuids) \
+
+#define XR_LIST_STRUCT_XrSpaceComponentFilterInfoFB(_) \
+    _(type) \
+    _(next) \
+    _(componentType) \
+
+#define XR_LIST_STRUCT_XrSpaceQueryResultFB(_) \
+    _(space) \
+    _(uuid) \
+
+#define XR_LIST_STRUCT_XrSpaceQueryResultsFB(_) \
+    _(type) \
+    _(next) \
+    _(resultCapacityInput) \
+    _(resultCountOutput) \
+    _(results) \
+
+#define XR_LIST_STRUCT_XrEventDataSpaceQueryResultsAvailableFB(_) \
+    _(type) \
+    _(next) \
+    _(requestId) \
+
+#define XR_LIST_STRUCT_XrEventDataSpaceQueryCompleteFB(_) \
+    _(type) \
+    _(next) \
+    _(requestId) \
+    _(result) \
+
+#define XR_LIST_STRUCT_XrSpaceSaveInfoFB(_) \
+    _(type) \
+    _(next) \
+    _(space) \
+    _(location) \
+    _(persistenceMode) \
+
+#define XR_LIST_STRUCT_XrSpaceEraseInfoFB(_) \
+    _(type) \
+    _(next) \
+    _(space) \
+    _(location) \
+
+#define XR_LIST_STRUCT_XrEventDataSpaceSaveCompleteFB(_) \
+    _(type) \
+    _(next) \
+    _(requestId) \
+    _(result) \
+    _(space) \
+    _(uuid) \
+    _(location) \
+
+#define XR_LIST_STRUCT_XrEventDataSpaceEraseCompleteFB(_) \
+    _(type) \
+    _(next) \
+    _(requestId) \
+    _(result) \
+    _(space) \
+    _(uuid) \
+    _(location) \
+
 #define XR_LIST_STRUCT_XrSwapchainImageFoveationVulkanFB(_) \
     _(type) \
     _(next) \
@@ -2313,14 +2594,42 @@ XR_ENUM_STR(XrResult);
     _(next) \
     _(flags) \
 
+#define XR_LIST_STRUCT_XrSpaceContainerFB(_) \
+    _(type) \
+    _(next) \
+    _(uuidCapacityInput) \
+    _(uuidCountOutput) \
+    _(uuids) \
+
 #define XR_LIST_STRUCT_XrPassthroughKeyboardHandsIntensityFB(_) \
     _(type) \
     _(next) \
     _(leftHandIntensity) \
     _(rightHandIntensity) \
 
-#define XR_LIST_STRUCT_XrUuidEXT(_) \
-    _(data) \
+#define XR_LIST_STRUCT_XrCompositionLayerSettingsFB(_) \
+    _(type) \
+    _(next) \
+    _(layerFlags) \
+
+#define XR_LIST_STRUCT_XrVulkanSwapchainCreateInfoMETA(_) \
+    _(type) \
+    _(next) \
+    _(additionalCreateFlags) \
+    _(additionalUsageFlags) \
+
+#define XR_LIST_STRUCT_XrPerformanceMetricsStateMETA(_) \
+    _(type) \
+    _(next) \
+    _(enabled) \
+
+#define XR_LIST_STRUCT_XrPerformanceMetricsCounterMETA(_) \
+    _(type) \
+    _(next) \
+    _(counterFlags) \
+    _(counterUnit) \
+    _(uintValue) \
+    _(floatValue) \
 
@@ -2398,6 +2707,9 @@ XR_ENUM_STR(XrResult);
     _(XrCompositionLayerAlphaBlendFB, XR_TYPE_COMPOSITION_LAYER_ALPHA_BLEND_FB) \
     _(XrViewConfigurationDepthRangeEXT, XR_TYPE_VIEW_CONFIGURATION_DEPTH_RANGE_EXT) \
     _(XrSpatialGraphNodeSpaceCreateInfoMSFT, XR_TYPE_SPATIAL_GRAPH_NODE_SPACE_CREATE_INFO_MSFT) \
+    _(XrSpatialGraphStaticNodeBindingCreateInfoMSFT, XR_TYPE_SPATIAL_GRAPH_STATIC_NODE_BINDING_CREATE_INFO_MSFT) \
+    _(XrSpatialGraphNodeBindingPropertiesGetInfoMSFT, XR_TYPE_SPATIAL_GRAPH_NODE_BINDING_PROPERTIES_GET_INFO_MSFT) \
+    _(XrSpatialGraphNodeBindingPropertiesMSFT, XR_TYPE_SPATIAL_GRAPH_NODE_BINDING_PROPERTIES_MSFT) \
     _(XrSystemHandTrackingPropertiesEXT, XR_TYPE_SYSTEM_HAND_TRACKING_PROPERTIES_EXT) \
     _(XrHandTrackerCreateInfoEXT, XR_TYPE_HAND_TRACKER_CREATE_INFO_EXT) \
     _(XrHandJointsLocateInfoEXT, XR_TYPE_HAND_JOINTS_LOCATE_INFO_EXT) \
@@ -2423,6 +2735,7 @@ XR_ENUM_STR(XrResult);
     _(XrCompositionLayerReprojectionInfoMSFT, XR_TYPE_COMPOSITION_LAYER_REPROJECTION_INFO_MSFT) \
     _(XrCompositionLayerReprojectionPlaneOverrideMSFT, XR_TYPE_COMPOSITION_LAYER_REPROJECTION_PLANE_OVERRIDE_MSFT) \
     _(XrCompositionLayerSecureContentFB, XR_TYPE_COMPOSITION_LAYER_SECURE_CONTENT_FB) \
+    _(XrInteractionProfileDpadBindingEXT, XR_TYPE_INTERACTION_PROFILE_DPAD_BINDING_EXT) \
     _(XrInteractionProfileAnalogThresholdVALVE, XR_TYPE_INTERACTION_PROFILE_ANALOG_THRESHOLD_VALVE) \
     _(XrHandJointsMotionRangeInfoEXT, XR_TYPE_HAND_JOINTS_MOTION_RANGE_INFO_EXT) \
     _(XrSceneObserverCreateInfoMSFT, XR_TYPE_SCENE_OBSERVER_CREATE_INFO_MSFT) \
@@ -2457,6 +2770,12 @@ XR_ENUM_STR(XrResult);
     _(XrHandTrackingScaleFB, XR_TYPE_HAND_TRACKING_SCALE_FB) \
     _(XrHandTrackingAimStateFB, XR_TYPE_HAND_TRACKING_AIM_STATE_FB) \
     _(XrHandTrackingCapsulesStateFB, XR_TYPE_HAND_TRACKING_CAPSULES_STATE_FB) \
+    _(XrSystemSpatialEntityPropertiesFB, XR_TYPE_SYSTEM_SPATIAL_ENTITY_PROPERTIES_FB) \
+    _(XrSpatialAnchorCreateInfoFB, XR_TYPE_SPATIAL_ANCHOR_CREATE_INFO_FB) \
+    _(XrSpaceComponentStatusSetInfoFB, XR_TYPE_SPACE_COMPONENT_STATUS_SET_INFO_FB) \
+    _(XrSpaceComponentStatusFB, XR_TYPE_SPACE_COMPONENT_STATUS_FB) \
+    _(XrEventDataSpatialAnchorCreateCompleteFB, XR_TYPE_EVENT_DATA_SPATIAL_ANCHOR_CREATE_COMPLETE_FB) \
+    _(XrEventDataSpaceSetStatusCompleteFB, XR_TYPE_EVENT_DATA_SPACE_SET_STATUS_COMPLETE_FB) \
     _(XrFoveationProfileCreateInfoFB, XR_TYPE_FOVEATION_PROFILE_CREATE_INFO_FB) \
     _(XrSwapchainCreateInfoFoveationFB, XR_TYPE_SWAPCHAIN_CREATE_INFO_FOVEATION_FB) \
     _(XrSwapchainStateFoveationFB, XR_TYPE_SWAPCHAIN_STATE_FOVEATION_FB) \
@@ -2474,12 +2793,14 @@ XR_ENUM_STR(XrResult);
     _(XrPassthroughStyleFB, XR_TYPE_PASSTHROUGH_STYLE_FB) \
     _(XrPassthroughColorMapMonoToRgbaFB, XR_TYPE_PASSTHROUGH_COLOR_MAP_MONO_TO_RGBA_FB) \
     _(XrPassthroughColorMapMonoToMonoFB, XR_TYPE_PASSTHROUGH_COLOR_MAP_MONO_TO_MONO_FB) \
+    _(XrPassthroughBrightnessContrastSaturationFB, XR_TYPE_PASSTHROUGH_BRIGHTNESS_CONTRAST_SATURATION_FB) \
     _(XrEventDataPassthroughStateChangedFB, XR_TYPE_EVENT_DATA_PASSTHROUGH_STATE_CHANGED_FB) \
     _(XrRenderModelPathInfoFB, XR_TYPE_RENDER_MODEL_PATH_INFO_FB) \
     _(XrRenderModelPropertiesFB, XR_TYPE_RENDER_MODEL_PROPERTIES_FB) \
     _(XrRenderModelBufferFB, XR_TYPE_RENDER_MODEL_BUFFER_FB) \
     _(XrRenderModelLoadInfoFB, XR_TYPE_RENDER_MODEL_LOAD_INFO_FB) \
     _(XrSystemRenderModelPropertiesFB, XR_TYPE_SYSTEM_RENDER_MODEL_PROPERTIES_FB) \
+    _(XrRenderModelCapabilitiesRequestFB, XR_TYPE_RENDER_MODEL_CAPABILITIES_REQUEST_FB) \
     _(XrViewLocateFoveatedRenderingVARJO, XR_TYPE_VIEW_LOCATE_FOVEATED_RENDERING_VARJO) \
     _(XrFoveatedViewConfigurationViewVARJO, XR_TYPE_FOVEATED_VIEW_CONFIGURATION_VIEW_VARJO) \
     _(XrSystemFoveatedRenderingPropertiesVARJO, XR_TYPE_SYSTEM_FOVEATED_RENDERING_PROPERTIES_VARJO) \
@@ -2489,10 +2810,25 @@ XR_ENUM_STR(XrResult);
     _(XrMarkerSpaceCreateInfoVARJO, XR_TYPE_MARKER_SPACE_CREATE_INFO_VARJO) \
     _(XrSpatialAnchorPersistenceInfoMSFT, XR_TYPE_SPATIAL_ANCHOR_PERSISTENCE_INFO_MSFT) \
     _(XrSpatialAnchorFromPersistedAnchorCreateInfoMSFT, XR_TYPE_SPATIAL_ANCHOR_FROM_PERSISTED_ANCHOR_CREATE_INFO_MSFT) \
+    _(XrSpaceQueryInfoFB, XR_TYPE_SPACE_QUERY_INFO_FB) \
+    _(XrSpaceStorageLocationFilterInfoFB, XR_TYPE_SPACE_STORAGE_LOCATION_FILTER_INFO_FB) \
+    _(XrSpaceUuidFilterInfoFB, XR_TYPE_SPACE_UUID_FILTER_INFO_FB) \
+    _(XrSpaceComponentFilterInfoFB, XR_TYPE_SPACE_COMPONENT_FILTER_INFO_FB) \
+    _(XrSpaceQueryResultsFB, XR_TYPE_SPACE_QUERY_RESULTS_FB) \
+    _(XrEventDataSpaceQueryResultsAvailableFB, XR_TYPE_EVENT_DATA_SPACE_QUERY_RESULTS_AVAILABLE_FB) \
+    _(XrEventDataSpaceQueryCompleteFB, XR_TYPE_EVENT_DATA_SPACE_QUERY_COMPLETE_FB) \
+    _(XrSpaceSaveInfoFB, XR_TYPE_SPACE_SAVE_INFO_FB) \
+    _(XrSpaceEraseInfoFB, XR_TYPE_SPACE_ERASE_INFO_FB) \
+    _(XrEventDataSpaceSaveCompleteFB, XR_TYPE_EVENT_DATA_SPACE_SAVE_COMPLETE_FB) \
+    _(XrEventDataSpaceEraseCompleteFB, XR_TYPE_EVENT_DATA_SPACE_ERASE_COMPLETE_FB) \
     _(XrCompositionLayerSpaceWarpInfoFB, XR_TYPE_COMPOSITION_LAYER_SPACE_WARP_INFO_FB) \
     _(XrSystemSpaceWarpPropertiesFB, XR_TYPE_SYSTEM_SPACE_WARP_PROPERTIES_FB) \
     _(XrDigitalLensControlALMALENCE, XR_TYPE_DIGITAL_LENS_CONTROL_ALMALENCE) \
+    _(XrSpaceContainerFB, XR_TYPE_SPACE_CONTAINER_FB) \
     _(XrPassthroughKeyboardHandsIntensityFB, XR_TYPE_PASSTHROUGH_KEYBOARD_HANDS_INTENSITY_FB) \
+    _(XrCompositionLayerSettingsFB, XR_TYPE_COMPOSITION_LAYER_SETTINGS_FB) \
+    _(XrPerformanceMetricsStateMETA, XR_TYPE_PERFORMANCE_METRICS_STATE_META) \
+    _(XrPerformanceMetricsCounterMETA, XR_TYPE_PERFORMANCE_METRICS_COUNTER_META) \
 
@@ -2596,6 +2932,7 @@ XR_ENUM_STR(XrResult);
     _(XrVulkanGraphicsDeviceGetInfoKHR, XR_TYPE_VULKAN_GRAPHICS_DEVICE_GET_INFO_KHR) \
     _(XrSwapchainImageFoveationVulkanFB, XR_TYPE_SWAPCHAIN_IMAGE_FOVEATION_VULKAN_FB) \
     _(XrSwapchainStateSamplerVulkanFB, XR_TYPE_SWAPCHAIN_STATE_SAMPLER_VULKAN_FB) \
+    _(XrVulkanSwapchainCreateInfoMETA, XR_TYPE_VULKAN_SWAPCHAIN_CREATE_INFO_META) \
 
 #else
 
@@ -2698,6 +3035,7 @@ XR_ENUM_STR(XrResult);
     _(XR_FB_android_surface_swapchain_create, 71) \
     _(XR_FB_swapchain_update_state, 72) \
     _(XR_FB_composition_layer_secure_content, 73) \
+    _(XR_EXT_dpad_binding, 79) \
     _(XR_VALVE_analog_threshold, 80) \
     _(XR_EXT_hand_joints_motion_range, 81) \
     _(XR_KHR_loader_init, 89) \
@@ -2714,10 +3052,13 @@ XR_ENUM_STR(XrResult);
     _(XR_HTCX_vive_tracker_interaction, 104) \
     _(XR_HTC_facial_tracking, 105) \
     _(XR_HTC_vive_focus3_controller_interaction, 106) \
+    _(XR_HTC_hand_interaction, 107) \
+    _(XR_HTC_vive_wrist_tracker_interaction, 108) \
     _(XR_FB_color_space, 109) \
     _(XR_FB_hand_tracking_mesh, 111) \
     _(XR_FB_hand_tracking_aim, 112) \
     _(XR_FB_hand_tracking_capsules, 113) \
+    _(XR_FB_spatial_entity, 114) \
     _(XR_FB_foveation, 115) \
     _(XR_FB_foveation_configuration, 116) \
     _(XR_FB_keyboard_tracking, 117) \
@@ -2729,7 +3070,11 @@ XR_ENUM_STR(XrResult);
     _(XR_VARJO_composition_layer_depth_test, 123) \
     _(XR_VARJO_environment_depth_estimation, 124) \
     _(XR_VARJO_marker_tracking, 125) \
+    _(XR_VARJO_view_offset, 126) \
     _(XR_MSFT_spatial_anchor_persistence, 143) \
+    _(XR_ULTRALEAP_hand_tracking_forearm, 150) \
+    _(XR_FB_spatial_entity_query, 157) \
+    _(XR_FB_spatial_entity_storage, 159) \
     _(XR_OCULUS_audio_device_guid, 160) \
     _(XR_FB_foveation_vulkan, 161) \
     _(XR_FB_swapchain_update_state_android_surface, 162) \
@@ -2738,7 +3083,11 @@ XR_ENUM_STR(XrResult);
     _(XR_KHR_swapchain_usage_input_attachment_bit, 166) \
     _(XR_FB_space_warp, 172) \
     _(XR_ALMALENCE_digital_lens_control, 197) \
+    _(XR_FB_spatial_entity_container, 200) \
     _(XR_FB_passthrough_keyboard_hands, 204) \
+    _(XR_FB_composition_layer_settings, 205) \
+    _(XR_META_vulkan_swapchain_create_info, 228) \
+    _(XR_META_performance_metrics, 233) \
     _(XR_EXT_uuid, 300) \
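The XR_LIST_ENUM_* X-macros added above expand one callback invocation per enumerant, which is how openxr_reflection.h is meant to be consumed. For illustration only (not part of the diff), a name-lookup helper generated from the new XrSpaceComponentTypeFB list:

#include <openxr/openxr.h>
#include <openxr/openxr_reflection.h>

/* Each list entry expands to a switch case returning the stringized name. */
#define CASE_TO_STR(name, val) \
    case name:                 \
        return #name;

static const char *space_component_type_str(XrSpaceComponentTypeFB t) {
    switch (t) {
        XR_LIST_ENUM_XrSpaceComponentTypeFB(CASE_TO_STR)
        default:
            return "Unknown XrSpaceComponentTypeFB";
    }
}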
XrColorCyan = {0.0f, 1.0f, 1.0f, 1.0f}; static const XrColor4f XrColorLightGrey = {0.7f, 0.7f, 0.7f, 1.0f}; static const XrColor4f XrColorDarkGrey = {0.3f, 0.3f, 0.3f, 1.0f}; -enum GraphicsAPI { GRAPHICS_VULKAN, GRAPHICS_OPENGL, GRAPHICS_OPENGL_ES, GRAPHICS_D3D }; +typedef enum GraphicsAPI { GRAPHICS_VULKAN, GRAPHICS_OPENGL, GRAPHICS_OPENGL_ES, GRAPHICS_D3D } GraphicsAPI; // Column-major, pre-multiplied. This type does not exist in the OpenXR API and is provided for convenience. -struct XrMatrix4x4f { +typedef struct XrMatrix4x4f { float m[16]; -}; +} XrMatrix4x4f; inline static float XrRcpSqrt(const float x) { const float SMALLEST_NON_DENORMAL = 1.1754943508222875e-038f; // ( 1U << 23 ) diff --git a/thirdparty/openxr/src/xr_generated_dispatch_table.c b/thirdparty/openxr/src/xr_generated_dispatch_table.c index 79fbefc52a..91fa0c3ca0 100644 --- a/thirdparty/openxr/src/xr_generated_dispatch_table.c +++ b/thirdparty/openxr/src/xr_generated_dispatch_table.c @@ -202,6 +202,9 @@ void GeneratedXrPopulateDispatchTable(struct XrGeneratedDispatchTable *table, // ---- XR_MSFT_spatial_graph_bridge extension commands (get_inst_proc_addr(instance, "xrCreateSpatialGraphNodeSpaceMSFT", (PFN_xrVoidFunction*)&table->CreateSpatialGraphNodeSpaceMSFT)); + (get_inst_proc_addr(instance, "xrTryCreateSpatialGraphStaticNodeBindingMSFT", (PFN_xrVoidFunction*)&table->TryCreateSpatialGraphStaticNodeBindingMSFT)); + (get_inst_proc_addr(instance, "xrDestroySpatialGraphNodeBindingMSFT", (PFN_xrVoidFunction*)&table->DestroySpatialGraphNodeBindingMSFT)); + (get_inst_proc_addr(instance, "xrGetSpatialGraphNodeBindingPropertiesMSFT", (PFN_xrVoidFunction*)&table->GetSpatialGraphNodeBindingPropertiesMSFT)); // ---- XR_EXT_hand_tracking extension commands (get_inst_proc_addr(instance, "xrCreateHandTrackerEXT", (PFN_xrVoidFunction*)&table->CreateHandTrackerEXT)); @@ -269,6 +272,13 @@ void GeneratedXrPopulateDispatchTable(struct XrGeneratedDispatchTable *table, // ---- XR_FB_hand_tracking_mesh extension commands (get_inst_proc_addr(instance, "xrGetHandMeshFB", (PFN_xrVoidFunction*)&table->GetHandMeshFB)); + // ---- XR_FB_spatial_entity extension commands + (get_inst_proc_addr(instance, "xrCreateSpatialAnchorFB", (PFN_xrVoidFunction*)&table->CreateSpatialAnchorFB)); + (get_inst_proc_addr(instance, "xrGetSpaceUuidFB", (PFN_xrVoidFunction*)&table->GetSpaceUuidFB)); + (get_inst_proc_addr(instance, "xrEnumerateSpaceSupportedComponentsFB", (PFN_xrVoidFunction*)&table->EnumerateSpaceSupportedComponentsFB)); + (get_inst_proc_addr(instance, "xrSetSpaceComponentStatusFB", (PFN_xrVoidFunction*)&table->SetSpaceComponentStatusFB)); + (get_inst_proc_addr(instance, "xrGetSpaceComponentStatusFB", (PFN_xrVoidFunction*)&table->GetSpaceComponentStatusFB)); + // ---- XR_FB_foveation extension commands (get_inst_proc_addr(instance, "xrCreateFoveationProfileFB", (PFN_xrVoidFunction*)&table->CreateFoveationProfileFB)); (get_inst_proc_addr(instance, "xrDestroyFoveationProfileFB", (PFN_xrVoidFunction*)&table->DestroyFoveationProfileFB)); @@ -316,6 +326,9 @@ void GeneratedXrPopulateDispatchTable(struct XrGeneratedDispatchTable *table, (get_inst_proc_addr(instance, "xrGetMarkerSizeVARJO", (PFN_xrVoidFunction*)&table->GetMarkerSizeVARJO)); (get_inst_proc_addr(instance, "xrCreateMarkerSpaceVARJO", (PFN_xrVoidFunction*)&table->CreateMarkerSpaceVARJO)); + // ---- XR_VARJO_view_offset extension commands + (get_inst_proc_addr(instance, "xrSetViewOffsetVARJO", (PFN_xrVoidFunction*)&table->SetViewOffsetVARJO)); + // ---- XR_MSFT_spatial_anchor_persistence extension 
commands (get_inst_proc_addr(instance, "xrCreateSpatialAnchorStoreConnectionMSFT", (PFN_xrVoidFunction*)&table->CreateSpatialAnchorStoreConnectionMSFT)); (get_inst_proc_addr(instance, "xrDestroySpatialAnchorStoreConnectionMSFT", (PFN_xrVoidFunction*)&table->DestroySpatialAnchorStoreConnectionMSFT)); @@ -325,6 +338,14 @@ void GeneratedXrPopulateDispatchTable(struct XrGeneratedDispatchTable *table, (get_inst_proc_addr(instance, "xrUnpersistSpatialAnchorMSFT", (PFN_xrVoidFunction*)&table->UnpersistSpatialAnchorMSFT)); (get_inst_proc_addr(instance, "xrClearSpatialAnchorStoreMSFT", (PFN_xrVoidFunction*)&table->ClearSpatialAnchorStoreMSFT)); + // ---- XR_FB_spatial_entity_query extension commands + (get_inst_proc_addr(instance, "xrQuerySpacesFB", (PFN_xrVoidFunction*)&table->QuerySpacesFB)); + (get_inst_proc_addr(instance, "xrRetrieveSpaceQueryResultsFB", (PFN_xrVoidFunction*)&table->RetrieveSpaceQueryResultsFB)); + + // ---- XR_FB_spatial_entity_storage extension commands + (get_inst_proc_addr(instance, "xrSaveSpaceFB", (PFN_xrVoidFunction*)&table->SaveSpaceFB)); + (get_inst_proc_addr(instance, "xrEraseSpaceFB", (PFN_xrVoidFunction*)&table->EraseSpaceFB)); + // ---- XR_OCULUS_audio_device_guid extension commands #if defined(XR_USE_PLATFORM_WIN32) (get_inst_proc_addr(instance, "xrGetAudioOutputDeviceGuidOculus", (PFN_xrVoidFunction*)&table->GetAudioOutputDeviceGuidOculus)); @@ -336,8 +357,17 @@ void GeneratedXrPopulateDispatchTable(struct XrGeneratedDispatchTable *table, // ---- XR_ALMALENCE_digital_lens_control extension commands (get_inst_proc_addr(instance, "xrSetDigitalLensControlALMALENCE", (PFN_xrVoidFunction*)&table->SetDigitalLensControlALMALENCE)); + // ---- XR_FB_spatial_entity_container extension commands + (get_inst_proc_addr(instance, "xrGetSpaceContainerFB", (PFN_xrVoidFunction*)&table->GetSpaceContainerFB)); + // ---- XR_FB_passthrough_keyboard_hands extension commands (get_inst_proc_addr(instance, "xrPassthroughLayerSetKeyboardHandsIntensityFB", (PFN_xrVoidFunction*)&table->PassthroughLayerSetKeyboardHandsIntensityFB)); + + // ---- XR_META_performance_metrics extension commands + (get_inst_proc_addr(instance, "xrEnumeratePerformanceMetricsCounterPathsMETA", (PFN_xrVoidFunction*)&table->EnumeratePerformanceMetricsCounterPathsMETA)); + (get_inst_proc_addr(instance, "xrSetPerformanceMetricsStateMETA", (PFN_xrVoidFunction*)&table->SetPerformanceMetricsStateMETA)); + (get_inst_proc_addr(instance, "xrGetPerformanceMetricsStateMETA", (PFN_xrVoidFunction*)&table->GetPerformanceMetricsStateMETA)); + (get_inst_proc_addr(instance, "xrQueryPerformanceMetricsCounterMETA", (PFN_xrVoidFunction*)&table->QueryPerformanceMetricsCounterMETA)); } diff --git a/thirdparty/openxr/src/xr_generated_dispatch_table.h b/thirdparty/openxr/src/xr_generated_dispatch_table.h index 34e0de93f5..51d48bef43 100644 --- a/thirdparty/openxr/src/xr_generated_dispatch_table.h +++ b/thirdparty/openxr/src/xr_generated_dispatch_table.h @@ -205,6 +205,9 @@ struct XrGeneratedDispatchTable { // ---- XR_MSFT_spatial_graph_bridge extension commands PFN_xrCreateSpatialGraphNodeSpaceMSFT CreateSpatialGraphNodeSpaceMSFT; + PFN_xrTryCreateSpatialGraphStaticNodeBindingMSFT TryCreateSpatialGraphStaticNodeBindingMSFT; + PFN_xrDestroySpatialGraphNodeBindingMSFT DestroySpatialGraphNodeBindingMSFT; + PFN_xrGetSpatialGraphNodeBindingPropertiesMSFT GetSpatialGraphNodeBindingPropertiesMSFT; // ---- XR_EXT_hand_tracking extension commands PFN_xrCreateHandTrackerEXT CreateHandTrackerEXT; @@ -272,6 +275,13 @@ struct XrGeneratedDispatchTable { 
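For reference, every entry point added above is resolved the same way an application would resolve it: fetched by name through `xrGetInstanceProcAddr` and stored through a cast to the typed function pointer. A minimal sketch of that pattern outside the loader (assumes an `XrInstance` created with `XR_FB_spatial_entity_query` enabled; the helper name is illustrative):

```c
#include <openxr/openxr.h>

/* Illustrative helper, not part of the loader: resolve one of the newly
 * tabled entry points exactly as GeneratedXrPopulateDispatchTable does. */
static PFN_xrQuerySpacesFB pfnQuerySpacesFB = NULL;

static XrResult load_query_spaces_fb(XrInstance instance)
{
    /* Fetch by name; on failure the runtime sets the pointer to NULL
     * and returns an error such as XR_ERROR_FUNCTION_UNSUPPORTED. */
    return xrGetInstanceProcAddr(instance, "xrQuerySpacesFB",
                                 (PFN_xrVoidFunction*)&pfnQuerySpacesFB);
}
```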
// ---- XR_FB_hand_tracking_mesh extension commands PFN_xrGetHandMeshFB GetHandMeshFB; + // ---- XR_FB_spatial_entity extension commands + PFN_xrCreateSpatialAnchorFB CreateSpatialAnchorFB; + PFN_xrGetSpaceUuidFB GetSpaceUuidFB; + PFN_xrEnumerateSpaceSupportedComponentsFB EnumerateSpaceSupportedComponentsFB; + PFN_xrSetSpaceComponentStatusFB SetSpaceComponentStatusFB; + PFN_xrGetSpaceComponentStatusFB GetSpaceComponentStatusFB; + // ---- XR_FB_foveation extension commands PFN_xrCreateFoveationProfileFB CreateFoveationProfileFB; PFN_xrDestroyFoveationProfileFB DestroyFoveationProfileFB; @@ -319,6 +329,9 @@ struct XrGeneratedDispatchTable { PFN_xrGetMarkerSizeVARJO GetMarkerSizeVARJO; PFN_xrCreateMarkerSpaceVARJO CreateMarkerSpaceVARJO; + // ---- XR_VARJO_view_offset extension commands + PFN_xrSetViewOffsetVARJO SetViewOffsetVARJO; + // ---- XR_MSFT_spatial_anchor_persistence extension commands PFN_xrCreateSpatialAnchorStoreConnectionMSFT CreateSpatialAnchorStoreConnectionMSFT; PFN_xrDestroySpatialAnchorStoreConnectionMSFT DestroySpatialAnchorStoreConnectionMSFT; @@ -328,6 +341,14 @@ struct XrGeneratedDispatchTable { PFN_xrUnpersistSpatialAnchorMSFT UnpersistSpatialAnchorMSFT; PFN_xrClearSpatialAnchorStoreMSFT ClearSpatialAnchorStoreMSFT; + // ---- XR_FB_spatial_entity_query extension commands + PFN_xrQuerySpacesFB QuerySpacesFB; + PFN_xrRetrieveSpaceQueryResultsFB RetrieveSpaceQueryResultsFB; + + // ---- XR_FB_spatial_entity_storage extension commands + PFN_xrSaveSpaceFB SaveSpaceFB; + PFN_xrEraseSpaceFB EraseSpaceFB; + // ---- XR_OCULUS_audio_device_guid extension commands #if defined(XR_USE_PLATFORM_WIN32) PFN_xrGetAudioOutputDeviceGuidOculus GetAudioOutputDeviceGuidOculus; @@ -339,8 +360,17 @@ struct XrGeneratedDispatchTable { // ---- XR_ALMALENCE_digital_lens_control extension commands PFN_xrSetDigitalLensControlALMALENCE SetDigitalLensControlALMALENCE; + // ---- XR_FB_spatial_entity_container extension commands + PFN_xrGetSpaceContainerFB GetSpaceContainerFB; + // ---- XR_FB_passthrough_keyboard_hands extension commands PFN_xrPassthroughLayerSetKeyboardHandsIntensityFB PassthroughLayerSetKeyboardHandsIntensityFB; + + // ---- XR_META_performance_metrics extension commands + PFN_xrEnumeratePerformanceMetricsCounterPathsMETA EnumeratePerformanceMetricsCounterPathsMETA; + PFN_xrSetPerformanceMetricsStateMETA SetPerformanceMetricsStateMETA; + PFN_xrGetPerformanceMetricsStateMETA GetPerformanceMetricsStateMETA; + PFN_xrQueryPerformanceMetricsCounterMETA QueryPerformanceMetricsCounterMETA; }; diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h index d96f2dacc0..184ee005d8 100644 --- a/thirdparty/vulkan/vk_mem_alloc.h +++ b/thirdparty/vulkan/vk_mem_alloc.h @@ -25,7 +25,7 @@ /** \mainpage Vulkan Memory Allocator -<b>Version 3.0.1-development (2022-03-28)</b> +<b>Version 3.0.1 (2022-05-26)</b> Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. 
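Once populated, the table declared above is used by calling straight through the stored pointers, with a NULL check standing in for "extension not present". A hypothetical wrapper (names and error choice are illustrative, assuming a populated table):

```c
#include <openxr/openxr.h>
#include "xr_generated_dispatch_table.h"

/* Hypothetical wrapper: invoke a tabled XR_FB_spatial_entity_query
 * command, treating an unpopulated slot as "unsupported". */
static XrResult query_spaces(const struct XrGeneratedDispatchTable* table,
                             XrSession session,
                             const XrSpaceQueryInfoBaseHeaderFB* info,
                             XrAsyncRequestIdFB* requestId)
{
    if (table->QuerySpacesFB == NULL)
        return XR_ERROR_FUNCTION_UNSUPPORTED;
    return table->QuerySpacesFB(session, info, requestId);
}
```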
\n License: MIT @@ -300,9 +300,9 @@ extern "C" { //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// -// +// // INTERFACE -// +// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// @@ -488,7 +488,7 @@ typedef enum VmaMemoryUsage When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT in VmaAllocationCreateInfo::flags. - + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() and not with generic memory allocation functions. @@ -552,7 +552,7 @@ typedef enum VmaAllocationCreateFlagBits */ VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead. - + Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a null-terminated string. Instead of copying pointer value, a local copy of the string is made and stored in allocation's `pName`. The string is automatically @@ -579,14 +579,14 @@ typedef enum VmaAllocationCreateFlagBits */ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, /** \brief Set this flag if the allocated memory will have aliasing resources. - + Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified. Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors. */ VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200, /** Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). - + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. @@ -602,7 +602,7 @@ typedef enum VmaAllocationCreateFlagBits VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, /** Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). - + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. @@ -724,7 +724,7 @@ typedef enum VmaDefragmentationFlagBits VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8, /// A bit mask to extract only `ALGORITHM` bits from entire set of flags. 
- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK = + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT | VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT | VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT | @@ -980,7 +980,7 @@ typedef struct VmaVulkanFunctions #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; - /// Fetch "vkGetImageMemoryRequirements 2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; #endif #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 @@ -1117,19 +1117,19 @@ typedef struct VmaStatistics */ uint32_t blockCount; /** \brief Number of #VmaAllocation objects allocated. - + Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. */ uint32_t allocationCount; /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. - + \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image. */ VkDeviceSize blockBytes; /** \brief Total number of bytes occupied by all #VmaAllocation objects. - + Always less or equal than `blockBytes`. Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan but unused by any #VmaAllocation. @@ -1387,9 +1387,9 @@ typedef struct VmaAllocationInfo */ void* VMA_NULLABLE pUserData; /** \brief Custom allocation name that was set with vmaSetAllocationName(). - + It can change after call to vmaSetAllocationName() for this allocation. - + Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED]. */ @@ -1429,7 +1429,7 @@ typedef struct VmaDefragmentationMove /// Allocation that should be moved. VmaAllocation VMA_NOT_NULL srcAllocation; /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`. - + \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass, to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory(). vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory. @@ -1446,16 +1446,16 @@ typedef struct VmaDefragmentationPassMoveInfo /// Number of elements in the `pMoves` array. uint32_t moveCount; /** \brief Array of moves to be performed by the user in the current defragmentation pass. - + Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass(). For each element, you should: - + 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset. 2. 
Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`. 3. Make sure these commands finished executing on the GPU. 4. Destroy the old buffer/image. - + Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass(). After this call, the allocation will point to the new place in memory. @@ -1539,7 +1539,7 @@ typedef struct VmaVirtualAllocationCreateInfo typedef struct VmaVirtualAllocationInfo { /** \brief Offset of the allocation. - + Offset at which the allocation was made. */ VkDeviceSize offset; @@ -2364,7 +2364,7 @@ vkDestroyBuffer(device, buffer, allocationCallbacks); vmaFreeMemory(allocator, allocation); \endcode -It it safe to pass null as buffer and/or allocation. +It is safe to pass null as buffer and/or allocation. */ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( VmaAllocator VMA_NOT_NULL allocator, @@ -2396,7 +2396,7 @@ vkDestroyImage(device, image, allocationCallbacks); vmaFreeMemory(allocator, allocation); \endcode -It it safe to pass null as image and/or allocation. +It is safe to pass null as image and/or allocation. */ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( VmaAllocator VMA_NOT_NULL allocator, @@ -2555,9 +2555,9 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// -// +// // IMPLEMENTATION -// +// //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// @@ -2578,6 +2578,9 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( #ifdef _MSC_VER #include <intrin.h> // For functions like __popcnt, _BitScanForward etc. 
#endif +#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20 + #include <bit> // For std::popcount +#endif /******************************************************************************* CONFIGURATION SECTION @@ -3180,12 +3183,16 @@ But you need to check in runtime whether user's CPU supports these, as some old */ static inline uint32_t VmaCountBitsSet(uint32_t v) { +#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20 + return std::popcount(v); +#else uint32_t c = v - ((v >> 1) & 0x55555555); c = ((c >> 2) & 0x33333333) + (c & 0x33333333); c = ((c >> 4) + c) & 0x0F0F0F0F; c = ((c >> 8) + c) & 0x00FF00FF; c = ((c >> 16) + c) & 0x0000FFFF; return c; +#endif } static inline uint8_t VmaBitScanLSB(uint64_t mask) @@ -3374,60 +3381,6 @@ static inline bool VmaStrIsEmpty(const char* pStr) return pStr == VMA_NULL || *pStr == '\0'; } -#if VMA_STATS_STRING_ENABLED -static const char* VmaAlgorithmToStr(uint32_t algorithm) -{ - switch (algorithm) - { - case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: - return "Linear"; - case 0: - return "TLSF"; - default: - VMA_ASSERT(0); - return ""; - } -} -#endif // VMA_STATS_STRING_ENABLED - -#ifndef VMA_SORT -template<typename Iterator, typename Compare> -Iterator VmaQuickSortPartition(Iterator beg, Iterator end, Compare cmp) -{ - Iterator centerValue = end; --centerValue; - Iterator insertIndex = beg; - for (Iterator memTypeIndex = beg; memTypeIndex < centerValue; ++memTypeIndex) - { - if (cmp(*memTypeIndex, *centerValue)) - { - if (insertIndex != memTypeIndex) - { - VMA_SWAP(*memTypeIndex, *insertIndex); - } - ++insertIndex; - } - } - if (insertIndex != centerValue) - { - VMA_SWAP(*insertIndex, *centerValue); - } - return insertIndex; -} - -template<typename Iterator, typename Compare> -void VmaQuickSort(Iterator beg, Iterator end, Compare cmp) -{ - if (beg < end) - { - Iterator it = VmaQuickSortPartition<Iterator, Compare>(beg, end, cmp); - VmaQuickSort<Iterator, Compare>(beg, it, cmp); - VmaQuickSort<Iterator, Compare>(it + 1, end, cmp); - } -} - -#define VMA_SORT(beg, end, cmp) VmaQuickSort(beg, end, cmp) -#endif // VMA_SORT - /* Returns true if two memory blocks occupy overlapping pages. ResourceA must be in less memory offset than ResourceB. @@ -5073,7 +5026,7 @@ public: VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src); VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete; ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); } - + size_t GetCount() const { return m_Count; } bool IsEmpty() const { return m_Count == 0; } ItemType* Front() { return m_Front; } @@ -5485,7 +5438,7 @@ public: // Writes a string value inside "". // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped. void WriteString(const char* pStr); - + // Begins writing a string value. 
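The `VmaCountBitsSet()` change earlier in this hunk prefers `std::popcount` under C++20 and keeps the classic SWAR reduction as a fallback: adjacent 2-bit, then 4-bit, 8-bit, and 16-bit partial sums are computed in parallel within the word. A self-contained sanity check of that fallback (an illustrative sketch, not VMA code):

```c
#include <assert.h>
#include <stdint.h>

/* Same SWAR reduction as the pre-C++20 branch of VmaCountBitsSet(). */
static uint32_t popcount32(uint32_t v)
{
    uint32_t c = v - ((v >> 1) & 0x55555555u);        /* 2-bit sums  */
    c = ((c >> 2) & 0x33333333u) + (c & 0x33333333u); /* 4-bit sums  */
    c = ((c >> 4) + c) & 0x0F0F0F0Fu;                 /* 8-bit sums  */
    c = ((c >> 8) + c) & 0x00FF00FFu;                 /* 16-bit sums */
    return ((c >> 16) + c) & 0x0000FFFFu;             /* 32-bit sum  */
}

int main(void)
{
    assert(popcount32(0x00000000u) == 0);
    assert(popcount32(0xFFFFFFFFu) == 32);
    assert(popcount32(0xA5A5A5A5u) == 16);
    return 0;
}
```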
// Call BeginString, ContinueString, ContinueString, ..., EndString instead of // WriteString to conveniently build the string content incrementally, made of @@ -6463,7 +6416,7 @@ void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size (uint32_t)allocation->GetSuballocationType()); #endif // VMA_STATS_STRING_ENABLED } - + } #if VMA_STATS_STRING_ENABLED @@ -10941,7 +10894,7 @@ public: uint32_t GetAlgorithm() const { return m_Algorithm; } bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; } float GetPriority() const { return m_Priority; } - void* const GetAllocationNextPtr() const { return m_pMemoryAllocateNext; } + const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; } // To be used only while the m_Mutex is locked. Used during defragmentation. size_t GetBlockCount() const { return m_Blocks.size(); } // To be used only while the m_Mutex is locked. Used during defragmentation. @@ -12783,7 +12736,7 @@ void VmaBlockVector::IncrementallySortBlocks() void VmaBlockVector::SortByFreeSize() { VMA_SORT(m_Blocks.begin(), m_Blocks.end(), - [](auto* b1, auto* b2) + [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool { return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); }); @@ -13029,7 +12982,7 @@ VmaDefragmentationContext_T::VmaDefragmentationContext_T( } } } - + switch (m_Algorithm) { case 0: // Default algorithm @@ -13155,7 +13108,7 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo vector = m_pBlockVectors[vectorIndex]; VMA_ASSERT(vector != VMA_NULL); } - + switch (move.operation) { case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY: @@ -13452,7 +13405,7 @@ bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, Vma case CounterStatus::Pass: break; } - + VkDeviceSize offset = moveData.move.srcAllocation->GetOffset(); if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size) { @@ -13636,7 +13589,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector prevFreeRegionSize = nextFreeRegionSize; } } - + // No moves perfomed, update statistics to current vector state if (startMoveCount == m_Moves.size() && !update) { @@ -13923,7 +13876,7 @@ void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, state.avgFreeSize /= freeCount; } -bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, +bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, VmaBlockVector& vector, size_t firstFreeBlock, bool& texturePresent, bool& bufferPresent, bool& otherPresent) { @@ -15919,6 +15872,7 @@ void VmaAllocator_T::UpdateVulkanBudget() void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) { if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && + hAllocation->IsMappingAllowed() && (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) { void* pData = VMA_NULL; @@ -16010,8 +15964,8 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) json.ContinueString_Size(index++); if (pool->GetName()) { - json.WriteString(" - "); - json.WriteString(pool->GetName()); + json.ContinueString(" - "); + json.ContinueString(pool->GetName()); } json.EndString(); @@ -18427,7 +18381,7 @@ for(;;) VmaAllocationInfo allocInfo; vmaGetAllocationInfo(allocator, pMoves[i].srcAllocation, &allocInfo); MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData; - + // Recreate and bind this 
buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset. VkImageCreateInfo imgCreateInfo = ... VkImage newImg; @@ -18439,7 +18393,7 @@ for(;;) // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place. vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...); } - + // Make sure the copy commands finished executing. vkWaitForFences(...); @@ -18451,7 +18405,7 @@ for(;;) } // Update appropriate descriptors to point to the new places... - + res = vmaEndDefragmentationPass(allocator, defragCtx, &pass); if(res == VK_SUCCESS) break; @@ -18605,7 +18559,7 @@ To do that, fill VmaAllocationCreateInfo::pUserData field when creating an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer, some handle, index, key, ordinal number or any other value that would associate the allocation with your custom metadata. -It it useful to identify appropriate data structures in your engine given #VmaAllocation, +It is useful to identify appropriate data structures in your engine given #VmaAllocation, e.g. when doing \ref defragmentation. \code @@ -18836,14 +18790,14 @@ To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1. #include "vk_mem_alloc.h" \endcode -It makes memory of all new allocations initialized to bit pattern `0xDCDCDCDC`. +It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`. Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`. Memory is automatically mapped and unmapped if necessary. If you find these values while debugging your program, good chances are that you incorrectly read Vulkan memory that is allocated but not initialized, or already freed, respectively. -Memory initialization works only with memory types that are `HOST_VISIBLE`. +Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped. It works also with dedicated allocations. \section debugging_memory_usage_margins Margins @@ -19116,13 +19070,13 @@ so you need to create another "staging" allocation and perform explicit transfer VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; bufCreateInfo.size = 65536; bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; - + VmaAllocationCreateInfo allocCreateInfo = {}; allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; - + VkBuffer buf; VmaAllocation alloc; VmaAllocationInfo allocInfo; diff --git a/thirdparty/zstd/common/bitstream.h b/thirdparty/zstd/common/bitstream.h index 2e5a933ad3..84b6062ff3 100644 --- a/thirdparty/zstd/common/bitstream.h +++ b/thirdparty/zstd/common/bitstream.h @@ -143,10 +143,16 @@ MEM_STATIC unsigned BIT_highbit32 (U32 val) { # if defined(_MSC_VER) /* Visual */ # if STATIC_BMI2 == 1 - return _lzcnt_u32(val) ^ 31; + return _lzcnt_u32(val) ^ 31; # else - unsigned long r = 0; - return _BitScanReverse(&r, val) ? 
(unsigned)r : 0; + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } # endif # elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ return __builtin_clz (val) ^ 31; @@ -293,22 +299,22 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si switch(srcSize) { case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); - /* fall-through */ + ZSTD_FALLTHROUGH; case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); - /* fall-through */ + ZSTD_FALLTHROUGH; case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); - /* fall-through */ + ZSTD_FALLTHROUGH; case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; - /* fall-through */ + ZSTD_FALLTHROUGH; case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; - /* fall-through */ + ZSTD_FALLTHROUGH; case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; - /* fall-through */ + ZSTD_FALLTHROUGH; default: break; } @@ -332,7 +338,16 @@ MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getMiddleBits(size_t bitContainer, U32 c U32 const regMask = sizeof(bitContainer)*8 - 1; /* if start > regMask, bitstream is corrupted, and result is undefined */ assert(nbBits < BIT_MASK_SIZE); + /* x86 transform & ((1 << nbBits) - 1) to bzhi instruction, it is better + * than accessing memory. When bmi2 instruction is not present, we consider + * such cpus old (pre-Haswell, 2013) and their performance is not of that + * importance. + */ +#if defined(__x86_64__) || defined(_M_X86) + return (bitContainer >> (start & regMask)) & ((((U64)1) << nbBits) - 1); +#else return (bitContainer >> (start & regMask)) & BIT_mask[nbBits]; +#endif } MEM_STATIC FORCE_INLINE_ATTR size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) diff --git a/thirdparty/zstd/common/compiler.h b/thirdparty/zstd/common/compiler.h index a951d0adea..516930c01e 100644 --- a/thirdparty/zstd/common/compiler.h +++ b/thirdparty/zstd/common/compiler.h @@ -11,6 +11,8 @@ #ifndef ZSTD_COMPILER_H #define ZSTD_COMPILER_H +#include "portability_macros.h" + /*-******************************************************* * Compiler specifics *********************************************************/ @@ -40,7 +42,7 @@ /** On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC). - This explictly marks such functions as __cdecl so that the code will still compile + This explicitly marks such functions as __cdecl so that the code will still compile if a CC other than __cdecl has been made the default. */ #if defined(_MSC_VER) @@ -92,29 +94,17 @@ /* target attribute */ -#ifndef __has_attribute - #define __has_attribute(x) 0 /* Compatibility with non-clang compilers. */ -#endif #if defined(__GNUC__) || defined(__ICCARM__) # define TARGET_ATTRIBUTE(target) __attribute__((__target__(target))) #else # define TARGET_ATTRIBUTE(target) #endif -/* Enable runtime BMI2 dispatch based on the CPU. - * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. +/* Target attribute for BMI2 dynamic dispatch. + * Enable lzcnt, bmi, and bmi2. + * We test for bmi1 & bmi2. lzcnt is included in bmi1. 
*/ -#ifndef DYNAMIC_BMI2 - #if ((defined(__clang__) && __has_attribute(__target__)) \ - || (defined(__GNUC__) \ - && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ - && (defined(__x86_64__) || defined(_M_X86)) \ - && !defined(__BMI2__) - # define DYNAMIC_BMI2 1 - #else - # define DYNAMIC_BMI2 0 - #endif -#endif +#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2") /* prefetch * can be disabled, by declaring NO_PREFETCH build macro */ @@ -150,8 +140,9 @@ } /* vectorization - * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax */ -#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) + * older GCC (pre gcc-4.3 picked as the cutoff) uses a different syntax, + * and some compilers, like Intel ICC and MCST LCC, do not support it at all. */ +#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__) # if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5) # define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize"))) # else @@ -197,25 +188,91 @@ #define STATIC_BMI2 0 #endif -/* compat. with non-clang compilers */ -#ifndef __has_builtin -# define __has_builtin(x) 0 +/* compile time determination of SIMD support */ +#if !defined(ZSTD_NO_INTRINSICS) +# if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) +# define ZSTD_ARCH_X86_SSE2 +# endif +# if defined(__ARM_NEON) || defined(_M_ARM64) +# define ZSTD_ARCH_ARM_NEON +# endif +# +# if defined(ZSTD_ARCH_X86_SSE2) +# include <emmintrin.h> +# elif defined(ZSTD_ARCH_ARM_NEON) +# include <arm_neon.h> +# endif #endif -/* compat. with non-clang compilers */ -#ifndef __has_feature -# define __has_feature(x) 0 +/* C-language Attributes are added in C23. */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute) +# define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x) +#else +# define ZSTD_HAS_C_ATTRIBUTE(x) 0 #endif -/* detects whether we are being compiled under msan */ -#ifndef ZSTD_MEMORY_SANITIZER -# if __has_feature(memory_sanitizer) -# define ZSTD_MEMORY_SANITIZER 1 -# else -# define ZSTD_MEMORY_SANITIZER 0 -# endif +/* Only use C++ attributes in C++. Some compilers report support for C++ + * attributes when compiling with C. + */ +#if defined(__cplusplus) && defined(__has_cpp_attribute) +# define ZSTD_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +# define ZSTD_HAS_CPP_ATTRIBUTE(x) 0 #endif +/* Define ZSTD_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute. + * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough + * - CPP17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough + * - Else: __attribute__((__fallthrough__)) + */ +#ifndef ZSTD_FALLTHROUGH +# if ZSTD_HAS_C_ATTRIBUTE(fallthrough) +# define ZSTD_FALLTHROUGH [[fallthrough]] +# elif ZSTD_HAS_CPP_ATTRIBUTE(fallthrough) +# define ZSTD_FALLTHROUGH [[fallthrough]] +# elif __has_attribute(__fallthrough__) +/* Leading semicolon is to satisfy gcc-11 with -pedantic. Without the semicolon + * gcc complains about: a label can only be part of a statement and a declaration is not a statement. 
+ */ +# define ZSTD_FALLTHROUGH ; __attribute__((__fallthrough__)) +# else +# define ZSTD_FALLTHROUGH +# endif +#endif + +/*-************************************************************** +* Alignment check +*****************************************************************/ + +/* this test was initially positioned in mem.h, + * but this file is removed (or replaced) for linux kernel + * so it's now hosted in compiler.h, + * which remains valid for both user & kernel spaces. + */ + +#ifndef ZSTD_ALIGNOF +# if defined(__GNUC__) || defined(_MSC_VER) +/* covers gcc, clang & MSVC */ +/* note : this section must come first, before C11, + * due to a limitation in the kernel source generator */ +# define ZSTD_ALIGNOF(T) __alignof(T) + +# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) +/* C11 support */ +# include <stdalign.h> +# define ZSTD_ALIGNOF(T) alignof(T) + +# else +/* No known support for alignof() - imperfect backup */ +# define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T)) + +# endif +#endif /* ZSTD_ALIGNOF */ + +/*-************************************************************** +* Sanitizer +*****************************************************************/ + #if ZSTD_MEMORY_SANITIZER /* Not all platforms that support msan provide sanitizers/msan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to @@ -237,17 +294,6 @@ void __msan_poison(const volatile void *a, size_t size); intptr_t __msan_test_shadow(const volatile void *x, size_t size); #endif -/* detects whether we are being compiled under asan */ -#ifndef ZSTD_ADDRESS_SANITIZER -# if __has_feature(address_sanitizer) -# define ZSTD_ADDRESS_SANITIZER 1 -# elif defined(__SANITIZE_ADDRESS__) -# define ZSTD_ADDRESS_SANITIZER 1 -# else -# define ZSTD_ADDRESS_SANITIZER 0 -# endif -#endif - #if ZSTD_ADDRESS_SANITIZER /* Not all platforms that support asan provide sanitizers/asan_interface.h. * We therefore declare the functions we need ourselves, rather than trying to diff --git a/thirdparty/zstd/common/entropy_common.c b/thirdparty/zstd/common/entropy_common.c index 41cd69566b..4229b40c5e 100644 --- a/thirdparty/zstd/common/entropy_common.c +++ b/thirdparty/zstd/common/entropy_common.c @@ -43,8 +43,14 @@ static U32 FSE_ctz(U32 val) assert(val != 0); { # if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - return _BitScanForward(&r, val) ? 
(unsigned)r : 0; + if (val != 0) { + unsigned long r; + _BitScanForward(&r, val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } # elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ return __builtin_ctz(val); # elif defined(__ICCARM__) /* IAR Intrinsic */ @@ -217,7 +223,7 @@ static size_t FSE_readNCount_body_default( } #if DYNAMIC_BMI2 -TARGET_ATTRIBUTE("bmi2") static size_t FSE_readNCount_body_bmi2( +BMI2_TARGET_ATTRIBUTE static size_t FSE_readNCount_body_bmi2( short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, const void* headerBuffer, size_t hbSize) { @@ -299,7 +305,7 @@ HUF_readStats_body(BYTE* huffWeight, size_t hwSize, U32* rankStats, ZSTD_memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); weightTotal = 0; { U32 n; for (n=0; n<oSize; n++) { - if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected); + if (huffWeight[n] > HUF_TABLELOG_MAX) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; } } @@ -337,7 +343,7 @@ static size_t HUF_readStats_body_default(BYTE* huffWeight, size_t hwSize, U32* r } #if DYNAMIC_BMI2 -static TARGET_ATTRIBUTE("bmi2") size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats, +static BMI2_TARGET_ATTRIBUTE size_t HUF_readStats_body_bmi2(BYTE* huffWeight, size_t hwSize, U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) diff --git a/thirdparty/zstd/common/error_private.h b/thirdparty/zstd/common/error_private.h index 6d8b9f7763..007d81066a 100644 --- a/thirdparty/zstd/common/error_private.h +++ b/thirdparty/zstd/common/error_private.h @@ -22,6 +22,8 @@ extern "C" { * Dependencies ******************************************/ #include "../zstd_errors.h" /* enum list */ +#include "compiler.h" +#include "debug.h" #include "zstd_deps.h" /* size_t */ @@ -73,6 +75,83 @@ ERR_STATIC const char* ERR_getErrorName(size_t code) return ERR_getErrorString(ERR_getErrorCode(code)); } +/** + * Ignore: this is an internal helper. + * + * This is a helper function to help force C99-correctness during compilation. + * Under strict compilation modes, variadic macro arguments can't be empty. + * However, variadic function arguments can be. Using a function therefore lets + * us statically check that at least one (string) argument was passed, + * independent of the compilation flags. + */ +static INLINE_KEYWORD UNUSED_ATTR +void _force_has_format_string(const char *format, ...) { + (void)format; +} + +/** + * Ignore: this is an internal helper. + * + * We want to force this function invocation to be syntactically correct, but + * we don't want to force runtime evaluation of its arguments. + */ +#define _FORCE_HAS_FORMAT_STRING(...) \ + if (0) { \ + _force_has_format_string(__VA_ARGS__); \ + } + +#define ERR_QUOTE(str) #str + +/** + * Return the specified error if the condition evaluates to true. + * + * In debug modes, prints additional information. + * In order to do that (particularly, printing the conditional that failed), + * this can't just wrap RETURN_ERROR(). + */ +#define RETURN_ERROR_IF(cond, err, ...) \ + if (cond) { \ + RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ + __FILE__, __LINE__, ERR_QUOTE(cond), ERR_QUOTE(ERROR(err))); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return ERROR(err); \ + } + +/** + * Unconditionally return the specified error. 
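The `_force_has_format_string` helper above exists because C99 forbids an empty variadic macro argument list but permits an empty variadic function argument list; routing `__VA_ARGS__` through a never-executed call therefore makes the compiler prove a format string was passed, at zero runtime cost. A standalone sketch of the trick (`CHECKED_LOG` is a hypothetical macro, not part of zstd):

```c
#include <stdio.h>

static void force_has_format_string(const char* format, ...) { (void)format; }

/* The `if (0)` call is never executed, but it fails compilation
 * if the macro is invoked without at least one argument. */
#define CHECKED_LOG(...)                                 \
    do {                                                 \
        if (0) { force_has_format_string(__VA_ARGS__); } \
        printf(__VA_ARGS__);                             \
    } while (0)

int main(void)
{
    CHECKED_LOG("%d widgets\n", 42); /* OK: format string present */
    /* CHECKED_LOG();  <- would be rejected at compile time */
    return 0;
}
```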
+ * + * In debug modes, prints additional information. + */ +#define RETURN_ERROR(err, ...) \ + do { \ + RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ + __FILE__, __LINE__, ERR_QUOTE(ERROR(err))); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return ERROR(err); \ + } while(0); + +/** + * If the provided expression evaluates to an error code, returns that error code. + * + * In debug modes, prints additional information. + */ +#define FORWARD_IF_ERROR(err, ...) \ + do { \ + size_t const err_code = (err); \ + if (ERR_isError(err_code)) { \ + RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ + __FILE__, __LINE__, ERR_QUOTE(err), ERR_getErrorName(err_code)); \ + _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ + RAWLOG(3, ": " __VA_ARGS__); \ + RAWLOG(3, "\n"); \ + return err_code; \ + } \ + } while(0); + #if defined (__cplusplus) } #endif diff --git a/thirdparty/zstd/common/fse.h b/thirdparty/zstd/common/fse.h index 19dd4febcd..714bfd3e7f 100644 --- a/thirdparty/zstd/common/fse.h +++ b/thirdparty/zstd/common/fse.h @@ -336,8 +336,9 @@ size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); /* FSE_buildCTable_wksp() : * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` of `unsigned`. + * See FSE_buildCTable_wksp() for breakdown of workspace usage. */ -#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2))) +#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (((maxSymbolValue + 2) + (1ull << (tableLog)))/2 + sizeof(U64)/sizeof(U32) /* additional 8 bytes for potential table overwrite */) #define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)) size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); diff --git a/thirdparty/zstd/common/fse_decompress.c b/thirdparty/zstd/common/fse_decompress.c index f4ff58fa0a..a5a358015f 100644 --- a/thirdparty/zstd/common/fse_decompress.c +++ b/thirdparty/zstd/common/fse_decompress.c @@ -365,7 +365,7 @@ static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, co } #if DYNAMIC_BMI2 -TARGET_ATTRIBUTE("bmi2") static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) +BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize) { return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1); } diff --git a/thirdparty/zstd/common/huf.h b/thirdparty/zstd/common/huf.h index 3d47ced030..85518481ec 100644 --- a/thirdparty/zstd/common/huf.h +++ b/thirdparty/zstd/common/huf.h @@ -89,9 +89,9 @@ HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, /** HUF_compress4X_wksp() : * Same as HUF_compress2(), but uses externally allocated `workSpace`. 
- * `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */ -#define HUF_WORKSPACE_SIZE ((6 << 10) + 256) -#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32)) + * `workspace` must be at least as large as HUF_WORKSPACE_SIZE */ +#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */) +#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64)) HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, @@ -116,11 +116,11 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, /* *** Constants *** */ -#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ +#define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */ #define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */ #define HUF_SYMBOLVALUE_MAX 255 -#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#define HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ #if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) # error "HUF_TABLELOG_MAX is too large !" #endif @@ -136,15 +136,11 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, /* static allocation of HUF's Compression Table */ /* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */ -struct HUF_CElt_s { - U16 val; - BYTE nbBits; -}; /* typedef'd to HUF_CElt */ -typedef struct HUF_CElt_s HUF_CElt; /* consider it an incomplete type */ -#define HUF_CTABLE_SIZE_U32(maxSymbolValue) ((maxSymbolValue)+1) /* Use tables of U32, for proper alignment */ -#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32)) +typedef size_t HUF_CElt; /* consider it an incomplete type */ +#define HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */ +#define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t)) #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ - HUF_CElt name[HUF_CTABLE_SIZE_U32(maxSymbolValue)] /* no final ; */ + HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */ /* static allocation of HUF's DTable */ typedef U32 HUF_DTable; @@ -194,6 +190,7 @@ size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSym size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize); size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2); size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue); @@ -206,12 +203,13 @@ typedef enum { * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != 
HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. - * If preferRepeat then the old table will always be used if valid. */ + * If preferRepeat then the old table will always be used if valid. + * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. @@ -249,11 +247,10 @@ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize, * Loading a CTable saved with HUF_writeCTable() */ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights); -/** HUF_getNbBits() : +/** HUF_getNbBitsFromCTable() : * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX - * Note 1 : is not inlined, as HUF_CElt definition is private - * Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */ -U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue); + * Note 1 : is not inlined, as HUF_CElt definition is private */ +U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue); /* * HUF_decompress() does the following: @@ -305,18 +302,20 @@ size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* c /* ====================== */ size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog); -size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2); /** HUF_compress1X_repeat() : * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. * If it uses hufTable it does not modify hufTable or repeat. * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. - * If preferRepeat then the old table will always be used if valid. */ + * If preferRepeat then the old table will always be used if valid. 
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */ - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2); + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible); size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */ #ifndef HUF_FORCE_DECOMPRESS_X1 @@ -354,6 +353,9 @@ size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t ds #ifndef HUF_FORCE_DECOMPRESS_X2 size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); #endif +#ifndef HUF_FORCE_DECOMPRESS_X1 +size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2); +#endif #endif /* HUF_STATIC_LINKING_ONLY */ diff --git a/thirdparty/zstd/common/mem.h b/thirdparty/zstd/common/mem.h index 9f3b81ab9d..85581c3847 100644 --- a/thirdparty/zstd/common/mem.h +++ b/thirdparty/zstd/common/mem.h @@ -51,6 +51,8 @@ extern "C" { # include <stdint.h> /* intptr_t */ # endif typedef uint8_t BYTE; + typedef uint8_t U8; + typedef int8_t S8; typedef uint16_t U16; typedef int16_t S16; typedef uint32_t U32; @@ -63,6 +65,8 @@ extern "C" { # error "this implementation requires char to be exactly 8-bit type" #endif typedef unsigned char BYTE; + typedef unsigned char U8; + typedef signed char S8; #if USHRT_MAX != 65535 # error "this implementation requires short to be exactly 16-bit type" #endif @@ -153,8 +157,22 @@ MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } MEM_STATIC unsigned MEM_isLittleEndian(void) { +#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) + return 1; +#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) + return 0; +#elif defined(__clang__) && __LITTLE_ENDIAN__ + return 1; +#elif defined(__clang__) && __BIG_ENDIAN__ + return 0; +#elif defined(_MSC_VER) && (_M_AMD64 || _M_IX86) + return 1; +#elif defined(__DMC__) && defined(_M_IX86) + return 1; +#else const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */ return one.c[0]; +#endif } #if defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==2) diff --git a/thirdparty/zstd/common/pool.c b/thirdparty/zstd/common/pool.c index ea70b8b65a..2e37cdd73c 100644 --- a/thirdparty/zstd/common/pool.c +++ b/thirdparty/zstd/common/pool.c @@ -86,7 +86,7 @@ static void* POOL_thread(void* opaque) { { POOL_job const job = ctx->queue[ctx->queueHead]; ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize; ctx->numThreadsBusy++; - ctx->queueEmpty = ctx->queueHead == ctx->queueTail; + ctx->queueEmpty = (ctx->queueHead == ctx->queueTail); /* Unlock the mutex, signal a pusher, and run the job */ ZSTD_pthread_cond_signal(&ctx->queuePushCond); ZSTD_pthread_mutex_unlock(&ctx->queueMutex); @@ -105,6 +105,7 @@ static void* POOL_thread(void* opaque) { assert(0); /* Unreachable */ } +/* ZSTD_createThreadPool() : public access point */ POOL_ctx* ZSTD_createThreadPool(size_t numThreads) { return POOL_create (numThreads, 0); } @@ -114,7 +115,8 @@ 
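The rewritten `MEM_isLittleEndian()` in the mem.h hunk above now answers at compile time wherever the compiler publishes its byte order, keeping the union probe only as the portable fallback. A condensed sketch of the two styles (GCC/clang byte-order macros assumed; the probe works anywhere):

```c
#include <stdio.h>

static unsigned is_little_endian(void)
{
#if defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
    (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
    return 1; /* decided at compile time, branch folds away */
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
    (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    return 0;
#else
    /* Runtime probe: on a little-endian CPU the low byte comes first. */
    const union { unsigned u; unsigned char c[4]; } one = { 1 };
    return one.c[0];
#endif
}

int main(void)
{
    printf("little-endian: %u\n", is_little_endian());
    return 0;
}
```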
POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { } POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, - ZSTD_customMem customMem) { + ZSTD_customMem customMem) +{ POOL_ctx* ctx; /* Check parameters */ if (!numThreads) { return NULL; } @@ -192,7 +194,7 @@ void ZSTD_freeThreadPool (ZSTD_threadPool* pool) { POOL_free (pool); } -size_t POOL_sizeof(POOL_ctx *ctx) { +size_t POOL_sizeof(const POOL_ctx* ctx) { if (ctx==NULL) return 0; /* supports sizeof NULL */ return sizeof(*ctx) + ctx->queueSize * sizeof(POOL_job) @@ -257,7 +259,8 @@ static int isQueueFull(POOL_ctx const* ctx) { } -static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) +static void +POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque) { POOL_job const job = {function, opaque}; assert(ctx != NULL); @@ -313,7 +316,9 @@ POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) { return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem); } -POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) { +POOL_ctx* +POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) +{ (void)numThreads; (void)queueSize; (void)customMem; @@ -341,7 +346,7 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) { return 1; } -size_t POOL_sizeof(POOL_ctx* ctx) { +size_t POOL_sizeof(const POOL_ctx* ctx) { if (ctx==NULL) return 0; /* supports sizeof NULL */ assert(ctx == &g_poolCtx); return sizeof(*ctx); diff --git a/thirdparty/zstd/common/pool.h b/thirdparty/zstd/common/pool.h index e18aa0708f..0ebde1805d 100644 --- a/thirdparty/zstd/common/pool.h +++ b/thirdparty/zstd/common/pool.h @@ -53,7 +53,7 @@ int POOL_resize(POOL_ctx* ctx, size_t numThreads); * @return threadpool memory usage * note : compatible with NULL (returns 0 in this case) */ -size_t POOL_sizeof(POOL_ctx* ctx); +size_t POOL_sizeof(const POOL_ctx* ctx); /*! POOL_function : * The function type that can be added to a thread pool. @@ -70,7 +70,7 @@ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque); /*! POOL_tryAdd() : - * Add the job `function(opaque)` to thread pool _if_ a worker is available. + * Add the job `function(opaque)` to thread pool _if_ a queue slot is available. * Returns immediately even if not (does not block). * @return : 1 if successful, 0 if not. */ diff --git a/thirdparty/zstd/common/portability_macros.h b/thirdparty/zstd/common/portability_macros.h new file mode 100644 index 0000000000..2143817f57 --- /dev/null +++ b/thirdparty/zstd/common/portability_macros.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#ifndef ZSTD_PORTABILITY_MACROS_H +#define ZSTD_PORTABILITY_MACROS_H + +/** + * This header file contains macro definitions to support portability. + * This header is shared between C and ASM code, so it MUST only + * contain macro definitions. It MUST not contain any C code. + * + * This header ONLY defines macros to detect platforms/feature support. + * + */ + + +/* compat. with non-clang compilers */ +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif + +/* compat.
with non-clang compilers */ +#ifndef __has_builtin +# define __has_builtin(x) 0 +#endif + +/* compat. with non-clang compilers */ +#ifndef __has_feature +# define __has_feature(x) 0 +#endif + +/* detects whether we are being compiled under msan */ +#ifndef ZSTD_MEMORY_SANITIZER +# if __has_feature(memory_sanitizer) +# define ZSTD_MEMORY_SANITIZER 1 +# else +# define ZSTD_MEMORY_SANITIZER 0 +# endif +#endif + +/* detects whether we are being compiled under asan */ +#ifndef ZSTD_ADDRESS_SANITIZER +# if __has_feature(address_sanitizer) +# define ZSTD_ADDRESS_SANITIZER 1 +# elif defined(__SANITIZE_ADDRESS__) +# define ZSTD_ADDRESS_SANITIZER 1 +# else +# define ZSTD_ADDRESS_SANITIZER 0 +# endif +#endif + +/* detects whether we are being compiled under dfsan */ +#ifndef ZSTD_DATAFLOW_SANITIZER +# if __has_feature(dataflow_sanitizer) +# define ZSTD_DATAFLOW_SANITIZER 1 +# else +# define ZSTD_DATAFLOW_SANITIZER 0 +# endif +#endif + +/* Mark the internal assembly functions as hidden */ +#ifdef __ELF__ +# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func +#else +# define ZSTD_HIDE_ASM_FUNCTION(func) +#endif + +/* Enable runtime BMI2 dispatch based on the CPU. + * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default. + */ +#ifndef DYNAMIC_BMI2 + #if ((defined(__clang__) && __has_attribute(__target__)) \ + || (defined(__GNUC__) \ + && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \ + && (defined(__x86_64__) || defined(_M_X64)) \ + && !defined(__BMI2__) + # define DYNAMIC_BMI2 1 + #else + # define DYNAMIC_BMI2 0 + #endif +#endif + +/** + * Only enable assembly for GNUC compatible compilers, + * because other platforms may not support GAS assembly syntax. + * + * Only enable assembly for Linux / MacOS, other platforms may + * work, but they haven't been tested. This could likely be + * extended to BSD systems. + * + * Disable assembly when MSAN is enabled, because MSAN requires + * 100% of code to be instrumented to work. + */ +#if defined(__GNUC__) +# if defined(__linux__) || defined(__linux) || defined(__APPLE__) +# if ZSTD_MEMORY_SANITIZER +# define ZSTD_ASM_SUPPORTED 0 +# elif ZSTD_DATAFLOW_SANITIZER +# define ZSTD_ASM_SUPPORTED 0 +# else +# define ZSTD_ASM_SUPPORTED 1 +# endif +# else +# define ZSTD_ASM_SUPPORTED 0 +# endif +#else +# define ZSTD_ASM_SUPPORTED 0 +#endif + +/** + * Determines whether we should enable assembly for x86-64 + * with BMI2. + * + * Enable if all of the following conditions hold: + * - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM + * - Assembly is supported + * - We are compiling for x86-64 and either: + * - DYNAMIC_BMI2 is enabled + * - BMI2 is supported at compile time + */ +#if !defined(ZSTD_DISABLE_ASM) && \ + ZSTD_ASM_SUPPORTED && \ + defined(__x86_64__) && \ + (DYNAMIC_BMI2 || defined(__BMI2__)) +# define ZSTD_ENABLE_ASM_X86_64_BMI2 1 +#else +# define ZSTD_ENABLE_ASM_X86_64_BMI2 0 +#endif + +#endif /* ZSTD_PORTABILITY_MACROS_H */ diff --git a/thirdparty/zstd/common/xxhash.c b/thirdparty/zstd/common/xxhash.c index 926b33604e..d49497cf1c 100644 --- a/thirdparty/zstd/common/xxhash.c +++ b/thirdparty/zstd/common/xxhash.c @@ -5,7 +5,7 @@ * You can contact the author at : * - xxHash homepage: http://www.xxhash.com * - xxHash source repository : https://github.com/Cyan4973/xxHash - * + * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree).
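The new portability_macros.h centralizes the DYNAMIC_BMI2 and ZSTD_ENABLE_ASM_X86_64_BMI2 decisions quoted above. What DYNAMIC_BMI2 ultimately enables is a compile-twice, pick-at-runtime pattern: the hot routine is built once normally and once with BMI2 codegen allowed, and a CPUID probe selects between them. A hedged sketch of that skeleton, assuming a recent GCC or Clang on x86-64 (function names are illustrative, not zstd's):

#include <stdio.h>

#if defined(__GNUC__) && defined(__x86_64__)
__attribute__((target("bmi2")))          /* compiler may emit BMI2 in this body */
static unsigned count_bmi2(unsigned long long v)
{ return (unsigned)__builtin_popcountll(v); }
#endif

static unsigned count_default(unsigned long long v)   /* portable fallback */
{ return (unsigned)__builtin_popcountll(v); }

int main(void)
{
    unsigned (*count)(unsigned long long) = count_default;
#if defined(__GNUC__) && defined(__x86_64__)
    if (__builtin_cpu_supports("bmi2"))  /* runtime CPUID probe */
        count = count_bmi2;
#endif
    printf("%u\n", count(0xF0F0ULL));    /* prints 8 on either path */
    return 0;
}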
@@ -13,812 +13,12 @@ */ -/* ************************************* -* Tuning parameters -***************************************/ -/*!XXH_FORCE_MEMORY_ACCESS : - * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable. - * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal. - * The below switch allow to select different access method for improved performance. - * Method 0 (default) : use `memcpy()`. Safe and portable. - * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable). - * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`. - * Method 2 : direct access. This method doesn't depend on compiler but violate C standard. - * It can generate buggy code on targets which do not support unaligned memory accesses. - * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6) - * See http://stackoverflow.com/a/32095106/646947 for details. - * Prefer these methods in priority order (0 > 1 > 2) - */ -#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ -# if (defined(__INTEL_COMPILER) && !defined(WIN32)) || \ - (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \ - defined(__ICCARM__) -# define XXH_FORCE_MEMORY_ACCESS 1 -# endif -#endif - -/*!XXH_ACCEPT_NULL_INPUT_POINTER : - * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. - * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. - * By default, this option is disabled. To enable it, uncomment below define : - */ -/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ - -/*!XXH_FORCE_NATIVE_FORMAT : - * By default, xxHash library provides endian-independent Hash values, based on little-endian convention. - * Results are therefore identical for little-endian and big-endian CPU. - * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. - * Should endian-independence be of no importance for your application, you may set the #define below to 1, - * to improve speed for Big-endian CPU. - * This option has no impact on Little_Endian CPU. - */ -#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */ -# define XXH_FORCE_NATIVE_FORMAT 0 -#endif -/*!XXH_FORCE_ALIGN_CHECK : - * This is a minor performance trick, only useful with lots of very small keys. - * It means : check for aligned/unaligned input. - * The check costs one initial branch per hash; set to 0 when the input data - * is guaranteed to be aligned. 
+/* + * xxhash.c instantiates functions defined in xxhash.h */ -#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ -# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) -# define XXH_FORCE_ALIGN_CHECK 0 -# else -# define XXH_FORCE_ALIGN_CHECK 1 -# endif -#endif - -/* ************************************* -* Includes & Memory related functions -***************************************/ -/* Modify the local functions below should you wish to use some other memory routines */ -/* for ZSTD_malloc(), ZSTD_free() */ -#define ZSTD_DEPS_NEED_MALLOC -#include "zstd_deps.h" /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */ -static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); } -static void XXH_free (void* p) { ZSTD_free(p); } -static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); } +#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */ +#define XXH_IMPLEMENTATION /* access definitions */ -#ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY -#endif #include "xxhash.h" - - -/* ************************************* -* Compiler Specific Options -***************************************/ -#include "compiler.h" - - -/* ************************************* -* Basic Types -***************************************/ -#include "mem.h" /* BYTE, U32, U64, size_t */ - -#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) - -/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ -static U32 XXH_read32(const void* memPtr) { return *(const U32*) memPtr; } -static U64 XXH_read64(const void* memPtr) { return *(const U64*) memPtr; } - -#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) - -/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */ -/* currently only defined for gcc and icc */ -typedef union { U32 u32; U64 u64; } __attribute__((packed)) unalign; - -static U32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; } -static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; } - -#else - -/* portable and safe solution. Generally efficient. 
- * see : http://stackoverflow.com/a/32095106/646947 - */ - -static U32 XXH_read32(const void* memPtr) -{ - U32 val; - ZSTD_memcpy(&val, memPtr, sizeof(val)); - return val; -} - -static U64 XXH_read64(const void* memPtr) -{ - U64 val; - ZSTD_memcpy(&val, memPtr, sizeof(val)); - return val; -} - -#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ - - -/* **************************************** -* Compiler-specific Functions and Macros -******************************************/ -#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) - -/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */ -#if defined(_MSC_VER) -# define XXH_rotl32(x,r) _rotl(x,r) -# define XXH_rotl64(x,r) _rotl64(x,r) -#else -#if defined(__ICCARM__) -# include <intrinsics.h> -# define XXH_rotl32(x,r) __ROR(x,(32 - r)) -#else -# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) -#endif -# define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) -#endif - -#if defined(_MSC_VER) /* Visual Studio */ -# define XXH_swap32 _byteswap_ulong -# define XXH_swap64 _byteswap_uint64 -#elif GCC_VERSION >= 403 -# define XXH_swap32 __builtin_bswap32 -# define XXH_swap64 __builtin_bswap64 -#else -static U32 XXH_swap32 (U32 x) -{ - return ((x << 24) & 0xff000000 ) | - ((x << 8) & 0x00ff0000 ) | - ((x >> 8) & 0x0000ff00 ) | - ((x >> 24) & 0x000000ff ); -} -static U64 XXH_swap64 (U64 x) -{ - return ((x << 56) & 0xff00000000000000ULL) | - ((x << 40) & 0x00ff000000000000ULL) | - ((x << 24) & 0x0000ff0000000000ULL) | - ((x << 8) & 0x000000ff00000000ULL) | - ((x >> 8) & 0x00000000ff000000ULL) | - ((x >> 24) & 0x0000000000ff0000ULL) | - ((x >> 40) & 0x000000000000ff00ULL) | - ((x >> 56) & 0x00000000000000ffULL); -} -#endif - - -/* ************************************* -* Architecture Macros -***************************************/ -typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; - -/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ -#ifndef XXH_CPU_LITTLE_ENDIAN - static const int g_one = 1; -# define XXH_CPU_LITTLE_ENDIAN (*(const char*)(&g_one)) -#endif - - -/* *************************** -* Memory reads -*****************************/ -typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; - -FORCE_INLINE_TEMPLATE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); - else - return endian==XXH_littleEndian ? *(const U32*)ptr : XXH_swap32(*(const U32*)ptr); -} - -FORCE_INLINE_TEMPLATE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE32_align(ptr, endian, XXH_unaligned); -} - -static U32 XXH_readBE32(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); -} - -FORCE_INLINE_TEMPLATE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) -{ - if (align==XXH_unaligned) - return endian==XXH_littleEndian ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); - else - return endian==XXH_littleEndian ? *(const U64*)ptr : XXH_swap64(*(const U64*)ptr); -} - -FORCE_INLINE_TEMPLATE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) -{ - return XXH_readLE64_align(ptr, endian, XXH_unaligned); -} - -static U64 XXH_readBE64(const void* ptr) -{ - return XXH_CPU_LITTLE_ENDIAN ? 
XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); -} - - -/* ************************************* -* Macros -***************************************/ -#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ - - -/* ************************************* -* Constants -***************************************/ -static const U32 PRIME32_1 = 2654435761U; -static const U32 PRIME32_2 = 2246822519U; -static const U32 PRIME32_3 = 3266489917U; -static const U32 PRIME32_4 = 668265263U; -static const U32 PRIME32_5 = 374761393U; - -static const U64 PRIME64_1 = 11400714785074694791ULL; -static const U64 PRIME64_2 = 14029467366897019727ULL; -static const U64 PRIME64_3 = 1609587929392839161ULL; -static const U64 PRIME64_4 = 9650029242287828579ULL; -static const U64 PRIME64_5 = 2870177450012600261ULL; - -XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } - - -/* ************************** -* Utils -****************************/ -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState) -{ - ZSTD_memcpy(dstState, srcState, sizeof(*dstState)); -} - -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState) -{ - ZSTD_memcpy(dstState, srcState, sizeof(*dstState)); -} - - -/* *************************** -* Simple Hash Functions -*****************************/ - -static U32 XXH32_round(U32 seed, U32 input) -{ - seed += input * PRIME32_2; - seed = XXH_rotl32(seed, 13); - seed *= PRIME32_1; - return seed; -} - -FORCE_INLINE_TEMPLATE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* bEnd = p + len; - U32 h32; -#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align) - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)16; - } -#endif - - if (len>=16) { - const BYTE* const limit = bEnd - 16; - U32 v1 = seed + PRIME32_1 + PRIME32_2; - U32 v2 = seed + PRIME32_2; - U32 v3 = seed + 0; - U32 v4 = seed - PRIME32_1; - - do { - v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; - v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; - v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; - v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; - } while (p<=limit); - - h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); - } else { - h32 = seed + PRIME32_5; - } - - h32 += (U32) len; - - while (p+4<=bEnd) { - h32 += XXH_get32bits(p) * PRIME32_3; - h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; - p+=4; - } - - while (p<bEnd) { - h32 += (*p) * PRIME32_5; - h32 = XXH_rotl32(h32, 11) * PRIME32_1 ; - p++; - } - - h32 ^= h32 >> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - - return h32; -} - - -XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH32_CREATESTATE_STATIC(state); - XXH32_reset(state, seed); - XXH32_update(state, input, len); - return XXH32_digest(state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, 
XXH_aligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - - -static U64 XXH64_round(U64 acc, U64 input) -{ - acc += input * PRIME64_2; - acc = XXH_rotl64(acc, 31); - acc *= PRIME64_1; - return acc; -} - -static U64 XXH64_mergeRound(U64 acc, U64 val) -{ - val = XXH64_round(0, val); - acc ^= val; - acc = acc * PRIME64_1 + PRIME64_4; - return acc; -} - -FORCE_INLINE_TEMPLATE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - U64 h64; -#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (p==NULL) { - len=0; - bEnd=p=(const BYTE*)(size_t)32; - } -#endif - - if (len>=32) { - const BYTE* const limit = bEnd - 32; - U64 v1 = seed + PRIME64_1 + PRIME64_2; - U64 v2 = seed + PRIME64_2; - U64 v3 = seed + 0; - U64 v4 = seed - PRIME64_1; - - do { - v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; - v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; - v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; - v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; - } while (p<=limit); - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - - } else { - h64 = seed + PRIME64_5; - } - - h64 += (U64) len; - - while (p+8<=bEnd) { - U64 const k1 = XXH64_round(0, XXH_get64bits(p)); - h64 ^= k1; - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; - p+=8; - } - - if (p+4<=bEnd) { - h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - p+=4; - } - - while (p<bEnd) { - h64 ^= (*p) * PRIME64_5; - h64 = XXH_rotl64(h64, 11) * PRIME64_1; - p++; - } - - h64 ^= h64 >> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - - return h64; -} - - -XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) -{ -#if 0 - /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ - XXH64_CREATESTATE_STATIC(state); - XXH64_reset(state, seed); - XXH64_update(state, input, len); - return XXH64_digest(state); -#else - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if (XXH_FORCE_ALIGN_CHECK) { - if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); - } } - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); - else - return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); -#endif -} - - -/* ************************************************** -* Advanced Hash Functions -****************************************************/ - -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) -{ - return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); -} -XXH_PUBLIC_API XXH_errorcode 
XXH32_freeState(XXH32_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) -{ - return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); -} -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) -{ - XXH_free(statePtr); - return XXH_OK; -} - - -/*** Hash feed ***/ - -XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) -{ - XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - ZSTD_memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ - state.v1 = seed + PRIME32_1 + PRIME32_2; - state.v2 = seed + PRIME32_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME32_1; - ZSTD_memcpy(statePtr, &state, sizeof(state)); - return XXH_OK; -} - - -XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) -{ - XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ - ZSTD_memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ - state.v1 = seed + PRIME64_1 + PRIME64_2; - state.v2 = seed + PRIME64_2; - state.v3 = seed + 0; - state.v4 = seed - PRIME64_1; - ZSTD_memcpy(statePtr, &state, sizeof(state)); - return XXH_OK; -} - - -FORCE_INLINE_TEMPLATE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (input==NULL) return XXH_ERROR; -#endif - - state->total_len_32 += (unsigned)len; - state->large_len |= (len>=16) | (state->total_len_32>=16); - - if (state->memsize + len < 16) { /* fill in tmp buffer */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); - state->memsize += (unsigned)len; - return XXH_OK; - } - - if (state->memsize) { /* some data left from previous update */ - XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); - { const U32* p32 = state->mem32; - state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; - state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; - state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; - state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; - } - p += 16-state->memsize; - state->memsize = 0; - } - - if (p <= bEnd-16) { - const BYTE* const limit = bEnd - 16; - U32 v1 = state->v1; - U32 v2 = state->v2; - U32 v3 = state->v3; - U32 v4 = state->v4; - - do { - v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; - v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; - v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; - v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH32_update_endian(state_in, input, len, XXH_bigEndian); -} - - - -FORCE_INLINE_TEMPLATE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess 
endian) -{ - const BYTE * p = (const BYTE*)state->mem32; - const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; - U32 h32; - - if (state->large_len) { - h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); - } else { - h32 = state->v3 /* == seed */ + PRIME32_5; - } - - h32 += state->total_len_32; - - while (p+4<=bEnd) { - h32 += XXH_readLE32(p, endian) * PRIME32_3; - h32 = XXH_rotl32(h32, 17) * PRIME32_4; - p+=4; - } - - while (p<bEnd) { - h32 += (*p) * PRIME32_5; - h32 = XXH_rotl32(h32, 11) * PRIME32_1; - p++; - } - - h32 ^= h32 >> 15; - h32 *= PRIME32_2; - h32 ^= h32 >> 13; - h32 *= PRIME32_3; - h32 ^= h32 >> 16; - - return h32; -} - - -XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH32_digest_endian(state_in, XXH_littleEndian); - else - return XXH32_digest_endian(state_in, XXH_bigEndian); -} - - - -/* **** XXH64 **** */ - -FORCE_INLINE_TEMPLATE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) -{ - const BYTE* p = (const BYTE*)input; - const BYTE* const bEnd = p + len; - -#ifdef XXH_ACCEPT_NULL_INPUT_POINTER - if (input==NULL) return XXH_ERROR; -#endif - - state->total_len += len; - - if (state->memsize + len < 32) { /* fill in tmp buffer */ - if (input != NULL) { - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); - } - state->memsize += (U32)len; - return XXH_OK; - } - - if (state->memsize) { /* tmp buffer is full */ - XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); - state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); - state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); - state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); - state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); - p += 32-state->memsize; - state->memsize = 0; - } - - if (p+32 <= bEnd) { - const BYTE* const limit = bEnd - 32; - U64 v1 = state->v1; - U64 v2 = state->v2; - U64 v3 = state->v3; - U64 v4 = state->v4; - - do { - v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; - v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; - v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; - v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; - } while (p<=limit); - - state->v1 = v1; - state->v2 = v2; - state->v3 = v3; - state->v4 = v4; - } - - if (p < bEnd) { - XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); - state->memsize = (unsigned)(bEnd-p); - } - - return XXH_OK; -} - -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_update_endian(state_in, input, len, XXH_littleEndian); - else - return XXH64_update_endian(state_in, input, len, XXH_bigEndian); -} - - - -FORCE_INLINE_TEMPLATE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) -{ - const BYTE * p = (const BYTE*)state->mem64; - const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; - U64 h64; - - if (state->total_len >= 32) { - U64 const v1 = state->v1; - U64 const v2 = state->v2; - U64 const v3 = state->v3; - U64 const v4 = state->v4; - - h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 
7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); - h64 = XXH64_mergeRound(h64, v1); - h64 = XXH64_mergeRound(h64, v2); - h64 = XXH64_mergeRound(h64, v3); - h64 = XXH64_mergeRound(h64, v4); - } else { - h64 = state->v3 + PRIME64_5; - } - - h64 += (U64) state->total_len; - - while (p+8<=bEnd) { - U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); - h64 ^= k1; - h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; - p+=8; - } - - if (p+4<=bEnd) { - h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; - h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; - p+=4; - } - - while (p<bEnd) { - h64 ^= (*p) * PRIME64_5; - h64 = XXH_rotl64(h64, 11) * PRIME64_1; - p++; - } - - h64 ^= h64 >> 33; - h64 *= PRIME64_2; - h64 ^= h64 >> 29; - h64 *= PRIME64_3; - h64 ^= h64 >> 32; - - return h64; -} - - -XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) -{ - XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; - - if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) - return XXH64_digest_endian(state_in, XXH_littleEndian); - else - return XXH64_digest_endian(state_in, XXH_bigEndian); -} - - -/* ************************** -* Canonical representation -****************************/ - -/*! Default XXH result types are basic unsigned 32 and 64 bits. -* The canonical representation follows human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. -*/ - -XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); - ZSTD_memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) -{ - XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); - if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); - ZSTD_memcpy(dst, &hash, sizeof(*dst)); -} - -XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) -{ - return XXH_readBE32(src); -} - -XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) -{ - return XXH_readBE64(src); -} diff --git a/thirdparty/zstd/common/xxhash.h b/thirdparty/zstd/common/xxhash.h index 16c1f1617b..8ebbfdd626 100644 --- a/thirdparty/zstd/common/xxhash.h +++ b/thirdparty/zstd/common/xxhash.h @@ -1,20 +1,36 @@ /* - * xxHash - Extremely Fast Hash algorithm - * Header File - * Copyright (c) Yann Collet, Facebook, Inc. + * xxHash - Fast Hash algorithm + * Copyright (c) Yann Collet, Facebook, Inc. + * + * You can contact the author at : + * - xxHash homepage: http://www.xxhash.com + * - xxHash source repository : https://github.com/Cyan4973/xxHash * - * You can contact the author at : - * - xxHash source repository : https://github.com/Cyan4973/xxHash - * * This source code is licensed under both the BSD-style license (found in the * LICENSE file in the root directory of this source tree) and the GPLv2 (found * in the COPYING file in the root directory of this source tree). * You may select, at your option, one of the above-listed licenses. */ -/* Notice extracted from xxHash homepage : -xxHash is an extremely fast Hash algorithm, running at RAM speed limits. 
+#ifndef XXH_NO_XXH3 +# define XXH_NO_XXH3 +#endif + +#ifndef XXH_NAMESPACE +# define XXH_NAMESPACE ZSTD_ +#endif + +/*! + * @mainpage xxHash + * + * @file xxhash.h + * xxHash prototypes and implementation + */ +/* TODO: update */ +/* Notice extracted from xxHash homepage: + +xxHash is an extremely fast hash algorithm, running at RAM speed limits. It also successfully passes all tests from the SMHasher suite. Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) @@ -22,7 +38,7 @@ Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo Name Speed Q.Score Author xxHash 5.4 GB/s 10 CrapWow 3.2 GB/s 2 Andrew -MumurHash 3a 2.7 GB/s 10 Austin Appleby +MurmurHash 3a 2.7 GB/s 10 Austin Appleby SpookyHash 2.0 GB/s 10 Bob Jenkins SBox 1.4 GB/s 9 Bret Mulvey Lookup3 1.2 GB/s 9 Bob Jenkins @@ -37,8 +53,13 @@ Q.Score is a measure of quality of the hash function. It depends on successfully passing SMHasher test set. 10 is a perfect score. -A 64-bits version, named XXH64, is available since r35. -It offers much better speed, but for 64-bits applications only. +Note: SMHasher's CRC32 implementation is not the fastest one. +Other speed-oriented implementations can be faster, +especially in combination with PCLMUL instruction: +https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735 + +A 64-bit version, named XXH64, is available since r35. +It offers much better speed, but for 64-bit applications only. Name Speed on 64 bits Speed on 32 bits XXH64 13.8 GB/s 1.9 GB/s XXH32 6.8 GB/s 6.0 GB/s @@ -48,33 +69,34 @@ XXH32 6.8 GB/s 6.0 GB/s extern "C" { #endif -#ifndef XXHASH_H_5627135585666179 -#define XXHASH_H_5627135585666179 1 - - -/* **************************** -* Definitions -******************************/ -#include "zstd_deps.h" -typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; - - /* **************************** -* API modifier -******************************/ -/** XXH_PRIVATE_API -* This is useful if you want to include xxhash functions in `static` mode -* in order to inline them, and remove their symbol from the public list. -* Methodology : -* #define XXH_PRIVATE_API -* #include "xxhash.h" -* `xxhash.c` is automatically included. -* It's not useful to compile and link it as a separate module anymore. -*/ -#ifdef XXH_PRIVATE_API -# ifndef XXH_STATIC_LINKING_ONLY -# define XXH_STATIC_LINKING_ONLY -# endif + * INLINE mode + ******************************/ +/*! + * XXH_INLINE_ALL (and XXH_PRIVATE_API) + * Use these build macros to inline xxhash into the target unit. + * Inlining improves performance on small inputs, especially when the length is + * expressed as a compile-time constant: + * + * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html + * + * It also keeps xxHash symbols private to the unit, so they are not exported. + * + * Usage: + * #define XXH_INLINE_ALL + * #include "xxhash.h" + * + * Do not compile and link xxhash.o as a separate object, as it is not useful. 
+ */ +#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ + && !defined(XXH_INLINE_ALL_31684351384) + /* this section should be traversed only once */ +# define XXH_INLINE_ALL_31684351384 + /* give access to the advanced API, required to compile implementations */ +# undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ +# define XXH_STATIC_LINKING_ONLY + /* make all functions private */ +# undef XXH_PUBLIC_API # if defined(__GNUC__) # define XXH_PUBLIC_API static __inline __attribute__((unused)) # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) @@ -82,45 +104,205 @@ typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; # elif defined(_MSC_VER) # define XXH_PUBLIC_API static __inline # else -# define XXH_PUBLIC_API static /* this version may generate warnings for unused static functions; disable the relevant warning */ + /* note: this version may generate warnings for unused static functions */ +# define XXH_PUBLIC_API static # endif -#else -# define XXH_PUBLIC_API /* do nothing */ -#endif /* XXH_PRIVATE_API */ -/*!XXH_NAMESPACE, aka Namespace Emulation : + /* + * This part deals with the special case where a unit wants to inline xxHash, + * but "xxhash.h" has previously been included without XXH_INLINE_ALL, + * such as part of some previously included *.h header file. + * Without further action, the new include would just be ignored, + * and functions would effectively _not_ be inlined (silent failure). + * The following macros solve this situation by prefixing all inlined names, + * avoiding naming collision with previous inclusions. + */ + /* Before that, we unconditionally #undef all symbols, + * in case they were already defined with XXH_NAMESPACE. + * They will then be redefined for XXH_INLINE_ALL + */ +# undef XXH_versionNumber + /* XXH32 */ +# undef XXH32 +# undef XXH32_createState +# undef XXH32_freeState +# undef XXH32_reset +# undef XXH32_update +# undef XXH32_digest +# undef XXH32_copyState +# undef XXH32_canonicalFromHash +# undef XXH32_hashFromCanonical + /* XXH64 */ +# undef XXH64 +# undef XXH64_createState +# undef XXH64_freeState +# undef XXH64_reset +# undef XXH64_update +# undef XXH64_digest +# undef XXH64_copyState +# undef XXH64_canonicalFromHash +# undef XXH64_hashFromCanonical + /* XXH3_64bits */ +# undef XXH3_64bits +# undef XXH3_64bits_withSecret +# undef XXH3_64bits_withSeed +# undef XXH3_64bits_withSecretandSeed +# undef XXH3_createState +# undef XXH3_freeState +# undef XXH3_copyState +# undef XXH3_64bits_reset +# undef XXH3_64bits_reset_withSeed +# undef XXH3_64bits_reset_withSecret +# undef XXH3_64bits_update +# undef XXH3_64bits_digest +# undef XXH3_generateSecret + /* XXH3_128bits */ +# undef XXH128 +# undef XXH3_128bits +# undef XXH3_128bits_withSeed +# undef XXH3_128bits_withSecret +# undef XXH3_128bits_reset +# undef XXH3_128bits_reset_withSeed +# undef XXH3_128bits_reset_withSecret +# undef XXH3_128bits_reset_withSecretandSeed +# undef XXH3_128bits_update +# undef XXH3_128bits_digest +# undef XXH128_isEqual +# undef XXH128_cmp +# undef XXH128_canonicalFromHash +# undef XXH128_hashFromCanonical + /* Finally, free the namespace itself */ +# undef XXH_NAMESPACE -If you want to include _and expose_ xxHash functions from within your own library, -but also want to avoid symbol collisions with another library which also includes xxHash, + /* employ the namespace for XXH_INLINE_ALL */ +# define XXH_NAMESPACE XXH_INLINE_ + /* + * Some identifiers (enums, type names) are not symbols, + * but they must 
nonetheless be renamed to avoid redeclaration. + * Alternative solution: do not redeclare them. + * However, this requires some #ifdefs, and has a more dispersed impact. + * Meanwhile, renaming can be achieved in a single place. + */ +# define XXH_IPREF(Id) XXH_NAMESPACE ## Id +# define XXH_OK XXH_IPREF(XXH_OK) +# define XXH_ERROR XXH_IPREF(XXH_ERROR) +# define XXH_errorcode XXH_IPREF(XXH_errorcode) +# define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t) +# define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t) +# define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t) +# define XXH32_state_s XXH_IPREF(XXH32_state_s) +# define XXH32_state_t XXH_IPREF(XXH32_state_t) +# define XXH64_state_s XXH_IPREF(XXH64_state_s) +# define XXH64_state_t XXH_IPREF(XXH64_state_t) +# define XXH3_state_s XXH_IPREF(XXH3_state_s) +# define XXH3_state_t XXH_IPREF(XXH3_state_t) +# define XXH128_hash_t XXH_IPREF(XXH128_hash_t) + /* Ensure the header is parsed again, even if it was previously included */ +# undef XXHASH_H_5627135585666179 +# undef XXHASH_H_STATIC_13879238742 +#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */ -you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library -with the value of XXH_NAMESPACE (so avoid to keep it NULL and avoid numeric values). -Note that no change is required within the calling program as long as it includes `xxhash.h` : -regular symbol name will be automatically translated by this header. -*/ + +/* **************************************************************** + * Stable API + *****************************************************************/ +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + + +/*! + * @defgroup public Public API + * Contains details on the public xxHash functions. + * @{ + */ +/* specific declaration modes for Windows */ +#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API) +# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT)) +# ifdef XXH_EXPORT +# define XXH_PUBLIC_API __declspec(dllexport) +# elif XXH_IMPORT +# define XXH_PUBLIC_API __declspec(dllimport) +# endif +# else +# define XXH_PUBLIC_API /* do nothing */ +# endif +#endif + +#ifdef XXH_DOXYGEN +/*! + * @brief Emulate a namespace by transparently prefixing all symbols. + * + * If you want to include _and expose_ xxHash functions from within your own + * library, but also want to avoid symbol collisions with other libraries which + * may also include xxHash, you can use XXH_NAMESPACE to automatically prefix + * any public symbol from xxhash library with the value of XXH_NAMESPACE + * (therefore, avoid empty or numeric values). + * + * Note that no change is required within the calling program as long as it + * includes `xxhash.h`: Regular symbol names will be automatically translated + * by this header. 
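This namespacing is what lets zstd carry a private copy of xxHash: the diff sets XXH_NAMESPACE to ZSTD_, so the vendored symbols become ZSTD_XXH64 and friends while call sites stay unchanged. A standalone sketch of the inline/namespace mechanics described above, assuming xxhash.h is on the include path (an illustrative aside, not taken from the diff):

/* Inline xxHash into this unit; functions become static and are internally
 * prefixed (XXH_INLINE_XXH32 etc.), so they cannot collide with another
 * copy of xxHash linked elsewhere. Call sites still say XXH32()/XXH64(). */
#define XXH_INLINE_ALL
#include "xxhash.h"
#include <stdio.h>

int main(void)
{
    const char msg[] = "Hello, xxHash";
    printf("XXH32: %08x\n",    (unsigned)XXH32(msg, sizeof(msg)-1, 0));
    printf("XXH64: %016llx\n", (unsigned long long)XXH64(msg, sizeof(msg)-1, 0));
    return 0;
}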
+ */ +# define XXH_NAMESPACE /* YOUR NAME HERE */ +# undef XXH_NAMESPACE +#endif + #ifdef XXH_NAMESPACE # define XXH_CAT(A,B) A##B # define XXH_NAME2(A,B) XXH_CAT(A,B) -# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) -# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber) +/* XXH32 */ +# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32) # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState) -# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState) -# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset) -# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update) -# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest) -# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState) -# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash) -# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical) +/* XXH64 */ +# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64) +# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState) +# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState) +# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset) +# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update) +# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest) +# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState) +# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash) # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical) +/* XXH3_64bits */ +# define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits) +# define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret) +# define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed) +# define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed) +# define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState) +# define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState) +# define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState) +# define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset) +# define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed) +# define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret) +# define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed) +# define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update) +# define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest) +# define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret) +# define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed) +/* XXH3_128bits */ +# define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128) +# define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits) +# define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed) +# define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, 
XXH3_128bits_withSecret) +# define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed) +# define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset) +# define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed) +# define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret) +# define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed) +# define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update) +# define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest) +# define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual) +# define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp) +# define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash) +# define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical) #endif @@ -128,156 +310,5375 @@ regular symbol name will be automatically translated by this header. * Version ***************************************/ #define XXH_VERSION_MAJOR 0 -#define XXH_VERSION_MINOR 6 -#define XXH_VERSION_RELEASE 2 +#define XXH_VERSION_MINOR 8 +#define XXH_VERSION_RELEASE 1 #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) + +/*! + * @brief Obtains the xxHash version. + * + * This is mostly useful when xxHash is compiled as a shared library, + * since the returned value comes from the library, as opposed to header file. + * + * @return `XXH_VERSION_NUMBER` of the invoked library. + */ XXH_PUBLIC_API unsigned XXH_versionNumber (void); /* **************************** -* Simple Hash Functions +* Common basic types ******************************/ -typedef unsigned int XXH32_hash_t; -typedef unsigned long long XXH64_hash_t; - -XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); -XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); - -/*! -XXH32() : - Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input". - The memory between input & input+length must be valid (allocated and read-accessible). - "seed" can be used to alter the result predictably. - Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s -XXH64() : - Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". - "seed" can be used to alter the result predictably. - This function runs 2x faster on 64-bits systems, but slower on 32-bits systems (see benchmark). -*/ +#include <stddef.h> /* size_t */ +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; -/* **************************** -* Streaming Hash Functions -******************************/ -typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ -typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ +/*-********************************************************************** +* 32-bit hash +************************************************************************/ +#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */ +/*! + * @brief An unsigned 32-bit integer. + * + * Not necessarily defined to `uint32_t` but functionally equivalent. + */ +typedef uint32_t XXH32_hash_t; -/*! 
State allocation, compatible with dynamic libraries */ +#elif !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint32_t XXH32_hash_t; -XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +#else +# include <limits.h> +# if UINT_MAX == 0xFFFFFFFFUL + typedef unsigned int XXH32_hash_t; +# else +# if ULONG_MAX == 0xFFFFFFFFUL + typedef unsigned long XXH32_hash_t; +# else +# error "unsupported platform: need a 32-bit type" +# endif +# endif +#endif -XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void); -XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr); +/*! + * @} + * + * @defgroup xxh32_family XXH32 family + * @ingroup public + * Contains functions used in the classic 32-bit xxHash algorithm. + * + * @note + * XXH32 is useful for older platforms, with no or poor 64-bit performance. + * Note that @ref xxh3_family provides competitive speed + * for both 32-bit and 64-bit systems, and offers true 64/128 bit hash results. + * + * @see @ref xxh64_family, @ref xxh3_family : Other xxHash families + * @see @ref xxh32_impl for implementation details + * @{ + */ + +/*! + * @brief Calculates the 32-bit hash of @p input using xxHash32. + * + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param seed The 32-bit seed to alter the hash's output predictably. + * + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return The calculated 32-bit hash value. + * + * @see + * XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128(): + * Direct equivalents for the other variants of xxHash. + * @see + * XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version. + */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed); + +/*! + * Streaming functions generate the xxHash value from an incremental input. + * This method is slower than single-call functions, due to state management. + * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized. + * + * An XXH state must first be allocated using `XXH*_createState()`. + * + * Start a new hash by initializing the state with a seed using `XXH*_reset()`. + * + * Then, feed the hash state by calling `XXH*_update()` as many times as necessary. + * + * The function returns an error code, with 0 meaning OK, and any other value + * meaning there is an error. + * + * Finally, a hash value can be produced anytime, by using `XXH*_digest()`. + * This function returns the nn-bits hash as an int or long long. + * + * It's still possible to continue inserting input into the hash state after a + * digest, and generate new hash values later on by invoking `XXH*_digest()`. + * + * When done, release the state using `XXH*_freeState()`. + * + * Example code for incrementally hashing a file: + * @code{.c} + * #include <stdio.h> + * #include <xxhash.h> + * #define BUFFER_SIZE 256 + * + * // Note: XXH64 and XXH3 use the same interface. 
+ * XXH32_hash_t + * hashFile(FILE* stream) + * { + * XXH32_state_t* state; + * unsigned char buf[BUFFER_SIZE]; + * size_t amt; + * XXH32_hash_t hash; + * + * state = XXH32_createState(); // Create a state + * assert(state != NULL); // Error check here + * XXH32_reset(state, 0xbaad5eed); // Reset state with our seed + * while ((amt = fread(buf, 1, sizeof(buf), stream)) != 0) { + * XXH32_update(state, buf, amt); // Hash the file in chunks + * } + * hash = XXH32_digest(state); // Finalize the hash + * XXH32_freeState(state); // Clean up + * return hash; + * } + * @endcode + */ +/*! + * @typedef struct XXH32_state_s XXH32_state_t + * @brief The opaque state struct for the XXH32 streaming API. + * + * @see XXH32_state_s for details. + */ +typedef struct XXH32_state_s XXH32_state_t; + +/*! + * @brief Allocates an @ref XXH32_state_t. + * + * Must be freed with XXH32_freeState(). + * @return An allocated XXH32_state_t on success, `NULL` on failure. + */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void); +/*! + * @brief Frees an @ref XXH32_state_t. + * + * Must be allocated with XXH32_createState(). + * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState(). + * @return XXH_OK. + */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr); +/*! + * @brief Copies one @ref XXH32_state_t to another. + * + * @param dst_state The state to copy to. + * @param src_state The state to copy from. + * @pre + * @p dst_state and @p src_state must not be `NULL` and must not overlap. + */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); -/* hash streaming */ +/*! + * @brief Resets an @ref XXH32_state_t to begin a new hash. + * + * This function resets and seeds a state. Call it before @ref XXH32_update(). + * + * @param statePtr The state struct to reset. + * @param seed The 32-bit seed to alter the hash result predictably. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed); -XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); +/*! + * @brief Consumes a block of @p input to an @ref XXH32_state_t. + * + * Call this to incrementally consume blocks of data. + * + * @param statePtr The state struct to update. + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * + * @pre + * @p statePtr must not be `NULL`. + * @pre + * The memory between @p input and @p input + @p length must be valid, + * readable, contiguous memory. However, if @p length is `0`, @p input may be + * `NULL`. In C++, this also must be *TriviallyCopyable*. + * + * @return @ref XXH_OK on success, @ref XXH_ERROR on failure. + */ XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); + +/*! + * @brief Returns the calculated hash value from an @ref XXH32_state_t. + * + * @note + * Calling XXH32_digest() will not affect @p statePtr, so you can update, + * digest, and update again. + * + * @param statePtr The state struct to calculate the hash from. + * + * @pre + * @p statePtr must not be `NULL`. + * + * @return The calculated xxHash32 value from that state. 
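Because XXH32_digest() leaves the state untouched, a caller can take intermediate hashes of a growing stream without restarting. A short sketch of that update/digest/update pattern, assuming a standalone xxhash.h build (not taken from the diff):

#define XXH_INLINE_ALL
#include "xxhash.h"
#include <assert.h>

int main(void)
{
    XXH32_state_t* const st = XXH32_createState();
    assert(st != NULL);
    XXH32_reset(st, 0);
    XXH32_update(st, "ab", 2);
    {   XXH32_hash_t const h_ab = XXH32_digest(st);      /* hash of "ab" */
        XXH32_update(st, "c", 1);                        /* digest did not disturb the state */
        assert(XXH32_digest(st) == XXH32("abc", 3, 0));  /* same as one-shot hash of "abc" */
        (void)h_ab;
    }
    XXH32_freeState(st);
    return 0;
}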
+ */ XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); -XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); -XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); -XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); +/******* Canonical representation *******/ /* -These functions generate the xxHash of an input provided in multiple segments. -Note that, for small input, they are slower than single-call functions, due to state management. -For small input, prefer `XXH32()` and `XXH64()` . + * The default return values from XXH functions are unsigned 32 and 64 bit + * integers. + * This is the simplest and fastest format for further post-processing. + * + * However, this leaves open the question of what is the order on the byte level, + * since little and big endian conventions will store the same number differently. + * + * The canonical representation settles this issue by mandating big-endian + * convention, the same convention as human-readable numbers (large digits first). + * + * When writing hash values to storage, sending them over a network, or printing + * them, it's highly recommended to use the canonical representation to ensure + * portability across a wider range of systems, present and future. + * + * The following functions allow transformation of hash values to and from + * canonical format. + */ + +/*! + * @brief Canonical (big endian) representation of @ref XXH32_hash_t. + */ +typedef struct { + unsigned char digest[4]; /*!< Hash bytes, big endian */ +} XXH32_canonical_t; + +/*! + * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t. + * + * @param dst The @ref XXH32_canonical_t pointer to be stored to. + * @param hash The @ref XXH32_hash_t to be converted. + * + * @pre + * @p dst must not be `NULL`. + */ +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); -XXH state must first be allocated, using XXH*_createState() . +/*! + * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t. + * + * @param src The @ref XXH32_canonical_t to convert. + * + * @pre + * @p src must not be `NULL`. + * + * @return The converted hash. + */ +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); -Start a new hash by initializing state with a seed, using XXH*_reset(). -Then, feed the hash state by calling XXH*_update() as many times as necessary. -Obviously, input must be allocated and read accessible. -The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. +#ifdef __has_attribute +# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x) +#else +# define XXH_HAS_ATTRIBUTE(x) 0 +#endif -Finally, a hash value can be produced anytime, by using XXH*_digest(). -This function returns the nn-bits hash as an int or long long. +/* C-language Attributes are added in C23. */ +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute) +# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x) +#else +# define XXH_HAS_C_ATTRIBUTE(x) 0 +#endif -It's still possible to continue inserting input into the hash state after a digest, -and generate some new hashes later on, by calling again XXH*_digest().
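In practice the canonical form is what should cross process or machine boundaries; the native integer is for in-memory use. A round-trip sketch using the canonical-representation functions documented above (illustrative, assuming a standalone xxhash.h build):

#define XXH_INLINE_ALL
#include "xxhash.h"
#include <stdio.h>

int main(void)
{
    XXH32_hash_t const h = XXH32("abc", 3, 0);
    XXH32_canonical_t canon;                 /* 4 bytes, big endian */
    XXH32_canonicalFromHash(&canon, h);      /* safe to write to disk or a socket */

    if (XXH32_hashFromCanonical(&canon) == h)   /* back to the native form */
        printf("round-trip OK: %02x%02x%02x%02x\n",
               canon.digest[0], canon.digest[1],
               canon.digest[2], canon.digest[3]);
    return 0;
}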
+#if defined(__cplusplus) && defined(__has_cpp_attribute) +# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +# define XXH_HAS_CPP_ATTRIBUTE(x) 0 +#endif -When done, free XXH state space if it was allocated dynamically. +/* +Define XXH_FALLTHROUGH macro for annotating switch case with the 'fallthrough' attribute +introduced in CPP17 and C23. +CPP17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough +C23 : https://en.cppreference.com/w/c/language/attributes/fallthrough */ +#if XXH_HAS_C_ATTRIBUTE(x) +# define XXH_FALLTHROUGH [[fallthrough]] +#elif XXH_HAS_CPP_ATTRIBUTE(x) +# define XXH_FALLTHROUGH [[fallthrough]] +#elif XXH_HAS_ATTRIBUTE(__fallthrough__) +# define XXH_FALLTHROUGH __attribute__ ((fallthrough)) +#else +# define XXH_FALLTHROUGH +#endif +/*! + * @} + * @ingroup public + * @{ + */ -/* ************************** -* Utils -****************************/ -#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* ! C99 */ -# define restrict /* disable restrict */ +#ifndef XXH_NO_LONG_LONG +/*-********************************************************************** +* 64-bit hash +************************************************************************/ +#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */ +/*! + * @brief An unsigned 64-bit integer. + * + * Not necessarily defined to `uint64_t` but functionally equivalent. + */ +typedef uint64_t XXH64_hash_t; +#elif !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint64_t XXH64_hash_t; +#else +# include <limits.h> +# if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL + /* LP64 ABI says uint64_t is unsigned long */ + typedef unsigned long XXH64_hash_t; +# else + /* the following type must have a width of 64-bit */ + typedef unsigned long long XXH64_hash_t; +# endif #endif -XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dst_state, const XXH32_state_t* restrict src_state); -XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dst_state, const XXH64_state_t* restrict src_state); +/*! + * @} + * + * @defgroup xxh64_family XXH64 family + * @ingroup public + * @{ + * Contains functions used in the classic 64-bit xxHash algorithm. + * + * @note + * XXH3 provides competitive speed for both 32-bit and 64-bit systems, + * and offers true 64/128 bit hash results. + * It provides better speed for systems with vector processing capabilities. + */ -/* ************************** -* Canonical representation -****************************/ -/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. -* The canonical representation uses human-readable write convention, aka big-endian (large digits first). -* These functions allow transformation of hash result into and from its canonical format. -* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. -*/ -typedef struct { unsigned char digest[4]; } XXH32_canonical_t; -typedef struct { unsigned char digest[8]; } XXH64_canonical_t; +/*! + * @brief Calculates the 64-bit hash of @p input using xxHash64. + * + * This function usually runs faster on 64-bit systems, but slower on 32-bit + * systems (see benchmark). + * + * @param input The block of data to be hashed, at least @p length bytes in size. + * @param length The length of @p input, in bytes. + * @param seed The 64-bit seed to alter the hash's output predictably. 
+ *
+ * @pre
+ *   The memory between @p input and @p input + @p length must be valid,
+ *   readable, contiguous memory. However, if @p length is `0`, @p input may be
+ *   `NULL`. In C++, this also must be *TriviallyCopyable*.
+ *
+ * @return The calculated 64-bit hash.
+ *
+ * @see
+ *    XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
+ *    Direct equivalents for the other variants of xxHash.
+ * @see
+ *    XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
+ */
+XXH_PUBLIC_API XXH64_hash_t XXH64(const void* input, size_t length, XXH64_hash_t seed);
-XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
-XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
+/******* Streaming *******/
+/*!
+ * @brief The opaque state struct for the XXH64 streaming API.
+ *
+ * @see XXH64_state_s for details.
+ */
+typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
-XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
+XXH_PUBLIC_API XXH_errorcode XXH64_reset  (XXH64_state_t* statePtr, XXH64_hash_t seed);
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH64_hash_t  XXH64_digest (const XXH64_state_t* statePtr);
+
+/******* Canonical representation *******/
+typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
 XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
+#ifndef XXH_NO_XXH3
+/*!
+ * @}
+ * ************************************************************************
+ * @defgroup xxh3_family XXH3 family
+ * @ingroup public
+ * @{
+ *
+ * XXH3 is a more recent hash algorithm featuring:
+ *  - Improved speed for both small and large inputs
+ *  - True 64-bit and 128-bit outputs
+ *  - SIMD acceleration
+ *  - Improved 32-bit viability
+ *
+ * Speed analysis methodology is explained here:
+ *
+ *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
+ *
+ * Compared to XXH64, expect XXH3 to run approximately
+ * ~2x faster on large inputs and >3x faster on small ones;
+ * exact differences vary depending on platform.
+ *
+ * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
+ * but does not require it.
+ * Any 32-bit and 64-bit targets that can run XXH32 smoothly
+ * can run XXH3 at competitive speeds, even without vector support.
+ * Further details are explained in the implementation.
+ *
+ * Optimized implementations are provided for AVX512, AVX2, SSE2, NEON, POWER8,
+ * ZVector and scalar targets. This can be controlled via the XXH_VECTOR macro.
+ *
+ * XXH3 implementation is portable:
+ * it has a generic C90 formulation that can be compiled on any platform,
+ * all implementations generate exactly the same hash value on all platforms.
+ * Starting from v0.8.0, it's also labelled "stable", meaning that
+ * any future version will also generate the same hash value.
+ *
+ * XXH3 offers 2 variants, _64bits and _128bits.
+ *
+ * When only 64 bits are needed, prefer invoking the _64bits variant, as it
+ * reduces the amount of mixing, resulting in faster speed on small inputs.
+ * It's also generally simpler to manipulate a scalar return type than a struct. + * + * The API supports one-shot hashing, streaming mode, and custom secrets. + */ + +/*-********************************************************************** +* XXH3 64-bit variant +************************************************************************/ + +/* XXH3_64bits(): + * default 64-bit variant, using default secret and default seed of 0. + * It's the fastest variant. */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len); + +/* + * XXH3_64bits_withSeed(): + * This variant generates a custom secret on the fly + * based on default secret altered using the `seed` value. + * While this operation is decently fast, note that it's not completely free. + * Note: seed==0 produces the same results as XXH3_64bits(). + */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); + +/*! + * The bare minimum size for a custom secret. + * + * @see + * XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(), + * XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret(). + */ +#define XXH3_SECRET_SIZE_MIN 136 + +/* + * XXH3_64bits_withSecret(): + * It's possible to provide any blob of bytes as a "secret" to generate the hash. + * This makes it more difficult for an external actor to prepare an intentional collision. + * The main condition is that secretSize *must* be large enough (>= XXH3_SECRET_SIZE_MIN). + * However, the quality of the secret impacts the dispersion of the hash algorithm. + * Therefore, the secret _must_ look like a bunch of random bytes. + * Avoid "trivial" or structured data such as repeated sequences or a text document. + * Whenever in doubt about the "randomness" of the blob of bytes, + * consider employing "XXH3_generateSecret()" instead (see below). + * It will generate a proper high entropy secret derived from the blob of bytes. + * Another advantage of using XXH3_generateSecret() is that + * it guarantees that all bits within the initial blob of bytes + * will impact every bit of the output. + * This is not necessarily the case when using the blob of bytes directly + * because, when hashing _small_ inputs, only a portion of the secret is employed. + */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize); + + +/******* Streaming *******/ +/* + * Streaming requires state maintenance. + * This operation costs memory and CPU. + * As a consequence, streaming is slower than one-shot hashing. + * For better performance, prefer one-shot functions whenever applicable. + */ + +/*! + * @brief The state struct for the XXH3 streaming API. + * + * @see XXH3_state_s for details. + */ +typedef struct XXH3_state_s XXH3_state_t; +XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void); +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr); +XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state); + +/* + * XXH3_64bits_reset(): + * Initialize with default parameters. + * digest will be equivalent to `XXH3_64bits()`. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr); +/* + * XXH3_64bits_reset_withSeed(): + * Generate a custom secret from `seed`, and store it into `statePtr`. + * digest will be equivalent to `XXH3_64bits_withSeed()`. 
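
As a hedged illustration of the seeded one-shot and streaming paths documented above (editorial sketch; `hash_both_ways` is a hypothetical helper, and error codes are ignored for brevity), the streamed digest must equal the seeded one-shot result:

    #include <string.h>
    #include "xxhash.h"  /* assumed include path for this bundled copy */

    /* Hash the same message in one shot and in two streamed chunks. */
    static XXH64_hash_t hash_both_ways(const char* msg, XXH64_hash_t seed)
    {
        size_t const len = strlen(msg);
        XXH64_hash_t const oneShot = XXH3_64bits_withSeed(msg, len, seed);

        XXH3_state_t* const st = XXH3_createState();   /* error checks omitted */
        XXH3_64bits_reset_withSeed(st, seed);          /* matches the seeded one-shot */
        XXH3_64bits_update(st, msg, len / 2);
        XXH3_64bits_update(st, msg + len / 2, len - len / 2);
        XXH64_hash_t const streamed = XXH3_64bits_digest(st);
        XXH3_freeState(st);

        return (oneShot == streamed) ? streamed : 0;   /* equal by construction */
    }
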
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
+/*
+ * XXH3_64bits_reset_withSecret():
+ * `secret` is referenced, it _must outlive_ the hash streaming session.
+ * Similar to one-shot API, `secretSize` must be >= `XXH3_SECRET_SIZE_MIN`,
+ * and the quality of produced hash values depends on secret's entropy
+ * (secret's content should look like a bunch of random bytes).
+ * When in doubt about the randomness of a candidate `secret`,
+ * consider employing `XXH3_generateSecret()` instead (see below).
+ */
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
+
+XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH64_hash_t  XXH3_64bits_digest (const XXH3_state_t* statePtr);
+
+/* note : canonical representation of XXH3 is the same as XXH64
+ * since they both produce XXH64_hash_t values */
+
+
+/*-**********************************************************************
+* XXH3 128-bit variant
+************************************************************************/
+
+/*!
+ * @brief The return value from 128-bit hashes.
+ *
+ * Stored in little endian order, although the fields themselves are in native
+ * endianness.
+ */
+typedef struct {
+    XXH64_hash_t low64;  /*!< `value & 0xFFFFFFFFFFFFFFFF` */
+    XXH64_hash_t high64; /*!< `value >> 64` */
+} XXH128_hash_t;
+
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
+
+/******* Streaming *******/
+/*
+ * Streaming requires state maintenance.
+ * This operation costs memory and CPU.
+ * As a consequence, streaming is slower than one-shot hashing.
+ * For better performance, prefer one-shot functions whenever applicable.
+ *
+ * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
+ * Use already declared XXH3_createState() and XXH3_freeState().
+ *
+ * All reset and streaming functions have the same meaning as their 64-bit counterparts.
+ */
+
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed);
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
+
+XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);
+
+/* The following helper functions make it possible to compare XXH128_hash_t values.
+ * Since XXH128_hash_t is a structure, this capability is not offered by the language.
+ * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
+
+/*!
+ * XXH128_isEqual():
+ * Return: 1 if `h1` and `h2` are equal, 0 if they are not.
+ */
+XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
+
+/*!
+ * XXH128_cmp():
+ *
+ * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
+ * + * return: >0 if *h128_1 > *h128_2 + * =0 if *h128_1 == *h128_2 + * <0 if *h128_1 < *h128_2 + */ +XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2); + + +/******* Canonical representation *******/ +typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t; +XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash); +XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src); + + +#endif /* !XXH_NO_XXH3 */ +#endif /* XXH_NO_LONG_LONG */ + +/*! + * @} + */ #endif /* XXHASH_H_5627135585666179 */ -/* ================================================================================================ - This section contains definitions which are not guaranteed to remain stable. - They may change in future versions, becoming incompatible with a different version of the library. - They shall only be used with static linking. - Never use these definitions in association with dynamic linking ! -=================================================================================================== */ -#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXH_STATIC_H_3543687687345) -#define XXH_STATIC_H_3543687687345 - -/* These definitions are only meant to allow allocation of XXH state - statically, on stack, or in a struct for example. - Do not use members directly. */ - - struct XXH32_state_s { - unsigned total_len_32; - unsigned large_len; - unsigned v1; - unsigned v2; - unsigned v3; - unsigned v4; - unsigned mem32[4]; /* buffer defined as U32 for alignment */ - unsigned memsize; - unsigned reserved; /* never read nor write, will be removed in a future version */ - }; /* typedef'd to XXH32_state_t */ - - struct XXH64_state_s { - unsigned long long total_len; - unsigned long long v1; - unsigned long long v2; - unsigned long long v3; - unsigned long long v4; - unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ - unsigned memsize; - unsigned reserved[2]; /* never read nor write, will be removed in a future version */ - }; /* typedef'd to XXH64_state_t */ - - -# ifdef XXH_PRIVATE_API -# include "xxhash.c" /* include xxhash functions as `static`, for inlining */ +#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) +#define XXHASH_H_STATIC_13879238742 +/* **************************************************************************** + * This section contains declarations which are not guaranteed to remain stable. + * They may change in future versions, becoming incompatible with a different + * version of the library. + * These declarations should only be used with static linking. + * Never use them in association with dynamic linking! + ***************************************************************************** */ + +/* + * These definitions are only present to allow static allocation + * of XXH states, on stack or in a struct, for example. + * Never **ever** access their members directly. + */ + +/*! + * @internal + * @brief Structure for XXH32 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH32_state_t. + * Do not access the members of this struct directly. 
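
A short editorial sketch of how these comparison helpers compose with the C standard library; the helper names are hypothetical:

    #include <stdlib.h>
    #include "xxhash.h"  /* assumed include path for this bundled copy */

    /* XXH128_cmp already has the (const void*, const void*) shape that
     * qsort() expects, so it can be passed through directly. */
    static void sort_hashes(XXH128_hash_t* hashes, size_t count)
    {
        qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
    }

    /* Structs cannot be compared with ==; XXH128_isEqual fills that gap. */
    static int same_digest(const void* a, size_t lenA, const void* b, size_t lenB)
    {
        return XXH128_isEqual(XXH3_128bits_withSeed(a, lenA, 0),
                              XXH3_128bits_withSeed(b, lenB, 0));
    }
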
+ * @see XXH64_state_s, XXH3_state_s + */ +struct XXH32_state_s { + XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */ + XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */ + XXH32_hash_t v[4]; /*!< Accumulator lanes */ + XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */ + XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */ +}; /* typedef'd to XXH32_state_t */ + + +#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */ + +/*! + * @internal + * @brief Structure for XXH64 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is + * an opaque type. This allows fields to safely be changed. + * + * Typedef'd to @ref XXH64_state_t. + * Do not access the members of this struct directly. + * @see XXH32_state_s, XXH3_state_s + */ +struct XXH64_state_s { + XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */ + XXH64_hash_t v[4]; /*!< Accumulator lanes */ + XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */ + XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */ + XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyways*/ + XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */ +}; /* typedef'd to XXH64_state_t */ + + +#ifndef XXH_NO_XXH3 + +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */ +# include <stdalign.h> +# define XXH_ALIGN(n) alignas(n) +#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */ +/* In C++ alignas() is a keyword */ +# define XXH_ALIGN(n) alignas(n) +#elif defined(__GNUC__) +# define XXH_ALIGN(n) __attribute__ ((aligned(n))) +#elif defined(_MSC_VER) +# define XXH_ALIGN(n) __declspec(align(n)) +#else +# define XXH_ALIGN(n) /* disabled */ +#endif + +/* Old GCC versions only accept the attribute after the type in structures. */ +#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \ + && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \ + && defined(__GNUC__) +# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align) +#else +# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type +#endif + +/*! + * @brief The size of the internal XXH3 buffer. + * + * This is the optimal update size for incremental hashing. + * + * @see XXH3_64b_update(), XXH3_128b_update(). + */ +#define XXH3_INTERNALBUFFER_SIZE 256 + +/*! + * @brief Default size of the secret buffer (and @ref XXH3_kSecret). + * + * This is the size used in @ref XXH3_kSecret and the seeded functions. + * + * Not to be confused with @ref XXH3_SECRET_SIZE_MIN. + */ +#define XXH3_SECRET_DEFAULT_SIZE 192 + +/*! + * @internal + * @brief Structure for XXH3 streaming API. + * + * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY, + * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. + * Otherwise it is an opaque type. + * Never use this definition in combination with dynamic library. + * This allows fields to safely be changed in the future. + * + * @note ** This structure has a strict alignment requirement of 64 bytes!! ** + * Do not allocate this with `malloc()` or `new`, + * it will not be sufficiently aligned. + * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation. 
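
A hedged sketch of the stack-allocation route mentioned above; it assumes `XXH_STATIC_LINKING_ONLY` is defined before inclusion so the struct definition (and `XXH3_INITSTATE`, declared further below) is visible, and `hash_on_stack` is a hypothetical helper:

    #define XXH_STATIC_LINKING_ONLY  /* exposes XXH3_state_t and XXH3_INITSTATE */
    #include "xxhash.h"              /* assumed include path for this bundled copy */

    /* Stream through a stack-allocated state: no heap allocation, and the
     * struct's own alignment attributes satisfy the 64-byte requirement. */
    static XXH64_hash_t hash_on_stack(const void* data, size_t len, XXH64_hash_t seed)
    {
        XXH3_state_t state;
        XXH3_INITSTATE(&state);                    /* required before a first _withSeed reset */
        XXH3_64bits_reset_withSeed(&state, seed);  /* error checks omitted for brevity */
        XXH3_64bits_update(&state, data, len);
        return XXH3_64bits_digest(&state);
    }
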
+ *
+ * Typedef'd to @ref XXH3_state_t.
+ * Never access the members of this struct directly.
+ *
+ * @see XXH3_INITSTATE() for stack initialization.
+ * @see XXH3_createState(), XXH3_freeState().
+ * @see XXH32_state_s, XXH64_state_s
+ */
+struct XXH3_state_s {
+    XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
+    /*!< The 8 accumulators. Similar to `vN` in @ref XXH32_state_s::v1 and @ref XXH64_state_s */
+    XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
+    /*!< Used to store a custom secret generated from a seed. */
+    XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
+    /*!< The internal buffer. @see XXH32_state_s::mem32 */
+    XXH32_hash_t bufferedSize;
+    /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
+    XXH32_hash_t useSeed;
+    /*!< Reserved field. Needed for padding on 64-bit. */
+    size_t nbStripesSoFar;
+    /*!< Number of stripes processed. */
+    XXH64_hash_t totalLen;
+    /*!< Total length hashed. 64-bit even on 32-bit targets. */
+    size_t nbStripesPerBlock;
+    /*!< Number of stripes per block. */
+    size_t secretLimit;
+    /*!< Size of @ref customSecret or @ref extSecret */
+    XXH64_hash_t seed;
+    /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
+    XXH64_hash_t reserved64;
+    /*!< Reserved field. */
+    const unsigned char* extSecret;
+    /*!< Reference to an external secret for the _withSecret variants, NULL
+     *   for other variants. */
+    /* note: there may be some padding at the end due to alignment on 64 bytes */
+}; /* typedef'd to XXH3_state_t */
+
+#undef XXH_ALIGN_MEMBER
+
+/*!
+ * @brief Initializes a stack-allocated `XXH3_state_s`.
+ *
+ * When the @ref XXH3_state_t structure is merely emplaced on stack,
+ * it should be initialized with XXH3_INITSTATE() or a memset()
+ * in case its first reset uses XXH3_NNbits_reset_withSeed().
+ * This init can be omitted if the first reset uses default or _withSecret mode.
+ * This operation isn't necessary when the state is created with XXH3_createState().
+ * Note that this doesn't prepare the state for a streaming operation,
+ * it's still necessary to use XXH3_NNbits_reset*() afterwards.
+ */
+#define XXH3_INITSTATE(XXH3_state_ptr) { (XXH3_state_ptr)->seed = 0; }
+
+
+/* XXH128() :
+ * simple alias to pre-selected XXH3_128bits variant
+ */
+XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);
+
+
+/* === Experimental API === */
+/* Symbols defined below must be considered tied to a specific library version. */
+
+/*
+ * XXH3_generateSecret():
+ *
+ * Derive a high-entropy secret from any user-defined content, named customSeed.
+ * The generated secret can be used in combination with `*_withSecret()` functions.
+ * The `_withSecret()` variants are useful to provide a higher level of protection than a 64-bit seed,
+ * as it becomes much more difficult for an external actor to guess how to impact the calculation logic.
+ *
+ * The function accepts as input a custom seed of any length and any content,
+ * and derives from it a high-entropy secret of length @secretSize
+ * into an already allocated buffer @secretBuffer.
+ * @secretSize must be >= XXH3_SECRET_SIZE_MIN
+ *
+ * The generated secret can then be used with any `*_withSecret()` variant.
+ * Functions `XXH3_128bits_withSecret()`, `XXH3_64bits_withSecret()`,
+ * `XXH3_128bits_reset_withSecret()` and `XXH3_64bits_reset_withSecret()`
+ * are part of this list.
They all accept a `secret` parameter + * which must be large enough for implementation reasons (>= XXH3_SECRET_SIZE_MIN) + * _and_ feature very high entropy (consist of random-looking bytes). + * These conditions can be a high bar to meet, so + * XXH3_generateSecret() can be employed to ensure proper quality. + * + * customSeed can be anything. It can have any size, even small ones, + * and its content can be anything, even "poor entropy" sources such as a bunch of zeroes. + * The resulting `secret` will nonetheless provide all required qualities. + * + * When customSeedSize > 0, supplying NULL as customSeed is undefined behavior. + */ +XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize); + + +/* + * XXH3_generateSecret_fromSeed(): + * + * Generate the same secret as the _withSeed() variants. + * + * The resulting secret has a length of XXH3_SECRET_DEFAULT_SIZE (necessarily). + * @secretBuffer must be already allocated, of size at least XXH3_SECRET_DEFAULT_SIZE bytes. + * + * The generated secret can be used in combination with + *`*_withSecret()` and `_withSecretandSeed()` variants. + * This generator is notably useful in combination with `_withSecretandSeed()`, + * as a way to emulate a faster `_withSeed()` variant. + */ +XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed); + +/* + * *_withSecretandSeed() : + * These variants generate hash values using either + * @seed for "short" keys (< XXH3_MIDSIZE_MAX = 240 bytes) + * or @secret for "large" keys (>= XXH3_MIDSIZE_MAX). + * + * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`. + * `_withSeed()` has to generate the secret on the fly for "large" keys. + * It's fast, but can be perceptible for "not so large" keys (< 1 KB). + * `_withSecret()` has to generate the masks on the fly for "small" keys, + * which requires more instructions than _withSeed() variants. + * Therefore, _withSecretandSeed variant combines the best of both worlds. + * + * When @secret has been generated by XXH3_generateSecret_fromSeed(), + * this variant produces *exactly* the same results as `_withSeed()` variant, + * hence offering only a pure speed benefit on "large" input, + * by skipping the need to regenerate the secret for every large input. + * + * Another usage scenario is to hash the secret to a 64-bit hash value, + * for example with XXH3_64bits(), which then becomes the seed, + * and then employ both the seed and the secret in _withSecretandSeed(). + * On top of speed, an added benefit is that each bit in the secret + * has a 50% chance to swap each bit in the output, + * via its impact to the seed. + * This is not guaranteed when using the secret directly in "small data" scenarios, + * because only portions of the secret are employed for small data. 
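
An editorial sketch of the secret-derivation workflow described above, with a hypothetical helper name and illustrative seed material:

    #define XXH_STATIC_LINKING_ONLY  /* for XXH3_SECRET_DEFAULT_SIZE */
    #include "xxhash.h"              /* assumed include path for this bundled copy */

    /* Turn low-entropy application data into a proper secret, then hash with it. */
    static XXH64_hash_t hash_with_derived_secret(const void* data, size_t len)
    {
        static unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];  /* >= XXH3_SECRET_SIZE_MIN */
        const char customSeed[] = "any bytes, even low entropy, are acceptable here";

        /* Derives a high-entropy secret from the blob; check the result in real use. */
        if (XXH3_generateSecret(secret, sizeof(secret), customSeed, sizeof(customSeed)) != XXH_OK)
            return 0;  /* error convention is this sketch's own, not the library's */

        return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
    }
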
+ */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecretandSeed(const void* data, size_t len, + const void* secret, size_t secretSize, + XXH64_hash_t seed); + +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecretandSeed(const void* data, size_t len, + const void* secret, size_t secretSize, + XXH64_hash_t seed64); + +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, + const void* secret, size_t secretSize, + XXH64_hash_t seed64); + +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, + const void* secret, size_t secretSize, + XXH64_hash_t seed64); + + +#endif /* XXH_NO_XXH3 */ +#endif /* XXH_NO_LONG_LONG */ +#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) +# define XXH_IMPLEMENTATION +#endif + +#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */ + + +/* ======================================================================== */ +/* ======================================================================== */ +/* ======================================================================== */ + + +/*-********************************************************************** + * xxHash implementation + *-********************************************************************** + * xxHash's implementation used to be hosted inside xxhash.c. + * + * However, inlining requires implementation to be visible to the compiler, + * hence be included alongside the header. + * Previously, implementation was hosted inside xxhash.c, + * which was then #included when inlining was activated. + * This construction created issues with a few build and install systems, + * as it required xxhash.c to be stored in /include directory. + * + * xxHash implementation is now directly integrated within xxhash.h. + * As a consequence, xxhash.c is no longer needed in /include. + * + * xxhash.c is still available and is still useful. + * In a "normal" setup, when xxhash is not inlined, + * xxhash.h only exposes the prototypes and public symbols, + * while xxhash.c can be built into an object file xxhash.o + * which can then be linked into the final binary. + ************************************************************************/ + +#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \ + || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387) +# define XXH_IMPLEM_13a8737387 + +/* ************************************* +* Tuning parameters +***************************************/ + +/*! + * @defgroup tuning Tuning parameters + * @{ + * + * Various macros to control xxHash's behavior. + */ +#ifdef XXH_DOXYGEN +/*! + * @brief Define this to disable 64-bit code. + * + * Useful if only using the @ref xxh32_family and you have a strict C90 compiler. + */ +# define XXH_NO_LONG_LONG +# undef XXH_NO_LONG_LONG /* don't actually */ +/*! + * @brief Controls how unaligned memory is accessed. + * + * By default, access to unaligned memory is controlled by `memcpy()`, which is + * safe and portable. + * + * Unfortunately, on some target/compiler combinations, the generated assembly + * is sub-optimal. + * + * The below switch allow selection of a different access method + * in the search for improved performance. + * + * @par Possible options: + * + * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy` + * @par + * Use `memcpy()`. Safe and portable. Note that most modern compilers will + * eliminate the function call and treat it as an unaligned access. 
+ * + * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((packed))` + * @par + * Depends on compiler extensions and is therefore not portable. + * This method is safe _if_ your compiler supports it, + * and *generally* as fast or faster than `memcpy`. + * + * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast + * @par + * Casts directly and dereferences. This method doesn't depend on the + * compiler, but it violates the C standard as it directly dereferences an + * unaligned pointer. It can generate buggy code on targets which do not + * support unaligned memory accesses, but in some circumstances, it's the + * only known way to get the most performance. + * + * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift + * @par + * Also portable. This can generate the best code on old compilers which don't + * inline small `memcpy()` calls, and it might also be faster on big-endian + * systems which lack a native byteswap instruction. However, some compilers + * will emit literal byteshifts even if the target supports unaligned access. + * . + * + * @warning + * Methods 1 and 2 rely on implementation-defined behavior. Use these with + * care, as what works on one compiler/platform/optimization level may cause + * another to read garbage data or even crash. + * + * See http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details. + * + * Prefer these methods in priority order (0 > 3 > 1 > 2) + */ +# define XXH_FORCE_MEMORY_ACCESS 0 + +/*! + * @def XXH_FORCE_ALIGN_CHECK + * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32() + * and XXH64() only). + * + * This is an important performance trick for architectures without decent + * unaligned memory access performance. + * + * It checks for input alignment, and when conditions are met, uses a "fast + * path" employing direct 32-bit/64-bit reads, resulting in _dramatically + * faster_ read speed. + * + * The check costs one initial branch per hash, which is generally negligible, + * but not zero. + * + * Moreover, it's not useful to generate an additional code path if memory + * access uses the same instruction for both aligned and unaligned + * addresses (e.g. x86 and aarch64). + * + * In these cases, the alignment check can be removed by setting this macro to 0. + * Then the code will always use unaligned memory access. + * Align check is automatically disabled on x86, x64 & arm64, + * which are platforms known to offer good unaligned memory accesses performance. + * + * This option does not affect XXH3 (only XXH32 and XXH64). + */ +# define XXH_FORCE_ALIGN_CHECK 0 + +/*! + * @def XXH_NO_INLINE_HINTS + * @brief When non-zero, sets all functions to `static`. + * + * By default, xxHash tries to force the compiler to inline almost all internal + * functions. + * + * This can usually improve performance due to reduced jumping and improved + * constant folding, but significantly increases the size of the binary which + * might not be favorable. + * + * Additionally, sometimes the forced inlining can be detrimental to performance, + * depending on the architecture. + * + * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the + * compiler full control on whether to inline or not. + * + * When not optimizing (-O0), optimizing for size (-Os, -Oz), or using + * -fno-inline with GCC or Clang, this will automatically be defined. + */ +# define XXH_NO_INLINE_HINTS 0 + +/*! + * @def XXH32_ENDJMP + * @brief Whether to use a jump for `XXH32_finalize`. 
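
These tuning macros are compile-time knobs; a hedged sketch of how a build might pin them before including the header (values shown are illustrative, not recommendations):

    /* Hypothetical build-side configuration; any of these can equally be
     * passed as -D flags on the compiler command line. */
    #define XXH_FORCE_ALIGN_CHECK 0   /* skip the aligned fast path on this target */
    #define XXH_NO_INLINE_HINTS   1   /* let the compiler decide about inlining */
    #include "xxhash.h"               /* assumed include path for this bundled copy */
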
+ * + * For performance, `XXH32_finalize` uses multiple branches in the finalizer. + * This is generally preferable for performance, + * but depending on exact architecture, a jmp may be preferable. + * + * This setting is only possibly making a difference for very small inputs. + */ +# define XXH32_ENDJMP 0 + +/*! + * @internal + * @brief Redefines old internal names. + * + * For compatibility with code that uses xxHash's internals before the names + * were changed to improve namespacing. There is no other reason to use this. + */ +# define XXH_OLD_NAMES +# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */ +#endif /* XXH_DOXYGEN */ +/*! + * @} + */ + +#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */ + /* prefer __packed__ structures (method 1) for gcc on armv7+ and mips */ +# if !defined(__clang__) && \ +( \ + (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \ + ( \ + defined(__GNUC__) && ( \ + (defined(__ARM_ARCH) && __ARM_ARCH >= 7) || \ + ( \ + defined(__mips__) && \ + (__mips <= 5 || __mips_isa_rev < 6) && \ + (!defined(__mips16) || defined(__mips_mips16e2)) \ + ) \ + ) \ + ) \ +) +# define XXH_FORCE_MEMORY_ACCESS 1 +# endif +#endif + +#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */ +# if defined(__i386) || defined(__x86_64__) || defined(__aarch64__) \ + || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) /* visual */ +# define XXH_FORCE_ALIGN_CHECK 0 +# else +# define XXH_FORCE_ALIGN_CHECK 1 +# endif +#endif + +#ifndef XXH_NO_INLINE_HINTS +# if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \ + || defined(__NO_INLINE__) /* -O0, -fno-inline */ +# define XXH_NO_INLINE_HINTS 1 +# else +# define XXH_NO_INLINE_HINTS 0 +# endif +#endif + +#ifndef XXH32_ENDJMP +/* generally preferable for performance */ +# define XXH32_ENDJMP 0 +#endif + +/*! 
+ * @defgroup impl Implementation + * @{ + */ + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +/* Modify the local functions below should you wish to use some other memory routines */ +/* for ZSTD_malloc(), ZSTD_free() */ +#define ZSTD_DEPS_NEED_MALLOC +#include "zstd_deps.h" /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */ +static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); } +static void XXH_free (void* p) { ZSTD_free(p); } +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); } + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#ifdef _MSC_VER /* Visual Studio warning fix */ +# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ +#endif + +#if XXH_NO_INLINE_HINTS /* disable inlining hints */ +# if defined(__GNUC__) || defined(__clang__) +# define XXH_FORCE_INLINE static __attribute__((unused)) +# else +# define XXH_FORCE_INLINE static +# endif +# define XXH_NO_INLINE static +/* enable inlining hints */ +#elif defined(__GNUC__) || defined(__clang__) +# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused)) +# define XXH_NO_INLINE static __attribute__((noinline)) +#elif defined(_MSC_VER) /* Visual Studio */ +# define XXH_FORCE_INLINE static __forceinline +# define XXH_NO_INLINE static __declspec(noinline) +#elif defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */ +# define XXH_FORCE_INLINE static inline +# define XXH_NO_INLINE static +#else +# define XXH_FORCE_INLINE static +# define XXH_NO_INLINE static +#endif + + + +/* ************************************* +* Debug +***************************************/ +/*! + * @ingroup tuning + * @def XXH_DEBUGLEVEL + * @brief Sets the debugging level. + * + * XXH_DEBUGLEVEL is expected to be defined externally, typically via the + * compiler's command line options. The value must be a number. + */ +#ifndef XXH_DEBUGLEVEL +# ifdef DEBUGLEVEL /* backwards compat */ +# define XXH_DEBUGLEVEL DEBUGLEVEL +# else +# define XXH_DEBUGLEVEL 0 +# endif +#endif + +#if (XXH_DEBUGLEVEL>=1) +# include <assert.h> /* note: can still be disabled with NDEBUG */ +# define XXH_ASSERT(c) assert(c) +#else +# define XXH_ASSERT(c) ((void)0) +#endif + +/* note: use after variable declarations */ +#ifndef XXH_STATIC_ASSERT +# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */ +# include <assert.h> +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0) +# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */ +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0) +# else +# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0) +# endif +# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c) +#endif + +/*! + * @internal + * @def XXH_COMPILER_GUARD(var) + * @brief Used to prevent unwanted optimizations for @p var. + * + * It uses an empty GCC inline assembly statement with a register constraint + * which forces @p var into a general purpose register (eg eax, ebx, ecx + * on x86) and marks it as modified. + * + * This is used in a few places to avoid unwanted autovectorization (e.g. + * XXH32_round()). All vectorization we want is explicit via intrinsics, + * and _usually_ isn't wanted elsewhere. 
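
An illustrative (non-upstream) sketch of how the assertion helpers above behave once the header is being compiled:

    /* Editorial sketch: XXH_STATIC_ASSERT fails the build on a false condition,
     * while XXH_ASSERT compiles away unless XXH_DEBUGLEVEL >= 1. */
    static void xxh_sanity_checks(void)
    {
        XXH_STATIC_ASSERT(sizeof(XXH32_hash_t) == 4);  /* static_assert under C11 */
        XXH_ASSERT(1 == 1);                            /* assert() only in debug builds */
    }
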
+ * + * We also use it to prevent unwanted constant folding for AArch64 in + * XXH3_initCustomSecret_scalar(). + */ +#if defined(__GNUC__) || defined(__clang__) +# define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var)) +#else +# define XXH_COMPILER_GUARD(var) ((void)0) +#endif + +/* ************************************* +* Basic Types +***************************************/ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) +# include <stdint.h> + typedef uint8_t xxh_u8; +#else + typedef unsigned char xxh_u8; +#endif +typedef XXH32_hash_t xxh_u32; + +#ifdef XXH_OLD_NAMES +# define BYTE xxh_u8 +# define U8 xxh_u8 +# define U32 xxh_u32 +#endif + +/* *** Memory access *** */ + +/*! + * @internal + * @fn xxh_u32 XXH_read32(const void* ptr) + * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit native endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readLE32(const void* ptr) + * @brief Reads an unaligned 32-bit little endian integer from @p ptr. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit little endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readBE32(const void* ptr) + * @brief Reads an unaligned 32-bit big endian integer from @p ptr. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * + * @param ptr The pointer to read from. + * @return The 32-bit big endian integer from the bytes at @p ptr. + */ + +/*! + * @internal + * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align) + * @brief Like @ref XXH_readLE32(), but has an option for aligned reads. + * + * Affected by @ref XXH_FORCE_MEMORY_ACCESS. + * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is + * always @ref XXH_alignment::XXH_unaligned. + * + * @param ptr The pointer to read from. + * @param align Whether @p ptr is aligned. + * @pre + * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte + * aligned. + * @return The 32-bit little endian integer from the bytes at @p ptr. + */ + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE32 and XXH_readBE32. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* + * Force direct memory access. Only works on CPU which support unaligned memory + * access in hardware. + */ +static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; } + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __pack instructions are safer but compiler specific, hence potentially + * problematic for some compilers. + * + * Currently only defined for GCC and ICC. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; } __attribute__((packed)) unalign; +#endif +static xxh_u32 XXH_read32(const void* ptr) +{ + typedef union { xxh_u32 u32; } __attribute__((packed)) xxh_unalign; + return ((const xxh_unalign*)ptr)->u32; +} + +#else + +/* + * Portable and safe solution. Generally efficient. 
+ * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u32 XXH_read32(const void* memPtr) +{ + xxh_u32 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + + +/* *** Endianness *** */ + +/*! + * @ingroup tuning + * @def XXH_CPU_LITTLE_ENDIAN + * @brief Whether the target is little endian. + * + * Defined to 1 if the target is little endian, or 0 if it is big endian. + * It can be defined externally, for example on the compiler command line. + * + * If it is not defined, + * a runtime check (which is usually constant folded) is used instead. + * + * @note + * This is not necessarily defined to an integer constant. + * + * @see XXH_isLittleEndian() for the runtime check. + */ +#ifndef XXH_CPU_LITTLE_ENDIAN +/* + * Try to detect endianness automatically, to avoid the nonstandard behavior + * in `XXH_isLittleEndian()` + */ +# if defined(_WIN32) /* Windows is always little endian */ \ + || defined(__LITTLE_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 1 +# elif defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_CPU_LITTLE_ENDIAN 0 +# else +/*! + * @internal + * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN. + * + * Most compilers will constant fold this. + */ +static int XXH_isLittleEndian(void) +{ + /* + * Portable and well-defined behavior. + * Don't use static: it is detrimental to performance. + */ + const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; + return one.c[0]; +} +# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian() +# endif +#endif + + + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) + +#ifdef __has_builtin +# define XXH_HAS_BUILTIN(x) __has_builtin(x) +#else +# define XXH_HAS_BUILTIN(x) 0 +#endif + +/*! + * @internal + * @def XXH_rotl32(x,r) + * @brief 32-bit rotate left. + * + * @param x The 32-bit integer to be rotated. + * @param r The number of bits to rotate. + * @pre + * @p r > 0 && @p r < 32 + * @note + * @p x and @p r may be evaluated multiple times. + * @return The rotated result. + */ +#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \ + && XXH_HAS_BUILTIN(__builtin_rotateleft64) +# define XXH_rotl32 __builtin_rotateleft32 +# define XXH_rotl64 __builtin_rotateleft64 +/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */ +#elif defined(_MSC_VER) +# define XXH_rotl32(x,r) _rotl(x,r) +# define XXH_rotl64(x,r) _rotl64(x,r) +#else +# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r)))) +# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r)))) +#endif + +/*! + * @internal + * @fn xxh_u32 XXH_swap32(xxh_u32 x) + * @brief A 32-bit byteswap. + * + * @param x The 32-bit integer to byteswap. + * @return @p x, byteswapped. + */ +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap32 _byteswap_ulong +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap32 __builtin_bswap32 +#else +static xxh_u32 XXH_swap32 (xxh_u32 x) +{ + return ((x << 24) & 0xff000000 ) | + ((x << 8) & 0x00ff0000 ) | + ((x >> 8) & 0x0000ff00 ) | + ((x >> 24) & 0x000000ff ); +} +#endif + + +/* *************************** +* Memory reads +*****************************/ + +/*! + * @internal + * @brief Enum to indicate whether a pointer is aligned. 
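
A small editorial self-check of the byteswap helper above; illustrative only:

    /* Not upstream code: a byteswap must reverse byte order regardless of
     * the platform's endianness. */
    static int xxh_swap32_is_correct(void)
    {
        xxh_u32 const v = 0x01234567U;
        return XXH_swap32(v) == 0x67452301U;  /* holds on every target */
    }
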
+ */ +typedef enum { + XXH_aligned, /*!< Aligned */ + XXH_unaligned /*!< Possibly unaligned */ +} XXH_alignment; + +/* + * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. + * + * This is ideal for older compilers which don't inline memcpy. + */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u32)bytePtr[1] << 8) + | ((xxh_u32)bytePtr[2] << 16) + | ((xxh_u32)bytePtr[3] << 24); +} + +XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[3] + | ((xxh_u32)bytePtr[2] << 8) + | ((xxh_u32)bytePtr[1] << 16) + | ((xxh_u32)bytePtr[0] << 24); +} + +#else +XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr)); +} + +static xxh_u32 XXH_readBE32(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u32 +XXH_readLE32_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) { + return XXH_readLE32(ptr); + } else { + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr); + } +} + + +/* ************************************* +* Misc +***************************************/ +/*! @ingroup public */ +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ******************************************************************* +* 32-bit hash functions +*********************************************************************/ +/*! + * @} + * @defgroup xxh32_impl XXH32 implementation + * @ingroup impl + * @{ + */ + /* #define instead of static const, to be used as initializers */ +#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */ +#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */ +#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */ +#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */ +#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */ + +#ifdef XXH_OLD_NAMES +# define PRIME32_1 XXH_PRIME32_1 +# define PRIME32_2 XXH_PRIME32_2 +# define PRIME32_3 XXH_PRIME32_3 +# define PRIME32_4 XXH_PRIME32_4 +# define PRIME32_5 XXH_PRIME32_5 +#endif + +/*! + * @internal + * @brief Normal stripe processing routine. + * + * This shuffles the bits so that any bit from @p input impacts several bits in + * @p acc. + * + * @param acc The accumulator lane. + * @param input The stripe of input to mix. + * @return The mixed accumulator lane. + */ +static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input) +{ + acc += input * XXH_PRIME32_2; + acc = XXH_rotl32(acc, 13); + acc *= XXH_PRIME32_1; +#if (defined(__SSE4_1__) || defined(__aarch64__)) && !defined(XXH_ENABLE_AUTOVECTORIZE) + /* + * UGLY HACK: + * A compiler fence is the only thing that prevents GCC and Clang from + * autovectorizing the XXH32 loop (pragmas and attributes don't work for some + * reason) without globally disabling SSE4.1. + * + * The reason we want to avoid vectorization is because despite working on + * 4 integers at a time, there are multiple factors slowing XXH32 down on + * SSE4: + * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on + * newer chips!) 
making it slightly slower to multiply four integers at + * once compared to four integers independently. Even when pmulld was + * fastest, Sandy/Ivy Bridge, it is still not worth it to go into SSE + * just to multiply unless doing a long operation. + * + * - Four instructions are required to rotate, + * movqda tmp, v // not required with VEX encoding + * pslld tmp, 13 // tmp <<= 13 + * psrld v, 19 // x >>= 19 + * por v, tmp // x |= tmp + * compared to one for scalar: + * roll v, 13 // reliably fast across the board + * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason + * + * - Instruction level parallelism is actually more beneficial here because + * the SIMD actually serializes this operation: While v1 is rotating, v2 + * can load data, while v3 can multiply. SSE forces them to operate + * together. + * + * This is also enabled on AArch64, as Clang autovectorizes it incorrectly + * and it is pointless writing a NEON implementation that is basically the + * same speed as scalar for XXH32. + */ + XXH_COMPILER_GUARD(acc); +#endif + return acc; +} + +/*! + * @internal + * @brief Mixes all bits to finalize the hash. + * + * The final mix ensures that all input bits have a chance to impact any bit in + * the output digest, resulting in an unbiased distribution. + * + * @param h32 The hash to avalanche. + * @return The avalanched hash. + */ +static xxh_u32 XXH32_avalanche(xxh_u32 h32) +{ + h32 ^= h32 >> 15; + h32 *= XXH_PRIME32_2; + h32 ^= h32 >> 13; + h32 *= XXH_PRIME32_3; + h32 ^= h32 >> 16; + return(h32); +} + +#define XXH_get32bits(p) XXH_readLE32_align(p, align) + +/*! + * @internal + * @brief Processes the last 0-15 bytes of @p ptr. + * + * There may be up to 15 bytes remaining to consume from the input. + * This final stage will digest them to ensure that all input bytes are present + * in the final mix. + * + * @param h32 The hash to finalize. + * @param ptr The pointer to the remaining input. + * @param len The remaining length, modulo 16. + * @param align Whether @p ptr is aligned. + * @return The finalized hash. 
+ */ +static xxh_u32 +XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ +#define XXH_PROCESS1 do { \ + h32 += (*ptr++) * XXH_PRIME32_5; \ + h32 = XXH_rotl32(h32, 11) * XXH_PRIME32_1; \ +} while (0) + +#define XXH_PROCESS4 do { \ + h32 += XXH_get32bits(ptr) * XXH_PRIME32_3; \ + ptr += 4; \ + h32 = XXH_rotl32(h32, 17) * XXH_PRIME32_4; \ +} while (0) + + if (ptr==NULL) XXH_ASSERT(len == 0); + + /* Compact rerolled version; generally faster */ + if (!XXH32_ENDJMP) { + len &= 15; + while (len >= 4) { + XXH_PROCESS4; + len -= 4; + } + while (len > 0) { + XXH_PROCESS1; + --len; + } + return XXH32_avalanche(h32); + } else { + switch(len&15) /* or switch(bEnd - p) */ { + case 12: XXH_PROCESS4; + XXH_FALLTHROUGH; + case 8: XXH_PROCESS4; + XXH_FALLTHROUGH; + case 4: XXH_PROCESS4; + return XXH32_avalanche(h32); + + case 13: XXH_PROCESS4; + XXH_FALLTHROUGH; + case 9: XXH_PROCESS4; + XXH_FALLTHROUGH; + case 5: XXH_PROCESS4; + XXH_PROCESS1; + return XXH32_avalanche(h32); + + case 14: XXH_PROCESS4; + XXH_FALLTHROUGH; + case 10: XXH_PROCESS4; + XXH_FALLTHROUGH; + case 6: XXH_PROCESS4; + XXH_PROCESS1; + XXH_PROCESS1; + return XXH32_avalanche(h32); + + case 15: XXH_PROCESS4; + XXH_FALLTHROUGH; + case 11: XXH_PROCESS4; + XXH_FALLTHROUGH; + case 7: XXH_PROCESS4; + XXH_FALLTHROUGH; + case 3: XXH_PROCESS1; + XXH_FALLTHROUGH; + case 2: XXH_PROCESS1; + XXH_FALLTHROUGH; + case 1: XXH_PROCESS1; + XXH_FALLTHROUGH; + case 0: return XXH32_avalanche(h32); + } + XXH_ASSERT(0); + return h32; /* reaching this point is deemed impossible */ + } +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1 XXH_PROCESS1 +# define PROCESS4 XXH_PROCESS4 +#else +# undef XXH_PROCESS1 +# undef XXH_PROCESS4 +#endif + +/*! + * @internal + * @brief The implementation for @ref XXH32(). + * + * @param input , len , seed Directly passed from @ref XXH32(). + * @param align Whether @p input is aligned. + * @return The calculated hash. + */ +XXH_FORCE_INLINE xxh_u32 +XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align) +{ + xxh_u32 h32; + + if (input==NULL) XXH_ASSERT(len == 0); + + if (len>=16) { + const xxh_u8* const bEnd = input + len; + const xxh_u8* const limit = bEnd - 15; + xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + xxh_u32 v2 = seed + XXH_PRIME32_2; + xxh_u32 v3 = seed + 0; + xxh_u32 v4 = seed - XXH_PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4; + v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4; + v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4; + v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4; + } while (input < limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + XXH_PRIME32_5; + } + + h32 += (xxh_u32)len; + + return XXH32_finalize(h32, input, len&15, align); +} + +/*! 
@ingroup xxh32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_state_t state; + XXH32_reset(&state, seed); + XXH32_update(&state, (const xxh_u8*)input, len); + return XXH32_digest(&state); +#else + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); +#endif +} + + + +/******* Hash streaming *******/ +/*! + * @ingroup xxh32_family + */ +XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void) +{ + return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t)); +} +/*! @ingroup xxh32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +/*! @ingroup xxh32_family */ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + XXH_memcpy(dstState, srcState, sizeof(*dstState)); +} + +/*! @ingroup xxh32_family */ +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed) +{ + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2; + statePtr->v[1] = seed + XXH_PRIME32_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME32_1; + return XXH_OK; +} + + +/*! @ingroup xxh32_family */ +XXH_PUBLIC_API XXH_errorcode +XXH32_update(XXH32_state_t* state, const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len_32 += (XXH32_hash_t)len; + state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16)); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len); + state->memsize += (XXH32_hash_t)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const xxh_u32* p32 = state->mem32; + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32)); + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const xxh_u8* const limit = bEnd - 16; + + do { + state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4; + state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4; + state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4; + state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! 
@ingroup xxh32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state) +{ + xxh_u32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v[0], 1) + + XXH_rotl32(state->v[1], 7) + + XXH_rotl32(state->v[2], 12) + + XXH_rotl32(state->v[3], 18); + } else { + h32 = state->v[2] /* == seed */ + XXH_PRIME32_5; + } + + h32 += state->total_len_32; + + return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned); +} + + +/******* Canonical representation *******/ + +/*! + * @ingroup xxh32_family + * The default return values from XXH functions are unsigned 32 and 64 bit + * integers. + * + * The canonical representation uses big endian convention, the same convention + * as human-readable numbers (large digits first). + * + * This way, hash values can be written into a file or buffer, remaining + * comparable across different systems. + * + * The following functions allow transformation of hash values to and from their + * canonical format. + */ +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + /* XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); */ + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} +/*! @ingroup xxh32_family */ +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + + +#ifndef XXH_NO_LONG_LONG + +/* ******************************************************************* +* 64-bit hash functions +*********************************************************************/ +/*! + * @} + * @ingroup impl + * @{ + */ +/******* Memory access *******/ + +typedef XXH64_hash_t xxh_u64; + +#ifdef XXH_OLD_NAMES +# define U64 xxh_u64 +#endif + +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) +/* + * Manual byteshift. Best for old compilers which don't inline memcpy. + * We actually directly use XXH_readLE64 and XXH_readBE64. + */ +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2)) + +/* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + return *(const xxh_u64*) memPtr; +} + +#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1)) + +/* + * __pack instructions are safer, but compiler specific, hence potentially + * problematic for some compilers. + * + * Currently only defined for GCC and ICC. + */ +#ifdef XXH_OLD_NAMES +typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64; +#endif +static xxh_u64 XXH_read64(const void* ptr) +{ + typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) xxh_unalign64; + return ((const xxh_unalign64*)ptr)->u64; +} + +#else + +/* + * Portable and safe solution. Generally efficient. 
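
An editorial end-to-end sketch of the streaming implementation above: two chunks in, canonical bytes out; error handling omitted and the helper name hypothetical:

    /* Feed two chunks through the streaming API, then emit the digest in
     * canonical (big endian) form. */
    static void xxh32_stream_demo(const void* part1, size_t len1,
                                  const void* part2, size_t len2,
                                  XXH32_canonical_t* out)
    {
        XXH32_state_t* const st = XXH32_createState();
        XXH32_reset(st, 0);
        XXH32_update(st, part1, len1);
        XXH32_update(st, part2, len2);
        XXH32_canonicalFromHash(out, XXH32_digest(st));
        XXH32_freeState(st);
    }
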
+ * see: http://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html + */ +static xxh_u64 XXH_read64(const void* memPtr) +{ + xxh_u64 val; + XXH_memcpy(&val, memPtr, sizeof(val)); + return val; +} + +#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */ + +#if defined(_MSC_VER) /* Visual Studio */ +# define XXH_swap64 _byteswap_uint64 +#elif XXH_GCC_VERSION >= 403 +# define XXH_swap64 __builtin_bswap64 +#else +static xxh_u64 XXH_swap64(xxh_u64 x) +{ + return ((x << 56) & 0xff00000000000000ULL) | + ((x << 40) & 0x00ff000000000000ULL) | + ((x << 24) & 0x0000ff0000000000ULL) | + ((x << 8) & 0x000000ff00000000ULL) | + ((x >> 8) & 0x00000000ff000000ULL) | + ((x >> 24) & 0x0000000000ff0000ULL) | + ((x >> 40) & 0x000000000000ff00ULL) | + ((x >> 56) & 0x00000000000000ffULL); +} +#endif + + +/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */ +#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3)) + +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[0] + | ((xxh_u64)bytePtr[1] << 8) + | ((xxh_u64)bytePtr[2] << 16) + | ((xxh_u64)bytePtr[3] << 24) + | ((xxh_u64)bytePtr[4] << 32) + | ((xxh_u64)bytePtr[5] << 40) + | ((xxh_u64)bytePtr[6] << 48) + | ((xxh_u64)bytePtr[7] << 56); +} + +XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr) +{ + const xxh_u8* bytePtr = (const xxh_u8 *)memPtr; + return bytePtr[7] + | ((xxh_u64)bytePtr[6] << 8) + | ((xxh_u64)bytePtr[5] << 16) + | ((xxh_u64)bytePtr[4] << 24) + | ((xxh_u64)bytePtr[3] << 32) + | ((xxh_u64)bytePtr[2] << 40) + | ((xxh_u64)bytePtr[1] << 48) + | ((xxh_u64)bytePtr[0] << 56); +} + +#else +XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr)); +} + +static xxh_u64 XXH_readBE64(const void* ptr) +{ + return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr); +} +#endif + +XXH_FORCE_INLINE xxh_u64 +XXH_readLE64_align(const void* ptr, XXH_alignment align) +{ + if (align==XXH_unaligned) + return XXH_readLE64(ptr); + else + return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr); +} + + +/******* xxh64 *******/ +/*! 
+ * @} + * @defgroup xxh64_impl XXH64 implementation + * @ingroup impl + * @{ + */ +/* #define rather than static const, to be used as initializers */ +#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */ +#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */ +#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */ +#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */ +#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */ + +#ifdef XXH_OLD_NAMES +# define PRIME64_1 XXH_PRIME64_1 +# define PRIME64_2 XXH_PRIME64_2 +# define PRIME64_3 XXH_PRIME64_3 +# define PRIME64_4 XXH_PRIME64_4 +# define PRIME64_5 XXH_PRIME64_5 +#endif + +static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input) +{ + acc += input * XXH_PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= XXH_PRIME64_1; + return acc; +} + +static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4; + return acc; +} + +static xxh_u64 XXH64_avalanche(xxh_u64 h64) +{ + h64 ^= h64 >> 33; + h64 *= XXH_PRIME64_2; + h64 ^= h64 >> 29; + h64 *= XXH_PRIME64_3; + h64 ^= h64 >> 32; + return h64; +} + + +#define XXH_get64bits(p) XXH_readLE64_align(p, align) + +static xxh_u64 +XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align) +{ + if (ptr==NULL) XXH_ASSERT(len == 0); + len &= 31; + while (len >= 8) { + xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); + ptr += 8; + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * XXH_PRIME64_1 + XXH_PRIME64_4; + len -= 8; + } + if (len >= 4) { + h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1; + ptr += 4; + h64 = XXH_rotl64(h64, 23) * XXH_PRIME64_2 + XXH_PRIME64_3; + len -= 4; + } + while (len > 0) { + h64 ^= (*ptr++) * XXH_PRIME64_5; + h64 = XXH_rotl64(h64, 11) * XXH_PRIME64_1; + --len; + } + return XXH64_avalanche(h64); +} + +#ifdef XXH_OLD_NAMES +# define PROCESS1_64 XXH_PROCESS1_64 +# define PROCESS4_64 XXH_PROCESS4_64 +# define PROCESS8_64 XXH_PROCESS8_64 +#else +# undef XXH_PROCESS1_64 +# undef XXH_PROCESS4_64 +# undef XXH_PROCESS8_64 +#endif + +XXH_FORCE_INLINE xxh_u64 +XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align) +{ + xxh_u64 h64; + if (input==NULL) XXH_ASSERT(len == 0); + + if (len>=32) { + const xxh_u8* const bEnd = input + len; + const xxh_u8* const limit = bEnd - 31; + xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + xxh_u64 v2 = seed + XXH_PRIME64_2; + xxh_u64 v3 = seed + 0; + xxh_u64 v4 = seed - XXH_PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8; + v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8; + v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8; + v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8; + } while (input<limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + XXH_PRIME64_5; + } + + h64 += (xxh_u64) len; + + return XXH64_finalize(h64, input, len, align); +} + + +/*!
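+ * Informal summary of the construction implemented above (a data-flow
+ * sketch, not normative):
+ *
+ *     v1..v4 = four seeded accumulators;
+ *     for each 32-byte stripe: vN = XXH64_round(vN, next 8 input bytes);
+ *     h64 = rotl(v1,1) + rotl(v2,7) + rotl(v3,12) + rotl(v4,18);
+ *     h64 = XXH64_mergeRound(h64, vN) for each lane;
+ *     h64 += len; then XXH64_finalize() consumes the 0..31 trailing bytes
+ *     and applies XXH64_avalanche().
+ *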
@ingroup xxh64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_state_t state; + XXH64_reset(&state, seed); + XXH64_update(&state, (const xxh_u8*)input, len); + return XXH64_digest(&state); +#else + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned); + } } + + return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned); + +#endif +} + +/******* Hash Streaming *******/ + +/*! @ingroup xxh64_family*/ +XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void) +{ + return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t)); +} +/*! @ingroup xxh64_family */ +XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr) +{ + XXH_free(statePtr); + return XXH_OK; +} + +/*! @ingroup xxh64_family */ +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) +{ + XXH_memcpy(dstState, srcState, sizeof(*dstState)); +} + +/*! @ingroup xxh64_family */ +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed) +{ + XXH_ASSERT(statePtr != NULL); + memset(statePtr, 0, sizeof(*statePtr)); + statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2; + statePtr->v[1] = seed + XXH_PRIME64_2; + statePtr->v[2] = seed + 0; + statePtr->v[3] = seed - XXH_PRIME64_1; + return XXH_OK; +} + +/*! @ingroup xxh64_family */ +XXH_PUBLIC_API XXH_errorcode +XXH64_update (XXH64_state_t* state, const void* input, size_t len) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + { const xxh_u8* p = (const xxh_u8*)input; + const xxh_u8* const bEnd = p + len; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len); + state->memsize += (xxh_u32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0)); + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1)); + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2)); + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3)); + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const xxh_u8* const limit = bEnd - 32; + + do { + state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8; + state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8; + state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8; + state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8; + } while (p<=limit); + + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + } + + return XXH_OK; +} + + +/*! 
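+ * Serialization sketch (illustrative, not upstream code; `buf`/`len` are
+ * placeholders): to store a digest portably, convert it to the big-endian
+ * canonical form defined further below:
+ *
+ *     XXH64_canonical_t c;
+ *     XXH64_canonicalFromHash(&c, XXH64(buf, len, 0));
+ *     // write the bytes of `c`; a reader on any endianness recovers the
+ *     // value with XXH64_hashFromCanonical(&c).
+ *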
@ingroup xxh64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t* state) +{ + xxh_u64 h64; + + if (state->total_len >= 32) { + h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18); + h64 = XXH64_mergeRound(h64, state->v[0]); + h64 = XXH64_mergeRound(h64, state->v[1]); + h64 = XXH64_mergeRound(h64, state->v[2]); + h64 = XXH64_mergeRound(h64, state->v[3]); + } else { + h64 = state->v[2] /*seed*/ + XXH_PRIME64_5; + } + + h64 += (xxh_u64) state->total_len; + + return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned); +} + + +/******* Canonical representation *******/ + +/*! @ingroup xxh64_family */ +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + /* XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); */ + if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash); + XXH_memcpy(dst, &hash, sizeof(*dst)); +} + +/*! @ingroup xxh64_family */ +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} + +#ifndef XXH_NO_XXH3 + +/* ********************************************************************* +* XXH3 +* New generation hash designed for speed on small keys and vectorization +************************************************************************ */ +/*! + * @} + * @defgroup xxh3_impl XXH3 implementation + * @ingroup impl + * @{ + */ + +/* === Compiler specifics === */ + +#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */ +# define XXH_RESTRICT /* disable */ +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */ +# define XXH_RESTRICT restrict +#else +/* Note: it might be useful to define __restrict or __restrict__ for some C++ compilers */ +# define XXH_RESTRICT /* disable */ +#endif + +#if (defined(__GNUC__) && (__GNUC__ >= 3)) \ + || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \ + || defined(__clang__) +# define XXH_likely(x) __builtin_expect(x, 1) +# define XXH_unlikely(x) __builtin_expect(x, 0) +#else +# define XXH_likely(x) (x) +# define XXH_unlikely(x) (x) +#endif + +#if defined(__GNUC__) || defined(__clang__) +# if defined(__ARM_NEON__) || defined(__ARM_NEON) \ + || defined(__aarch64__) || defined(_M_ARM) \ + || defined(_M_ARM64) || defined(_M_ARM64EC) +# define inline __inline__ /* circumvent a clang bug */ +# include <arm_neon.h> +# undef inline +# elif defined(__AVX2__) +# include <immintrin.h> +# elif defined(__SSE2__) +# include <emmintrin.h> +# endif +#endif + +#if defined(_MSC_VER) +# include <intrin.h> +#endif + +/* + * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while + * remaining a true 64-bit/128-bit hash function. + * + * This is done by prioritizing a subset of 64-bit operations that can be + * emulated without too many steps on the average 32-bit machine. + * + * For example, these two lines seem similar, and run equally fast on 64-bit: + * + * xxh_u64 x; + * x ^= (x >> 47); // good + * x ^= (x >> 13); // bad + * + * However, to a 32-bit machine, there is a major difference. + * + * x ^= (x >> 47) looks like this: + * + * x.lo ^= (x.hi >> (47 - 32)); + * + * while x ^= (x >> 13) looks like this: + * + * // note: funnel shifts are not usually cheap.
+ * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13)); + * x.hi ^= (x.hi >> 13); + * + * The first one is significantly faster than the second, simply because the + * shift is larger than 32. This means: + * - All the bits we need are in the upper 32 bits, so we can ignore the lower + * 32 bits in the shift. + * - The shift result will always fit in the lower 32 bits, and therefore, + * we can ignore the upper 32 bits in the xor. + * + * Thanks to this optimization, XXH3 only requires these features to be efficient: + * + * - Usable unaligned access + * - A 32-bit or 64-bit ALU + * - If 32-bit, a decent ADC instruction + * - A 32 or 64-bit multiply with a 64-bit result + * - For the 128-bit variant, a decent byteswap helps short inputs. + * + * The first two are already required by XXH32, and almost all 32-bit and 64-bit + * platforms which can run XXH32 can run XXH3 efficiently. + * + * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one + * notable exception. + * + * First of all, Thumb-1 lacks support for the UMULL instruction which + * performs the important long multiply. This means numerous __aeabi_lmul + * calls. + * + * Second of all, the 8 functional registers are just not enough. + * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need + * Lo registers, and this shuffling results in thousands more MOVs than A32. + * + * A32 and T32 don't have this limitation. They can access all 14 registers, + * do a 32->64 multiply with UMULL, and the flexible operand allowing free + * shifts is helpful, too. + * + * Therefore, we do a quick sanity check. + * + * If compiling Thumb-1 for a target which supports ARM instructions, we will + * emit a warning, as it is not a "sane" platform to compile for. + * + * Usually, if this happens, it is because of an accident and you probably need + * to specify -march, as you likely meant to compile for a newer architecture. + * + * Credit: large sections of the vectorial and asm source code paths + * have been contributed by @easyaspi314 + */ +#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM) +# warning "XXH3 is highly inefficient without ARM or Thumb-2." +#endif + +/* ========================================== + * Vectorization detection + * ========================================== */ + +#ifdef XXH_DOXYGEN +/*! + * @ingroup tuning + * @brief Overrides the vectorization implementation chosen for XXH3. + * + * Can be defined to 0 to disable SIMD or any of the values mentioned in + * @ref XXH_VECTOR_TYPE. + * + * If this is not defined, it uses predefined macros to determine the best + * implementation. + */ +# define XXH_VECTOR XXH_SCALAR +/*! + * @ingroup tuning + * @brief Possible values for @ref XXH_VECTOR. + * + * Note that these are actually implemented as macros. + * + * If this is not defined, it is detected automatically. + * @ref XXH_X86DISPATCH overrides this. + */ +enum XXH_VECTOR_TYPE /* fake enum */ { + XXH_SCALAR = 0, /*!< Portable scalar version */ + XXH_SSE2 = 1, /*!< + * SSE2 for Pentium 4, Opteron, all x86_64. + * + * @note SSE2 is also guaranteed on Windows 10, macOS, and + * Android x86. + */ + XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */ + XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */ + XXH_NEON = 4, /*!< NEON for most ARMv7-A and all AArch64 */ + XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */ +}; +/*! + * @ingroup tuning + * @brief Selects the minimum alignment for XXH3's accumulators. 
+ * + * When using SIMD, this should match the alignment required for said vector + * type, so, for example, 32 for AVX2. + * + * Default: Auto detected. + */ +# define XXH_ACC_ALIGN 8 +#endif + +/* Actual definition */ +#ifndef XXH_DOXYGEN +# define XXH_SCALAR 0 +# define XXH_SSE2 1 +# define XXH_AVX2 2 +# define XXH_AVX512 3 +# define XXH_NEON 4 +# define XXH_VSX 5 +#endif + +#ifndef XXH_VECTOR /* can be defined on command line */ +# if ( \ + defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \ + || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \ + ) && ( \ + defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \ + ) +# define XXH_VECTOR XXH_NEON +# elif defined(__AVX512F__) +# define XXH_VECTOR XXH_AVX512 +# elif defined(__AVX2__) +# define XXH_VECTOR XXH_AVX2 +# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2)) +# define XXH_VECTOR XXH_SSE2 +# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \ + || (defined(__s390x__) && defined(__VEC__)) \ + && defined(__GNUC__) /* TODO: IBM XL */ +# define XXH_VECTOR XXH_VSX +# else +# define XXH_VECTOR XXH_SCALAR +# endif +#endif + +/* + * Controls the alignment of the accumulator, + * for compatibility with aligned vector loads, which are usually faster. + */ +#ifndef XXH_ACC_ALIGN +# if defined(XXH_X86DISPATCH) +# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */ +# elif XXH_VECTOR == XXH_SCALAR /* scalar */ +# define XXH_ACC_ALIGN 8 +# elif XXH_VECTOR == XXH_SSE2 /* sse2 */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == XXH_AVX2 /* avx2 */ +# define XXH_ACC_ALIGN 32 +# elif XXH_VECTOR == XXH_NEON /* neon */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == XXH_VSX /* vsx */ +# define XXH_ACC_ALIGN 16 +# elif XXH_VECTOR == XXH_AVX512 /* avx512 */ +# define XXH_ACC_ALIGN 64 +# endif +#endif + +#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \ + || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512 +# define XXH_SEC_ALIGN XXH_ACC_ALIGN +#else +# define XXH_SEC_ALIGN 8 +#endif + +/* + * UGLY HACK: + * GCC usually generates the best code with -O3 for xxHash. + * + * However, when targeting AVX2, it is overzealous in its unrolling, resulting + * in code roughly 3/4 the speed of Clang. + * + * There are other issues, such as GCC splitting _mm256_loadu_si256 into + * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which + * only applies to Sandy and Ivy Bridge... which don't even support AVX2. + * + * That is why when compiling the AVX2 version, it is recommended to use either + * -O2 -mavx2 -march=haswell + * or + * -O2 -mavx2 -mno-avx256-split-unaligned-load + * for decent performance, or to use Clang instead. + * + * Fortunately, we can control the first one with a pragma that forces GCC into + * -O2, but the other one we can't control without "failed to inline always + * inline function due to target mismatch" warnings. + */ +#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ + && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */ +# pragma GCC push_options +# pragma GCC optimize("-O2") +#endif + + +#if XXH_VECTOR == XXH_NEON +/* + * NEON's setup for vmlal_u32 is a little more complicated than it is on + * SSE2, AVX2, and VSX. + * + * While PMULUDQ and VMULEUW both perform a mask, VMLAL.U32 performs an upcast.
+ * + * To do the same operation, the 128-bit 'Q' register needs to be split into + * two 64-bit 'D' registers, performing this operation: + * + * [ a | b ] + * | '---------. .--------' | + * | x | + * | .---------' '--------. | + * [ a & 0xFFFFFFFF | b & 0xFFFFFFFF ],[ a >> 32 | b >> 32 ] + * + * Due to significant changes in aarch64, the fastest method for aarch64 is + * completely different from the fastest method for ARMv7-A. + * + * ARMv7-A treats D registers as unions overlaying Q registers, so modifying + * D11 will modify the high half of Q5. This is similar to how modifying AH + * will only affect bits 8-15 of AX on x86. + * + * VZIP takes two registers, and puts even lanes in one register and odd lanes + * in the other. + * + * On ARMv7-A, this strangely modifies both parameters in place instead of + * taking the usual 3-operand form. + * + * Therefore, if we want to do this, we can simply use a D-form VZIP.32 on the + * lower and upper halves of the Q register to end up with the high and low + * halves where we want - all in one instruction. + * + * vzip.32 d10, d11 @ d10 = { d10[0], d11[0] }; d11 = { d10[1], d11[1] } + * + * Unfortunately we need inline assembly for this: instructions modifying two + * registers at once are not expressible in GCC or Clang's IR, and they would + * have to create a copy. + * + * aarch64 requires a different approach. + * + * In order to make it easier to write a decent compiler for aarch64, many + * quirks were removed, such as conditional execution. + * + * NEON was also affected by this. + * + * aarch64 cannot access the high bits of a Q-form register, and writes to a + * D-form register zero the high bits, similar to how writes to W-form scalar + * registers (or DWORD registers on x86_64) work. + * + * The formerly free vget_high intrinsics now require a vext (with a few + * exceptions) + * + * Additionally, VZIP was replaced by ZIP1 and ZIP2, which are the equivalent + * of PUNPCKL* and PUNPCKH* in SSE, respectively, in order to only modify one + * operand. + * + * The equivalent of the VZIP.32 on the lower and upper halves would be this + * mess: + * + * ext v2.4s, v0.4s, v0.4s, #2 // v2 = { v0[2], v0[3], v0[0], v0[1] } + * zip1 v1.2s, v0.2s, v2.2s // v1 = { v0[0], v2[0] } + * zip2 v0.2s, v0.2s, v1.2s // v0 = { v0[1], v2[1] } + * + * Instead, we use a literal downcast, vmovn_u64 (XTN), and vshrn_n_u64 (SHRN): + * + * shrn v1.2s, v0.2d, #32 // v1 = (uint32x2_t)(v0 >> 32); + * xtn v0.2s, v0.2d // v0 = (uint32x2_t)(v0 & 0xFFFFFFFF); + * + * This is available on ARMv7-A, but is less efficient than a single VZIP.32. + */ + +/*!
+ * Function-like macro: + * void XXH_SPLIT_IN_PLACE(uint64x2_t &in, uint32x2_t &outLo, uint32x2_t &outHi) + * { + * outLo = (uint32x2_t)(in & 0xFFFFFFFF); + * outHi = (uint32x2_t)(in >> 32); + * in = UNDEFINED; + * } + */ +# if !defined(XXH_NO_VZIP_HACK) /* define to disable */ \ + && (defined(__GNUC__) || defined(__clang__)) \ + && (defined(__arm__) || defined(__thumb__) || defined(_M_ARM)) +# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \ + do { \ + /* Undocumented GCC/Clang operand modifier: %e0 = lower D half, %f0 = upper D half */ \ + /* https://github.com/gcc-mirror/gcc/blob/38cf91e5/gcc/config/arm/arm.c#L22486 */ \ + /* https://github.com/llvm-mirror/llvm/blob/2c4ca683/lib/Target/ARM/ARMAsmPrinter.cpp#L399 */ \ + __asm__("vzip.32 %e0, %f0" : "+w" (in)); \ + (outLo) = vget_low_u32 (vreinterpretq_u32_u64(in)); \ + (outHi) = vget_high_u32(vreinterpretq_u32_u64(in)); \ + } while (0) +# else +# define XXH_SPLIT_IN_PLACE(in, outLo, outHi) \ + do { \ + (outLo) = vmovn_u64 (in); \ + (outHi) = vshrn_n_u64 ((in), 32); \ + } while (0) +# endif + +/*! + * @ingroup tuning + * @brief Controls the NEON to scalar ratio for XXH3 + * + * On AArch64 when not optimizing for size, XXH3 will run 6 lanes using NEON and + * 2 lanes on scalar by default. + * + * This can be set to 2, 4, 6, or 8. ARMv7 will default to all 8 NEON lanes, as the + * emulated 64-bit arithmetic is too slow. + * + * Modern ARM CPUs are _very_ sensitive to how their pipelines are used. + * + * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but it can't + * have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions, + * you are only using 2/3 of the CPU bandwidth. + * + * This is even more noticeable on the more advanced cores like the A76 which + * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once. + * + * Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the + * remaining lanes will use scalar instructions. This improves the bandwidth + * and also gives the integer pipelines something to do besides twiddling loop + * counters and pointers. + * + * This change benefits CPUs with large micro-op buffers without negatively affecting + * other CPUs: + * + * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. | + * |:----------------------|:--------------------|----------:|-----------:|------:| + * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% | + * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% | + * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% | + * + * It also seems to fix some bad codegen on GCC, making it almost as fast as clang. + * + * @see XXH3_accumulate_512_neon() + */ +# ifndef XXH3_NEON_LANES +# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \ + && !defined(__OPTIMIZE_SIZE__) +# define XXH3_NEON_LANES 6 +# else +# define XXH3_NEON_LANES XXH_ACC_NB +# endif +# endif +#endif /* XXH_VECTOR == XXH_NEON */ + +/* + * VSX and Z Vector helpers. + * + * This is very messy, and any pull requests to clean this up are welcome. + * + * There are a lot of problems with supporting VSX and s390x, due to + * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
+ */ +#if XXH_VECTOR == XXH_VSX +# if defined(__s390x__) +# include <s390intrin.h> +# else +/* gcc's altivec.h can have the unwanted consequence of unconditionally + * #defining the bool, vector, and pixel keywords, + * with bad consequences for programs already using these keywords for other purposes. + * The paragraph defining these macros is skipped when __APPLE_ALTIVEC__ is defined. + * __APPLE_ALTIVEC__ is _generally_ defined automatically by the compiler, + * but it seems that, in some cases, it isn't. + * Force the build macro to be defined, so that keywords are not altered. + */ +# if defined(__GNUC__) && !defined(__APPLE_ALTIVEC__) +# define __APPLE_ALTIVEC__ +# endif +# include <altivec.h> +# endif + +typedef __vector unsigned long long xxh_u64x2; +typedef __vector unsigned char xxh_u8x16; +typedef __vector unsigned xxh_u32x4; + +# ifndef XXH_VSX_BE +# if defined(__BIG_ENDIAN__) \ + || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) +# define XXH_VSX_BE 1 +# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__ +# warning "-maltivec=be is not recommended. Please use native endianness." +# define XXH_VSX_BE 1 +# else +# define XXH_VSX_BE 0 +# endif +# endif /* !defined(XXH_VSX_BE) */ + +# if XXH_VSX_BE +# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__)) +# define XXH_vec_revb vec_revb +# else +/*! + * A polyfill for POWER9's vec_revb(). + */ +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val) +{ + xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, + 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 }; + return vec_perm(val, val, vByteSwap); +} +# endif +# endif /* XXH_VSX_BE */ + +/*! + * Performs an unaligned vector load and byte swaps it on big endian. + */ +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr) +{ + xxh_u64x2 ret; + XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2)); +# if XXH_VSX_BE + ret = XXH_vec_revb(ret); +# endif + return ret; +} + +/* + * vec_mulo and vec_mule are very problematic intrinsics on PowerPC + * + * These intrinsics weren't added until GCC 8, despite existing for a while, + * and they are endian dependent. Also, their meanings swap depending on the version. + */ +# if defined(__s390x__) + /* s390x is always big endian, no issue on this platform */ +# define XXH_vec_mulo vec_mulo +# define XXH_vec_mule vec_mule +# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) +/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */ +# define XXH_vec_mulo __builtin_altivec_vmulouw +# define XXH_vec_mule __builtin_altivec_vmuleuw +# else +/* gcc needs inline assembly */ +/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h.
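+ * (vmulouw/vmuleuw multiply the odd/even 32-bit lanes of their inputs into
+ * full 64-bit products, which is exactly the widening multiply the
+ * accumulator loop needs.)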
*/ +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b) +{ + xxh_u64x2 result; + __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); + return result; +} +XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b) +{ + xxh_u64x2 result; + __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b)); + return result; +} +# endif /* XXH_vec_mulo, XXH_vec_mule */ +#endif /* XXH_VECTOR == XXH_VSX */ + + +/* prefetch + * can be disabled, by declaring XXH_NO_PREFETCH build macro */ +#if defined(XXH_NO_PREFETCH) +# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ +#else +# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */ +# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */ +# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0) +# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) ) +# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */) +# else +# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */ +# endif +#endif /* XXH_NO_PREFETCH */ + + +/* ========================================== + * XXH3 default settings + * ========================================== */ + +#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */ + +#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN) +# error "default keyset is not large enough" +#endif + +/*! Pseudorandom secret taken directly from FARSH. */ +XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = { + 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c, + 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f, + 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21, + 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c, + 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3, + 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8, + 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d, + 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64, + 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb, + 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e, + 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce, + 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e, +}; + + +#ifdef XXH_OLD_NAMES +# define kSecret XXH3_kSecret +#endif + +#ifdef XXH_DOXYGEN +/*! + * @brief Calculates a 32-bit to 64-bit long multiply. + * + * Implemented as a macro. + * + * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't + * need to (but it shouldn't need to anyways, it is about 7 instructions to do + * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we + * use that instead of the normal method. + * + * If you are compiling for platforms like Thumb-1 and don't have a better option, + * you may also want to write your own long multiply routine here. + * + * @param x, y Numbers to be multiplied + * @return 64-bit product of the low 32 bits of @p x and @p y. 
+ */ +XXH_FORCE_INLINE xxh_u64 +XXH_mult32to64(xxh_u64 x, xxh_u64 y) +{ + return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF); +} +#elif defined(_MSC_VER) && defined(_M_IX86) +# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y)) +#else +/* + * Downcast + upcast is usually better than masking on older compilers like + * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers. + * + * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands + * and perform a full 64x64 multiply -- entirely redundant on 32-bit. + */ +# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y)) +#endif + +/*! + * @brief Calculates a 64->128-bit long multiply. + * + * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar + * version. + * + * @param lhs , rhs The 64-bit integers to be multiplied + * @return The 128-bit result represented in an @ref XXH128_hash_t. + */ +static XXH128_hash_t +XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs) +{ + /* + * GCC/Clang __uint128_t method. + * + * On most 64-bit targets, GCC and Clang define a __uint128_t type. + * This is usually the best way as it usually uses a native long 64-bit + * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64. + * + * Usually. + * + * Despite being a 32-bit platform, Clang (and emscripten) define this type + * despite not having the arithmetic for it. This results in a laggy + * compiler builtin call which calculates a full 128-bit multiply. + * In that case it is best to use the portable one. + * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677 + */ +#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \ + && defined(__SIZEOF_INT128__) \ + || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128) + + __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs; + XXH128_hash_t r128; + r128.low64 = (xxh_u64)(product); + r128.high64 = (xxh_u64)(product >> 64); + return r128; + + /* + * MSVC for x64's _umul128 method. + * + * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct); + * + * This compiles to single operand MUL on x64. + */ +#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(_umul128) +#endif + xxh_u64 product_high; + xxh_u64 const product_low = _umul128(lhs, rhs, &product_high); + XXH128_hash_t r128; + r128.low64 = product_low; + r128.high64 = product_high; + return r128; + + /* + * MSVC for ARM64's __umulh method. + * + * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method. + */ +#elif defined(_M_ARM64) || defined(_M_ARM64EC) + +#ifndef _MSC_VER +# pragma intrinsic(__umulh) +#endif + XXH128_hash_t r128; + r128.low64 = lhs * rhs; + r128.high64 = __umulh(lhs, rhs); + return r128; + +#else + /* + * Portable scalar method. Optimized for 32-bit and 64-bit ALUs. + * + * This is a fast and simple grade school multiply, which is shown below + * with base 10 arithmetic instead of base 0x100000000. 
+ * + * 9 3 // D2 lhs = 93 + * x 7 5 // D2 rhs = 75 + * ---------- + * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15 + * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45 + * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21 + * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63 + * --------- + * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27 + * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67 + * --------- + * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975 + * + * The reasons for adding the products like this are: + * 1. It avoids manual carry tracking. Just like how + * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX. + * This avoids a lot of complexity. + * + * 2. It hints for, and on Clang, compiles to, the powerful UMAAL + * instruction available in ARM's Digital Signal Processing extension + * in 32-bit ARMv6 and later, which is shown below: + * + * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm) + * { + * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm; + * *RdLo = (xxh_u32)(product & 0xFFFFFFFF); + * *RdHi = (xxh_u32)(product >> 32); + * } + * + * This instruction was designed for efficient long multiplication, and + * allows this to be calculated in only 4 instructions at speeds + * comparable to some 64-bit ALUs. + * + * 3. It isn't terrible on other platforms. Usually this will be a couple + * of 32-bit ADD/ADCs. + */ + + /* First calculate all of the cross products. */ + xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF); + xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF); + xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32); + xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32); + + /* Now add the products together. These will never overflow. */ + xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi; + xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi; + xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF); + + XXH128_hash_t r128; + r128.low64 = lower; + r128.high64 = upper; + return r128; +#endif +} + +/*! + * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it. + * + * The reason for the separate function is to prevent passing too many structs + * around by value. This will hopefully inline the multiply, but we don't force it. + * + * @param lhs , rhs The 64-bit integers to multiply + * @return The low 64 bits of the product XOR'd by the high 64 bits. + * @see XXH_mult64to128() + */ +static xxh_u64 +XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs) +{ + XXH128_hash_t product = XXH_mult64to128(lhs, rhs); + return product.low64 ^ product.high64; +} + +/*! Seems to produce slightly better code on GCC for some reason. 
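+ * This is the plain xorshift step, v ^ (v >> shift); both avalanche
+ * routines below are built from it.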
*/ +XXH_FORCE_INLINE xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift) +{ + XXH_ASSERT(0 <= shift && shift < 64); + return v64 ^ (v64 >> shift); +} + +/* + * This is a fast avalanche stage, + * suitable when input bits are already partially mixed + */ +static XXH64_hash_t XXH3_avalanche(xxh_u64 h64) +{ + h64 = XXH_xorshift64(h64, 37); + h64 *= 0x165667919E3779F9ULL; + h64 = XXH_xorshift64(h64, 32); + return h64; +} + +/* + * This is a stronger avalanche, + * inspired by Pelle Evensen's rrmxmx + * preferable when input has not been previously mixed + */ +static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len) +{ + /* this mix is inspired by Pelle Evensen's rrmxmx */ + h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24); + h64 *= 0x9FB21C651E98DF25ULL; + h64 ^= (h64 >> 35) + len ; + h64 *= 0x9FB21C651E98DF25ULL; + return XXH_xorshift64(h64, 28); +} + + +/* ========================================== + * Short keys + * ========================================== + * One of the shortcomings of XXH32 and XXH64 was that their performance was + * sub-optimal on short lengths. It used an iterative algorithm which strongly + * favored lengths that were a multiple of 4 or 8. + * + * Instead of iterating over individual inputs, we use a set of single shot + * functions which piece together a range of lengths and operate in constant time. + * + * Additionally, the number of multiplies has been significantly reduced. This + * reduces latency, especially when emulating 64-bit multiplies on 32-bit. + * + * Depending on the platform, this may or may not be faster than XXH32, but it + * is almost guaranteed to be faster than XXH64. + */ + +/* + * At very short lengths, there isn't enough input to fully hide secrets, or use + * the entire secret. + * + * There is also only a limited amount of mixing we can do before significantly + * impacting performance. + * + * Therefore, we use different sections of the secret and always mix two secret + * samples with an XOR. This should have no effect on performance on the + * seedless or withSeed variants because everything _should_ be constant folded + * by modern compilers. + * + * The XOR mixing hides individual parts of the secret and increases entropy. + * + * This adds an extra layer of strength for custom secrets. 
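+ *
+ * Dispatch for the short-key path, as implemented by XXH3_len_0to16_64b()
+ * below:
+ *
+ *     len 9..16 -> XXH3_len_9to16_64b()
+ *     len 4..8  -> XXH3_len_4to8_64b()
+ *     len 1..3  -> XXH3_len_1to3_64b()
+ *     len 0     -> XXH64_avalanche(seed ^ (two XORed secret words))
+ *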
+ */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combined = { input[0], 0x01, input[0], input[0] } + * len = 2: combined = { input[1], 0x02, input[0], input[1] } + * len = 3: combined = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const keyed = (xxh_u64)combined ^ bitflip; + return XXH64_avalanche(keyed); + } +} + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input1 = XXH_readLE32(input); + xxh_u32 const input2 = XXH_readLE32(input + len - 4); + xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed; + xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32); + xxh_u64 const keyed = input64 ^ bitflip; + return XXH3_rrmxmx(keyed, len); + } +} + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed; + xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed; + xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1; + xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2; + xxh_u64 const acc = len + + XXH_swap64(input_lo) + input_hi + + XXH3_mul128_fold64(input_lo, input_hi); + return XXH3_avalanche(acc); + } +} + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed); + if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed); + if (len) return XXH3_len_1to3_64b(input, len, secret, seed); + return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64))); + } +} + +/* + * DISCLAIMER: There are known *seed-dependent* multicollisions here due to + * multiplication by zero, affecting hashes of lengths 17 to 240. + * + * However, they are very unlikely. + * + * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all + * unseeded non-cryptographic hashes, it does not attempt to defend itself + * against specially crafted inputs, only random inputs. + * + * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes + * cancelling out the secret is taken an arbitrary number of times (addressed + * in XXH3_accumulate_512), this collision is very unlikely with random inputs + * and/or proper seeding: + * + * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a + * function that is only called up to 16 times per hash with up to 240 bytes of + * input. + * + * This is not too bad for a non-cryptographic hash function, especially with + * only 64 bit outputs. 
+ * + * The 128-bit variant (which trades some speed for strength) is NOT affected + * by this, although it is always a good idea to use a proper seed if you care + * about strength. + */ +XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64) +{ +#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */ + /* + * UGLY HACK: + * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in + * slower code. + * + * By forcing seed64 into a register, we disrupt the cost model and + * cause it to scalarize. See `XXH32_round()` + * + * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600, + * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on + * GCC 9.2, despite both emitting scalar code. + * + * GCC generates much better scalar code than Clang for the rest of XXH3, + * which is why finding a more optimal codepath is of interest. + */ + XXH_COMPILER_GUARD(seed64); +#endif + { xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 const input_hi = XXH_readLE64(input+8); + return XXH3_mul128_fold64( + input_lo ^ (XXH_readLE64(secret) + seed64), + input_hi ^ (XXH_readLE64(secret+8) - seed64) + ); + } +} + +/* For mid-range keys, XXH3 uses a Mum-hash variant. */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { xxh_u64 acc = len * XXH_PRIME64_1; + if (len > 32) { + if (len > 64) { + if (len > 96) { + acc += XXH3_mix16B(input+48, secret+96, seed); + acc += XXH3_mix16B(input+len-64, secret+112, seed); + } + acc += XXH3_mix16B(input+32, secret+64, seed); + acc += XXH3_mix16B(input+len-48, secret+80, seed); + } + acc += XXH3_mix16B(input+16, secret+32, seed); + acc += XXH3_mix16B(input+len-32, secret+48, seed); + } + acc += XXH3_mix16B(input+0, secret+0, seed); + acc += XXH3_mix16B(input+len-16, secret+16, seed); + + return XXH3_avalanche(acc); + } +} + +#define XXH3_MIDSIZE_MAX 240 + +XXH_NO_INLINE XXH64_hash_t +XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + + #define XXH3_MIDSIZE_STARTOFFSET 3 + #define XXH3_MIDSIZE_LASTOFFSET 17 + + { xxh_u64 acc = len * XXH_PRIME64_1; + int const nbRounds = (int)len / 16; + int i; + for (i=0; i<8; i++) { + acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed); + } + acc = XXH3_avalanche(acc); + XXH_ASSERT(nbRounds >= 8); +#if defined(__clang__) /* Clang */ \ + && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ + /* + * UGLY HACK: + * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86. + * Everywhere else, it uses scalar code. + * + * For 64->128-bit multiplies, even if the NEON was 100% optimal, it + * would still be slower than UMAAL (see XXH_mult64to128).
+ * + * Unfortunately, Clang doesn't handle the long multiplies properly and + * converts them to the nonexistent "vmulq_u64" intrinsic, which is then + * scalarized into an ugly mess of VMOV.32 instructions. + * + * This mess is difficult to avoid without turning autovectorization + * off completely, but they are usually relatively minor and/or not + * worth it to fix. + * + * This loop is the easiest to fix, as unlike XXH32, this pragma + * _actually works_ because it is a loop vectorization instead of an + * SLP vectorization. + */ + #pragma clang loop vectorize(disable) +#endif + for (i=8 ; i < nbRounds; i++) { + acc += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed); + } + /* last bytes */ + acc += XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed); + return XXH3_avalanche(acc); + } +} + + +/* ======= Long Keys ======= */ + +#define XXH_STRIPE_LEN 64 +#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */ +#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64)) + +#ifdef XXH_OLD_NAMES +# define STRIPE_LEN XXH_STRIPE_LEN +# define ACC_NB XXH_ACC_NB +#endif + +XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64) +{ + if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64); + XXH_memcpy(dst, &v64, sizeof(v64)); +} + +/* Several intrinsic functions below are supposed to accept __int64 as argument, + * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ . + * However, several environments do not define __int64 type, + * requiring a workaround. + */ +#if !defined (__VMS) \ + && (defined (__cplusplus) \ + || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) ) + typedef int64_t xxh_i64; +#else + /* the following type must have a width of 64-bit */ + typedef long long xxh_i64; +#endif + + +/* + * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized. + * + * It is a hardened version of UMAC, based off of FARSH's implementation. + * + * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD + * implementations, and it is ridiculously fast. + * + * We harden it by mixing the original input to the accumulators as well as the product. + * + * This means that in the (relatively likely) case of a multiply by zero, the + * original input is preserved. + * + * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve + * cross-pollination, as otherwise the upper and lower halves would be + * essentially independent. + * + * This doesn't matter on 64-bit hashes since they all get merged together in + * the end, so we skip the extra step. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. 
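+ *
+ * Per 64-bit lane, the step amounts to this scalar sketch (it mirrors the
+ * scalar path, XXH3_scalarRound(), declared further below):
+ *
+ *     xxh_u64 const data_val = XXH_readLE64(input  + 8*lane);
+ *     xxh_u64 const data_key = data_val ^ XXH_readLE64(secret + 8*lane);
+ *     acc[lane ^ 1] += data_val;                         // swap(data_vec)
+ *     acc[lane] += XXH_mult32to64(data_key & 0xFFFFFFFF, // widening lo*hi
+ *                                 data_key >> 32);       // product
+ *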
+ */ + +#if (XXH_VECTOR == XXH_AVX512) \ + || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0) + +#ifndef XXH_TARGET_AVX512 +# define XXH_TARGET_AVX512 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + __m512i* const xacc = (__m512i *) acc; + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + + { + /* data_vec = input[0]; */ + __m512i const data_vec = _mm512_loadu_si512 (input); + /* key_vec = secret[0]; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + /* data_key = data_vec ^ key_vec; */ + __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m512i const data_key_lo = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo); + /* xacc[0] += swap(data_vec); */ + __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2)); + __m512i const sum = _mm512_add_epi64(*xacc, data_swap); + /* xacc[0] += product; */ + *xacc = _mm512_add_epi64(product, sum); + } +} + +/* + * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing. + * + * Multiplication isn't perfect, as explained by Google in HighwayHash: + * + * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to + * // varying degrees. In descending order of goodness, bytes + * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32. + * // As expected, the upper and lower bytes are much worse. + * + * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291 + * + * Since our algorithm uses a pseudorandom secret to add some variance into the + * mix, we don't need to (or want to) mix as often or as much as HighwayHash does. + * + * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid + * extraction. + * + * Both XXH3_64bits and XXH3_128bits use this subroutine. 
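+ *
+ * Per 64-bit lane, the scramble is equivalent to this scalar sketch
+ * (mirroring XXH3_scalarScrambleRound(), declared further below):
+ *
+ *     xxh_u64 acc64 = acc[lane];
+ *     acc64 = XXH_xorshift64(acc64, 47);       // acc ^= acc >> 47
+ *     acc64 ^= XXH_readLE64(secret + 8*lane);  // mix in the secret
+ *     acc64 *= XXH_PRIME32_1;                  // spread bits back upward
+ *     acc[lane] = acc64;
+ *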
+ */ + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 63) == 0); + XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i)); + { __m512i* const xacc = (__m512i*) acc; + const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1); + + /* xacc[0] ^= (xacc[0] >> 47) */ + __m512i const acc_vec = *xacc; + __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47); + __m512i const data_vec = _mm512_xor_si512 (acc_vec, shifted); + /* xacc[0] ^= secret; */ + __m512i const key_vec = _mm512_loadu_si512 (secret); + __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec); + + /* xacc[0] *= XXH_PRIME32_1; */ + __m512i const data_key_hi = _mm512_shuffle_epi32 (data_key, (_MM_PERM_ENUM)_MM_SHUFFLE(0, 3, 0, 1)); + __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32); + __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32); + *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32)); + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX512 void +XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64); + XXH_ASSERT(((size_t)customSecret & 63) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i); + __m512i const seed = _mm512_mask_set1_epi64(_mm512_set1_epi64((xxh_i64)seed64), 0xAA, (xxh_i64)(0U - seed64)); + + const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret); + __m512i* const dest = ( __m512i*) customSecret; + int i; + XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 63) == 0); + for (i=0; i < nbRounds; ++i) { + /* GCC has a bug, _mm512_stream_load_si512 accepts 'void*', not 'void const*', + * this will warn "discards 'const' qualifier". */ + union { + const __m512i* cp; + void* p; + } remote_const_void; + remote_const_void.cp = src + i; + dest[i] = _mm512_add_epi64(_mm512_stream_load_si512(remote_const_void.p), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_AVX2) \ + || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0) + +#ifndef XXH_TARGET_AVX2 +# define XXH_TARGET_AVX2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { __m256i* const xacc = (__m256i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xinput = (const __m256i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. 
*/ + const __m256i* const xsecret = (const __m256i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { + /* data_vec = xinput[i]; */ + __m256i const data_vec = _mm256_loadu_si256 (xinput+i); + /* key_vec = xsecret[i]; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m256i const data_key_lo = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2)); + __m256i const sum = _mm256_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm256_add_epi64(product, sum); + } } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void +XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 31) == 0); + { __m256i* const xacc = (__m256i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */ + const __m256i* const xsecret = (const __m256i *) secret; + const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m256i const acc_vec = xacc[i]; + __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47); + __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted); + /* xacc[i] ^= xsecret; */ + __m256i const key_vec = _mm256_loadu_si256 (xsecret+i); + __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m256i const data_key_hi = _mm256_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32); + __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32)); + } + } +} + +XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0); + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6); + XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64); + (void)(&XXH_writeLE64); + XXH_PREFETCH(customSecret); + { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64); + + const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret); + __m256i* dest = ( __m256i*) customSecret; + +# if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler: + * - do not extract the secret from sse registers in the internal loop + * - use less common registers, and avoid pushing these reg into stack + */ + XXH_COMPILER_GUARD(dest); +# endif + XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dest & 31) == 0); + + /* GCC -O2 need unroll loop manually */ + dest[0] = _mm256_add_epi64(_mm256_stream_load_si256(src+0), seed); + dest[1] = _mm256_add_epi64(_mm256_stream_load_si256(src+1), seed); + dest[2] = _mm256_add_epi64(_mm256_stream_load_si256(src+2), seed); + dest[3] = _mm256_add_epi64(_mm256_stream_load_si256(src+3), seed); + dest[4] = _mm256_add_epi64(_mm256_stream_load_si256(src+4), 
seed); + dest[5] = _mm256_add_epi64(_mm256_stream_load_si256(src+5), seed); + } +} + +#endif + +/* x86dispatch always generates SSE2 */ +#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH) + +#ifndef XXH_TARGET_SSE2 +# define XXH_TARGET_SSE2 /* disable attribute target */ +#endif + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* SSE2 is just a half-scale version of the AVX2 version. */ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i *) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xinput = (const __m128i *) input; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */ + const __m128i* const xsecret = (const __m128i *) secret; + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* data_vec = xinput[i]; */ + __m128i const data_vec = _mm_loadu_si128 (xinput+i); + /* key_vec = xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + /* data_key = data_vec ^ key_vec; */ + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + /* data_key_lo = data_key >> 32; */ + __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */ + __m128i const product = _mm_mul_epu32 (data_key, data_key_lo); + /* xacc[i] += swap(data_vec); */ + __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2)); + __m128i const sum = _mm_add_epi64(xacc[i], data_swap); + /* xacc[i] += product; */ + xacc[i] = _mm_add_epi64(product, sum); + } } +} + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void +XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + { __m128i* const xacc = (__m128i*) acc; + /* Unaligned. This is mainly for pointer arithmetic, and because + * _mm_loadu_si128 requires a const __m128i * pointer for some reason. 
*/ + const __m128i* const xsecret = (const __m128i *) secret; + const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1); + + size_t i; + for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) { + /* xacc[i] ^= (xacc[i] >> 47) */ + __m128i const acc_vec = xacc[i]; + __m128i const shifted = _mm_srli_epi64 (acc_vec, 47); + __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted); + /* xacc[i] ^= xsecret[i]; */ + __m128i const key_vec = _mm_loadu_si128 (xsecret+i); + __m128i const data_key = _mm_xor_si128 (data_vec, key_vec); + + /* xacc[i] *= XXH_PRIME32_1; */ + __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1)); + __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32); + __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32); + xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32)); + } + } +} + +XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + (void)(&XXH_writeLE64); + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i); + +# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900 + /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */ + XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) }; + __m128i const seed = _mm_load_si128((__m128i const*)seed64x2); +# else + __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64); +# endif + int i; + + const void* const src16 = XXH3_kSecret; + __m128i* dst16 = (__m128i*) customSecret; +# if defined(__GNUC__) || defined(__clang__) + /* + * On GCC & Clang, marking 'dest' as modified will cause the compiler to: + * - not extract the secret from SSE registers in the internal loop + * - use less common registers, and avoid pushing these regs onto the stack + */ + XXH_COMPILER_GUARD(dst16); +# endif + XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */ + XXH_ASSERT(((size_t)dst16 & 15) == 0); + + for (i=0; i < nbRounds; ++i) { + dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed); + } } +} + +#endif + +#if (XXH_VECTOR == XXH_NEON) + +/* forward declarations for the scalar routines */ +XXH_FORCE_INLINE void +XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input, + void const* XXH_RESTRICT secret, size_t lane); + +XXH_FORCE_INLINE void +XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT secret, size_t lane); + +/*! + * @internal + * @brief The bulk processing loop for NEON. + * + * The NEON code path is actually partially scalar when running on AArch64. This + * is to optimize the pipelining and can give up to a 15% speedup depending on the + * CPU, and it also mitigates some GCC codegen issues. + * + * @see XXH3_NEON_LANES for configuring this and details about this optimization. + */ +XXH_FORCE_INLINE void +XXH3_accumulate_512_neon( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0); + { + uint64x2_t* const xacc = (uint64x2_t *) acc; + /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. 
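+ * (The loads below therefore go through vld1q_u8 on plain byte pointers, + * which carries no alignment requirement.)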
*/ + uint8_t const* const xinput = (const uint8_t *) input; + uint8_t const* const xsecret = (const uint8_t *) secret; + + size_t i; + /* NEON for the first few lanes (these loops are normally interleaved) */ + for (i=0; i < XXH3_NEON_LANES / 2; i++) { + /* data_vec = xinput[i]; */ + uint8x16_t data_vec = vld1q_u8(xinput + (i * 16)); + /* key_vec = xsecret[i]; */ + uint8x16_t key_vec = vld1q_u8(xsecret + (i * 16)); + uint64x2_t data_key; + uint32x2_t data_key_lo, data_key_hi; + /* xacc[i] += swap(data_vec); */ + uint64x2_t const data64 = vreinterpretq_u64_u8(data_vec); + uint64x2_t const swapped = vextq_u64(data64, data64, 1); + xacc[i] = vaddq_u64 (xacc[i], swapped); + /* data_key = data_vec ^ key_vec; */ + data_key = vreinterpretq_u64_u8(veorq_u8(data_vec, key_vec)); + /* data_key_lo = (uint32x2_t) (data_key & 0xFFFFFFFF); + * data_key_hi = (uint32x2_t) (data_key >> 32); + * data_key = UNDEFINED; */ + XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi); + /* xacc[i] += (uint64x2_t) data_key_lo * (uint64x2_t) data_key_hi; */ + xacc[i] = vmlal_u32 (xacc[i], data_key_lo, data_key_hi); + + } + /* Scalar for the remainder. This may be a zero iteration loop. */ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } + } +} + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { uint64x2_t* xacc = (uint64x2_t*) acc; + uint8_t const* xsecret = (uint8_t const*) secret; + uint32x2_t prime = vdup_n_u32 (XXH_PRIME32_1); + + size_t i; + /* NEON for the first few lanes (these loops are normally interleaved) */ + for (i=0; i < XXH3_NEON_LANES / 2; i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + uint64x2_t acc_vec = xacc[i]; + uint64x2_t shifted = vshrq_n_u64 (acc_vec, 47); + uint64x2_t data_vec = veorq_u64 (acc_vec, shifted); + + /* xacc[i] ^= xsecret[i]; */ + uint8x16_t key_vec = vld1q_u8 (xsecret + (i * 16)); + uint64x2_t data_key = veorq_u64 (data_vec, vreinterpretq_u64_u8(key_vec)); + + /* xacc[i] *= XXH_PRIME32_1 */ + uint32x2_t data_key_lo, data_key_hi; + /* data_key_lo = (uint32x2_t) (xacc[i] & 0xFFFFFFFF); + * data_key_hi = (uint32x2_t) (xacc[i] >> 32); + * xacc[i] = UNDEFINED; */ + XXH_SPLIT_IN_PLACE(data_key, data_key_lo, data_key_hi); + { /* + * prod_hi = (data_key >> 32) * XXH_PRIME32_1; + * + * Avoid vmul_u32 + vshll_n_u32 since Clang 6 and 7 will + * incorrectly "optimize" this: + * tmp = vmul_u32(vmovn_u64(a), vmovn_u64(b)); + * shifted = vshll_n_u32(tmp, 32); + * to this: + * tmp = "vmulq_u64"(a, b); // no such thing! + * shifted = vshlq_n_u64(tmp, 32); + * + * However, unlike SSE, Clang lacks a 64-bit multiply routine + * for NEON, and it scalarizes two 64-bit multiplies instead. + * + * vmull_u32 has the same timing as vmul_u32, and it avoids + * this bug completely. + * See https://bugs.llvm.org/show_bug.cgi?id=39967 + */ + uint64x2_t prod_hi = vmull_u32 (data_key_hi, prime); + /* xacc[i] = prod_hi << 32; */ + xacc[i] = vshlq_n_u64(prod_hi, 32); + /* xacc[i] += (prod_hi & 0xFFFFFFFF) * XXH_PRIME32_1; */ + xacc[i] = vmlal_u32(xacc[i], data_key_lo, prime); + } + } + /* Scalar for the remainder. This may be a zero iteration loop. 
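+ * (It runs zero times whenever XXH3_NEON_LANES == XXH_ACC_NB.)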
*/ + for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) { + XXH3_scalarScrambleRound(acc, secret, i); + } + } +} + +#endif + +#if (XXH_VECTOR == XXH_VSX) + +XXH_FORCE_INLINE void +XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + /* presumed aligned */ + unsigned int* const xacc = (unsigned int*) acc; + xxh_u64x2 const* const xinput = (xxh_u64x2 const*) input; /* no alignment restriction */ + xxh_u64x2 const* const xsecret = (xxh_u64x2 const*) secret; /* no alignment restriction */ + xxh_u64x2 const v32 = { 32, 32 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { + /* data_vec = xinput[i]; */ + xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + i); + /* key_vec = xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + /* shuffled = (data_key << 32) | (data_key >> 32); */ + xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32); + /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */ + xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled); + /* acc_vec = xacc[i]; */ + xxh_u64x2 acc_vec = (xxh_u64x2)vec_xl(0, xacc + 4 * i); + acc_vec += product; + + /* swap high and low halves */ +#ifdef __s390x__ + acc_vec += vec_permi(data_vec, data_vec, 2); +#else + acc_vec += vec_xxpermdi(data_vec, data_vec, 2); +#endif + /* xacc[i] = acc_vec; */ + vec_xst((xxh_u32x4)acc_vec, 0, xacc + 4 * i); + } +} + +XXH_FORCE_INLINE void +XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + XXH_ASSERT((((size_t)acc) & 15) == 0); + + { xxh_u64x2* const xacc = (xxh_u64x2*) acc; + const xxh_u64x2* const xsecret = (const xxh_u64x2*) secret; + /* constants */ + xxh_u64x2 const v32 = { 32, 32 }; + xxh_u64x2 const v47 = { 47, 47 }; + xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 }; + size_t i; + for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) { + /* xacc[i] ^= (xacc[i] >> 47); */ + xxh_u64x2 const acc_vec = xacc[i]; + xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47); + + /* xacc[i] ^= xsecret[i]; */ + xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + i); + xxh_u64x2 const data_key = data_vec ^ key_vec; + + /* xacc[i] *= XXH_PRIME32_1 */ + /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */ + xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime); + /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */ + xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime); + xacc[i] = prod_odd + (prod_even << v32); + } } +} + +#endif + +/* scalar variants - universal */ + +/*! + * @internal + * @brief Scalar round for @ref XXH3_accumulate_512_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. 
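+ * (XXH3_accumulate_512_neon() invokes it for any lanes beyond XXH3_NEON_LANES.)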
+ */ +XXH_FORCE_INLINE void +XXH3_scalarRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT input, + void const* XXH_RESTRICT secret, + size_t lane) +{ + xxh_u64* xacc = (xxh_u64*) acc; + xxh_u8 const* xinput = (xxh_u8 const*) input; + xxh_u8 const* xsecret = (xxh_u8 const*) secret; + XXH_ASSERT(lane < XXH_ACC_NB); + XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0); + { + xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8); + xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8); + xacc[lane ^ 1] += data_val; /* swap adjacent lanes */ + xacc[lane] += XXH_mult32to64(data_key & 0xFFFFFFFF, data_key >> 32); + } +} + +/*! + * @internal + * @brief Processes a 64 byte block of data using the scalar path. + */ +XXH_FORCE_INLINE void +XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc, + const void* XXH_RESTRICT input, + const void* XXH_RESTRICT secret) +{ + size_t i; + for (i=0; i < XXH_ACC_NB; i++) { + XXH3_scalarRound(acc, input, secret, i); + } +} + +/*! + * @internal + * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar(). + * + * This is extracted to its own function because the NEON path uses a combination + * of NEON and scalar. + */ +XXH_FORCE_INLINE void +XXH3_scalarScrambleRound(void* XXH_RESTRICT acc, + void const* XXH_RESTRICT secret, + size_t lane) +{ + xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */ + const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */ + XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0); + XXH_ASSERT(lane < XXH_ACC_NB); + { + xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8); + xxh_u64 acc64 = xacc[lane]; + acc64 = XXH_xorshift64(acc64, 47); + acc64 ^= key64; + acc64 *= XXH_PRIME32_1; + xacc[lane] = acc64; + } +} + +/*! + * @internal + * @brief Scrambles the accumulators after a large chunk has been read + */ +XXH_FORCE_INLINE void +XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret) +{ + size_t i; + for (i=0; i < XXH_ACC_NB; i++) { + XXH3_scalarScrambleRound(acc, secret, i); + } +} -#endif /* XXH_STATIC_LINKING_ONLY && XXH_STATIC_H_3543687687345 */ +XXH_FORCE_INLINE void +XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64) +{ + /* + * We need a separate pointer for the hack below, + * which requires a non-const pointer. + * Any decent compiler will optimize this out otherwise. + */ + const xxh_u8* kSecretPtr = XXH3_kSecret; + XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0); + +#if defined(__clang__) && defined(__aarch64__) + /* + * UGLY HACK: + * Clang generates a bunch of MOV/MOVK pairs for aarch64, and they are + * placed sequentially, in order, at the top of the unrolled loop. + * + * While MOVK is great for generating constants (2 cycles for a 64-bit + * constant compared to 4 cycles for LDR), it fights for bandwidth with + * the arithmetic instructions. + * + * I L S + * MOVK + * MOVK + * MOVK + * MOVK + * ADD + * SUB STR + * STR + * By forcing loads from memory (as the asm line causes Clang to assume + * that kSecretPtr has been changed), the pipelines are used more + * efficiently: + * I L S + * LDR + * ADD LDR + * SUB STR + * STR + * + * See XXH3_NEON_LANES for details on the pipeline. + * + * XXH3_64bits_withSeed, len == 256, Snapdragon 835 + * without hack: 2654.4 MB/s + * with hack: 3202.9 MB/s + */ + XXH_COMPILER_GUARD(kSecretPtr); +#endif + /* + * Note: in debug mode, this overrides the asm optimization + * and Clang will emit MOVK chains again. 
+ */ + XXH_ASSERT(kSecretPtr == XXH3_kSecret); + + { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16; + int i; + for (i=0; i < nbRounds; i++) { + /* + * The asm hack causes Clang to assume that kSecretPtr aliases with + * customSecret, and on aarch64, this prevented LDP from merging two + * loads together for free. Putting the loads together before the stores + * properly generates LDP. + */ + xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64; + xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64; + XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo); + XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi); + } } +} + + +typedef void (*XXH3_f_accumulate_512)(void* XXH_RESTRICT, const void*, const void*); +typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*); +typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64); + + +#if (XXH_VECTOR == XXH_AVX512) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx512 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512 + +#elif (XXH_VECTOR == XXH_AVX2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_avx2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2 + +#elif (XXH_VECTOR == XXH_SSE2) + +#define XXH3_accumulate_512 XXH3_accumulate_512_sse2 +#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2 +#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2 + +#elif (XXH_VECTOR == XXH_NEON) + +#define XXH3_accumulate_512 XXH3_accumulate_512_neon +#define XXH3_scrambleAcc XXH3_scrambleAcc_neon +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#elif (XXH_VECTOR == XXH_VSX) + +#define XXH3_accumulate_512 XXH3_accumulate_512_vsx +#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#else /* scalar */ + +#define XXH3_accumulate_512 XXH3_accumulate_512_scalar +#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar +#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar + +#endif + + + +#ifndef XXH_PREFETCH_DIST +# ifdef __clang__ +# define XXH_PREFETCH_DIST 320 +# else +# if (XXH_VECTOR == XXH_AVX512) +# define XXH_PREFETCH_DIST 512 +# else +# define XXH_PREFETCH_DIST 384 +# endif +# endif /* __clang__ */ +#endif /* XXH_PREFETCH_DIST */ + +/* + * XXH3_accumulate() + * Loops over XXH3_accumulate_512(). 
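+ * Prefetches XXH_PREFETCH_DIST bytes ahead of each stripe to hide memory latency.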
+ * Assumption: nbStripes will not overflow the secret size + */ +XXH_FORCE_INLINE void +XXH3_accumulate( xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, + const xxh_u8* XXH_RESTRICT secret, + size_t nbStripes, + XXH3_f_accumulate_512 f_acc512) +{ + size_t n; + for (n = 0; n < nbStripes; n++ ) { + const xxh_u8* const in = input + n*XXH_STRIPE_LEN; + XXH_PREFETCH(in + XXH_PREFETCH_DIST); + f_acc512(acc, + in, + secret + n*XXH_SECRET_CONSUME_RATE); + } +} + +XXH_FORCE_INLINE void +XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc, + const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble) +{ + size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE; + size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock; + size_t const nb_blocks = (len - 1) / block_len; + + size_t n; + + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + + for (n = 0; n < nb_blocks; n++) { + XXH3_accumulate(acc, input + n*block_len, secret, nbStripesPerBlock, f_acc512); + f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN); + } + + /* last partial block */ + XXH_ASSERT(len > XXH_STRIPE_LEN); + { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN; + XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE)); + XXH3_accumulate(acc, input + nb_blocks*block_len, secret, nbStripes, f_acc512); + + /* last stripe */ + { const xxh_u8* const p = input + len - XXH_STRIPE_LEN; +#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */ + f_acc512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START); + } } +} + +XXH_FORCE_INLINE xxh_u64 +XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret) +{ + return XXH3_mul128_fold64( + acc[0] ^ XXH_readLE64(secret), + acc[1] ^ XXH_readLE64(secret+8) ); +} + +static XXH64_hash_t +XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start) +{ + xxh_u64 result64 = start; + size_t i = 0; + + for (i = 0; i < 4; i++) { + result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i); +#if defined(__clang__) /* Clang */ \ + && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \ + && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \ + && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */ + /* + * UGLY HACK: + * Prevent autovectorization on Clang ARMv7-a. Exact same problem as + * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b. 
* XXH3_64bits, len == 256, Snapdragon 835: + * without hack: 2063.7 MB/s + * with hack: 2560.7 MB/s + */ + XXH_COMPILER_GUARD(result64); +#endif + } + + return XXH3_avalanche(result64); +} + +#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \ + XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 } + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len, + const void* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble) +{ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; + + XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc512, f_scramble); + + /* converge into final hash */ + XXH_STATIC_ASSERT(sizeof(acc) == 64); + /* do not align on 8, so that the secret is different from the accumulator */ +#define XXH_SECRET_MERGEACCS_START 11 + XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1); +} + +/* + * It's important for performance to transmit the secret's size (when it's static) + * so that the compiler can properly optimize the vectorized loop. + * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set. + */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; + return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate_512, XXH3_scrambleAcc); +} + +/* + * It's preferable for performance that XXH3_hashLong is not inlined, + * as it results in a smaller function for small data, easier on the instruction cache. + * Note that inside this no_inline function, we do inline the internal loop, + * and provide a statically defined secret size to allow optimization of the vector loop. + */ +XXH_NO_INLINE XXH64_hash_t +XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate_512, XXH3_scrambleAcc); +} + +/* + * XXH3_hashLong_64b_withSeed(): + * Generate a custom key based on an alteration of the default XXH3_kSecret with the seed, + * and then use this key for long mode hashing. + * + * This operation is decently fast but nonetheless costs a little bit of time. + * Try to avoid it whenever possible (typically when seed==0). + * + * It's important for performance that XXH3_hashLong is not inlined. Not sure + * why (uop cache maybe?), but the difference is large and easily measurable. + */ +XXH_FORCE_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len, + XXH64_hash_t seed, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ + if (seed == 0) + return XXH3_hashLong_64b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc512, f_scramble); + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed); + return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret), + f_acc512, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. 
+ */ +XXH_NO_INLINE XXH64_hash_t +XXH3_hashLong_64b_withSeed(const void* input, size_t len, + XXH64_hash_t seed, const xxh_u8* secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_64b_withSeed_internal(input, len, seed, + XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + + +typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH64_hash_t +XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong64_f f_hashLong) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken when the `secretLen` condition is not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + * Also, note that the function signature doesn't offer room to return an error. + */ + if (len <= 16) + return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen); +} + + +/* === Public entry point === */ + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* input, size_t len) +{ + return XXH3_64bits_internal(input, len, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) +{ + return XXH3_64bits_internal(input, len, 0, secret, secretSize, XXH3_hashLong_64b_withSecret); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed); +} + +XXH_PUBLIC_API XXH64_hash_t +XXH3_64bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_64bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_64b_withSecret(input, len, seed, (const xxh_u8*)secret, secretSize); +} + + +/* === XXH3 streaming === */ + +/* + * Allocates a pointer that is always aligned to 'align'. + * + * This must be freed with `XXH_alignedFree()`. + * + * malloc typically guarantees 16-byte alignment on 64-bit systems and 8-byte + * alignment on 32-bit. This isn't enough for the 32-byte aligned loads in AVX2, + * nor, on 32-bit, for the 16-byte aligned loads in SSE2 and NEON. + * + * This underalignment previously caused a rather obvious crash which went + * completely unnoticed due to XXH3_createState() not actually being tested. + * Credit to RedSpah for noticing this bug. + * + * The alignment is done manually: functions like posix_memalign or _mm_malloc + * are avoided, since to maintain portability we would have to write a fallback + * like this anyway; besides, testing for the existence of library + * functions without relying on external build tools is impossible. 
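+ * + * (Worked example of the scheme described below: with align == 8 and + * base % 8 == 5, the offset is 3; ptr = base + 3 is 8-byte aligned, and + * ptr[-1] stores 3 so that XXH_alignedFree() can recover base.)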
+ * + * The method is simple: Overallocate, manually align, and store the offset + * to the original behind the returned pointer. + * + * Align must be a power of 2 and 8 <= align <= 128. + */ +static void* XXH_alignedMalloc(size_t s, size_t align) +{ + XXH_ASSERT(align <= 128 && align >= 8); /* range check */ + XXH_ASSERT((align & (align-1)) == 0); /* power of 2 */ + XXH_ASSERT(s != 0 && s < (s + align)); /* empty/overflow */ + { /* Overallocate to make room for manual realignment and an offset byte */ + xxh_u8* base = (xxh_u8*)XXH_malloc(s + align); + if (base != NULL) { + /* + * Get the offset needed to align this pointer. + * + * Even if the returned pointer is aligned, there will always be + * at least one byte to store the offset to the original pointer. + */ + size_t offset = align - ((size_t)base & (align - 1)); /* base % align */ + /* Add the offset for the now-aligned pointer */ + xxh_u8* ptr = base + offset; + + XXH_ASSERT((size_t)ptr % align == 0); + + /* Store the offset immediately before the returned pointer. */ + ptr[-1] = (xxh_u8)offset; + return ptr; + } + return NULL; + } +} +/* + * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass + * normal malloc'd pointers; XXH_alignedMalloc has a specific data layout. + */ +static void XXH_alignedFree(void* p) +{ + if (p != NULL) { + xxh_u8* ptr = (xxh_u8*)p; + /* Get the offset byte we added in XXH_alignedMalloc. */ + xxh_u8 offset = ptr[-1]; + /* Free the original malloc'd pointer */ + xxh_u8* base = ptr - offset; + XXH_free(base); + } +} +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void) +{ + XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64); + if (state==NULL) return NULL; + XXH3_INITSTATE(state); + return state; +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr) +{ + XXH_alignedFree(statePtr); + return XXH_OK; +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API void +XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state) +{ + XXH_memcpy(dst_state, src_state, sizeof(*dst_state)); +} + +static void +XXH3_reset_internal(XXH3_state_t* statePtr, + XXH64_hash_t seed, + const void* secret, size_t secretSize) +{ + size_t const initStart = offsetof(XXH3_state_t, bufferedSize); + size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart; + XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart); + XXH_ASSERT(statePtr != NULL); + /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */ + memset((char*)statePtr + initStart, 0, initLength); + statePtr->acc[0] = XXH_PRIME32_3; + statePtr->acc[1] = XXH_PRIME64_1; + statePtr->acc[2] = XXH_PRIME64_2; + statePtr->acc[3] = XXH_PRIME64_3; + statePtr->acc[4] = XXH_PRIME64_4; + statePtr->acc[5] = XXH_PRIME32_2; + statePtr->acc[6] = XXH_PRIME64_5; + statePtr->acc[7] = XXH_PRIME32_1; + statePtr->seed = seed; + statePtr->useSeed = (seed != 0); + statePtr->extSecret = (const unsigned char*)secret; + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); + statePtr->secretLimit = secretSize - XXH_STRIPE_LEN; + statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE; +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset(XXH3_state_t* statePtr) +{ + if (statePtr == NULL) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! 
@ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) +{ + if (statePtr == NULL) return XXH_ERROR; + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + XXH3_reset_internal(statePtr, 0, secret, secretSize); + return XXH_OK; +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + if (statePtr == NULL) return XXH_ERROR; + if (seed==0) return XXH3_64bits_reset(statePtr); + if ((seed != statePtr->seed) || (statePtr->extSecret != NULL)) + XXH3_initCustomSecret(statePtr->customSecret, seed); + XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE); + return XXH_OK; +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed64) +{ + if (statePtr == NULL) return XXH_ERROR; + if (secret == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; + XXH3_reset_internal(statePtr, seed64, secret, secretSize); + statePtr->useSeed = 1; /* always, even if seed64==0 */ + return XXH_OK; +} + +/* Note : when XXH3_consumeStripes() is invoked, + * the caller must guarantee that at least one more byte will be consumed from input, + * so that the function can blindly consume all stripes using the "normal" secret segment */ +XXH_FORCE_INLINE void +XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc, + size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock, + const xxh_u8* XXH_RESTRICT input, size_t nbStripes, + const xxh_u8* XXH_RESTRICT secret, size_t secretLimit, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble) +{ + XXH_ASSERT(nbStripes <= nbStripesPerBlock); /* can handle max 1 scramble per invocation */ + XXH_ASSERT(*nbStripesSoFarPtr < nbStripesPerBlock); + if (nbStripesPerBlock - *nbStripesSoFarPtr <= nbStripes) { + /* need a scrambling operation */ + size_t const nbStripesToEndofBlock = nbStripesPerBlock - *nbStripesSoFarPtr; + size_t const nbStripesAfterBlock = nbStripes - nbStripesToEndofBlock; + XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripesToEndofBlock, f_acc512); + f_scramble(acc, secret + secretLimit); + XXH3_accumulate(acc, input + nbStripesToEndofBlock * XXH_STRIPE_LEN, secret, nbStripesAfterBlock, f_acc512); + *nbStripesSoFarPtr = nbStripesAfterBlock; + } else { + XXH3_accumulate(acc, input, secret + nbStripesSoFarPtr[0] * XXH_SECRET_CONSUME_RATE, nbStripes, f_acc512); + *nbStripesSoFarPtr += nbStripes; + } +} + +#ifndef XXH3_STREAM_USE_STACK +# ifndef __clang__ /* clang doesn't need additional stack space */ +# define XXH3_STREAM_USE_STACK 1 +# endif +#endif +/* + * Both XXH3_64bits_update and XXH3_128bits_update use this routine. + */ +XXH_FORCE_INLINE XXH_errorcode +XXH3_update(XXH3_state_t* XXH_RESTRICT const state, + const xxh_u8* XXH_RESTRICT input, size_t len, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble) +{ + if (input==NULL) { + XXH_ASSERT(len == 0); + return XXH_OK; + } + + XXH_ASSERT(state != NULL); + { const xxh_u8* const bEnd = input + len; + const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; +#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* For some reason, gcc and MSVC seem to suffer greatly + * when operating accumulators directly into state. 
+ * Operating into stack space seems to enable proper optimization. + * clang, on the other hand, doesn't seem to need this trick */ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8]; memcpy(acc, state->acc, sizeof(acc)); +#else + xxh_u64* XXH_RESTRICT const acc = state->acc; +#endif + state->totalLen += len; + XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE); + + /* small input : just fill in tmp buffer */ + if (state->bufferedSize + len <= XXH3_INTERNALBUFFER_SIZE) { + XXH_memcpy(state->buffer + state->bufferedSize, input, len); + state->bufferedSize += (XXH32_hash_t)len; + return XXH_OK; + } + + /* total input is now > XXH3_INTERNALBUFFER_SIZE */ + #define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN) + XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0); /* clean multiple */ + + /* + * Internal buffer is partially filled (always, except at beginning) + * Complete it, then consume it. + */ + if (state->bufferedSize) { + size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize; + XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize); + input += loadSize; + XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, XXH3_INTERNALBUFFER_STRIPES, + secret, state->secretLimit, + f_acc512, f_scramble); + state->bufferedSize = 0; + } + XXH_ASSERT(input < bEnd); + + /* large input to consume : ingest per full block */ + if ((size_t)(bEnd - input) > state->nbStripesPerBlock * XXH_STRIPE_LEN) { + size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN; + XXH_ASSERT(state->nbStripesPerBlock >= state->nbStripesSoFar); + /* join to current block's end */ + { size_t const nbStripesToEnd = state->nbStripesPerBlock - state->nbStripesSoFar; + XXH_ASSERT(nbStripesToEnd <= nbStripes); + XXH3_accumulate(acc, input, secret + state->nbStripesSoFar * XXH_SECRET_CONSUME_RATE, nbStripesToEnd, f_acc512); + f_scramble(acc, secret + state->secretLimit); + state->nbStripesSoFar = 0; + input += nbStripesToEnd * XXH_STRIPE_LEN; + nbStripes -= nbStripesToEnd; + } + /* consume per entire blocks */ + while(nbStripes >= state->nbStripesPerBlock) { + XXH3_accumulate(acc, input, secret, state->nbStripesPerBlock, f_acc512); + f_scramble(acc, secret + state->secretLimit); + input += state->nbStripesPerBlock * XXH_STRIPE_LEN; + nbStripes -= state->nbStripesPerBlock; + } + /* consume last partial block */ + XXH3_accumulate(acc, input, secret, nbStripes, f_acc512); + input += nbStripes * XXH_STRIPE_LEN; + XXH_ASSERT(input < bEnd); /* at least some bytes left */ + state->nbStripesSoFar = nbStripes; + /* buffer predecessor of last partial stripe */ + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + XXH_ASSERT(bEnd - input <= XXH_STRIPE_LEN); + } else { + /* content to consume <= block size */ + /* Consume input by a multiple of internal buffer size */ + if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) { + const xxh_u8* const limit = bEnd - XXH3_INTERNALBUFFER_SIZE; + do { + XXH3_consumeStripes(acc, + &state->nbStripesSoFar, state->nbStripesPerBlock, + input, XXH3_INTERNALBUFFER_STRIPES, + secret, state->secretLimit, + f_acc512, f_scramble); + input += XXH3_INTERNALBUFFER_SIZE; + } while (input<limit); + /* buffer predecessor of last partial stripe */ + XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN); + } + } + + /* Some remaining input (always) : buffer it */ + XXH_ASSERT(input < bEnd); + XXH_ASSERT(bEnd - 
input <= XXH3_INTERNALBUFFER_SIZE); + XXH_ASSERT(state->bufferedSize == 0); + XXH_memcpy(state->buffer, input, (size_t)(bEnd-input)); + state->bufferedSize = (XXH32_hash_t)(bEnd-input); +#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1 + /* save stack accumulators into state */ + memcpy(state->acc, acc, sizeof(acc)); +#endif + } + + return XXH_OK; +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_64bits_update(XXH3_state_t* state, const void* input, size_t len) +{ + return XXH3_update(state, (const xxh_u8*)input, len, + XXH3_accumulate_512, XXH3_scrambleAcc); +} + + +XXH_FORCE_INLINE void +XXH3_digest_long (XXH64_hash_t* acc, + const XXH3_state_t* state, + const unsigned char* secret) +{ + /* + * Digest on a local copy. This way, the state remains unaltered, and it can + * continue ingesting more input afterwards. + */ + XXH_memcpy(acc, state->acc, sizeof(state->acc)); + if (state->bufferedSize >= XXH_STRIPE_LEN) { + size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN; + size_t nbStripesSoFar = state->nbStripesSoFar; + XXH3_consumeStripes(acc, + &nbStripesSoFar, state->nbStripesPerBlock, + state->buffer, nbStripes, + secret, state->secretLimit, + XXH3_accumulate_512, XXH3_scrambleAcc); + /* last stripe */ + XXH3_accumulate_512(acc, + state->buffer + state->bufferedSize - XXH_STRIPE_LEN, + secret + state->secretLimit - XXH_SECRET_LASTACC_START); + } else { /* bufferedSize < XXH_STRIPE_LEN */ + xxh_u8 lastStripe[XXH_STRIPE_LEN]; + size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize; + XXH_ASSERT(state->bufferedSize > 0); /* there is always some input buffered */ + XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize); + XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize); + XXH3_accumulate_512(acc, + lastStripe, + secret + state->secretLimit - XXH_SECRET_LASTACC_START); + } +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret; + if (state->totalLen > XXH3_MIDSIZE_MAX) { + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; + XXH3_digest_long(acc, state, secret); + return XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)state->totalLen * XXH_PRIME64_1); + } + /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */ + if (state->useSeed) + return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); + return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen), + secret, state->secretLimit + XXH_STRIPE_LEN); +} + + + +/* ========================================== + * XXH3 128 bits (a.k.a XXH128) + * ========================================== + * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant, + * even without counting the significantly larger output size. + * + * For example, extra steps are taken to avoid the seed-dependent collisions + * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B). + * + * This strength naturally comes at the cost of some speed, especially on short + * lengths. Note that longer hashes are about as fast as the 64-bit version + * due to it using only a slight modification of the 64-bit loop. + * + * XXH128 is also more oriented towards 64-bit machines. It is still extremely + * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64). 
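+ * (XXH128() below is simply an alias for XXH3_128bits_withSeed().)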
+ */ + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + /* A doubled version of 1to3_64b with different constants. */ + XXH_ASSERT(input != NULL); + XXH_ASSERT(1 <= len && len <= 3); + XXH_ASSERT(secret != NULL); + /* + * len = 1: combinedl = { input[0], 0x01, input[0], input[0] } + * len = 2: combinedl = { input[1], 0x02, input[0], input[1] } + * len = 3: combinedl = { input[2], 0x03, input[0], input[1] } + */ + { xxh_u8 const c1 = input[0]; + xxh_u8 const c2 = input[len >> 1]; + xxh_u8 const c3 = input[len - 1]; + xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24) + | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8); + xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13); + xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed; + xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed; + xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl; + xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph; + XXH128_hash_t h128; + h128.low64 = XXH64_avalanche(keyed_lo); + h128.high64 = XXH64_avalanche(keyed_hi); + return h128; + } +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(4 <= len && len <= 8); + seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32; + { xxh_u32 const input_lo = XXH_readLE32(input); + xxh_u32 const input_hi = XXH_readLE32(input + len - 4); + xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32); + xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed; + xxh_u64 const keyed = input_64 ^ bitflip; + + /* Shift len to the left to ensure it is even, this avoids even multiplies. */ + XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2)); + + m128.high64 += (m128.low64 << 1); + m128.low64 ^= (m128.high64 >> 3); + + m128.low64 = XXH_xorshift64(m128.low64, 35); + m128.low64 *= 0x9FB21C651E98DF25ULL; + m128.low64 = XXH_xorshift64(m128.low64, 28); + m128.high64 = XXH3_avalanche(m128.high64); + return m128; + } +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(input != NULL); + XXH_ASSERT(secret != NULL); + XXH_ASSERT(9 <= len && len <= 16); + { xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed; + xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed; + xxh_u64 const input_lo = XXH_readLE64(input); + xxh_u64 input_hi = XXH_readLE64(input + len - 8); + XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1); + /* + * Put len in the middle of m128 to ensure that the length gets mixed to + * both the low and high bits in the 128x64 multiply below. + */ + m128.low64 += (xxh_u64)(len - 1) << 54; + input_hi ^= bitfliph; + /* + * Add the high 32 bits of input_hi to the high 32 bits of m128, then + * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to + * the high 64 bits of m128. + * + * The best approach to this operation is different on 32-bit and 64-bit. + */ + if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */ + /* + * 32-bit optimized version, which is more readable. + * + * On 32-bit, it removes an ADC and delays a dependency between the two + * halves of m128.high64, but it generates an extra mask on 64-bit. 
+ */ + m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2); + } else { + /* + * 64-bit optimized (albeit more confusing) version. + * + * Uses some properties of addition and multiplication to remove the mask: + * + * Let: + * a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF) + * b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000) + * c = XXH_PRIME32_2 + * + * a + (b * c) + * Inverse Property: x + y - x == y + * a + (b * (1 + c - 1)) + * Distributive Property: x * (y + z) == (x * y) + (x * z) + * a + (b * 1) + (b * (c - 1)) + * Identity Property: x * 1 == x + * a + b + (b * (c - 1)) + * + * Substitute a, b, and c: + * input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + * + * Since input_hi.hi + input_hi.lo == input_hi, we get this: + * input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1)) + */ + m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1); + } + /* m128 ^= XXH_swap64(m128 >> 64); */ + m128.low64 ^= XXH_swap64(m128.high64); + + { /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */ + XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2); + h128.high64 += m128.high64 * XXH_PRIME64_2; + + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = XXH3_avalanche(h128.high64); + return h128; + } } +} + +/* + * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN + */ +XXH_FORCE_INLINE XXH128_hash_t +XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed) +{ + XXH_ASSERT(len <= 16); + { if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed); + if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed); + if (len) return XXH3_len_1to3_128b(input, len, secret, seed); + { XXH128_hash_t h128; + xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72); + xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88); + h128.low64 = XXH64_avalanche(seed ^ bitflipl); + h128.high64 = XXH64_avalanche( seed ^ bitfliph); + return h128; + } } +} + +/* + * A bit slower than XXH3_mix16B, but handles multiply by zero better. 
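+ * (Presumably because each accumulator half also XORs in the other input's raw + * words, so a zeroed multiply cannot erase that input's contribution.)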
+ */ +XXH_FORCE_INLINE XXH128_hash_t +XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2, + const xxh_u8* secret, XXH64_hash_t seed) +{ + acc.low64 += XXH3_mix16B (input_1, secret+0, seed); + acc.low64 ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8); + acc.high64 += XXH3_mix16B (input_2, secret+16, seed); + acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8); + return acc; +} + + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(16 < len && len <= 128); + + { XXH128_hash_t acc; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + if (len > 32) { + if (len > 64) { + if (len > 96) { + acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed); + } + acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed); + } + acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed); + } + acc = XXH128_mix32B(acc, input, input+len-16, secret, seed); + { XXH128_hash_t h128; + h128.low64 = acc.low64 + acc.high64; + h128.high64 = (acc.low64 * XXH_PRIME64_1) + + (acc.high64 * XXH_PRIME64_4) + + ((len - seed) * XXH_PRIME64_2); + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); + return h128; + } + } +} + +XXH_NO_INLINE XXH128_hash_t +XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH64_hash_t seed) +{ + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize; + XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX); + + { XXH128_hash_t acc; + int const nbRounds = (int)len / 32; + int i; + acc.low64 = len * XXH_PRIME64_1; + acc.high64 = 0; + for (i=0; i<4; i++) { + acc = XXH128_mix32B(acc, + input + (32 * i), + input + (32 * i) + 16, + secret + (32 * i), + seed); + } + acc.low64 = XXH3_avalanche(acc.low64); + acc.high64 = XXH3_avalanche(acc.high64); + XXH_ASSERT(nbRounds >= 4); + for (i=4 ; i < nbRounds; i++) { + acc = XXH128_mix32B(acc, + input + (32 * i), + input + (32 * i) + 16, + secret + XXH3_MIDSIZE_STARTOFFSET + (32 * (i - 4)), + seed); + } + /* last bytes */ + acc = XXH128_mix32B(acc, + input + len - 16, + input + len - 32, + secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16, + 0ULL - seed); + + { XXH128_hash_t h128; + h128.low64 = acc.low64 + acc.high64; + h128.high64 = (acc.low64 * XXH_PRIME64_1) + + (acc.high64 * XXH_PRIME64_4) + + ((len - seed) * XXH_PRIME64_2); + h128.low64 = XXH3_avalanche(h128.low64); + h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64); + return h128; + } + } +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len, + const xxh_u8* XXH_RESTRICT secret, size_t secretSize, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble) +{ + XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC; + + XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc512, f_scramble); + + /* converge into final hash */ + XXH_STATIC_ASSERT(sizeof(acc) == 64); + XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + { XXH128_hash_t h128; + h128.low64 = XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)len * XXH_PRIME64_1); + h128.high64 = XXH3_mergeAccs(acc, + secret + secretSize + - sizeof(acc) - XXH_SECRET_MERGEACCS_START, + 
~((xxh_u64)len * XXH_PRIME64_2)); + return h128; + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH128_hash_t +XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; (void)secret; (void)secretLen; + return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_accumulate_512, XXH3_scrambleAcc); +} + +/* + * It's important for performance to pass @secretLen (when it's static) + * to the compiler, so that it can properly optimize the vectorized loop. + */ +XXH_FORCE_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)seed64; + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen, + XXH3_accumulate_512, XXH3_scrambleAcc); +} + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len, + XXH64_hash_t seed64, + XXH3_f_accumulate_512 f_acc512, + XXH3_f_scrambleAcc f_scramble, + XXH3_f_initCustomSecret f_initSec) +{ + if (seed64 == 0) + return XXH3_hashLong_128b_internal(input, len, + XXH3_kSecret, sizeof(XXH3_kSecret), + f_acc512, f_scramble); + { XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + f_initSec(secret, seed64); + return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret), + f_acc512, f_scramble); + } +} + +/* + * It's important for performance that XXH3_hashLong is not inlined. + */ +XXH_NO_INLINE XXH128_hash_t +XXH3_hashLong_128b_withSeed(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen) +{ + (void)secret; (void)secretLen; + return XXH3_hashLong_128b_withSeed_internal(input, len, seed64, + XXH3_accumulate_512, XXH3_scrambleAcc, XXH3_initCustomSecret); +} + +typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t, + XXH64_hash_t, const void* XXH_RESTRICT, size_t); + +XXH_FORCE_INLINE XXH128_hash_t +XXH3_128bits_internal(const void* input, size_t len, + XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen, + XXH3_hashLong128_f f_hl128) +{ + XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN); + /* + * If an action is to be taken if `secret` conditions are not respected, + * it should be done here. + * For now, it's a contract pre-condition. + * Adding a check and a branch here would cost performance at every hash. + */ + if (len <= 16) + return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64); + if (len <= 128) + return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64); + return f_hl128(input, len, seed64, secret, secretLen); +} + + +/* === Public XXH128 API === */ + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* input, size_t len) +{ + return XXH3_128bits_internal(input, len, 0, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_default); +} + +/*! 
@ingroup xxh3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecret(const void* input, size_t len, const void* secret, size_t secretSize) +{ + return XXH3_128bits_internal(input, len, 0, + (const xxh_u8*)secret, secretSize, + XXH3_hashLong_128b_withSecret); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSeed(const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_internal(input, len, seed, + XXH3_kSecret, sizeof(XXH3_kSecret), + XXH3_hashLong_128b_withSeed); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH3_128bits_withSecretandSeed(const void* input, size_t len, const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + if (len <= XXH3_MIDSIZE_MAX) + return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL); + return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH128(const void* input, size_t len, XXH64_hash_t seed) +{ + return XXH3_128bits_withSeed(input, len, seed); +} + + +/* === XXH3 128-bit streaming === */ + +/* + * All initialization and update functions are identical to 64-bit streaming variant. + * The only difference is the finalization routine. + */ + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset(XXH3_state_t* statePtr) +{ + return XXH3_64bits_reset(statePtr); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize) +{ + return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSeed(XXH3_state_t* statePtr, XXH64_hash_t seed) +{ + return XXH3_64bits_reset_withSeed(statePtr, seed); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr, const void* secret, size_t secretSize, XXH64_hash_t seed) +{ + return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_128bits_update(XXH3_state_t* state, const void* input, size_t len) +{ + return XXH3_update(state, (const xxh_u8*)input, len, + XXH3_accumulate_512, XXH3_scrambleAcc); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* state) +{ + const unsigned char* const secret = (state->extSecret == NULL) ? 
state->customSecret : state->extSecret; + if (state->totalLen > XXH3_MIDSIZE_MAX) { + XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB]; + XXH3_digest_long(acc, state, secret); + XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START); + { XXH128_hash_t h128; + h128.low64 = XXH3_mergeAccs(acc, + secret + XXH_SECRET_MERGEACCS_START, + (xxh_u64)state->totalLen * XXH_PRIME64_1); + h128.high64 = XXH3_mergeAccs(acc, + secret + state->secretLimit + XXH_STRIPE_LEN + - sizeof(acc) - XXH_SECRET_MERGEACCS_START, + ~((xxh_u64)state->totalLen * XXH_PRIME64_2)); + return h128; + } + } + /* len <= XXH3_MIDSIZE_MAX : short code */ + if (state->seed) + return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed); + return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen), + secret, state->secretLimit + XXH_STRIPE_LEN); +} + +/* 128-bit utility functions */ + +#include <string.h> /* memcmp, memcpy */ + +/* return : 1 if equal, 0 if different */ +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2) +{ + /* note : XXH128_hash_t is compact; it has no padding bytes */ + return !(memcmp(&h1, &h2, sizeof(h1))); +} + +/* This prototype is compatible with stdlib's qsort(). + * return : >0 if *h128_1 > *h128_2 + * <0 if *h128_1 < *h128_2 + * =0 if *h128_1 == *h128_2 */ +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2) +{ + XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1; + XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2; + int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64); + /* note : this bets that, in most cases, hash values are different */ + if (hcmp) return hcmp; + return (h1.low64 > h2.low64) - (h2.low64 > h1.low64); +} + + +/*====== Canonical representation ======*/ +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API void +XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t)); + if (XXH_CPU_LITTLE_ENDIAN) { + hash.high64 = XXH_swap64(hash.high64); + hash.low64 = XXH_swap64(hash.low64); + } + XXH_memcpy(dst, &hash.high64, sizeof(hash.high64)); + XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64)); +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API XXH128_hash_t +XXH128_hashFromCanonical(const XXH128_canonical_t* src) +{ + XXH128_hash_t h; + h.high64 = XXH_readBE64(src); + h.low64 = XXH_readBE64(src->digest + 8); + return h; +} + + + +/* ========================================== + * Secret generators + * ========================================== + */ +#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x)) + +XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128) +{ + XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 ); + XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 ); +} + +/*! 
@ingroup xxh3_family */ +XXH_PUBLIC_API XXH_errorcode +XXH3_generateSecret(void* secretBuffer, size_t secretSize, const void* customSeed, size_t customSeedSize) +{ +#if (XXH_DEBUGLEVEL >= 1) + XXH_ASSERT(secretBuffer != NULL); + XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); +#else + /* production mode, assert() are disabled */ + if (secretBuffer == NULL) return XXH_ERROR; + if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR; +#endif + + if (customSeedSize == 0) { + customSeed = XXH3_kSecret; + customSeedSize = XXH_SECRET_DEFAULT_SIZE; + } +#if (XXH_DEBUGLEVEL >= 1) + XXH_ASSERT(customSeed != NULL); +#else + if (customSeed == NULL) return XXH_ERROR; +#endif + + /* Fill secretBuffer with a copy of customSeed - repeat as needed */ + { size_t pos = 0; + while (pos < secretSize) { + size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize); + memcpy((char*)secretBuffer + pos, customSeed, toCopy); + pos += toCopy; + } } + + { size_t const nbSeg16 = secretSize / 16; + size_t n; + XXH128_canonical_t scrambler; + XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0)); + for (n=0; n<nbSeg16; n++) { + XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n); + XXH3_combine16((char*)secretBuffer + n*16, h128); + } + /* last segment */ + XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler)); + } + return XXH_OK; +} + +/*! @ingroup xxh3_family */ +XXH_PUBLIC_API void +XXH3_generateSecret_fromSeed(void* secretBuffer, XXH64_hash_t seed) +{ + XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE]; + XXH3_initCustomSecret(secret, seed); + XXH_ASSERT(secretBuffer != NULL); + memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE); +} + + + +/* Pop our optimization override from above */ +#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \ + && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \ + && defined(__OPTIMIZE__) && !defined(__OPTIMIZE_SIZE__) /* respect -O0 and -Os */ +# pragma GCC pop_options +#endif + +#endif /* XXH_NO_LONG_LONG */ + +#endif /* XXH_NO_XXH3 */ + +/*! + * @} + */ +#endif /* XXH_IMPLEMENTATION */ #if defined (__cplusplus) diff --git a/thirdparty/zstd/common/zstd_internal.h b/thirdparty/zstd/common/zstd_internal.h index 68252e987e..e4d36ce090 100644 --- a/thirdparty/zstd/common/zstd_internal.h +++ b/thirdparty/zstd/common/zstd_internal.h @@ -19,10 +19,8 @@ /*-************************************* * Dependencies ***************************************/ -#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON) -#include <arm_neon.h> -#endif #include "compiler.h" +#include "cpu.h" #include "mem.h" #include "debug.h" /* assert, DEBUGLOG, RAWLOG, g_debuglevel */ #include "error_private.h" @@ -60,81 +58,7 @@ extern "C" { #undef MAX #define MIN(a,b) ((a)<(b) ? (a) : (b)) #define MAX(a,b) ((a)>(b) ? (a) : (b)) - -/** - * Ignore: this is an internal helper. - * - * This is a helper function to help force C99-correctness during compilation. - * Under strict compilation modes, variadic macro arguments can't be empty. - * However, variadic function arguments can be. Using a function therefore lets - * us statically check that at least one (string) argument was passed, - * independent of the compilation flags. - */ -static INLINE_KEYWORD UNUSED_ATTR -void _force_has_format_string(const char *format, ...) { - (void)format; -} - -/** - * Ignore: this is an internal helper. 
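/*
 * Illustrative sketch, not part of the upstream patch: XXH3_generateSecret()
 * above stretches seed material of any length and entropy into a full secret.
 * A minimal round trip, assuming XXH3_SECRET_SIZE_MIN from xxhash.h:
 */
static XXH128_hash_t hash_with_custom_secret(const void* data, size_t len,
                                             const void* keyMaterial, size_t keyLen)
{
    unsigned char secret[XXH3_SECRET_SIZE_MIN];
    /* fails only on a NULL or undersized buffer, per the checks above */
    (void)XXH3_generateSecret(secret, sizeof(secret), keyMaterial, keyLen);
    return XXH3_128bits_withSecret(data, len, secret, sizeof(secret));
}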
- * - * We want to force this function invocation to be syntactically correct, but - * we don't want to force runtime evaluation of its arguments. - */ -#define _FORCE_HAS_FORMAT_STRING(...) \ - if (0) { \ - _force_has_format_string(__VA_ARGS__); \ - } - -/** - * Return the specified error if the condition evaluates to true. - * - * In debug modes, prints additional information. - * In order to do that (particularly, printing the conditional that failed), - * this can't just wrap RETURN_ERROR(). - */ -#define RETURN_ERROR_IF(cond, err, ...) \ - if (cond) { \ - RAWLOG(3, "%s:%d: ERROR!: check %s failed, returning %s", \ - __FILE__, __LINE__, ZSTD_QUOTE(cond), ZSTD_QUOTE(ERROR(err))); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return ERROR(err); \ - } - -/** - * Unconditionally return the specified error. - * - * In debug modes, prints additional information. - */ -#define RETURN_ERROR(err, ...) \ - do { \ - RAWLOG(3, "%s:%d: ERROR!: unconditional check failed, returning %s", \ - __FILE__, __LINE__, ZSTD_QUOTE(ERROR(err))); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return ERROR(err); \ - } while(0); - -/** - * If the provided expression evaluates to an error code, returns that error code. - * - * In debug modes, prints additional information. - */ -#define FORWARD_IF_ERROR(err, ...) \ - do { \ - size_t const err_code = (err); \ - if (ERR_isError(err_code)) { \ - RAWLOG(3, "%s:%d: ERROR!: forwarding error in %s: %s", \ - __FILE__, __LINE__, ZSTD_QUOTE(err), ERR_getErrorName(err_code)); \ - _FORCE_HAS_FORMAT_STRING(__VA_ARGS__); \ - RAWLOG(3, ": " __VA_ARGS__); \ - RAWLOG(3, "\n"); \ - return err_code; \ - } \ - } while(0); +#define BOUNDED(min,val,max) (MAX(min,MIN(val,max))) /*-************************************* @@ -143,7 +67,6 @@ void _force_has_format_string(const char *format, ...) { #define ZSTD_OPT_NUM (1<<12) #define ZSTD_REP_NUM 3 /* number of repcodes */ -#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; #define KB *(1 <<10) @@ -195,7 +118,7 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy /* Each table cannot take more than #symbols * FSELog bits */ #define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8) -static UNUSED_ATTR const U32 LL_bits[MaxLL+1] = { +static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, @@ -212,7 +135,7 @@ static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = { #define LL_DEFAULTNORMLOG 6 /* for static allocation */ static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG; -static UNUSED_ATTR const U32 ML_bits[MaxML+1] = { +static UNUSED_ATTR const U8 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -247,19 +170,30 @@ static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG; * Shared functions to include for inlining *********************************************/ static void ZSTD_copy8(void* dst, const void* src) { -#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON) +#if defined(ZSTD_ARCH_ARM_NEON) vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src)); #else ZSTD_memcpy(dst, src, 8); #endif } - #define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; } + +/* Need to use memmove here since the literal buffer can now be located within + the dst buffer. 
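/*
 * Illustrative sketch, not part of the upstream patch: the
 * _FORCE_HAS_FORMAT_STRING trick in the macros removed above is a general
 * C99 pattern. Under strict C99, variadic macro arguments cannot be empty,
 * but a variadic function can be called with just a format string, so an
 * `if (0)` call type-checks that at least one argument was passed, at zero
 * run-time cost. In isolation:
 */
#include <stdio.h>

static void check_has_format(const char* fmt, ...) { (void)fmt; }

#ifndef NDEBUG
#  define DEBUG_PRINT(...) fprintf(stderr, __VA_ARGS__)
#else
#  define DEBUG_PRINT(...) /* compiled out in release builds */
#endif

#define LOG_ERROR(...)                              \
    do {                                            \
        if (0) { check_has_format(__VA_ARGS__); }   \
        DEBUG_PRINT(__VA_ARGS__);                   \
    } while (0)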
In circumstances where the op "catches up" to where the + literal buffer is, there can be partial overlaps in this call on the final + copy if the literal is being shifted by less than 16 bytes. */ static void ZSTD_copy16(void* dst, const void* src) { -#if !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON) +#if defined(ZSTD_ARCH_ARM_NEON) vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src)); +#elif defined(ZSTD_ARCH_X86_SSE2) + _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src)); +#elif defined(__clang__) + ZSTD_memmove(dst, src, 16); #else - ZSTD_memcpy(dst, src, 16); + /* ZSTD_memmove is not inlined properly by gcc */ + BYTE copy16_buf[16]; + ZSTD_memcpy(copy16_buf, src, 16); + ZSTD_memcpy(dst, copy16_buf, 16); #endif } #define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; } @@ -288,8 +222,6 @@ void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e BYTE* op = (BYTE*)dst; BYTE* const oend = op + length; - assert(diff >= 8 || (ovtype == ZSTD_no_overlap && diff <= -WILDCOPY_VECLEN)); - if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) { /* Handle short offset copies. */ do { @@ -352,9 +284,9 @@ typedef enum { * Private declarations *********************************************/ typedef struct seqDef_s { - U32 offset; /* offset == rawOffset + ZSTD_REP_NUM, or equivalently, offCode + 1 */ + U32 offBase; /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */ U16 litLength; - U16 matchLength; + U16 mlBase; /* mlBase == matchLength - MINMATCH */ } seqDef; /* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */ @@ -396,7 +328,7 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore { ZSTD_sequenceLength seqLen; seqLen.litLength = seq->litLength; - seqLen.matchLength = seq->matchLength + MINMATCH; + seqLen.matchLength = seq->mlBase + MINMATCH; if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) { if (seqStore->longLengthType == ZSTD_llt_literalLength) { seqLen.litLength += 0xFFFF; @@ -436,8 +368,14 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus # if STATIC_BMI2 == 1 return _lzcnt_u32(val)^31; # else - unsigned long r=0; - return _BitScanReverse(&r, val) ? (unsigned)r : 0; + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } # endif # elif defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */ return __builtin_clz (val) ^ 31; @@ -456,6 +394,63 @@ MEM_STATIC U32 ZSTD_highbit32(U32 val) /* compress, dictBuilder, decodeCorpus } } +/** + * Counts the number of trailing zeros of a `size_t`. + * Most compilers should support CTZ as a builtin. A backup + * implementation is provided if the builtin isn't supported, but + * it may not be terribly efficient. 
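/*
 * Illustrative sketch, not part of the upstream patch: the De Bruijn fallback
 * below works because `val & -val` isolates the lowest set bit, so the
 * multiply maps each power of two to a distinct 6-bit (or 5-bit) table index.
 * A self-check against a naive loop, with hypothetical test scaffolding:
 */
#include <assert.h>
#include <stddef.h>

static unsigned ctz_naive(size_t val)
{
    unsigned n = 0;
    assert(val != 0); /* ctz(0) is undefined, matching the code below */
    while ((val & 1) == 0) { val >>= 1; n++; }
    return n;
}

static void ctz_selftest(void)
{
    unsigned bit;
    for (bit = 0; bit < (unsigned)(sizeof(size_t) * 8); bit++) {
        size_t const v = (size_t)1 << bit;
        /* compare against ZSTD_countTrailingZeros(v) when linked in */
        assert(ctz_naive(v) == bit);
    }
}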
+ */ +MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val) +{ + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) +# if STATIC_BMI2 + return _tzcnt_u64(val); +# else + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, (U64)val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } +# endif +# elif defined(__GNUC__) && (__GNUC__ >= 4) + return __builtin_ctzll((U64)val); +# else + static const int DeBruijnBytePos[64] = { 0, 1, 2, 7, 3, 13, 8, 19, + 4, 25, 14, 28, 9, 34, 20, 56, + 5, 17, 26, 54, 15, 41, 29, 43, + 10, 31, 38, 35, 21, 45, 49, 57, + 63, 6, 12, 18, 24, 27, 33, 55, + 16, 53, 40, 42, 30, 37, 44, 48, + 62, 11, 23, 32, 52, 39, 36, 47, + 61, 22, 51, 46, 60, 50, 59, 58 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + if (val != 0) { + unsigned long r; + _BitScanForward(&r, (U32)val); + return (unsigned)r; + } else { + /* Should not reach this code path */ + __assume(0); + } +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return __builtin_ctz((U32)val); +# else + static const int DeBruijnBytePos[32] = { 0, 1, 28, 2, 29, 14, 24, 3, + 30, 22, 20, 15, 25, 17, 4, 8, + 31, 27, 13, 23, 21, 19, 16, 7, + 26, 12, 18, 6, 11, 5, 10, 9 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } +} + /* ZSTD_invalidateRepCodes() : * ensures next compression will not use repcodes from previous block. @@ -482,6 +477,14 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, const void* src, size_t srcSize); +/** + * @returns true iff the CPU supports dynamic BMI2 dispatch. + */ +MEM_STATIC int ZSTD_cpuSupportsBmi2(void) +{ + ZSTD_cpuid_t cpuid = ZSTD_cpuid(); + return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid); +} #if defined (__cplusplus) } diff --git a/thirdparty/zstd/common/zstd_trace.h b/thirdparty/zstd/common/zstd_trace.h index 2da5640771..f9121f7d8e 100644 --- a/thirdparty/zstd/common/zstd_trace.h +++ b/thirdparty/zstd/common/zstd_trace.h @@ -17,10 +17,19 @@ extern "C" { #include <stddef.h> -/* weak symbol support */ -#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && defined(__GNUC__) && \ +/* weak symbol support + * For now, enable conservatively: + * - Only GNUC + * - Only ELF + * - Only x86-64 and i386 + * Also, explicitly disable on platforms known not to work so they aren't + * forgotten in the future. + */ +#if !defined(ZSTD_HAVE_WEAK_SYMBOLS) && \ + defined(__GNUC__) && defined(__ELF__) && \ + (defined(__x86_64__) || defined(_M_X64) || defined(__i386__) || defined(_M_IX86)) && \ !defined(__APPLE__) && !defined(_WIN32) && !defined(__MINGW32__) && \ - !defined(__CYGWIN__) + !defined(__CYGWIN__) && !defined(_AIX) # define ZSTD_HAVE_WEAK_SYMBOLS 1 #else # define ZSTD_HAVE_WEAK_SYMBOLS 0 diff --git a/thirdparty/zstd/compress/clevels.h b/thirdparty/zstd/compress/clevels.h new file mode 100644 index 0000000000..7ed2e00490 --- /dev/null +++ b/thirdparty/zstd/compress/clevels.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. 
+ */ + +#ifndef ZSTD_CLEVELS_H +#define ZSTD_CLEVELS_H + +#define ZSTD_STATIC_LINKING_ONLY /* ZSTD_compressionParameters */ +#include "../zstd.h" + +/*-===== Pre-defined compression levels =====-*/ + +#define ZSTD_MAX_CLEVEL 22 + +#ifdef __GNUC__ +__attribute__((__unused__)) +#endif + +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { +{ /* "default" - for any srcSize > 256 KB */ + /* W, C, H, S, L, TL, strat */ + { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ + { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */ + { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */ + { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */ + { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */ + { 21, 18, 19, 3, 5, 2, ZSTD_greedy }, /* level 5 */ + { 21, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6 */ + { 21, 19, 20, 4, 5, 8, ZSTD_lazy }, /* level 7 */ + { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 8 */ + { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */ + { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 10 */ + { 22, 21, 22, 6, 5, 16, ZSTD_lazy2 }, /* level 11 */ + { 22, 22, 23, 6, 5, 32, ZSTD_lazy2 }, /* level 12 */ + { 22, 22, 22, 4, 5, 32, ZSTD_btlazy2 }, /* level 13 */ + { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ + { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ + { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */ + { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */ + { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */ + { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */ + { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */ + { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */ + { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */ +}, +{ /* for srcSize <= 256 KB */ + /* W, C, H, S, L, T, strat */ + { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */ + { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */ + { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */ + { 18, 16, 17, 3, 5, 2, ZSTD_greedy }, /* level 4.*/ + { 18, 17, 18, 5, 5, 2, ZSTD_greedy }, /* level 5.*/ + { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/ + { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */ + { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/ + { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/ + { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */ + { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ + { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/ + { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/ + { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/ + { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/ + { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +{ /* for srcSize <= 128 KB */ + /* W, C, H, S, L, T, strat */ + { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */ + { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */ + { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */ + { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */ + { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */ + { 17, 16, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */ + { 17, 16, 17, 3, 4, 
8, ZSTD_lazy2 }, /* level 7 */ + { 17, 16, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 17, 16, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 17, 16, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */ + { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */ + { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/ + { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ + { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/ + { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/ + { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/ + { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/ + { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 17, 18, 17, 11, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +{ /* for srcSize <= 16 KB */ + /* W, C, H, S, L, T, strat */ + { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ + { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */ + { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */ + { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */ + { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */ + { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/ + { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */ + { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */ + { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/ + { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/ + { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/ + { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/ + { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/ + { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/ + { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/ + { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/ + { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/ + { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ + { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/ + { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ + { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/ +}, +}; + + + +#endif /* ZSTD_CLEVELS_H */ diff --git a/thirdparty/zstd/compress/fse_compress.c b/thirdparty/zstd/compress/fse_compress.c index b4297ec88a..5547b4ac09 100644 --- a/thirdparty/zstd/compress/fse_compress.c +++ b/thirdparty/zstd/compress/fse_compress.c @@ -75,13 +75,14 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? 
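/*
 * Illustrative sketch, not part of the upstream patch: the four tables above
 * are selected by source-size class (any size / <=256 KB / <=128 KB / <=16 KB),
 * and the row actually chosen for a given level can be inspected through
 * ZSTD_getCParams() from zstd.h's static-linking-only section:
 */
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#include <stdio.h>

static void show_cparams(int level, unsigned long long srcSize)
{
    ZSTD_compressionParameters const cp =
        ZSTD_getCParams(level, srcSize, /* dictSize */ 0);
    /* e.g. level 3 with a 100 KB source should land in the "<=128 KB" table */
    printf("W=%u C=%u H=%u S=%u L=%u TL=%u strat=%d\n",
           cp.windowLog, cp.chainLog, cp.hashLog, cp.searchLog,
           cp.minMatch, cp.targetLength, (int)cp.strategy);
}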
tableSize>>1 : 1) ; FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); U32 const step = FSE_TABLESTEP(tableSize); + U32 const maxSV1 = maxSymbolValue+1; - U32* cumul = (U32*)workSpace; - FSE_FUNCTION_TYPE* tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSymbolValue + 2)); + U16* cumul = (U16*)workSpace; /* size = maxSV1 */ + FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)(cumul + (maxSV1+1)); /* size = tableSize */ U32 highThreshold = tableSize-1; - if ((size_t)workSpace & 3) return ERROR(GENERIC); /* Must be 4 byte aligned */ + assert(((size_t)workSpace & 1) == 0); /* Must be 2-byte aligned */ if (FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) > wkspSize) return ERROR(tableLog_tooLarge); /* CTable header */ tableU16[-2] = (U16) tableLog; @@ -98,20 +99,61 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, /* symbol start positions */ { U32 u; cumul[0] = 0; - for (u=1; u <= maxSymbolValue+1; u++) { + for (u=1; u <= maxSV1; u++) { if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ cumul[u] = cumul[u-1] + 1; tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1); } else { - cumul[u] = cumul[u-1] + normalizedCounter[u-1]; + assert(normalizedCounter[u-1] >= 0); + cumul[u] = cumul[u-1] + (U16)normalizedCounter[u-1]; + assert(cumul[u] >= cumul[u-1]); /* no overflow */ } } - cumul[maxSymbolValue+1] = tableSize+1; + cumul[maxSV1] = (U16)(tableSize+1); } /* Spread symbols */ - { U32 position = 0; + if (highThreshold == tableSize - 1) { + /* Case with no low-probability count symbols. Lay down 8 bytes at a time + * to reduce branch misses since we are operating on a small block + */ + BYTE* const spread = tableSymbol + tableSize; /* size = tableSize + 8 (may write beyond tableSize) */ + { U64 const add = 0x0101010101010101ull; + size_t pos = 0; + U64 sv = 0; + U32 s; + for (s=0; s<maxSV1; ++s, sv += add) { + int i; + int const n = normalizedCounter[s]; + MEM_write64(spread + pos, sv); + for (i = 8; i < n; i += 8) { + MEM_write64(spread + pos + i, sv); + } + assert(n>=0); + pos += (size_t)n; + } + } + /* Spread symbols across the table. The lack of low-probability symbols means that + * we don't need a variable-sized inner loop, so we can unroll the loop and + * reduce branch misses.
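/*
 * Worked example, not part of the upstream patch: in the block above, `sv` is
 * the symbol value splatted across all 8 bytes (s * 0x0101010101010101), so
 * each MEM_write64() lays down 8 copies of the symbol at once. A symbol with
 * normalized count n can overshoot by up to 7 bytes, which is exactly why
 * `spread` is sized tableSize + 8.
 */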
+ */ + { size_t position = 0; + size_t s; + size_t const unroll = 2; /* Experimentally determined optimal unroll */ + assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */ + for (s = 0; s < (size_t)tableSize; s += unroll) { + size_t u; + for (u = 0; u < unroll; ++u) { + size_t const uPosition = (position + (u * step)) & tableMask; + tableSymbol[uPosition] = spread[s + u]; + } + position = (position + (unroll * step)) & tableMask; + } + assert(position == 0); /* Must have initialized all positions */ + } + } else { + U32 position = 0; U32 symbol; - for (symbol=0; symbol<=maxSymbolValue; symbol++) { + for (symbol=0; symbol<maxSV1; symbol++) { int nbOccurrences; int const freq = normalizedCounter[symbol]; for (nbOccurrences=0; nbOccurrences<freq; nbOccurrences++) { @@ -120,7 +162,6 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, while (position > highThreshold) position = (position + step) & tableMask; /* Low proba area */ } } - assert(position==0); /* Must have initialized all positions */ } @@ -144,16 +185,17 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, case -1: case 1: symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog); - symbolTT[s].deltaFindState = total - 1; + assert(total <= INT_MAX); + symbolTT[s].deltaFindState = (int)(total - 1); total ++; break; default : - { - U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1); - U32 const minStatePlus = normalizedCounter[s] << maxBitsOut; + assert(normalizedCounter[s] > 1); + { U32 const maxBitsOut = tableLog - BIT_highbit32 ((U32)normalizedCounter[s]-1); + U32 const minStatePlus = (U32)normalizedCounter[s] << maxBitsOut; symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus; - symbolTT[s].deltaFindState = total - normalizedCounter[s]; - total += normalizedCounter[s]; + symbolTT[s].deltaFindState = (int)(total - (unsigned)normalizedCounter[s]); + total += (unsigned)normalizedCounter[s]; } } } } #if 0 /* debug : symbol costs */ @@ -164,32 +206,26 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, symbol, normalizedCounter[symbol], FSE_getMaxNbBits(symbolTT, symbol), (double)FSE_bitCost(symbolTT, tableLog, symbol, 8) / 256); - } - } + } } #endif return 0; } -#ifndef ZSTD_NO_UNUSED_FUNCTIONS -size_t FSE_buildCTable(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) -{ - FSE_FUNCTION_TYPE tableSymbol[FSE_MAX_TABLESIZE]; /* memset() is not necessary, even if static analyzer complain about it */ - return FSE_buildCTable_wksp(ct, normalizedCounter, maxSymbolValue, tableLog, tableSymbol, sizeof(tableSymbol)); -} -#endif - #ifndef FSE_COMMONDEFS_ONLY - /*-************************************************************** * FSE NCount encoding ****************************************************************/ size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog) { - size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3; + size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog + + 4 /* bitCount initialized at 4 */ + + 2 /* first two symbols may use one additional bit each */) / 8) + + 1 /* round up to whole nb bytes */ + + 2 /* additional two bytes for bitstream flush */; return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? 
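/*
 * Worked example, not part of the upstream patch: the reworked bound above
 * makes each slack term explicit. For maxSymbolValue = 254 and tableLog = 9:
 *
 *     new: (255*9 + 4 + 2) / 8 + 1 + 2  =  2301/8 + 3  =  287 + 3  =  290
 *     old: ((255*9) >> 3) + 3           =  286 + 3                 =  289
 *
 * i.e. the old estimate could come up one byte short once the initial
 * bitCount and the two possible extra bits crossed a byte boundary.
 */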
use default */ } diff --git a/thirdparty/zstd/compress/huf_compress.c b/thirdparty/zstd/compress/huf_compress.c index 485906e678..2b3d6adc2a 100644 --- a/thirdparty/zstd/compress/huf_compress.c +++ b/thirdparty/zstd/compress/huf_compress.c @@ -53,6 +53,28 @@ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxS /* ******************************************************* * HUF : Huffman block compression *********************************************************/ +#define HUF_WORKSPACE_MAX_ALIGNMENT 8 + +static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align) +{ + size_t const mask = align - 1; + size_t const rem = (size_t)workspace & mask; + size_t const add = (align - rem) & mask; + BYTE* const aligned = (BYTE*)workspace + add; + assert((align & (align - 1)) == 0); /* pow 2 */ + assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT); + if (*workspaceSizePtr >= add) { + assert(add < align); + assert(((size_t)aligned & mask) == 0); + *workspaceSizePtr -= add; + return aligned; + } else { + *workspaceSizePtr = 0; + return NULL; + } +} + + /* HUF_compressWeights() : * Same as FSE_compress(), but dedicated to huff0's weights compression. * The use case needs much less stack memory. @@ -75,7 +97,7 @@ static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightT unsigned maxSymbolValue = HUF_TABLELOG_MAX; U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; - HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)workspace; + HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC); @@ -106,6 +128,40 @@ static size_t HUF_compressWeights(void* dst, size_t dstSize, const void* weightT return (size_t)(op-ostart); } +static size_t HUF_getNbBits(HUF_CElt elt) +{ + return elt & 0xFF; +} + +static size_t HUF_getNbBitsFast(HUF_CElt elt) +{ + return elt; +} + +static size_t HUF_getValue(HUF_CElt elt) +{ + return elt & ~0xFF; +} + +static size_t HUF_getValueFast(HUF_CElt elt) +{ + return elt; +} + +static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits) +{ + assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX); + *elt = nbBits; +} + +static void HUF_setValue(HUF_CElt* elt, size_t value) +{ + size_t const nbBits = HUF_getNbBits(*elt); + if (nbBits > 0) { + assert((value >> nbBits) == 0); + *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits); + } +} typedef struct { HUF_CompressWeightsWksp wksp; @@ -117,9 +173,10 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize) { + HUF_CElt const* const ct = CTable + 1; BYTE* op = (BYTE*)dst; U32 n; - HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)workspace; + HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); /* check conditions */ if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC); @@ -130,9 +187,10 @@ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, for (n=1; n<huffLog+1; n++) wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n); for (n=0; n<maxSymbolValue; n++) - wksp->huffWeight[n] = wksp->bitsToWeight[CTable[n].nbBits]; + wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])]; /* attempt weights compression by FSE */ + if (maxDstSize < 1) return ERROR(dstSize_tooSmall); { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, 
maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) ); if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ op[0] = (BYTE)hSize; @@ -166,6 +224,7 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ U32 tableLog = 0; U32 nbSymbols = 0; + HUF_CElt* const ct = CTable + 1; /* get symbol weights */ CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize)); @@ -175,6 +234,8 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); + CTable[0] = tableLog; + /* Prepare base value per rank */ { U32 n, nextRankStart = 0; for (n=1; n<=tableLog; n++) { @@ -186,13 +247,13 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void /* fill nbBits */ { U32 n; for (n=0; n<nbSymbols; n++) { const U32 w = huffWeight[n]; - CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0); + HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0)); } } /* fill val */ { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; - { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; } + { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; } /* determine starting value per rank */ valPerRank[tableLog+1] = 0; /* for w==0 */ { U16 min = 0; @@ -202,18 +263,18 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void min >>= 1; } } /* assign value within rank, symbol order */ - { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; } + { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); } } *maxSymbolValuePtr = nbSymbols - 1; return readSize; } -U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue) +U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue) { - const HUF_CElt* table = (const HUF_CElt*)symbolTable; + const HUF_CElt* ct = CTable + 1; assert(symbolValue <= HUF_SYMBOLVALUE_MAX); - return table[symbolValue].nbBits; + return (U32)HUF_getNbBits(ct[symbolValue]); } @@ -367,22 +428,118 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) } typedef struct { - U32 base; - U32 curr; + U16 base; + U16 curr; } rankPos; typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32]; -#define RANK_POSITION_TABLE_SIZE 32 +/* Number of buckets available for HUF_sort() */ +#define RANK_POSITION_TABLE_SIZE 192 typedef struct { huffNodeTable huffNodeTbl; rankPos rankPosition[RANK_POSITION_TABLE_SIZE]; } HUF_buildCTable_wksp_tables; +/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing. + * Strategy is to use as many buckets as possible for representing distinct + * counts while using the remainder to represent all "large" counts. + * + * To satisfy this requirement for 192 buckets, we can do the following: + * Let buckets 0-166 represent distinct counts of [0, 166] + * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
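/*
 * Worked example, not part of the upstream patch: with the constants defined
 * just below, HUF_getIndex() maps small counts to their own bucket and large
 * counts to a log2 bucket, e.g.:
 *
 *     count = 5     ->  bucket 5                      (distinct range)
 *     count = 1000  ->  BIT_highbit32(1000) + 158  =  9 + 158  =  bucket 167
 *
 * so every count in [512, 1023] shares bucket 167 and is quick-sorted within
 * the bucket afterwards.
 */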
+ */ +#define RANK_POSITION_MAX_COUNT_LOG 32 +#define RANK_POSITION_LOG_BUCKETS_BEGIN (RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */ +#define RANK_POSITION_DISTINCT_COUNT_CUTOFF RANK_POSITION_LOG_BUCKETS_BEGIN + BIT_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */ + +/* Return the appropriate bucket index for a given count. See definition of + * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. + */ +static U32 HUF_getIndex(U32 const count) { + return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF) + ? count + : BIT_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN; +} + +/* Helper swap function for HUF_quickSortPartition() */ +static void HUF_swapNodes(nodeElt* a, nodeElt* b) { + nodeElt tmp = *a; + *a = *b; + *b = tmp; +} + +/* Returns 0 if the huffNode array is not sorted by descending count */ +MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) { + U32 i; + for (i = 1; i < maxSymbolValue1; ++i) { + if (huffNode[i].count > huffNode[i-1].count) { + return 0; + } + } + return 1; +} + +/* Insertion sort by descending order */ +HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) { + int i; + int const size = high-low+1; + huffNode += low; + for (i = 1; i < size; ++i) { + nodeElt const key = huffNode[i]; + int j = i - 1; + while (j >= 0 && huffNode[j].count < key.count) { + huffNode[j + 1] = huffNode[j]; + j--; + } + huffNode[j + 1] = key; + } +} + +/* Pivot helper function for quicksort. */ +static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) { + /* Simply select rightmost element as pivot. "Better" selectors like + * median-of-three don't experimentally appear to have any benefit. + */ + U32 const pivot = arr[high].count; + int i = low - 1; + int j = low; + for ( ; j < high; j++) { + if (arr[j].count > pivot) { + i++; + HUF_swapNodes(&arr[i], &arr[j]); + } + } + HUF_swapNodes(&arr[i + 1], &arr[high]); + return i + 1; +} + +/* Classic quicksort by descending with partially iterative calls + * to reduce worst case callstack size. + */ +static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) { + int const kInsertionSortThreshold = 8; + if (high - low < kInsertionSortThreshold) { + HUF_insertionSort(arr, low, high); + return; + } + while (low < high) { + int const idx = HUF_quickSortPartition(arr, low, high); + if (idx - low < high - idx) { + HUF_simpleQuickSort(arr, low, idx - 1); + low = idx + 1; + } else { + HUF_simpleQuickSort(arr, idx + 1, high); + high = idx - 1; + } + } +} + /** * HUF_sort(): * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. + * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket. * * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. * Must have (maxSymbolValue + 1) entries. @@ -390,44 +547,52 @@ typedef struct { * @param[in] maxSymbolValue Maximum symbol value. * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. */ -static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition) -{ - int n; - int const maxSymbolValue1 = (int)maxSymbolValue + 1; +static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) { + U32 n; + U32 const maxSymbolValue1 = maxSymbolValue+1; /* Compute base and set curr to base. 
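/*
 * Illustrative sketch, not part of the upstream patch: HUF_simpleQuickSort()
 * above bounds its stack usage with the classic trick of recursing only into
 * the smaller partition and iterating on the larger one, keeping the depth
 * O(log n) even on adversarial input. The bare pattern, with a hypothetical
 * partition_desc() pivot helper:
 */
static void quicksort_bounded(int arr[], int low, int high)
{
    while (low < high) {
        int const p = partition_desc(arr, low, high);
        if (p - low < high - p) {
            quicksort_bounded(arr, low, p - 1); /* recurse into smaller half */
            low = p + 1;                        /* loop on the larger half */
        } else {
            quicksort_bounded(arr, p + 1, high);
            high = p - 1;
        }
    }
}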
- * For symbol s let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1. - * Then 2^lowerRank <= count[n]+1 <= 2^rank. + * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1. + * See HUF_getIndex to see bucketing strategy. * We attribute each symbol to lowerRank's base value, because we want to know where * each rank begins in the output, so for rank R we want to count ranks R+1 and above. */ ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE); for (n = 0; n < maxSymbolValue1; ++n) { - U32 lowerRank = BIT_highbit32(count[n] + 1); + U32 lowerRank = HUF_getIndex(count[n]); + assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1); rankPosition[lowerRank].base++; } + assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0); + /* Set up the rankPosition table */ for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) { rankPosition[n-1].base += rankPosition[n].base; rankPosition[n-1].curr = rankPosition[n-1].base; } - /* Sort */ + + /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */ for (n = 0; n < maxSymbolValue1; ++n) { U32 const c = count[n]; - U32 const r = BIT_highbit32(c+1) + 1; - U32 pos = rankPosition[r].curr++; - /* Insert into the correct position in the rank. - * We have at most 256 symbols, so this insertion should be fine. - */ - while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) { - huffNode[pos] = huffNode[pos-1]; - pos--; - } + U32 const r = HUF_getIndex(c) + 1; + U32 const pos = rankPosition[r].curr++; + assert(pos < maxSymbolValue1); huffNode[pos].count = c; huffNode[pos].byte = (BYTE)n; } -} + /* Sort each bucket. */ + for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) { + U32 const bucketSize = rankPosition[n].curr-rankPosition[n].base; + U32 const bucketStartIdx = rankPosition[n].base; + if (bucketSize > 1) { + assert(bucketStartIdx < maxSymbolValue1); + HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1); + } + } + + assert(HUF_isSorted(huffNode, maxSymbolValue1)); +} /** HUF_buildCTable_wksp() : * Same as HUF_buildCTable(), but using externally allocated scratch buffer. 
@@ -490,6 +655,7 @@ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue) */ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits) { + HUF_CElt* const ct = CTable + 1; /* fill result into ctable (val, nbBits) */ int n; U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; @@ -505,20 +671,20 @@ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, i min >>= 1; } } for (n=0; n<alphabetSize; n++) - CTable[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ + HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */ for (n=0; n<alphabetSize; n++) - CTable[n].val = valPerRank[CTable[n].nbBits]++; /* assign value within rank, symbol order */ + HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */ + CTable[0] = maxNbBits; } -size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) +size_t HUF_buildCTable_wksp (HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) { - HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace; + HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32)); nodeElt* const huffNode0 = wksp_tables->huffNodeTbl; nodeElt* const huffNode = huffNode0+1; int nonNullRank; /* safety checks */ - if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) return ERROR(workSpace_tooSmall); if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; @@ -536,96 +702,334 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbo maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ - HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits); + HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits); return maxNbBits; } size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { + HUF_CElt const* ct = CTable + 1; size_t nbBits = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { - nbBits += CTable[s].nbBits * count[s]; + nbBits += HUF_getNbBits(ct[s]) * count[s]; } return nbBits >> 3; } int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { + HUF_CElt const* ct = CTable + 1; int bad = 0; int s; for (s = 0; s <= (int)maxSymbolValue; ++s) { - bad |= (count[s] != 0) & (CTable[s].nbBits == 0); + bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0); } return !bad; } size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } +/** HUF_CStream_t: + * Huffman uses its own BIT_CStream_t implementation. + * There are three major differences from BIT_CStream_t: + * 1. HUF_addBits() takes a HUF_CElt (size_t) which is + * the pair (nbBits, value) in the format: + * format: + * - Bits [0, 4) = nbBits + * - Bits [4, 64 - nbBits) = 0 + * - Bits [64 - nbBits, 64) = value + * 2. The bitContainer is built from the upper bits and + * right shifted. E.g. to add a new value of N bits + * you right shift the bitContainer by N, then or in + * the new value into the N upper bits. 
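/*
 * Worked example, not part of the upstream patch: a toy version of the
 * "shift right, OR into the top" scheme above, on an 8-bit container. Adding
 * a 3-bit value 0b101 to a container whose top 2 bits already hold 0b11:
 *
 *     container                =  1100 0000
 *     container >>= 3          ->  0001 1000
 *     container |= 0b101 << 5  ->  1011 1000   (new bits land on top)
 *
 * HUF_flushBits() below then emits exactly the top bitPos bits.
 */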
+ * 3. The bitstream has two bit containers. You can add + * bits to the second container and merge them into + * the first container. + */ + +#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8) + +typedef struct { + size_t bitContainer[2]; + size_t bitPos[2]; + + BYTE* startPtr; + BYTE* ptr; + BYTE* endPtr; +} HUF_CStream_t; + +/**! HUF_initCStream(): + * Initializes the bitstream. + * @returns 0 or an error code. + */ +static size_t HUF_initCStream(HUF_CStream_t* bitC, + void* startPtr, size_t dstCapacity) +{ + ZSTD_memset(bitC, 0, sizeof(*bitC)); + bitC->startPtr = (BYTE*)startPtr; + bitC->ptr = bitC->startPtr; + bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]); + if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall); + return 0; +} + +/*! HUF_addBits(): + * Adds the symbol stored in HUF_CElt elt to the bitstream. + * + * @param elt The element we're adding. This is a (nbBits, value) pair. + * See the HUF_CStream_t docs for the format. + * @param idx Insert into the bitstream at this idx. + * @param kFast This is a template parameter. If the bitstream is guaranteed + * to have at least 4 unused bits after this call it may be 1, + * otherwise it must be 0. HUF_addBits() is faster when fast is set. + */ +FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast) +{ + assert(idx <= 1); + assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX); + /* This is efficient on x86-64 with BMI2 because shrx + * only reads the low 6 bits of the register. The compiler + * knows this and elides the mask. When fast is set, + * every operation can use the same value loaded from elt. + */ + bitC->bitContainer[idx] >>= HUF_getNbBits(elt); + bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt); + /* We only read the low 8 bits of bitC->bitPos[idx] so it + * doesn't matter that the high bits have noise from the value. + */ + bitC->bitPos[idx] += HUF_getNbBitsFast(elt); + assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); + /* The last 4-bits of elt are dirty if fast is set, + * so we must not be overwriting bits that have already been + * inserted into the bit container. + */ +#if DEBUGLEVEL >= 1 + { + size_t const nbBits = HUF_getNbBits(elt); + size_t const dirtyBits = nbBits == 0 ? 0 : BIT_highbit32((U32)nbBits) + 1; + (void)dirtyBits; + /* Middle bits are 0. */ + assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0); + /* We didn't overwrite any bits in the bit container. */ + assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); + (void)dirtyBits; + } +#endif +} + +FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC) +{ + bitC->bitContainer[1] = 0; + bitC->bitPos[1] = 0; +} + +/*! HUF_mergeIndex1() : + * Merges the bit container @ index 1 into the bit container @ index 0 + * and zeros the bit container @ index 1. + */ +FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC) +{ + assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER); + bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF); + bitC->bitContainer[0] |= bitC->bitContainer[1]; + bitC->bitPos[0] += bitC->bitPos[1]; + assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER); +} + +/*! HUF_flushBits() : +* Flushes the bits in the bit container @ index 0. +* +* @post bitPos will be < 8. +* @param kFast If kFast is set then we must know a-priori that +* the bit container will not overflow. 
+*/ +FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast) +{ + /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */ + size_t const nbBits = bitC->bitPos[0] & 0xFF; + size_t const nbBytes = nbBits >> 3; + /* The top nbBits bits of bitContainer are the ones we need. */ + size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits); + /* Mask bitPos to account for the bytes we consumed. */ + bitC->bitPos[0] &= 7; + assert(nbBits > 0); + assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8); + assert(bitC->ptr <= bitC->endPtr); + MEM_writeLEST(bitC->ptr, bitContainer); + bitC->ptr += nbBytes; + assert(!kFast || bitC->ptr <= bitC->endPtr); + if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; + /* bitContainer doesn't need to be modified because the leftover + * bits are already the top bitPos bits. And we don't care about + * noise in the lower values. + */ +} + +/*! HUF_endMark() + * @returns The Huffman stream end mark: A 1-bit value = 1. + */ +static HUF_CElt HUF_endMark(void) +{ + HUF_CElt endMark; + HUF_setNbBits(&endMark, 1); + HUF_setValue(&endMark, 1); + return endMark; +} + +/*! HUF_closeCStream() : + * @return Size of CStream, in bytes, + * or 0 if it could not fit into dstBuffer */ +static size_t HUF_closeCStream(HUF_CStream_t* bitC) +{ + HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0); + HUF_flushBits(bitC, /* kFast */ 0); + { + size_t const nbBits = bitC->bitPos[0] & 0xFF; + if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ + return (bitC->ptr - bitC->startPtr) + (nbBits > 0); + } +} + FORCE_INLINE_TEMPLATE void -HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) +HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast) { - BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); + HUF_addBits(bitCPtr, CTable[symbol], idx, fast); } -#define HUF_FLUSHBITS(s) BIT_flushBits(s) +FORCE_INLINE_TEMPLATE void +HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC, + const BYTE* ip, size_t srcSize, + const HUF_CElt* ct, + int kUnroll, int kFastFlush, int kLastFast) +{ + /* Join to kUnroll */ + int n = (int)srcSize; + int rem = n % kUnroll; + if (rem > 0) { + for (; rem > 0; --rem) { + HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0); + } + HUF_flushBits(bitC, kFastFlush); + } + assert(n % kUnroll == 0); + + /* Join to 2 * kUnroll */ + if (n % (2 * kUnroll)) { + int u; + for (u = 1; u < kUnroll; ++u) { + HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1); + } + HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast); + HUF_flushBits(bitC, kFastFlush); + n -= kUnroll; + } + assert(n % (2 * kUnroll) == 0); + + for (; n>0; n-= 2 * kUnroll) { + /* Encode kUnroll symbols into the bitstream @ index 0. */ + int u; + for (u = 1; u < kUnroll; ++u) { + HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1); + } + HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast); + HUF_flushBits(bitC, kFastFlush); + /* Encode kUnroll symbols into the bitstream @ index 1. + * This allows us to start filling the bit container + * without any data dependencies. 
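/*
 * Illustrative sketch, not part of the upstream patch: the two-container
 * scheme above is the generic two-accumulator trick for breaking a serial
 * dependency chain so an out-of-order core can run both chains at once. The
 * same idea in miniature, for a byte sum:
 */
static unsigned sum_two_chains(const unsigned char* p, size_t n)
{
    unsigned acc0 = 0, acc1 = 0; /* two independent chains */
    size_t i;
    for (i = 0; i + 2 <= n; i += 2) {
        acc0 += p[i];     /* chain 0 */
        acc1 += p[i + 1]; /* chain 1: no dependency on acc0 */
    }
    if (i < n) acc0 += p[i];
    return acc0 + acc1;   /* merge once at the end */
}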
+ */ + HUF_zeroIndex1(bitC); + for (u = 1; u < kUnroll; ++u) { + HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1); + } + HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast); + /* Merge bitstream @ index 1 into the bitstream @ index 0 */ + HUF_mergeIndex1(bitC); + HUF_flushBits(bitC, kFastFlush); + } + assert(n == 0); + +} -#define HUF_FLUSHBITS_1(stream) \ - if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) +/** + * Returns a tight upper bound on the output space needed by Huffman + * with 8 bytes buffer to handle over-writes. If the output is at least + * this large we don't need to do bounds checks during Huffman encoding. + */ +static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog) +{ + return ((srcSize * tableLog) >> 3) + 8; +} -#define HUF_FLUSHBITS_2(stream) \ - if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) FORCE_INLINE_TEMPLATE size_t HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { + U32 const tableLog = (U32)CTable[0]; + HUF_CElt const* ct = CTable + 1; const BYTE* ip = (const BYTE*) src; BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; - size_t n; - BIT_CStream_t bitC; + HUF_CStream_t bitC; /* init */ if (dstSize < 8) return 0; /* not enough space to compress */ - { size_t const initErr = BIT_initCStream(&bitC, op, (size_t)(oend-op)); + { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op)); if (HUF_isError(initErr)) return 0; } - n = srcSize & ~3; /* join to mod 4 */ - switch (srcSize & 3) - { - case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); - HUF_FLUSHBITS_2(&bitC); - /* fall-through */ - case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); - HUF_FLUSHBITS_1(&bitC); - /* fall-through */ - case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); - HUF_FLUSHBITS(&bitC); - /* fall-through */ - case 0 : /* fall-through */ - default: break; - } - - for (; n>0; n-=4) { /* note : n&3==0 at this stage */ - HUF_encodeSymbol(&bitC, ip[n- 1], CTable); - HUF_FLUSHBITS_1(&bitC); - HUF_encodeSymbol(&bitC, ip[n- 2], CTable); - HUF_FLUSHBITS_2(&bitC); - HUF_encodeSymbol(&bitC, ip[n- 3], CTable); - HUF_FLUSHBITS_1(&bitC); - HUF_encodeSymbol(&bitC, ip[n- 4], CTable); - HUF_FLUSHBITS(&bitC); + if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11) + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 
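/*
 * Worked example, not part of the upstream patch: HUF_tightCompressBound()
 * above is what makes the kFastFlush = 1 branches here safe. For example,
 * srcSize = 1000 with tableLog = 11 can never emit more than
 *
 *     ((1000 * 11) >> 3) + 8  =  1375 + 8  =  1383 bytes,
 *
 * so when dstSize is at least that large (and tableLog <= 11), every
 * per-symbol bounds check is skipped.
 */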
2 : 4, /* kFast */ 0, /* kLastFast */ 0); + else { + if (MEM_32bits()) { + switch (tableLog) { + case 11: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 10: ZSTD_FALLTHROUGH; + case 9: ZSTD_FALLTHROUGH; + case 8: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + case 7: ZSTD_FALLTHROUGH; + default: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + } + } else { + switch (tableLog) { + case 11: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 10: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + case 9: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 8: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 7: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0); + break; + case 6: ZSTD_FALLTHROUGH; + default: + HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1); + break; + } + } } + assert(bitC.ptr <= bitC.endPtr); - return BIT_closeCStream(&bitC); + return HUF_closeCStream(&bitC); } #if DYNAMIC_BMI2 -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) @@ -667,9 +1071,13 @@ HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { - return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); + return HUF_compress1X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); } +size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) +{ + return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); +} static size_t HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, @@ -689,8 +1097,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); - if (cSize==0) return 0; - assert(cSize <= 65535); + if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart, (U16)cSize); op += cSize; } @@ -698,8 +1105,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, ip += segmentSize; assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); - if (cSize==0) return 0; - assert(cSize <= 65535); + if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; } @@ -707,8 +1113,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, ip += segmentSize; assert(op <= oend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, 
(size_t)(oend-op), ip, segmentSize, CTable, bmi2) ); - if (cSize==0) return 0; - assert(cSize <= 65535); + if (cSize == 0 || cSize > 65535) return 0; MEM_writeLE16(ostart+4, (U16)cSize); op += cSize; } @@ -717,7 +1122,7 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, assert(op <= oend); assert(ip <= iend); { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, bmi2) ); - if (cSize==0) return 0; + if (cSize == 0 || cSize > 65535) return 0; op += cSize; } @@ -726,7 +1131,12 @@ HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) { - return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); + return HUF_compress4X_usingCTable_bmi2(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0); +} + +size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2) +{ + return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, bmi2); } typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; @@ -750,35 +1160,38 @@ static size_t HUF_compressCTable_internal( typedef struct { unsigned count[HUF_SYMBOLVALUE_MAX + 1]; - HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1]; + HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)]; union { HUF_buildCTable_wksp_tables buildCTable_wksp; HUF_WriteCTableWksp writeCTable_wksp; + U32 hist_wksp[HIST_WKSP_SIZE_U32]; } wksps; } HUF_compress_tables_t; +#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 +#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ + /* HUF_compress_internal() : * `workSpace_align4` must be aligned on 4-bytes boundaries, - * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */ + * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ static size_t HUF_compress_internal (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, HUF_nbStreams_e nbStreams, - void* workSpace_align4, size_t wkspSize, + void* workSpace, size_t wkspSize, HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat, - const int bmi2) + const int bmi2, unsigned suspectUncompressible) { - HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4; + HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t)); BYTE* const ostart = (BYTE*)dst; BYTE* const oend = ostart + dstSize; BYTE* op = ostart; - HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE); - assert(((size_t)workSpace_align4 & 3) == 0); /* must be aligned on 4-bytes boundaries */ + HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE); /* checks & inits */ - if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall); + if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall); if (!srcSize) return 0; /* Uncompressed */ if (!dstSize) return 0; /* cannot fit anything within dst budget */ if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ @@ -794,8 +1207,23 @@ HUF_compress_internal (void* dst, size_t dstSize, nbStreams, oldHufTable, bmi2); } + /* If uncompressible data is suspected, do a smaller sampling first */ + DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2); + if (suspectUncompressible && srcSize >= 
(SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { + size_t largestTotal = 0; + { unsigned maxSymbolValueBegin = maxSymbolValue; + CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); + largestTotal += largestBegin; + } + { unsigned maxSymbolValueEnd = maxSymbolValue; + CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); + largestTotal += largestEnd; + } + if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */ + } + /* Scan input and build symbol stats */ - { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) ); + { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) ); if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ } @@ -820,9 +1248,12 @@ HUF_compress_internal (void* dst, size_t dstSize, &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp)); CHECK_F(maxBits); huffLog = (U32)maxBits; - /* Zero unused symbols in CTable, so we can check it for validity */ - ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0, - sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt))); + } + /* Zero unused symbols in CTable, so we can check it for validity */ + { + size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue); + size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt); + ZSTD_memset(table->CTable + ctableSize, 0, unusedSize); } /* Write table description header */ @@ -859,19 +1290,20 @@ size_t HUF_compress1X_wksp (void* dst, size_t dstSize, return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/); + NULL, NULL, 0, 0 /*bmi2*/, 0); } size_t HUF_compress1X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, + int bmi2, unsigned suspectUncompressible) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_singleStream, workSpace, wkspSize, hufTable, - repeat, preferRepeat, bmi2); + repeat, preferRepeat, bmi2, suspectUncompressible); } /* HUF_compress4X_repeat(): @@ -885,22 +1317,23 @@ size_t HUF_compress4X_wksp (void* dst, size_t dstSize, return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, - NULL, NULL, 0, 0 /*bmi2*/); + NULL, NULL, 0, 0 /*bmi2*/, 0); } /* HUF_compress4X_repeat(): * compress input using 4 streams. 
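The sampling fast-path added to HUF_compress_internal() above histograms only the first and last SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE bytes and returns 0 (emit uncompressed) when both samples look flat, before paying for the full histogram pass. A minimal standalone sketch of the same idea, with the threshold copied from the patch but none of zstd's actual types or helpers:

```c
#include <stddef.h>

#define SAMPLE_SIZE  4096   /* SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE */
#define SAMPLE_RATIO 10     /* SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO, must be >= 2 */

/* Count of the most frequent byte value in src[0..size),
 * the same quantity HIST_count_simple() reports. */
static size_t largest_count(const unsigned char* src, size_t size)
{
    size_t count[256] = {0};
    size_t largest = 0, i;
    for (i = 0; i < size; i++) count[src[i]]++;
    for (i = 0; i < 256; i++) if (count[i] > largest) largest = count[i];
    return largest;
}

/* Returns 1 when the head and tail samples look near-uniform: if even the
 * most frequent byte of each 4 KiB sample is rare, Huffman coding is very
 * unlikely to pay off on the whole input. */
static int probably_incompressible(const unsigned char* src, size_t srcSize)
{
    size_t largestTotal;
    if (srcSize < (size_t)SAMPLE_SIZE * SAMPLE_RATIO) return 0; /* too small to sample */
    largestTotal  = largest_count(src, SAMPLE_SIZE);
    largestTotal += largest_count(src + srcSize - SAMPLE_SIZE, SAMPLE_SIZE);
    return largestTotal <= ((2 * SAMPLE_SIZE) >> 7) + 4;  /* threshold from the patch: 68 */
}
```

The two 4 KiB samples are cheap, so on genuinely random input the compressor can bail out without ever scanning the whole block.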
+ * consider skipping quickly * re-use an existing huffman compression table */ size_t HUF_compress4X_repeat (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog, void* workSpace, size_t wkspSize, - HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2) + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible) { return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, HUF_fourStreams, workSpace, wkspSize, - hufTable, repeat, preferRepeat, bmi2); + hufTable, repeat, preferRepeat, bmi2, suspectUncompressible); } #ifndef ZSTD_NO_UNUSED_FUNCTIONS @@ -918,7 +1351,7 @@ size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { - unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; + U64 workSpace[HUF_WORKSPACE_SIZE_U64]; return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } @@ -926,7 +1359,7 @@ size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned huffLog) { - unsigned workSpace[HUF_WORKSPACE_SIZE_U32]; + U64 workSpace[HUF_WORKSPACE_SIZE_U64]; return HUF_compress4X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace)); } diff --git a/thirdparty/zstd/compress/zstd_compress.c b/thirdparty/zstd/compress/zstd_compress.c index b7ee2980a7..f06456af92 100644 --- a/thirdparty/zstd/compress/zstd_compress.c +++ b/thirdparty/zstd/compress/zstd_compress.c @@ -12,7 +12,6 @@ * Dependencies ***************************************/ #include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */ -#include "../common/cpu.h" #include "../common/mem.h" #include "hist.h" /* HIST_countFast_wksp */ #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */ @@ -42,6 +41,18 @@ # define ZSTD_COMPRESS_HEAPMODE 0 #endif +/*! + * ZSTD_HASHLOG3_MAX : + * Maximum size of the hash table dedicated to find 3-bytes matches, + * in log format, aka 17 => 1 << 17 == 128Ki positions. + * This structure is only used in zstd_opt. + * Since allocation is centralized for all strategies, it has to be known here. + * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3, + * so that zstd_opt.c doesn't need to know about this constant. + */ +#ifndef ZSTD_HASHLOG3_MAX +# define ZSTD_HASHLOG3_MAX 17 +#endif /*-************************************* * Helper functions @@ -72,10 +83,10 @@ struct ZSTD_CDict_s { ZSTD_customMem customMem; U32 dictID; int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */ - ZSTD_useRowMatchFinderMode_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use - * row-based matchfinder. Unless the cdict is reloaded, we will use - * the same greedy/lazy matchfinder at compression time. - */ + ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use + * row-based matchfinder. Unless the cdict is reloaded, we will use + * the same greedy/lazy matchfinder at compression time. 
+ */ }; /* typedef'd to ZSTD_CDict within "zstd.h" */ ZSTD_CCtx* ZSTD_createCCtx(void) @@ -88,7 +99,7 @@ static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager) assert(cctx != NULL); ZSTD_memset(cctx, 0, sizeof(*cctx)); cctx->customMem = memManager; - cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); + cctx->bmi2 = ZSTD_cpuSupportsBmi2(); { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters); assert(!ZSTD_isError(err)); (void)err; @@ -214,35 +225,42 @@ static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) { /* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder * for this compression. */ -static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_useRowMatchFinderMode_e mode) { - assert(mode != ZSTD_urm_auto); - return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_urm_enableRowMatchFinder); +static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) { + assert(mode != ZSTD_ps_auto); + return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable); } -/* Returns row matchfinder usage enum given an initial mode and cParams */ -static ZSTD_useRowMatchFinderMode_e ZSTD_resolveRowMatchFinderMode(ZSTD_useRowMatchFinderMode_e mode, - const ZSTD_compressionParameters* const cParams) { -#if !defined(ZSTD_NO_INTRINSICS) && (defined(__SSE2__) || defined(__ARM_NEON)) +/* Returns row matchfinder usage given an initial mode and cParams */ +static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { +#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON) int const kHasSIMD128 = 1; #else int const kHasSIMD128 = 0; #endif - if (mode != ZSTD_urm_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */ - mode = ZSTD_urm_disableRowMatchFinder; + if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */ + mode = ZSTD_ps_disable; if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode; if (kHasSIMD128) { - if (cParams->windowLog > 14) mode = ZSTD_urm_enableRowMatchFinder; + if (cParams->windowLog > 14) mode = ZSTD_ps_enable; } else { - if (cParams->windowLog > 17) mode = ZSTD_urm_enableRowMatchFinder; + if (cParams->windowLog > 17) mode = ZSTD_ps_enable; } return mode; } +/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */ +static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { + if (mode != ZSTD_ps_auto) return mode; + return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable; +} + /* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, - const ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + const ZSTD_paramSwitch_e useRowMatchFinder, const U32 forDDSDict) { - assert(useRowMatchFinder != ZSTD_urm_auto); + assert(useRowMatchFinder != ZSTD_ps_auto); /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate. * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder. 
*/ @@ -253,16 +271,10 @@ static int ZSTD_allocateChainTable(const ZSTD_strategy strategy, * enable long distance matching (wlog >= 27, strategy >= btopt). * Returns 0 otherwise. */ -static U32 ZSTD_CParams_shouldEnableLdm(const ZSTD_compressionParameters* const cParams) { - return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27; -} - -/* Returns 1 if compression parameters are such that we should - * enable blockSplitter (wlog >= 17, strategy >= btopt). - * Returns 0 otherwise. - */ -static U32 ZSTD_CParams_useBlockSplitter(const ZSTD_compressionParameters* const cParams) { - return cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17; +static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode, + const ZSTD_compressionParameters* const cParams) { + if (mode != ZSTD_ps_auto) return mode; + return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable; } static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( @@ -274,20 +286,13 @@ static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams( cctxParams.cParams = cParams; /* Adjust advanced params according to cParams */ - if (ZSTD_CParams_shouldEnableLdm(&cParams)) { - DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including LDM into cctx params"); - cctxParams.ldmParams.enableLdm = 1; - /* LDM is enabled by default for optimal parser and window size >= 128MB */ + cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams); + if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams); assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog); assert(cctxParams.ldmParams.hashRateLog < 32); } - - if (ZSTD_CParams_useBlockSplitter(&cParams)) { - DEBUGLOG(4, "ZSTD_makeCCtxParamsFromCParams(): Including block splitting into cctx params"); - cctxParams.splitBlocks = 1; - } - + cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams); cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams); assert(!ZSTD_checkCParams(cParams)); return cctxParams; @@ -348,7 +353,10 @@ static void ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams, ZSTD_par */ cctxParams->compressionLevel = compressionLevel; cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, ¶ms->cParams); - DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d", cctxParams->useRowMatchFinder); + cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, ¶ms->cParams); + cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, ¶ms->cParams); + DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d", + cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm); } size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params) @@ -518,9 +526,9 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) return bounds; case ZSTD_c_literalCompressionMode: - ZSTD_STATIC_ASSERT(ZSTD_lcm_auto < ZSTD_lcm_huffman && ZSTD_lcm_huffman < ZSTD_lcm_uncompressed); - bounds.lowerBound = ZSTD_lcm_auto; - bounds.upperBound = ZSTD_lcm_uncompressed; + ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable); + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; return bounds; 
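The literalCompressionMode, useBlockSplitter and useRowMatchFinder bounds above all reflect the new shared ZSTD_paramSwitch_e tri-state (ZSTD_ps_auto < ZSTD_ps_enable < ZSTD_ps_disable): a setting stays auto until the final cParams are known, then the ZSTD_resolve*() helpers pin it exactly once. A standalone sketch of the row-matchfinder resolver, with enum values matching the patch but a placeholder params struct:

```c
typedef enum { ZSTD_ps_auto = 0, ZSTD_ps_enable = 1, ZSTD_ps_disable = 2 } paramSwitch_e;
typedef struct { unsigned windowLog; int strategySupportsRows; } cparams_t; /* stand-in */

#if defined(__SSE2__) || defined(__ARM_NEON)
#  define HAS_SIMD128 1   /* plays the role of kHasSIMD128 in the hunk above */
#else
#  define HAS_SIMD128 0
#endif

static paramSwitch_e resolve_row_matchfinder(paramSwitch_e mode, const cparams_t* cp)
{
    if (mode != ZSTD_ps_auto) return mode;       /* an explicit user choice always wins */
    if (!cp->strategySupportsRows) return ZSTD_ps_disable;
    /* with 128-bit SIMD the row matchfinder pays off from 16 KiB windows,
     * without it only above 128 KiB */
    return (cp->windowLog > (HAS_SIMD128 ? 14u : 17u)) ? ZSTD_ps_enable : ZSTD_ps_disable;
}
```

Resolving once, at reset time, is what makes the later `assert(... != ZSTD_ps_auto)` checks scattered through this file valid.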
case ZSTD_c_targetCBlockSize: @@ -549,14 +557,14 @@ ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param) bounds.upperBound = 1; return bounds; - case ZSTD_c_splitBlocks: - bounds.lowerBound = 0; - bounds.upperBound = 1; + case ZSTD_c_useBlockSplitter: + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; return bounds; case ZSTD_c_useRowMatchFinder: - bounds.lowerBound = (int)ZSTD_urm_auto; - bounds.upperBound = (int)ZSTD_urm_enableRowMatchFinder; + bounds.lowerBound = (int)ZSTD_ps_auto; + bounds.upperBound = (int)ZSTD_ps_disable; return bounds; case ZSTD_c_deterministicRefPrefix: @@ -625,7 +633,7 @@ static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param) case ZSTD_c_stableOutBuffer: case ZSTD_c_blockDelimiters: case ZSTD_c_validateSequences: - case ZSTD_c_splitBlocks: + case ZSTD_c_useBlockSplitter: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: default: @@ -680,7 +688,7 @@ size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value) case ZSTD_c_stableOutBuffer: case ZSTD_c_blockDelimiters: case ZSTD_c_validateSequences: - case ZSTD_c_splitBlocks: + case ZSTD_c_useBlockSplitter: case ZSTD_c_useRowMatchFinder: case ZSTD_c_deterministicRefPrefix: break; @@ -780,7 +788,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, } case ZSTD_c_literalCompressionMode : { - const ZSTD_literalCompressionMode_e lcm = (ZSTD_literalCompressionMode_e)value; + const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value; BOUNDCHECK(ZSTD_c_literalCompressionMode, lcm); CCtxParams->literalCompressionMode = lcm; return CCtxParams->literalCompressionMode; @@ -835,7 +843,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, return CCtxParams->enableDedicatedDictSearch; case ZSTD_c_enableLongDistanceMatching : - CCtxParams->ldmParams.enableLdm = (value!=0); + CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value; return CCtxParams->ldmParams.enableLdm; case ZSTD_c_ldmHashLog : @@ -857,8 +865,8 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, return CCtxParams->ldmParams.bucketSizeLog; case ZSTD_c_ldmHashRateLog : - RETURN_ERROR_IF(value > ZSTD_WINDOWLOG_MAX - ZSTD_HASHLOG_MIN, - parameter_outOfBound, "Param out of bounds!"); + if (value!=0) /* 0 ==> default */ + BOUNDCHECK(ZSTD_c_ldmHashRateLog, value); CCtxParams->ldmParams.hashRateLog = value; return CCtxParams->ldmParams.hashRateLog; @@ -894,14 +902,14 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams, CCtxParams->validateSequences = value; return CCtxParams->validateSequences; - case ZSTD_c_splitBlocks: - BOUNDCHECK(ZSTD_c_splitBlocks, value); - CCtxParams->splitBlocks = value; - return CCtxParams->splitBlocks; + case ZSTD_c_useBlockSplitter: + BOUNDCHECK(ZSTD_c_useBlockSplitter, value); + CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value; + return CCtxParams->useBlockSplitter; case ZSTD_c_useRowMatchFinder: BOUNDCHECK(ZSTD_c_useRowMatchFinder, value); - CCtxParams->useRowMatchFinder = (ZSTD_useRowMatchFinderMode_e)value; + CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value; return CCtxParams->useRowMatchFinder; case ZSTD_c_deterministicRefPrefix: @@ -1032,8 +1040,8 @@ size_t ZSTD_CCtxParams_getParameter( case ZSTD_c_validateSequences : *value = (int)CCtxParams->validateSequences; break; - case ZSTD_c_splitBlocks : - *value = (int)CCtxParams->splitBlocks; + case ZSTD_c_useBlockSplitter : + *value = (int)CCtxParams->useBlockSplitter; break; case ZSTD_c_useRowMatchFinder : *value = 
(int)CCtxParams->useRowMatchFinder; @@ -1067,7 +1075,7 @@ size_t ZSTD_CCtx_setParametersUsingCCtxParams( return 0; } -ZSTDLIB_API size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) +size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize) { DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %u bytes", (U32)pledgedSrcSize); RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong, @@ -1147,14 +1155,14 @@ size_t ZSTD_CCtx_loadDictionary_advanced( return 0; } -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference( +size_t ZSTD_CCtx_loadDictionary_byReference( ZSTD_CCtx* cctx, const void* dict, size_t dictSize) { return ZSTD_CCtx_loadDictionary_advanced( cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto); } -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) { return ZSTD_CCtx_loadDictionary_advanced( cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto); @@ -1324,7 +1332,7 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar, break; case ZSTD_cpm_createCDict: /* Assume a small source size when creating a dictionary - * with an unkown source size. + * with an unknown source size. */ if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN) srcSize = minSrcSize; @@ -1398,7 +1406,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( srcSizeHint = CCtxParams->srcSizeHint; } cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode); - if (CCtxParams->ldmParams.enableLdm) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; + if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG; ZSTD_overrideCParams(&cParams, &CCtxParams->cParams); assert(!ZSTD_checkCParams(cParams)); /* srcSizeHint == 0 means 0 */ @@ -1407,7 +1415,7 @@ ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams( static size_t ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, - const ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + const ZSTD_paramSwitch_e useRowMatchFinder, const U32 enableDedicatedDictSearch, const U32 forCCtx) { @@ -1440,7 +1448,7 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams, /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */ ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4); - assert(useRowMatchFinder != ZSTD_urm_auto); + assert(useRowMatchFinder != ZSTD_ps_auto); DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u", (U32)chainSize, (U32)hSize, (U32)h3Size); @@ -1451,12 +1459,12 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( const ZSTD_compressionParameters* cParams, const ldmParams_t* ldmParams, const int isStatic, - const ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + const ZSTD_paramSwitch_e useRowMatchFinder, const size_t buffInSize, const size_t buffOutSize, const U64 pledgedSrcSize) { - size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << cParams->windowLog), pledgedSrcSize)); + size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize); size_t const blockSize = MIN(ZSTD_BLOCKSIZE_MAX, windowSize); U32 const divider = (cParams->minMatch==3) ? 
3 : 4; size_t const maxNbSeq = blockSize / divider; @@ -1469,7 +1477,7 @@ static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal( size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams); size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize); - size_t const ldmSeqSpace = ldmParams->enableLdm ? + size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ? ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0; @@ -1496,8 +1504,8 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params) { ZSTD_compressionParameters const cParams = ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict); - ZSTD_useRowMatchFinderMode_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, - &cParams); + ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, + &cParams); RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only."); /* estimateCCtxSize is for one-shot compression. So no buffers should @@ -1514,9 +1522,9 @@ size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams) /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ size_t noRowCCtxSize; size_t rowCCtxSize; - initialParams.useRowMatchFinder = ZSTD_urm_disableRowMatchFinder; + initialParams.useRowMatchFinder = ZSTD_ps_disable; noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); - initialParams.useRowMatchFinder = ZSTD_urm_enableRowMatchFinder; + initialParams.useRowMatchFinder = ZSTD_ps_enable; rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams); return MAX(noRowCCtxSize, rowCCtxSize); } else { @@ -1561,7 +1569,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params) size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered) ? ZSTD_compressBound(blockSize) + 1 : 0; - ZSTD_useRowMatchFinderMode_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ¶ms->cParams); + ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, ¶ms->cParams); return ZSTD_estimateCCtxSize_usingCCtxParams_internal( &cParams, ¶ms->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize, @@ -1576,9 +1584,9 @@ size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams) /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */ size_t noRowCCtxSize; size_t rowCCtxSize; - initialParams.useRowMatchFinder = ZSTD_urm_disableRowMatchFinder; + initialParams.useRowMatchFinder = ZSTD_ps_disable; noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); - initialParams.useRowMatchFinder = ZSTD_urm_enableRowMatchFinder; + initialParams.useRowMatchFinder = ZSTD_ps_enable; rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams); return MAX(noRowCCtxSize, rowCCtxSize); } else { @@ -1713,7 +1721,7 @@ static size_t ZSTD_reset_matchState(ZSTD_matchState_t* ms, ZSTD_cwksp* ws, const ZSTD_compressionParameters* cParams, - const ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + const ZSTD_paramSwitch_e useRowMatchFinder, const ZSTD_compResetPolicy_e crp, const ZSTD_indexResetPolicy_e forceResetIndex, const ZSTD_resetTarget_e forWho) @@ -1728,7 +1736,7 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, size_t const h3Size = hashLog3 ? 
((size_t)1) << hashLog3 : 0; DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset); - assert(useRowMatchFinder != ZSTD_urm_auto); + assert(useRowMatchFinder != ZSTD_ps_auto); if (forceResetIndex == ZSTDirp_reset) { ZSTD_window_init(&ms->window); ZSTD_cwksp_mark_tables_dirty(ws); @@ -1774,8 +1782,8 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms, if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize); } { /* Switch to 32-entry rows if searchLog is 5 (or more) */ - U32 const rowLog = cParams->searchLog < 5 ? 4 : 5; - assert(cParams->hashLog > rowLog); + U32 const rowLog = BOUNDED(4, cParams->searchLog, 6); + assert(cParams->hashLog >= rowLog); ms->rowHashLog = cParams->hashLog - rowLog; } } @@ -1824,8 +1832,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_buffered_policy_e const zbuff) { ZSTD_cwksp* const ws = &zc->workspace; - DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d", - (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder); + DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d", + (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter); assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams))); zc->isFirstBlock = 1; @@ -1836,8 +1844,10 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->appliedParams = *params; params = &zc->appliedParams; - assert(params->useRowMatchFinder != ZSTD_urm_auto); - if (params->ldmParams.enableLdm) { + assert(params->useRowMatchFinder != ZSTD_ps_auto); + assert(params->useBlockSplitter != ZSTD_ps_auto); + assert(params->ldmParams.enableLdm != ZSTD_ps_auto); + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* Adjust long distance matching parameters */ ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, ¶ms->cParams); assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog); @@ -1900,7 +1910,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t)); RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock"); zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE); - RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate entropyWorkspace"); + RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace"); } } ZSTD_cwksp_clear(ws); @@ -1937,7 +1947,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize); /* ldm bucketOffsets table */ - if (params->ldmParams.enableLdm) { + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* TODO: avoid memset? */ size_t const numBuckets = ((size_t)1) << (params->ldmParams.hashLog - @@ -1964,7 +1974,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, ZSTD_resetTarget_CCtx), ""); /* ldm hash table */ - if (params->ldmParams.enableLdm) { + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* TODO: avoid memset? 
*/ size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog; zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t)); @@ -1976,8 +1986,8 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc, zc->ldmState.loadedDictEnd = 0; } - assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace)); DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws)); + assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace)); zc->initialized = 1; @@ -2115,7 +2125,7 @@ static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx, } ZSTD_cwksp_mark_tables_dirty(&cctx->workspace); - assert(params.useRowMatchFinder != ZSTD_urm_auto); + assert(params.useRowMatchFinder != ZSTD_ps_auto); /* copy tables */ { size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */) @@ -2209,8 +2219,12 @@ static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, { ZSTD_CCtx_params params = dstCCtx->requestedParams; /* Copy only compression parameters related to tables. */ params.cParams = srcCCtx->appliedParams.cParams; - assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_urm_auto); + assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto); + assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto); + assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto); params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder; + params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter; + params.ldmParams = srcCCtx->appliedParams.ldmParams; params.fParams = fParams; ZSTD_resetCCtx_internal(dstCCtx, ¶ms, pledgedSrcSize, /* loadedDictSize */ 0, @@ -2296,6 +2310,8 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa int const nbRows = (int)size / ZSTD_ROWSIZE; int cellNb = 0; int rowNb; + /* Protect special index values < ZSTD_WINDOW_START_INDEX. */ + U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX; assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */ assert(size < (1U<<31)); /* can be casted to int */ @@ -2315,12 +2331,17 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa for (rowNb=0 ; rowNb < nbRows ; rowNb++) { int column; for (column=0; column<ZSTD_ROWSIZE; column++) { - if (preserveMark) { - U32 const adder = (table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) ? reducerValue : 0; - table[cellNb] += adder; + U32 newVal; + if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) { + /* This write is pointless, but is required(?) for the compiler + * to auto-vectorize the loop. 
*/ + newVal = ZSTD_DUBT_UNSORTED_MARK; + } else if (table[cellNb] < reducerThreshold) { + newVal = 0; + } else { + newVal = table[cellNb] - reducerValue; } - if (table[cellNb] < reducerValue) table[cellNb] = 0; - else table[cellNb] -= reducerValue; + table[cellNb] = newVal; cellNb++; } } } @@ -2375,9 +2396,9 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) assert(nbSeq <= seqStorePtr->maxNbSeq); for (u=0; u<nbSeq; u++) { U32 const llv = sequences[u].litLength; - U32 const mlv = sequences[u].matchLength; + U32 const mlv = sequences[u].mlBase; llCodeTable[u] = (BYTE)ZSTD_LLcode(llv); - ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset); + ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase); mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv); } if (seqStorePtr->longLengthType==ZSTD_llt_literalLength) @@ -2399,11 +2420,13 @@ static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams) /* ZSTD_blockSplitterEnabled(): * Returns if block splitting param is being used * If used, compression will do best effort to split a block in order to improve compression ratio. + * At the time this function is called, the parameter must be finalized. * Returns 1 if true, 0 otherwise. */ static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams) { - DEBUGLOG(5, "ZSTD_blockSplitterEnabled(splitBlocks=%d)", cctxParams->splitBlocks); - return (cctxParams->splitBlocks != 0); + DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter); + assert(cctxParams->useBlockSplitter != ZSTD_ps_auto); + return (cctxParams->useBlockSplitter == ZSTD_ps_enable); } /* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types @@ -2546,6 +2569,7 @@ ZSTD_buildSequencesStatistics(seqStore_t* seqStorePtr, size_t nbSeq, * compresses both literals and sequences * Returns compressed size of block, or a zstd error. 
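The ZSTD_reduceTable_internal() hunk just above trades two data-dependent branches for a single unconditional store per table cell, which is what lets compilers auto-vectorize the loop (hence the deliberately "pointless" write the comment apologizes for). A standalone sketch of the pattern, with placeholder constants:

```c
#include <stdint.h>
#include <stddef.h>

#define UNSORTED_MARK 1  /* stand-in for ZSTD_DUBT_UNSORTED_MARK */
#define START_INDEX   2  /* stand-in for ZSTD_WINDOW_START_INDEX */

static void reduce_table(uint32_t* table, size_t size, uint32_t reducerValue, int preserveMark)
{
    uint32_t const threshold = reducerValue + START_INDEX; /* keep special small indices intact */
    size_t i;
    for (i = 0; i < size; i++) {
        uint32_t newVal;
        if (preserveMark && table[i] == UNSORTED_MARK) {
            newVal = UNSORTED_MARK;        /* rewrite the mark so every path assigns newVal */
        } else if (table[i] < threshold) {
            newVal = 0;                    /* index would underflow: reset */
        } else {
            newVal = table[i] - reducerValue;
        }
        table[i] = newVal;                 /* one unconditional store per cell */
    }
}
```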
*/ +#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20 MEM_STATIC size_t ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, const ZSTD_entropyCTables_t* prevEntropy, @@ -2580,15 +2604,19 @@ ZSTD_entropyCompressSeqStore_internal(seqStore_t* seqStorePtr, /* Compress literals */ { const BYTE* const literals = seqStorePtr->litStart; + size_t const numSequences = seqStorePtr->sequences - seqStorePtr->sequencesStart; + size_t const numLiterals = seqStorePtr->lit - seqStorePtr->litStart; + /* Base suspicion of uncompressibility on ratio of literals to sequences */ + unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO); size_t const litSize = (size_t)(seqStorePtr->lit - literals); size_t const cSize = ZSTD_compressLiterals( &prevEntropy->huf, &nextEntropy->huf, cctxParams->cParams.strategy, - ZSTD_disableLiteralsCompression(cctxParams), + ZSTD_literalsCompressionIsDisabled(cctxParams), op, dstCapacity, literals, litSize, entropyWorkspace, entropyWkspSize, - bmi2); + bmi2, suspectUncompressible); FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed"); assert(cSize <= dstCapacity); op += cSize; @@ -2693,7 +2721,7 @@ ZSTD_entropyCompressSeqStore(seqStore_t* seqStorePtr, /* ZSTD_selectBlockCompressor() : * Not static, but internal use only (used by long distance matcher) * assumption : strat is a valid strategy */ -ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_useRowMatchFinderMode_e useRowMatchFinder, ZSTD_dictMode_e dictMode) +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode) { static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = { { ZSTD_compressBlock_fast /* default for 0 */, @@ -2758,7 +2786,7 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_useRow ZSTD_compressBlock_lazy2_dedicatedDictSearch_row } }; DEBUGLOG(4, "Selecting a row-based matchfinder"); - assert(useRowMatchFinder != ZSTD_urm_auto); + assert(useRowMatchFinder != ZSTD_ps_auto); selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy]; } else { selectedCompressor = blockCompressor[(int)dictMode][(int)strat]; @@ -2825,7 +2853,7 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i]; } if (zc->externSeqStore.pos < zc->externSeqStore.size) { - assert(!zc->appliedParams.ldmParams.enableLdm); + assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable); /* Updates ldmSeqStore.pos */ lastLLSize = ZSTD_ldm_blockCompress(&zc->externSeqStore, @@ -2834,7 +2862,7 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize) zc->appliedParams.useRowMatchFinder, src, srcSize); assert(zc->externSeqStore.pos <= zc->externSeqStore.size); - } else if (zc->appliedParams.ldmParams.enableLdm) { + } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { rawSeqStore_t ldmSeqStore = kNullRawSeqStore; ldmSeqStore.seq = zc->ldmSequences; @@ -2882,9 +2910,9 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1); ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); for (i = 0; i < seqStoreSeqSize; ++i) { - U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM; + U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM; outSeqs[i].litLength = 
seqStoreSeqs[i].litLength; - outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH; + outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH; outSeqs[i].rep = 0; if (i == seqStore->longLengthPos) { @@ -2895,9 +2923,9 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) } } - if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) { + if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) { /* Derive the correct offset corresponding to a repcode */ - outSeqs[i].rep = seqStoreSeqs[i].offset; + outSeqs[i].rep = seqStoreSeqs[i].offBase; if (outSeqs[i].litLength != 0) { rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1]; } else { @@ -2911,9 +2939,9 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc) outSeqs[i].offset = rawOffset; /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode so we provide seqStoreSeqs[i].offset - 1 */ - updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, - seqStoreSeqs[i].offset - 1, - seqStoreSeqs[i].litLength == 0); + ZSTD_updateRep(updatedRepcodes.rep, + seqStoreSeqs[i].offBase - 1, + seqStoreSeqs[i].litLength == 0); literalsRead += outSeqs[i].litLength; } /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0. @@ -3027,7 +3055,7 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi const ZSTD_hufCTables_t* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_hufCTablesMetadata_t* hufMetadata, - const int disableLiteralsCompression, + const int literalsCompressionIsDisabled, void* workspace, size_t wkspSize) { BYTE* const wkspStart = (BYTE*)workspace; @@ -3045,7 +3073,7 @@ static size_t ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSi /* Prepare nextEntropy assuming reusing the existing table */ ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf)); - if (disableLiteralsCompression) { + if (literalsCompressionIsDisabled) { DEBUGLOG(5, "set_basic - disabled"); hufMetadata->hType = set_basic; return 0; @@ -3192,7 +3220,7 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize, &prevEntropy->huf, &nextEntropy->huf, &entropyMetadata->hufMetadata, - ZSTD_disableLiteralsCompression(cctxParams), + ZSTD_literalsCompressionIsDisabled(cctxParams), workspace, wkspSize); FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed"); entropyMetadata->fseMetadata.fseTablesSize = @@ -3235,7 +3263,7 @@ static size_t ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSiz static size_t ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type, const BYTE* codeTable, size_t nbSeq, unsigned maxCode, const FSE_CTable* fseCTable, - const U32* additionalBits, + const U8* additionalBits, short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, void* workspace, size_t wkspSize) { @@ -3319,19 +3347,20 @@ static size_t ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize, * * Returns the estimated compressed size of the seqStore, or a zstd error. 
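Throughout these hunks the seqDef fields are renamed offset→offBase and matchLength→mlBase to make their bias explicit: mlBase stores matchLength - MINMATCH, so consumers such as ZSTD_copyBlockSequences() and ZSTD_countSeqStoreMatchBytes() add MINMATCH back. A reduced sketch of that accounting (simplified struct; the 0x10000 long-length correction is left out):

```c
#include <stddef.h>
#include <stdint.h>

#define MINMATCH 3

typedef struct { uint16_t mlBase; } seqdef_t;  /* simplified stand-in for seqDef */

/* Source bytes covered by matches: every stored length is biased by MINMATCH. */
static size_t count_match_bytes(const seqdef_t* seqs, size_t nbSeqs)
{
    size_t matchBytes = 0, i;
    for (i = 0; i < nbSeqs; i++)
        matchBytes += (size_t)seqs[i].mlBase + MINMATCH;
    return matchBytes;
}
```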
*/ -static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, const ZSTD_CCtx* zc) { - ZSTD_entropyCTablesMetadata_t entropyMetadata; +static size_t ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc) { + ZSTD_entropyCTablesMetadata_t* entropyMetadata = &zc->blockSplitCtx.entropyMetadata; + DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()"); FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, - &entropyMetadata, + entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), ""); return ZSTD_estimateBlockSize(seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart), seqStore->ofCode, seqStore->llCode, seqStore->mlCode, (size_t)(seqStore->sequences - seqStore->sequencesStart), - &zc->blockState.nextCBlock->entropy, &entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, - (int)(entropyMetadata.hufMetadata.hType == set_compressed), 1); + &zc->blockState.nextCBlock->entropy, entropyMetadata, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE, + (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1); } /* Returns literals bytes represented in a seqStore */ @@ -3356,7 +3385,7 @@ static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) { size_t i; for (i = 0; i < nbSeqs; ++i) { seqDef seq = seqStore->sequencesStart[i]; - matchBytes += seq.matchLength + MINMATCH; + matchBytes += seq.mlBase + MINMATCH; if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) { matchBytes += 0x10000; } @@ -3405,11 +3434,13 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore, /** * Returns the raw offset represented by the combination of offCode, ll0, and repcode history. - * offCode must be an offCode representing a repcode, therefore in the range of [0, 2]. + * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq(). */ -static U32 ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0) { - U32 const adjustedOffCode = offCode + ll0; - assert(offCode < ZSTD_REP_NUM); +static U32 +ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0) +{ + U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0; /* [ 0 - 3 ] */ + assert(STORED_IS_REPCODE(offCode)); if (adjustedOffCode == ZSTD_REP_NUM) { /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */ assert(rep[0] > 0); @@ -3420,11 +3451,16 @@ static U32 ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 /** * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise - * due to emission of RLE/raw blocks that disturb the offset history, and replaces any repcodes within - * the seqStore that may be invalid. + * due to emission of RLE/raw blocks that disturb the offset history, + * and replaces any repcodes within the seqStore that may be invalid. + * + * dRepcodes are updated as would be on the decompression side. + * cRepcodes are updated exactly in accordance with the seqStore. * - * dRepcodes are updated as would be on the decompression side. cRepcodes are updated exactly in - * accordance with the seqStore. 
+ * Note : this function assumes seq->offBase respects the following numbering scheme : + * 0 : invalid + * 1-3 : repcode 1-3 + * 4+ : real_offset+3 */ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes, seqStore_t* const seqStore, U32 const nbSeq) { @@ -3432,9 +3468,9 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_ for (; idx < nbSeq; ++idx) { seqDef* const seq = seqStore->sequencesStart + idx; U32 const ll0 = (seq->litLength == 0); - U32 offCode = seq->offset - 1; - assert(seq->offset > 0); - if (offCode <= ZSTD_REP_MOVE) { + U32 const offCode = OFFBASE_TO_STORED(seq->offBase); + assert(seq->offBase > 0); + if (STORED_IS_REPCODE(offCode)) { U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0); U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0); /* Adjust simulated decompression repcode history if we come across a mismatch. Replace @@ -3442,14 +3478,14 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_ * repcode history. */ if (dRawOffset != cRawOffset) { - seq->offset = cRawOffset + ZSTD_REP_NUM; + seq->offBase = cRawOffset + ZSTD_REP_NUM; } } /* Compression repcode history is always updated with values directly from the unmodified seqStore. * Decompression repcode history may use modified seq->offset value taken from compression repcode history. */ - *dRepcodes = ZSTD_updateRep(dRepcodes->rep, seq->offset - 1, ll0); - *cRepcodes = ZSTD_updateRep(cRepcodes->rep, offCode, ll0); + ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0); + ZSTD_updateRep(cRepcodes->rep, offCode, ll0); } } @@ -3458,11 +3494,13 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_ * * Returns the total size of that block (including header) or a ZSTD error code. 
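The numbering scheme the note above documents (0 invalid, 1-3 for repcodes 1-3, real_offset+3 otherwise) can be made concrete with a few helpers. The macro names below are illustrative, not the patch's STORE_*/STORED_* macros, which encode the same decisions in their own spelling:

```c
#include <assert.h>

#define REP_NUM 3  /* ZSTD_REP_NUM */

#define OFFBASE_FROM_REPCODE(r) (assert((r) >= 1u && (r) <= 3u), (unsigned)(r))
#define OFFBASE_FROM_OFFSET(o)  (assert((o) > 0u), (unsigned)(o) + REP_NUM)
#define OFFBASE_IS_REPCODE(ob)  ((ob) >= 1u && (ob) <= (unsigned)REP_NUM)
#define OFFBASE_TO_OFFSET(ob)   (assert((ob) > (unsigned)REP_NUM), (ob) - REP_NUM)

int main(void)
{
    unsigned const ob = OFFBASE_FROM_OFFSET(100u);  /* raw offset 100 is stored as 103 */
    assert(!OFFBASE_IS_REPCODE(ob) && OFFBASE_TO_OFFSET(ob) == 100u);
    assert(OFFBASE_IS_REPCODE(OFFBASE_FROM_REPCODE(2u)));
    return 0;
}
```

Keeping 0 invalid means an uninitialized offBase can never be mistaken for a repcode, which is what the `assert(seq->offBase > 0)` in this hunk relies on.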
*/ -static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, - repcodes_t* const dRep, repcodes_t* const cRep, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, - U32 lastBlock, U32 isPartition) { +static size_t +ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore, + repcodes_t* const dRep, repcodes_t* const cRep, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 lastBlock, U32 isPartition) +{ const U32 rleMaxLength = 25; BYTE* op = (BYTE*)dst; const BYTE* ip = (const BYTE*)src; @@ -3471,9 +3509,11 @@ static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */ repcodes_t const dRepOriginal = *dRep; + DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock"); if (isPartition) ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart)); + RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit"); cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore, &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy, &zc->appliedParams, @@ -3499,9 +3539,6 @@ static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const return 0; } - if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) - zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; - if (cSeqsSize == 0) { cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock); FORWARD_IF_ERROR(cSize, "Nocompress block failed"); @@ -3518,6 +3555,10 @@ static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const cSize = ZSTD_blockHeaderSize + cSeqsSize; DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize); } + + if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid) + zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check; + return cSize; } @@ -3528,7 +3569,6 @@ typedef struct { } seqStoreSplits; #define MIN_SEQUENCES_BLOCK_SPLITTING 300 -#define MAX_NB_SPLITS 196 /* Helper function to perform the recursive search for block splits. * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half. @@ -3539,29 +3579,33 @@ typedef struct { * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING). * In practice, recursion depth usually doesn't go beyond 4. * - * Furthermore, the number of splits is capped by MAX_NB_SPLITS. At MAX_NB_SPLITS == 196 with the current existing blockSize + * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize * maximum of 128 KB, this value is actually impossible to reach. 
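The recursion just described is compact enough to show on its own: split the range of sequences in half, keep the split only when the two halves' estimated cost beats the whole, and recurse, bounded by the 300-sequence floor and the 196-split cap. A sketch with the estimator passed as a callback (in the patch it is ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(), working on scratch seqStores in zc->blockSplitCtx):

```c
#include <stddef.h>

#define MIN_SEQUENCES_BLOCK_SPLITTING 300
#define MAX_NB_BLOCK_SPLITS           196

typedef struct { size_t splits[MAX_NB_BLOCK_SPLITS]; size_t idx; } splits_t;
typedef size_t (*cost_fn)(size_t startIdx, size_t endIdx, void* ctx);

static void derive_splits(splits_t* s, size_t startIdx, size_t endIdx,
                          cost_fn cost, void* ctx)
{
    size_t const mid = (startIdx + endIdx) / 2;
    if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || s->idx >= MAX_NB_BLOCK_SPLITS)
        return;  /* range too small to bother, or split budget exhausted */
    if (cost(startIdx, mid, ctx) + cost(mid, endIdx, ctx) < cost(startIdx, endIdx, ctx)) {
        derive_splits(s, startIdx, mid, cost, ctx);   /* refine the first half */
        if (s->idx < MAX_NB_BLOCK_SPLITS)
            s->splits[s->idx++] = mid;                /* record this split point */
        derive_splits(s, mid, endIdx, cost, ctx);     /* then the second half */
    }
}
```

Because each level halves the range, the depth is at most about log2(maxNbSeq/300) ≈ 10 for a 128 KB block, matching the comment above.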
*/ -static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, - const ZSTD_CCtx* zc, const seqStore_t* origSeqStore) { - seqStore_t fullSeqStoreChunk; - seqStore_t firstHalfSeqStore; - seqStore_t secondHalfSeqStore; +static void +ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx, + ZSTD_CCtx* zc, const seqStore_t* origSeqStore) +{ + seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk; + seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore; + seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore; size_t estimatedOriginalSize; size_t estimatedFirstHalfSize; size_t estimatedSecondHalfSize; size_t midIdx = (startIdx + endIdx)/2; - if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= MAX_NB_SPLITS) { + if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) { + DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences"); return; } - ZSTD_deriveSeqStoreChunk(&fullSeqStoreChunk, origSeqStore, startIdx, endIdx); - ZSTD_deriveSeqStoreChunk(&firstHalfSeqStore, origSeqStore, startIdx, midIdx); - ZSTD_deriveSeqStoreChunk(&secondHalfSeqStore, origSeqStore, midIdx, endIdx); - estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&fullSeqStoreChunk, zc); - estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&firstHalfSeqStore, zc); - estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&secondHalfSeqStore, zc); - DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", + DEBUGLOG(4, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx); + ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx); + ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx); + ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx); + estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc); + estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc); + estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc); + DEBUGLOG(4, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu", estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize); if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) { return; @@ -3596,17 +3640,19 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq) * * Returns combined size of all blocks (which includes headers), or a ZSTD error code. 
*/ -static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, - const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq) { +static size_t +ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, + const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq) +{ size_t cSize = 0; const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; - U32 partitions[MAX_NB_SPLITS]; size_t i = 0; size_t srcBytesTotal = 0; + U32* partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */ + seqStore_t* nextSeqStore = &zc->blockSplitCtx.nextSeqStore; + seqStore_t* currSeqStore = &zc->blockSplitCtx.currSeqStore; size_t numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq); - seqStore_t nextSeqStore; - seqStore_t currSeqStore; /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two @@ -3626,6 +3672,7 @@ static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, s repcodes_t cRep; ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t)); + ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t)); DEBUGLOG(4, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)", (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, @@ -3643,36 +3690,36 @@ static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, s return cSizeSingleBlock; } - ZSTD_deriveSeqStoreChunk(&currSeqStore, &zc->seqStore, 0, partitions[0]); + ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]); for (i = 0; i <= numSplits; ++i) { size_t srcBytes; size_t cSizeChunk; U32 const lastPartition = (i == numSplits); U32 lastBlockEntireSrc = 0; - srcBytes = ZSTD_countSeqStoreLiteralsBytes(&currSeqStore) + ZSTD_countSeqStoreMatchBytes(&currSeqStore); + srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore); srcBytesTotal += srcBytes; if (lastPartition) { /* This is the final partition, need to account for possible last literals */ srcBytes += blockSize - srcBytesTotal; lastBlockEntireSrc = lastBlock; } else { - ZSTD_deriveSeqStoreChunk(&nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]); + ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]); } - cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, &currSeqStore, + cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore, &dRep, &cRep, op, dstCapacity, ip, srcBytes, lastBlockEntireSrc, 1 /* isPartition */); - DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(&currSeqStore, zc), cSizeChunk); + DEBUGLOG(5, "Estimated size: %zu actual size: %zu", ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk); FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!"); ip += srcBytes; op += cSizeChunk; dstCapacity -= cSizeChunk; cSize += cSizeChunk; - currSeqStore = nextSeqStore; + *currSeqStore = *nextSeqStore; assert(cSizeChunk <= ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize); } /* cRep and dRep may have diverged during the compression. 
If so, we use the dRep repcodes @@ -3682,14 +3729,17 @@ static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, s return cSize; } -static size_t ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, U32 lastBlock) { +static size_t +ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, U32 lastBlock) +{ const BYTE* ip = (const BYTE*)src; BYTE* op = (BYTE*)dst; U32 nbSeq; size_t cSize; DEBUGLOG(4, "ZSTD_compressBlock_splitBlock"); + assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable); { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize); FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed"); @@ -3704,15 +3754,15 @@ static size_t ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc, nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart); } - assert(zc->appliedParams.splitBlocks == 1); cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq); FORWARD_IF_ERROR(cSize, "Splitting blocks failed!"); return cSize; } -static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, - void* dst, size_t dstCapacity, - const void* src, size_t srcSize, U32 frame) +static size_t +ZSTD_compressBlock_internal(ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, U32 frame) { /* This the upper bound for the length of an rle block. * This isn't the actual upper bound. Finding the real threshold @@ -3746,12 +3796,6 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */, zc->bmi2); - if (zc->seqCollector.collectSequences) { - ZSTD_copyBlockSequences(zc); - return 0; - } - - if (frame && /* We don't want to emit our first block as a RLE even if it qualifies because * doing so will cause the decoder (cli only) to throw a "should consume all input error." 
@@ -3915,6 +3959,7 @@ static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx, ZSTD_overflowCorrectIfNeeded( ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize); ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); + ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState); /* Ensure hash/chain table insertion resumes no sooner than lowlimit */ if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit; @@ -3991,7 +4036,9 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, if (!singleSegment) op[pos++] = windowLogByte; switch(dictIDSizeCode) { - default: assert(0); /* impossible */ + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; case 0 : break; case 1 : op[pos] = (BYTE)(dictID); pos++; break; case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break; @@ -3999,7 +4046,9 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, } switch(fcsCode) { - default: assert(0); /* impossible */ + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break; case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break; case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break; @@ -4047,7 +4096,7 @@ size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSe { RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong, "wrong cctx stage"); - RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm, + RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable, parameter_unsupported, "incompatible with ldm"); cctx->externSeqStore.seq = seq; @@ -4088,7 +4137,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, ms->forceNonContiguous = 0; ms->nextToUpdate = ms->window.dictLimit; } - if (cctx->appliedParams.ldmParams.enableLdm) { + if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0); } @@ -4157,7 +4206,7 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, { const BYTE* ip = (const BYTE*) src; const BYTE* const iend = ip + srcSize; - int const loadLdmDict = params->ldmParams.enableLdm && ls != NULL; + int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL; /* Assert that the ms params match the params we're being given */ ZSTD_assertEqualCParams(params->cParams, ms->cParams); @@ -4214,8 +4263,8 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms, assert(ms->chainTable != NULL); ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE); } else { - assert(params->useRowMatchFinder != ZSTD_urm_auto); - if (params->useRowMatchFinder == ZSTD_urm_enableRowMatchFinder) { + assert(params->useRowMatchFinder != ZSTD_ps_auto); + if (params->useRowMatchFinder == ZSTD_ps_enable) { size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog) * sizeof(U16); ZSTD_memset(ms->tagTable, 0, tagTableSize); ZSTD_row_update(ms, iend-HASH_READ_SIZE); @@ -4715,7 +4764,7 @@ size_t ZSTD_estimateCDictSize_advanced( + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small * in case we are using DDS with row-hash.
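The two ZSTD_writeFrameHeader() switches in this hunk gain ZSTD_FALLTHROUGH on their unreachable default so implicit-fallthrough warnings stay quiet once assert() compiles away. A sketch of the pattern with a stand-in macro and a simplified little-endian field writer (not zstd's MEM_writeLE* helpers):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#if defined(__GNUC__) && __GNUC__ >= 7
#  define FALLTHROUGH __attribute__((fallthrough))   /* stand-in for ZSTD_FALLTHROUGH */
#else
#  define FALLTHROUGH ((void)0)
#endif

/* Writes a 0/1/2/4-byte little-endian dictID field; sizeCode is always in
 * [0,3], so default is unreachable, but in release builds (NDEBUG) control
 * would flow straight into case 0 and must do so without a warning. */
static size_t write_dict_id(uint8_t* op, int sizeCode, uint32_t dictID)
{
    size_t pos = 0;
    switch (sizeCode) {
    default:
        assert(0);  /* impossible */
        FALLTHROUGH;
    case 0: break;
    case 1: op[pos++] = (uint8_t)dictID; break;
    case 2: op[pos++] = (uint8_t)dictID; op[pos++] = (uint8_t)(dictID >> 8); break;
    case 3: { int i; for (i = 0; i < 4; i++) op[pos++] = (uint8_t)(dictID >> (8*i)); } break;
    }
    return pos;
}
```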
*/ - + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_urm_auto, &cParams), + + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams), /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *)))); @@ -4792,7 +4841,7 @@ static size_t ZSTD_initCDict_internal( static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_compressionParameters cParams, - ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + ZSTD_paramSwitch_e useRowMatchFinder, U32 enableDedicatedDictSearch, ZSTD_customMem customMem) { @@ -4842,7 +4891,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, &cctxParams, customMem); } -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2( +ZSTD_CDict* ZSTD_createCDict_advanced2( const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, @@ -4947,7 +4996,7 @@ const ZSTD_CDict* ZSTD_initStaticCDict( ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams) { - ZSTD_useRowMatchFinderMode_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_urm_auto, &cParams); + ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams); /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */ size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0); size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) @@ -5403,7 +5452,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, zcs->outBuffFlushedSize = 0; zcs->streamStage = zcss_flush; /* pass-through to flush stage */ } - /* fall-through */ + ZSTD_FALLTHROUGH; case zcss_flush: DEBUGLOG(5, "flush stage"); assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered); @@ -5524,17 +5573,8 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx, dictSize, mode); } - if (ZSTD_CParams_shouldEnableLdm(¶ms.cParams)) { - /* Enable LDM by default for optimal parser and window size >= 128MB */ - DEBUGLOG(4, "LDM enabled by default (window size >= 128MB, strategy >= btopt)"); - params.ldmParams.enableLdm = 1; - } - - if (ZSTD_CParams_useBlockSplitter(¶ms.cParams)) { - DEBUGLOG(4, "Block splitter enabled by default (window size >= 128K, strategy >= btopt)"); - params.splitBlocks = 1; - } - + params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, ¶ms.cParams); + params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, ¶ms.cParams); params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, ¶ms.cParams); #ifdef ZSTD_MULTITHREAD @@ -5715,39 +5755,39 @@ typedef struct { size_t posInSrc; /* Number of bytes given by sequences provided so far */ } ZSTD_sequencePosition; -/* Returns a ZSTD error code if sequence is not valid */ -static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength, - size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) { - size_t offsetBound; - U32 windowSize = 1 << windowLog; +/* ZSTD_validateSequence() : + * @offCode : is presumed to follow format required by ZSTD_storeSeq() + * @returns a ZSTD error code if sequence is not valid + */ +static size_t +ZSTD_validateSequence(U32 offCode, U32 matchLength, + size_t posInSrc, U32 windowLog, 
size_t dictSize) +{ + U32 const windowSize = 1 << windowLog; /* posInSrc represents the amount of data the decoder would decode up to this point. * As long as the amount of data decoded is less than or equal to window size, offsets may be * larger than the total length of output decoded in order to reference the dict, even larger than * window size. After output surpasses windowSize, we're limited to windowSize offsets again. */ - offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize; - RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!"); - RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small"); + size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize; + RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!"); + RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small"); return 0; } /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */ -static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) { - U32 offCode = rawOffset + ZSTD_REP_MOVE; - U32 repCode = 0; +static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) +{ + U32 offCode = STORE_OFFSET(rawOffset); if (!ll0 && rawOffset == rep[0]) { - repCode = 1; + offCode = STORE_REPCODE_1; } else if (rawOffset == rep[1]) { - repCode = 2 - ll0; + offCode = STORE_REPCODE(2 - ll0); } else if (rawOffset == rep[2]) { - repCode = 3 - ll0; + offCode = STORE_REPCODE(3 - ll0); } else if (ll0 && rawOffset == rep[0] - 1) { - repCode = 3; - } - if (repCode) { - /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */ - offCode = repCode - 1; + offCode = STORE_REPCODE_3; } return offCode; } @@ -5755,18 +5795,17 @@ static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 /* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
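A quick worked example of the rewritten ZSTD_finalizeOffCode() above. The function is file-static, so this is only a thought experiment with a made-up repcode history; the expected values follow directly from the branches shown in the hunk:

    U32 rep[3] = { 8, 4, 2 };  /* hypothetical repcode history, most recent first */
    /* with litLength > 0, i.e. ll0 == 0: */
    assert(ZSTD_finalizeOffCode(  8, rep, 0) == STORE_REPCODE_1);   /* rawOffset hits rep[0] */
    assert(ZSTD_finalizeOffCode(  4, rep, 0) == STORE_REPCODE_2);   /* rawOffset hits rep[1] */
    assert(ZSTD_finalizeOffCode(100, rep, 0) == STORE_OFFSET(100)); /* novel offset, stored shifted by ZSTD_REP_MOVE */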
*/ -static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize) { +static size_t +ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, + ZSTD_sequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize) +{ U32 idx = seqPos->idx; BYTE const* ip = (BYTE const*)(src); const BYTE* const iend = ip + blockSize; repcodes_t updatedRepcodes; U32 dictSize; - U32 litLength; - U32 matchLength; - U32 ll0; - U32 offCode; if (cctx->cdict) { dictSize = (U32)cctx->cdict->dictContentSize; @@ -5777,23 +5816,22 @@ static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZS } ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) { - litLength = inSeqs[idx].litLength; - matchLength = inSeqs[idx].matchLength; - ll0 = litLength == 0; - offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0); - updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); + U32 const litLength = inSeqs[idx].litLength; + U32 const ll0 = (litLength == 0); + U32 const matchLength = inSeqs[idx].matchLength; + U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength); if (cctx->appliedParams.validateSequences) { seqPos->posInSrc += litLength + matchLength; FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize, - cctx->appliedParams.cParams.minMatch), + cctx->appliedParams.cParams.windowLog, dictSize), "Sequence validation failed"); } RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, "Not enough memory allocated. Try adjusting ZSTD_c_minMatch."); - ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH); + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength); ip += matchLength + litLength; } ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t)); @@ -5820,9 +5858,11 @@ static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZS * avoid splitting a match, or to avoid splitting a match such that it would produce a match * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block. 
*/ -static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, - const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, - const void* src, size_t blockSize) { +static size_t +ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, + const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, + const void* src, size_t blockSize) +{ U32 idx = seqPos->idx; U32 startPosInSequence = seqPos->posInSequence; U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize; @@ -5832,10 +5872,6 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq repcodes_t updatedRepcodes; U32 bytesAdjustment = 0; U32 finalMatchSplit = 0; - U32 litLength; - U32 matchLength; - U32 rawOffset; - U32 offCode; if (cctx->cdict) { dictSize = cctx->cdict->dictContentSize; @@ -5849,9 +5885,10 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t)); while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) { const ZSTD_Sequence currSeq = inSeqs[idx]; - litLength = currSeq.litLength; - matchLength = currSeq.matchLength; - rawOffset = currSeq.offset; + U32 litLength = currSeq.litLength; + U32 matchLength = currSeq.matchLength; + U32 const rawOffset = currSeq.offset; + U32 offCode; /* Modify the sequence depending on where endPosInSequence lies */ if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) { @@ -5904,22 +5941,21 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq } } /* Check if this offset can be represented with a repcode */ - { U32 ll0 = (litLength == 0); + { U32 const ll0 = (litLength == 0); offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0); - updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); + ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0); } if (cctx->appliedParams.validateSequences) { seqPos->posInSrc += litLength + matchLength; FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc, - cctx->appliedParams.cParams.windowLog, dictSize, - cctx->appliedParams.cParams.minMatch), + cctx->appliedParams.cParams.windowLog, dictSize), "Sequence validation failed"); } DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength); RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation, "Not enough memory allocated. 
Try adjusting ZSTD_c_minMatch."); - ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH); + ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength); ip += matchLength + litLength; } DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength); @@ -5944,7 +5980,8 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos, const ZSTD_Sequence* const inSeqs, size_t inSeqsSize, const void* src, size_t blockSize); -static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) { +static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) +{ ZSTD_sequenceCopier sequenceCopier = NULL; assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode)); if (mode == ZSTD_sf_explicitBlockDelimiters) { @@ -5958,12 +5995,15 @@ static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) /* Compress, block-by-block, all of the sequences given. * - * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error. + * Returns the cumulative size of all compressed blocks (including their headers), + * otherwise a ZSTD error. */ -static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, - void* dst, size_t dstCapacity, - const ZSTD_Sequence* inSeqs, size_t inSeqsSize, - const void* src, size_t srcSize) { +static size_t +ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const ZSTD_Sequence* inSeqs, size_t inSeqsSize, + const void* src, size_t srcSize) +{ size_t cSize = 0; U32 lastBlock; size_t blockSize; @@ -5973,7 +6013,7 @@ static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, BYTE const* ip = (BYTE const*)src; BYTE* op = (BYTE*)dst; - ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); + ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters); DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize); /* Special case: empty frame */ @@ -6073,7 +6113,8 @@ static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx, size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity, const ZSTD_Sequence* inSeqs, size_t inSeqsSize, - const void* src, size_t srcSize) { + const void* src, size_t srcSize) +{ BYTE* op = (BYTE*)dst; size_t cSize = 0; size_t compressedBlocksSize = 0; @@ -6140,119 +6181,12 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) /*-===== Pre-defined compression levels =====-*/ +#include "clevels.h" -#define ZSTD_MAX_CLEVEL 22 int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; } int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; } -static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { -{ /* "default" - for any srcSize > 256 KB */ - /* W, C, H, S, L, TL, strat */ - { 19, 12, 13, 1, 6, 1, ZSTD_fast }, /* base for negative levels */ - { 19, 13, 14, 1, 7, 0, ZSTD_fast }, /* level 1 */ - { 20, 15, 16, 1, 6, 0, ZSTD_fast }, /* level 2 */ - { 21, 16, 17, 1, 5, 0, ZSTD_dfast }, /* level 3 */ - { 21, 18, 18, 1, 5, 0, ZSTD_dfast }, /* level 4 */ - { 21, 18, 19, 2, 5, 2, ZSTD_greedy }, /* level 5 */ - { 21, 19, 19, 3, 5, 4, ZSTD_greedy }, /* level 6 */ - 
{ 21, 19, 19, 3, 5, 8, ZSTD_lazy }, /* level 7 */ - { 21, 19, 19, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ - { 21, 19, 20, 4, 5, 16, ZSTD_lazy2 }, /* level 9 */ - { 22, 20, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ - { 22, 21, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ - { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ - { 22, 21, 22, 5, 5, 32, ZSTD_btlazy2 }, /* level 13 */ - { 22, 22, 23, 5, 5, 32, ZSTD_btlazy2 }, /* level 14 */ - { 22, 23, 23, 6, 5, 32, ZSTD_btlazy2 }, /* level 15 */ - { 22, 22, 22, 5, 5, 48, ZSTD_btopt }, /* level 16 */ - { 23, 23, 22, 5, 4, 64, ZSTD_btopt }, /* level 17 */ - { 23, 23, 22, 6, 3, 64, ZSTD_btultra }, /* level 18 */ - { 23, 24, 22, 7, 3,256, ZSTD_btultra2}, /* level 19 */ - { 25, 25, 23, 7, 3,256, ZSTD_btultra2}, /* level 20 */ - { 26, 26, 24, 7, 3,512, ZSTD_btultra2}, /* level 21 */ - { 27, 27, 25, 9, 3,999, ZSTD_btultra2}, /* level 22 */ -}, -{ /* for srcSize <= 256 KB */ - /* W, C, H, S, L, T, strat */ - { 18, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ - { 18, 13, 14, 1, 6, 0, ZSTD_fast }, /* level 1 */ - { 18, 14, 14, 1, 5, 0, ZSTD_dfast }, /* level 2 */ - { 18, 16, 16, 1, 4, 0, ZSTD_dfast }, /* level 3 */ - { 18, 16, 17, 2, 5, 2, ZSTD_greedy }, /* level 4.*/ - { 18, 18, 18, 3, 5, 2, ZSTD_greedy }, /* level 5.*/ - { 18, 18, 19, 3, 5, 4, ZSTD_lazy }, /* level 6.*/ - { 18, 18, 19, 4, 4, 4, ZSTD_lazy }, /* level 7 */ - { 18, 18, 19, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ - { 18, 18, 19, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ - { 18, 18, 19, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ - { 18, 18, 19, 5, 4, 12, ZSTD_btlazy2 }, /* level 11.*/ - { 18, 19, 19, 7, 4, 12, ZSTD_btlazy2 }, /* level 12.*/ - { 18, 18, 19, 4, 4, 16, ZSTD_btopt }, /* level 13 */ - { 18, 18, 19, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ - { 18, 18, 19, 6, 3,128, ZSTD_btopt }, /* level 15.*/ - { 18, 19, 19, 6, 3,128, ZSTD_btultra }, /* level 16.*/ - { 18, 19, 19, 8, 3,256, ZSTD_btultra }, /* level 17.*/ - { 18, 19, 19, 6, 3,128, ZSTD_btultra2}, /* level 18.*/ - { 18, 19, 19, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ - { 18, 19, 19, 10, 3,512, ZSTD_btultra2}, /* level 20.*/ - { 18, 19, 19, 12, 3,512, ZSTD_btultra2}, /* level 21.*/ - { 18, 19, 19, 13, 3,999, ZSTD_btultra2}, /* level 22.*/ -}, -{ /* for srcSize <= 128 KB */ - /* W, C, H, S, L, T, strat */ - { 17, 12, 12, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ - { 17, 12, 13, 1, 6, 0, ZSTD_fast }, /* level 1 */ - { 17, 13, 15, 1, 5, 0, ZSTD_fast }, /* level 2 */ - { 17, 15, 16, 2, 5, 0, ZSTD_dfast }, /* level 3 */ - { 17, 17, 17, 2, 4, 0, ZSTD_dfast }, /* level 4 */ - { 17, 16, 17, 3, 4, 2, ZSTD_greedy }, /* level 5 */ - { 17, 17, 17, 3, 4, 4, ZSTD_lazy }, /* level 6 */ - { 17, 17, 17, 3, 4, 8, ZSTD_lazy2 }, /* level 7 */ - { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ - { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ - { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ - { 17, 17, 17, 5, 4, 8, ZSTD_btlazy2 }, /* level 11 */ - { 17, 18, 17, 7, 4, 12, ZSTD_btlazy2 }, /* level 12 */ - { 17, 18, 17, 3, 4, 12, ZSTD_btopt }, /* level 13.*/ - { 17, 18, 17, 4, 3, 32, ZSTD_btopt }, /* level 14.*/ - { 17, 18, 17, 6, 3,256, ZSTD_btopt }, /* level 15.*/ - { 17, 18, 17, 6, 3,128, ZSTD_btultra }, /* level 16.*/ - { 17, 18, 17, 8, 3,256, ZSTD_btultra }, /* level 17.*/ - { 17, 18, 17, 10, 3,512, ZSTD_btultra }, /* level 18.*/ - { 17, 18, 17, 5, 3,256, ZSTD_btultra2}, /* level 19.*/ - { 17, 18, 17, 7, 3,512, ZSTD_btultra2}, /* level 20.*/ - { 17, 18, 17, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ - { 17, 18, 17, 11, 3,999, 
ZSTD_btultra2}, /* level 22.*/ -}, -{ /* for srcSize <= 16 KB */ - /* W, C, H, S, L, T, strat */ - { 14, 12, 13, 1, 5, 1, ZSTD_fast }, /* base for negative levels */ - { 14, 14, 15, 1, 5, 0, ZSTD_fast }, /* level 1 */ - { 14, 14, 15, 1, 4, 0, ZSTD_fast }, /* level 2 */ - { 14, 14, 15, 2, 4, 0, ZSTD_dfast }, /* level 3 */ - { 14, 14, 14, 4, 4, 2, ZSTD_greedy }, /* level 4 */ - { 14, 14, 14, 3, 4, 4, ZSTD_lazy }, /* level 5.*/ - { 14, 14, 14, 4, 4, 8, ZSTD_lazy2 }, /* level 6 */ - { 14, 14, 14, 6, 4, 8, ZSTD_lazy2 }, /* level 7 */ - { 14, 14, 14, 8, 4, 8, ZSTD_lazy2 }, /* level 8.*/ - { 14, 15, 14, 5, 4, 8, ZSTD_btlazy2 }, /* level 9.*/ - { 14, 15, 14, 9, 4, 8, ZSTD_btlazy2 }, /* level 10.*/ - { 14, 15, 14, 3, 4, 12, ZSTD_btopt }, /* level 11.*/ - { 14, 15, 14, 4, 3, 24, ZSTD_btopt }, /* level 12.*/ - { 14, 15, 14, 5, 3, 32, ZSTD_btultra }, /* level 13.*/ - { 14, 15, 15, 6, 3, 64, ZSTD_btultra }, /* level 14.*/ - { 14, 15, 15, 7, 3,256, ZSTD_btultra }, /* level 15.*/ - { 14, 15, 15, 5, 3, 48, ZSTD_btultra2}, /* level 16.*/ - { 14, 15, 15, 6, 3,128, ZSTD_btultra2}, /* level 17.*/ - { 14, 15, 15, 7, 3,256, ZSTD_btultra2}, /* level 18.*/ - { 14, 15, 15, 8, 3,256, ZSTD_btultra2}, /* level 19.*/ - { 14, 15, 15, 8, 3,512, ZSTD_btultra2}, /* level 20.*/ - { 14, 15, 15, 9, 3,512, ZSTD_btultra2}, /* level 21.*/ - { 14, 15, 15, 10, 3,999, ZSTD_btultra2}, /* level 22.*/ -}, -}; - static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize) { ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict); diff --git a/thirdparty/zstd/compress/zstd_compress_internal.h b/thirdparty/zstd/compress/zstd_compress_internal.h index 3b04fd09f6..c406e794bd 100644 --- a/thirdparty/zstd/compress/zstd_compress_internal.h +++ b/thirdparty/zstd/compress/zstd_compress_internal.h @@ -63,7 +63,7 @@ typedef struct { } ZSTD_localDict; typedef struct { - HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)]; + HUF_CElt CTable[HUF_CTABLE_SIZE_ST(255)]; HUF_repeat repeatMode; } ZSTD_hufCTables_t; @@ -129,7 +129,7 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr, *********************************/ typedef struct { - U32 off; /* Offset code (offset + ZSTD_REP_MOVE) for the match */ + U32 off; /* Offset sumtype code for the match, using ZSTD_storeSeq() format */ U32 len; /* Raw length of match */ } ZSTD_match_t; @@ -179,7 +179,7 @@ typedef struct { U32 offCodeSumBasePrice; /* to compare to log2(offreq) */ ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */ const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */ - ZSTD_literalCompressionMode_e literalCompressionMode; + ZSTD_paramSwitch_e literalCompressionMode; } optState_t; typedef struct { @@ -199,6 +199,8 @@ typedef struct { */ } ZSTD_window_t; +#define ZSTD_WINDOW_START_INDEX 2 + typedef struct ZSTD_matchState_t ZSTD_matchState_t; #define ZSTD_ROW_HASH_CACHE_SIZE 8 /* Size of prefetching hash cache for row-based matchfinder */ @@ -264,7 +266,7 @@ typedef struct { } ldmState_t; typedef struct { - U32 enableLdm; /* 1 if enable long distance matching */ + ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. 
ZSTD_ps_auto by default */ U32 hashLog; /* Log size of hashTable */ U32 bucketSizeLog; /* Log bucket size for collision resolution, at most 8 */ U32 minMatchLength; /* Minimum match length */ @@ -295,7 +297,7 @@ struct ZSTD_CCtx_params_s { * There is no guarantee that hint is close to actual source size */ ZSTD_dictAttachPref_e attachDictPref; - ZSTD_literalCompressionMode_e literalCompressionMode; + ZSTD_paramSwitch_e literalCompressionMode; /* Multithreading: used to pass parameters to mtctx */ int nbWorkers; @@ -318,10 +320,10 @@ struct ZSTD_CCtx_params_s { int validateSequences; /* Block splitting */ - int splitBlocks; + ZSTD_paramSwitch_e useBlockSplitter; /* Param for deciding whether to use row-based matchfinder */ - ZSTD_useRowMatchFinderMode_e useRowMatchFinder; + ZSTD_paramSwitch_e useRowMatchFinder; /* Always load a dictionary in ext-dict mode (not prefix mode)? */ int deterministicRefPrefix; @@ -343,6 +345,22 @@ typedef enum { ZSTDb_buffered } ZSTD_buffered_policy_e; +/** + * Struct that contains all elements of block splitter that should be allocated + * in a wksp. + */ +#define ZSTD_MAX_NB_BLOCK_SPLITS 196 +typedef struct { + seqStore_t fullSeqStoreChunk; + seqStore_t firstHalfSeqStore; + seqStore_t secondHalfSeqStore; + seqStore_t currSeqStore; + seqStore_t nextSeqStore; + + U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS]; + ZSTD_entropyCTablesMetadata_t entropyMetadata; +} ZSTD_blockSplitCtx; + struct ZSTD_CCtx_s { ZSTD_compressionStage_e stage; int cParamsChanged; /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */ @@ -374,7 +392,7 @@ struct ZSTD_CCtx_s { ZSTD_blockState_t blockState; U32* entropyWorkspace; /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */ - /* Wether we are streaming or not */ + /* Whether we are streaming or not */ ZSTD_buffered_policy_e bufferedPolicy; /* streaming */ @@ -408,6 +426,9 @@ struct ZSTD_CCtx_s { #if ZSTD_TRACE ZSTD_TraceCtx traceCtx; #endif + + /* Workspace for block splitter */ + ZSTD_blockSplitCtx blockSplitCtx; }; typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e; @@ -442,7 +463,7 @@ typedef enum { typedef size_t (*ZSTD_blockCompressor) ( ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize); -ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_useRowMatchFinderMode_e rowMatchfinderMode, ZSTD_dictMode_e dictMode); +ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode); MEM_STATIC U32 ZSTD_LLcode(U32 litLength) @@ -476,31 +497,6 @@ MEM_STATIC U32 ZSTD_MLcode(U32 mlBase) return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase]; } -typedef struct repcodes_s { - U32 rep[3]; -} repcodes_t; - -MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0) -{ - repcodes_t newReps; - if (offset >= ZSTD_REP_NUM) { /* full offset */ - newReps.rep[2] = rep[1]; - newReps.rep[1] = rep[0]; - newReps.rep[0] = offset - ZSTD_REP_MOVE; - } else { /* repcode */ - U32 const repCode = offset + ll0; - if (repCode > 0) { /* note : if repCode==0, no change */ - U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; - newReps.rep[2] = (repCode >= 2) ? 
rep[1] : rep[2]; - newReps.rep[1] = rep[0]; - newReps.rep[0] = currentOffset; - } else { /* repCode == 0 */ - ZSTD_memcpy(&newReps, rep, sizeof(newReps)); - } - } - return newReps; -} - /* ZSTD_cParam_withinBounds: * @return 1 if value is within cParam bounds, * 0 otherwise */ @@ -549,17 +545,17 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat) return (srcSize >> minlog) + 2; } -MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams) +MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams) { switch (cctxParams->literalCompressionMode) { - case ZSTD_lcm_huffman: + case ZSTD_ps_enable: return 0; - case ZSTD_lcm_uncompressed: + case ZSTD_ps_disable: return 1; default: assert(0 /* impossible: pre-validated */); - /* fall-through */ - case ZSTD_lcm_auto: + ZSTD_FALLTHROUGH; + case ZSTD_ps_auto: return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0); } } @@ -569,7 +565,9 @@ MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParam * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single * large copies. */ -static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) { +static void +ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) +{ assert(iend > ilimit_w); if (ip <= ilimit_w) { ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap); @@ -579,14 +577,30 @@ static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const ie while (ip < iend) *op++ = *ip++; } +#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) +#define STORE_REPCODE_1 STORE_REPCODE(1) +#define STORE_REPCODE_2 STORE_REPCODE(2) +#define STORE_REPCODE_3 STORE_REPCODE(3) +#define STORE_REPCODE(r) (assert((r)>=1), assert((r)<=3), (r)-1) +#define STORE_OFFSET(o) (assert((o)>0), o + ZSTD_REP_MOVE) +#define STORED_IS_OFFSET(o) ((o) > ZSTD_REP_MOVE) +#define STORED_IS_REPCODE(o) ((o) <= ZSTD_REP_MOVE) +#define STORED_OFFSET(o) (assert(STORED_IS_OFFSET(o)), (o)-ZSTD_REP_MOVE) +#define STORED_REPCODE(o) (assert(STORED_IS_REPCODE(o)), (o)+1) /* returns ID 1,2,3 */ +#define STORED_TO_OFFBASE(o) ((o)+1) +#define OFFBASE_TO_STORED(o) ((o)-1) + /*! ZSTD_storeSeq() : - * Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t. - * `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes). - * `mlBase` : matchLength - MINMATCH + * Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t. + * @offBase_minus1 : Users should employ macros STORE_REPCODE_X and STORE_OFFSET(). + * @matchLength : must be >= MINMATCH * Allowed to overread literals up to litLimit.
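The STORE_* macros added just above pack a sum type into a single U32: repcode ids 1..3 occupy values 0..2, and real offsets are shifted up by ZSTD_REP_MOVE (which equals 2, since ZSTD_REP_NUM is 3). A small numeric check of the round trip, valid wherever these macros are in scope:

    U32 const storedOff = STORE_OFFSET(100); /* == 102 : raw offset + ZSTD_REP_MOVE  */
    U32 const storedRep = STORE_REPCODE_1;   /* == 0   : repcode ids 1..3 map to 0..2 */
    assert(STORED_IS_OFFSET(storedOff) && STORED_OFFSET(storedOff) == 100);
    assert(STORED_IS_REPCODE(storedRep) && STORED_REPCODE(storedRep) == 1);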
*/ -HINT_INLINE UNUSED_ATTR -void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase) +HINT_INLINE UNUSED_ATTR void +ZSTD_storeSeq(seqStore_t* seqStorePtr, + size_t litLength, const BYTE* literals, const BYTE* litLimit, + U32 offBase_minus1, + size_t matchLength) { BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH; BYTE const* const litEnd = literals + litLength; @@ -595,7 +609,7 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera if (g_start==NULL) g_start = (const BYTE*)literals; /* note : index only works for compression within a single segment */ { U32 const pos = (U32)((const BYTE*)literals - g_start); DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u", - pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode); + pos, (U32)litLength, (U32)matchLength, (U32)offBase_minus1); } #endif assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq); @@ -626,19 +640,59 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera seqStorePtr->sequences[0].litLength = (U16)litLength; /* match offset */ - seqStorePtr->sequences[0].offset = offCode + 1; + seqStorePtr->sequences[0].offBase = STORED_TO_OFFBASE(offBase_minus1); /* match Length */ - if (mlBase>0xFFFF) { - assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ - seqStorePtr->longLengthType = ZSTD_llt_matchLength; - seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + assert(matchLength >= MINMATCH); + { size_t const mlBase = matchLength - MINMATCH; + if (mlBase>0xFFFF) { + assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */ + seqStorePtr->longLengthType = ZSTD_llt_matchLength; + seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + } + seqStorePtr->sequences[0].mlBase = (U16)mlBase; } - seqStorePtr->sequences[0].matchLength = (U16)mlBase; seqStorePtr->sequences++; } +/* ZSTD_updateRep() : + * updates in-place @rep (array of repeat offsets) + * @offBase_minus1 : sum-type, with same numeric representation as ZSTD_storeSeq() + */ +MEM_STATIC void +ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0) +{ + if (STORED_IS_OFFSET(offBase_minus1)) { /* full offset */ + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = STORED_OFFSET(offBase_minus1); + } else { /* repcode */ + U32 const repCode = STORED_REPCODE(offBase_minus1) - 1 + ll0; + if (repCode > 0) { /* note : if repCode==0, no change */ + U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode]; + rep[2] = (repCode >= 2) ? rep[1] : rep[2]; + rep[1] = rep[0]; + rep[0] = currentOffset; + } else { /* repCode == 0 */ + /* nothing to do */ + } + } +} + +typedef struct repcodes_s { + U32 rep[3]; +} repcodes_t; + +MEM_STATIC repcodes_t +ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0) +{ + repcodes_t newReps; + ZSTD_memcpy(&newReps, rep, sizeof(newReps)); + ZSTD_updateRep(newReps.rep, offBase_minus1, ll0); + return newReps; +} + /*-************************************* * Match length counter @@ -651,8 +705,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val) # if STATIC_BMI2 return _tzcnt_u64(val) >> 3; # else - unsigned long r = 0; - return _BitScanForward64( &r, (U64)val ) ? 
(unsigned)(r >> 3) : 0; + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, (U64)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # endif # elif defined(__GNUC__) && (__GNUC__ >= 4) return (__builtin_ctzll((U64)val) >> 3); @@ -669,8 +729,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val) # endif } else { /* 32 bits */ # if defined(_MSC_VER) - unsigned long r=0; - return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0; + if (val != 0) { + unsigned long r; + _BitScanForward(&r, (U32)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_ctz((U32)val) >> 3); # else @@ -687,8 +753,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val) # if STATIC_BMI2 return _lzcnt_u64(val) >> 3; # else - unsigned long r = 0; - return _BitScanReverse64(&r, (U64)val) ? (unsigned)(r >> 3) : 0; + if (val != 0) { + unsigned long r; + _BitScanReverse64(&r, (U64)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # endif # elif defined(__GNUC__) && (__GNUC__ >= 4) return (__builtin_clzll(val) >> 3); @@ -702,8 +774,14 @@ static unsigned ZSTD_NbCommonBytes (size_t val) # endif } else { /* 32 bits */ # if defined(_MSC_VER) - unsigned long r = 0; - return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0; + if (val != 0) { + unsigned long r; + _BitScanReverse(&r, (unsigned long)val); + return (unsigned)(r >> 3); + } else { + /* Should not reach this code path */ + __assume(0); + } # elif defined(__GNUC__) && (__GNUC__ >= 3) return (__builtin_clz((U32)val) >> 3); # else @@ -884,9 +962,9 @@ MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window) MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window) { - return window.dictLimit == 1 && - window.lowLimit == 1 && - (window.nextSrc - window.base) == 1; + return window.dictLimit == ZSTD_WINDOW_START_INDEX && + window.lowLimit == ZSTD_WINDOW_START_INDEX && + (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX; } /** @@ -937,7 +1015,9 @@ MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window, { U32 const cycleSize = 1u << cycleLog; U32 const curr = (U32)((BYTE const*)src - window.base); - U32 const minIndexToOverflowCorrect = cycleSize + MAX(maxDist, cycleSize); + U32 const minIndexToOverflowCorrect = cycleSize + + MAX(maxDist, cycleSize) + + ZSTD_WINDOW_START_INDEX; /* Adjust the min index to backoff the overflow correction frequency, * so we don't waste too much CPU in overflow correction. If this @@ -1012,10 +1092,14 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, U32 const cycleSize = 1u << cycleLog; U32 const cycleMask = cycleSize - 1; U32 const curr = (U32)((BYTE const*)src - window->base); - U32 const currentCycle0 = curr & cycleMask; - /* Exclude zero so that newCurrent - maxDist >= 1. */ - U32 const currentCycle1 = currentCycle0 == 0 ? cycleSize : currentCycle0; - U32 const newCurrent = currentCycle1 + MAX(maxDist, cycleSize); + U32 const currentCycle = curr & cycleMask; + /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */ + U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX + ? 
MAX(cycleSize, ZSTD_WINDOW_START_INDEX) + : 0; + U32 const newCurrent = currentCycle + + currentCycleCorrection + + MAX(maxDist, cycleSize); U32 const correction = curr - newCurrent; /* maxDist must be a power of two so that: * (newCurrent & cycleMask) == (curr & cycleMask) @@ -1031,14 +1115,20 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, window->base += correction; window->dictBase += correction; - if (window->lowLimit <= correction) window->lowLimit = 1; - else window->lowLimit -= correction; - if (window->dictLimit <= correction) window->dictLimit = 1; - else window->dictLimit -= correction; + if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) { + window->lowLimit = ZSTD_WINDOW_START_INDEX; + } else { + window->lowLimit -= correction; + } + if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) { + window->dictLimit = ZSTD_WINDOW_START_INDEX; + } else { + window->dictLimit -= correction; + } /* Ensure we can still reference the full window. */ assert(newCurrent >= maxDist); - assert(newCurrent - maxDist >= 1); + assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX); /* Ensure that lowLimit and dictLimit didn't underflow. */ assert(window->lowLimit <= newCurrent); assert(window->dictLimit <= newCurrent); @@ -1149,11 +1239,12 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window, MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) { ZSTD_memset(window, 0, sizeof(*window)); - window->base = (BYTE const*)""; - window->dictBase = (BYTE const*)""; - window->dictLimit = 1; /* start from 1, so that 1st position is valid */ - window->lowLimit = 1; /* it ensures first and later CCtx usages compress the same */ - window->nextSrc = window->base + 1; /* see issue #1241 */ + window->base = (BYTE const*)" "; + window->dictBase = (BYTE const*)" "; + ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */ + window->dictLimit = ZSTD_WINDOW_START_INDEX; /* start from >0, so that 1st position is valid */ + window->lowLimit = ZSTD_WINDOW_START_INDEX; /* it ensures first and later CCtx usages compress the same */ + window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX; /* see issue #1241 */ window->nbOverflowCorrections = 0; } @@ -1206,15 +1297,15 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, */ MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog) { - U32 const maxDistance = 1U << windowLog; - U32 const lowestValid = ms->window.lowLimit; - U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; - U32 const isDictionary = (ms->loadedDictEnd != 0); + U32 const maxDistance = 1U << windowLog; + U32 const lowestValid = ms->window.lowLimit; + U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid; + U32 const isDictionary = (ms->loadedDictEnd != 0); /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't * valid for the entire block. So this check is sufficient to find the lowest valid match index. */ - U32 const matchLowest = isDictionary ? lowestValid : withinWindow; + U32 const matchLowest = isDictionary ? 
lowestValid : withinWindow; return matchLowest; } diff --git a/thirdparty/zstd/compress/zstd_compress_literals.c b/thirdparty/zstd/compress/zstd_compress_literals.c index 008337bb1b..52b0a8059a 100644 --- a/thirdparty/zstd/compress/zstd_compress_literals.c +++ b/thirdparty/zstd/compress/zstd_compress_literals.c @@ -73,7 +73,8 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, void* dst, size_t dstCapacity, const void* src, size_t srcSize, void* entropyWorkspace, size_t entropyWorkspaceSize, - const int bmi2) + const int bmi2, + unsigned suspectUncompressible) { size_t const minGain = ZSTD_minGain(srcSize, strategy); size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); @@ -105,11 +106,11 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, HUF_compress1X_repeat( ostart+lhSize, dstCapacity-lhSize, src, srcSize, HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, - (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) : + (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) : HUF_compress4X_repeat( ostart+lhSize, dstCapacity-lhSize, src, srcSize, HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize, - (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2); + (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible); if (repeat != HUF_repeat_none) { /* reused the existing table */ DEBUGLOG(5, "Reusing previous huffman table"); diff --git a/thirdparty/zstd/compress/zstd_compress_literals.h b/thirdparty/zstd/compress/zstd_compress_literals.h index 9904c0cd30..9775fb97cb 100644 --- a/thirdparty/zstd/compress/zstd_compress_literals.h +++ b/thirdparty/zstd/compress/zstd_compress_literals.h @@ -18,12 +18,14 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize); +/* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf, ZSTD_hufCTables_t* nextHuf, ZSTD_strategy strategy, int disableLiteralCompression, void* dst, size_t dstCapacity, const void* src, size_t srcSize, void* entropyWorkspace, size_t entropyWorkspaceSize, - const int bmi2); + const int bmi2, + unsigned suspectUncompressible); #endif /* ZSTD_COMPRESS_LITERALS_H */ diff --git a/thirdparty/zstd/compress/zstd_compress_sequences.c b/thirdparty/zstd/compress/zstd_compress_sequences.c index 611eabdcbb..f1e40af2ea 100644 --- a/thirdparty/zstd/compress/zstd_compress_sequences.c +++ b/thirdparty/zstd/compress/zstd_compress_sequences.c @@ -275,10 +275,11 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity, assert(nbSeq_1 > 1); assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp)); (void)entropyWorkspaceSize; - FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), ""); - { size_t const NCountSize = FSE_writeNCount(op, oend - op, wksp->norm, max, tableLog); /* overflow protected */ + FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed"); + assert(oend >= op); + { size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog); /* overflow protected */ FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed"); - FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, 
wksp->wksp, sizeof(wksp->wksp)), ""); + FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed"); return NCountSize; } } @@ -312,19 +313,19 @@ ZSTD_encodeSequences_body( FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); if (MEM_32bits()) BIT_flushBits(&blockStream); - BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); + BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]); if (MEM_32bits()) BIT_flushBits(&blockStream); if (longOffsets) { U32 const ofBits = ofCodeTable[nbSeq-1]; unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); if (extraBits) { - BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); + BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits); BIT_flushBits(&blockStream); } - BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits, + BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits, ofBits - extraBits); } else { - BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); + BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]); } BIT_flushBits(&blockStream); @@ -338,8 +339,8 @@ ZSTD_encodeSequences_body( U32 const mlBits = ML_bits[mlCode]; DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u", (unsigned)sequences[n].litLength, - (unsigned)sequences[n].matchLength + MINMATCH, - (unsigned)sequences[n].offset); + (unsigned)sequences[n].mlBase + MINMATCH, + (unsigned)sequences[n].offBase); /* 32b*/ /* 64b*/ /* (7)*/ /* (7)*/ FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */ @@ -350,18 +351,18 @@ ZSTD_encodeSequences_body( BIT_flushBits(&blockStream); /* (7)*/ BIT_addBits(&blockStream, sequences[n].litLength, llBits); if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); - BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); + BIT_addBits(&blockStream, sequences[n].mlBase, mlBits); if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream); if (longOffsets) { unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); if (extraBits) { - BIT_addBits(&blockStream, sequences[n].offset, extraBits); + BIT_addBits(&blockStream, sequences[n].offBase, extraBits); BIT_flushBits(&blockStream); /* (7)*/ } - BIT_addBits(&blockStream, sequences[n].offset >> extraBits, + BIT_addBits(&blockStream, sequences[n].offBase >> extraBits, ofBits - extraBits); /* 31 */ } else { - BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ + BIT_addBits(&blockStream, sequences[n].offBase, ofBits); /* 31 */ } BIT_flushBits(&blockStream); /* (7)*/ DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr)); @@ -398,7 +399,7 @@ ZSTD_encodeSequences_default( #if DYNAMIC_BMI2 -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t ZSTD_encodeSequences_bmi2( void* dst, size_t dstCapacity, FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable, diff --git a/thirdparty/zstd/compress/zstd_compress_superblock.c b/thirdparty/zstd/compress/zstd_compress_superblock.c index e4e45069bc..10e3378577 100644 --- a/thirdparty/zstd/compress/zstd_compress_superblock.c +++ b/thirdparty/zstd/compress/zstd_compress_superblock.c @@ -132,6 +132,7 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, 
const seqDef* const seqDef* sp = sstart; size_t matchLengthSum = 0; size_t litLengthSum = 0; + (void)(litLengthSum); /* suppress unused variable warning on some environments */ while (send-sp > 0) { ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp); litLengthSum += seqLen.litLength; @@ -324,7 +325,7 @@ static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t lit static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type, const BYTE* codeTable, unsigned maxCode, size_t nbSeq, const FSE_CTable* fseCTable, - const U32* additionalBits, + const U8* additionalBits, short const* defaultNorm, U32 defaultNormLog, U32 defaultMax, void* workspace, size_t wkspSize) { @@ -474,7 +475,7 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, /* I think there is an optimization opportunity here. * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful * since it recalculates estimate from scratch. - * For example, it would recount literal distribution and symbol codes everytime. + * For example, it would recount literal distribution and symbol codes every time. */ cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount, &nextCBlock->entropy, entropyMetadata, @@ -538,7 +539,7 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr, repcodes_t rep; ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep)); for (seq = sstart; seq < sp; ++seq) { - rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); + ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0); } ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep)); } diff --git a/thirdparty/zstd/compress/zstd_cwksp.h b/thirdparty/zstd/compress/zstd_cwksp.h index 2656d26ca2..dc3f40c80c 100644 --- a/thirdparty/zstd/compress/zstd_cwksp.h +++ b/thirdparty/zstd/compress/zstd_cwksp.h @@ -219,7 +219,7 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) { MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) { /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes * to align the beginning of tables section, as well as another n_2=[0, 63] bytes - * to align the beginning of the aligned secion. + * to align the beginning of the aligned section. * * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and * aligneds being sized in multiples of 64 bytes. @@ -243,12 +243,14 @@ MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignByt /** * Internal function. Do not use directly. - * Reserves the given number of bytes within the aligned/buffer segment of the wksp, which - * counts from the end of the wksp. (as opposed to the object/table segment) + * Reserves the given number of bytes within the aligned/buffer segment of the wksp, + * which counts from the end of the wksp (as opposed to the object/table segment). * * Returns a pointer to the beginning of that space. 
*/ -MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) { +MEM_STATIC void* +ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) +{ void* const alloc = (BYTE*)ws->allocStart - bytes; void* const bottom = ws->tableEnd; DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining", @@ -260,6 +262,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t ws->allocFailed = 1; return NULL; } + /* the area is reserved from the end of wksp. + * If it overlaps with tableValidEnd, it voids guarantees on values' range */ if (alloc < ws->tableValidEnd) { ws->tableValidEnd = alloc; } @@ -269,10 +273,12 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t /** * Moves the cwksp to the next phase, and does any necessary allocations. + * cwksp initialization must necessarily go through each phase in order. * Returns a 0 on success, or zstd error */ -MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase( - ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) { +MEM_STATIC size_t +ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) +{ assert(phase >= ws->phase); if (phase > ws->phase) { /* Going from allocating objects to allocating buffers */ @@ -295,15 +301,15 @@ MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase( { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */ void* const alloc = ws->objectEnd; size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES); - void* const end = (BYTE*)alloc + bytesToAlign; + void* const objectEnd = (BYTE*)alloc + bytesToAlign; DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign); - RETURN_ERROR_IF(end > ws->workspaceEnd, memory_allocation, + RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation, "table phase - alignment initial allocation failed!"); - ws->objectEnd = end; - ws->tableEnd = end; - ws->tableValidEnd = end; - } - } + ws->objectEnd = objectEnd; + ws->tableEnd = objectEnd; /* table area starts being empty */ + if (ws->tableValidEnd < ws->tableEnd) { + ws->tableValidEnd = ws->tableEnd; + } } } ws->phase = phase; ZSTD_cwksp_assert_internal_consistency(ws); } @@ -313,15 +319,17 @@ MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase( /** * Returns whether this object/buffer/etc was allocated in this workspace. */ -MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) { +MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) +{ return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd); } /** * Internal function. Do not use directly. */ -MEM_STATIC void* ZSTD_cwksp_reserve_internal( - ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) { +MEM_STATIC void* +ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) +{ void* alloc; if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) { return NULL; @@ -351,14 +359,16 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal( /** * Reserves and returns unaligned memory. */ -MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) { +MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) +{ return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers); } /** * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes). 
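ZSTD_cwksp_reserve_aligned() below rounds its request through ZSTD_cwksp_align() before reserving. A plausible reading of that helper, shown as a standalone sketch (the real definition lives elsewhere in this header, so treat this as an assumption about its shape):

    #include <assert.h>
    #include <stddef.h>

    /* Round size up to the next multiple of a power-of-two alignment. */
    static size_t align_up(size_t size, size_t align)
    {
        size_t const mask = align - 1;
        assert((align & mask) == 0);  /* alignment must be a power of two */
        return (size + mask) & ~mask; /* e.g. align_up(100, 64) == 128    */
    }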
*/ -MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) { +MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) +{ void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES), ZSTD_cwksp_alloc_aligned); assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0); @@ -370,7 +380,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) { * their values remain constrained, allowing us to re-use them without * memset()-ing them. */ -MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) { +MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) +{ const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned; void* alloc; void* end; @@ -408,9 +419,11 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) { /** * Aligned on sizeof(void*). + * Note : should happen only once, at workspace first initialization */ -MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) { - size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*)); +MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) +{ + size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*)); void* alloc = ws->objectEnd; void* end = (BYTE*)alloc + roundedBytes; @@ -419,15 +432,15 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) { end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE; #endif - DEBUGLOG(5, + DEBUGLOG(4, "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining", alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes); - assert(((size_t)alloc & (sizeof(void*)-1)) == 0); - assert((bytes & (sizeof(void*)-1)) == 0); + assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0); + assert(bytes % ZSTD_ALIGNOF(void*) == 0); ZSTD_cwksp_assert_internal_consistency(ws); /* we must be in the first phase, no advance is possible */ if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) { - DEBUGLOG(4, "cwksp: object alloc failed!"); + DEBUGLOG(3, "cwksp: object alloc failed!"); ws->allocFailed = 1; return NULL; } @@ -438,7 +451,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) { #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE) /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on * either size. 
*/ - alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; + alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE; if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) { __asan_unpoison_memory_region(alloc, bytes); } @@ -447,7 +460,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) { return alloc; } -MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) { +MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) +{ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty"); #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE) diff --git a/thirdparty/zstd/compress/zstd_double_fast.c b/thirdparty/zstd/compress/zstd_double_fast.c index d0d3a784dd..76933dea26 100644 --- a/thirdparty/zstd/compress/zstd_double_fast.c +++ b/thirdparty/zstd/compress/zstd_double_fast.c @@ -48,10 +48,216 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms, FORCE_INLINE_TEMPLATE -size_t ZSTD_compressBlock_doubleFast_generic( +size_t ZSTD_compressBlock_doubleFast_noDict_generic( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + void const* src, size_t srcSize, U32 const mls /* template */) +{ + ZSTD_compressionParameters const* cParams = &ms->cParams; + U32* const hashLong = ms->hashTable; + const U32 hBitsL = cParams->hashLog; + U32* const hashSmall = ms->chainTable; + const U32 hBitsS = cParams->chainLog; + const BYTE* const base = ms->window.base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* anchor = istart; + const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); + /* presumes that, if there is a dictionary, it must be using Attach mode */ + const U32 prefixLowestIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); + const BYTE* const prefixLowest = base + prefixLowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=rep[0], offset_2=rep[1]; + U32 offsetSaved = 0; + + size_t mLength; + U32 offset; + U32 curr; + + /* how many positions to search before increasing step size */ + const size_t kStepIncr = 1 << kSearchStrength; + /* the position at which to increment the step size if no match is found */ + const BYTE* nextStep; + size_t step; /* the current step size */ + + size_t hl0; /* the long hash at ip */ + size_t hl1; /* the long hash at ip1 */ + + U32 idxl0; /* the long match index for ip */ + U32 idxl1; /* the long match index for ip1 */ + + const BYTE* matchl0; /* the long match for ip */ + const BYTE* matchs0; /* the short match for ip */ + const BYTE* matchl1; /* the long match for ip1 */ + + const BYTE* ip = istart; /* the current position */ + const BYTE* ip1; /* the next position */ + + DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic"); + + /* init */ + ip += ((ip - prefixLowest) == 0); + { + U32 const current = (U32)(ip - base); + U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog); + U32 const maxRep = current - windowLow; + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Outer Loop: one iteration per match found and stored */ + while (1) { + step = 1; + nextStep = ip + kStepIncr; + ip1 = ip + step; + + if (ip1 > ilimit) { + goto _cleanup; + } + + hl0 = ZSTD_hashPtr(ip, hBitsL, 8); + idxl0 = hashLong[hl0]; + matchl0 = base + idxl0; + + /* Inner Loop: one iteration per search / position */ + do { + const size_t hs0 = ZSTD_hashPtr(ip, hBitsS, mls); + const U32 idxs0 = hashSmall[hs0]; + curr = (U32)(ip-base); 
+ matchs0 = base + idxs0; + + hashLong[hl0] = hashSmall[hs0] = curr; /* update hash tables */ + + /* check noDict repcode */ + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); + goto _match_stored; + } + + hl1 = ZSTD_hashPtr(ip1, hBitsL, 8); + + if (idxl0 > prefixLowestIndex) { + /* check prefix long match */ + if (MEM_read64(matchl0) == MEM_read64(ip)) { + mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8; + offset = (U32)(ip-matchl0); + while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */ + goto _match_found; + } + } + + idxl1 = hashLong[hl1]; + matchl1 = base + idxl1; + + if (idxs0 > prefixLowestIndex) { + /* check prefix short match */ + if (MEM_read32(matchs0) == MEM_read32(ip)) { + goto _search_next_long; + } + } + + if (ip1 >= nextStep) { + PREFETCH_L1(ip1 + 64); + PREFETCH_L1(ip1 + 128); + step++; + nextStep += kStepIncr; + } + ip = ip1; + ip1 += step; + + hl0 = hl1; + idxl0 = idxl1; + matchl0 = matchl1; + #if defined(__aarch64__) + PREFETCH_L1(ip+256); + #endif + } while (ip1 <= ilimit); + +_cleanup: + /* save reps for next block */ + rep[0] = offset_1 ? offset_1 : offsetSaved; + rep[1] = offset_2 ? offset_2 : offsetSaved; + + /* Return the last literals size */ + return (size_t)(iend - anchor); + +_search_next_long: + + /* check prefix long +1 match */ + if (idxl1 > prefixLowestIndex) { + if (MEM_read64(matchl1) == MEM_read64(ip1)) { + ip = ip1; + mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8; + offset = (U32)(ip-matchl1); + while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */ + goto _match_found; + } + } + + /* if no long +1 match, explore the short match we found */ + mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4; + offset = (U32)(ip - matchs0); + while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */ + + /* fall-through */ + +_match_found: /* requires ip, offset, mLength */ + offset_2 = offset_1; + offset_1 = offset; + + if (step < 4) { + /* It is unsafe to write this value back to the hashtable when ip1 is + * greater than or equal to the new ip we will have after we're done + * processing this match. Rather than perform that test directly + * (ip1 >= ip + mLength), which costs speed in practice, we do a simpler + * more predictable test. The minmatch even if we take a short match is + * 4 bytes, so as long as step, the distance between ip and ip1 + * (initially) is less than 4, we know ip1 < new ip. 
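offset_1 and offset_2 above form the repcode history: the two most recently used match offsets, which later sequences can reference almost for free. A small sketch of how the history evolves, with hypothetical helper names:

    #include <stdint.h>

    typedef struct { uint32_t offset_1, offset_2; } RepHistory;

    /* On a fresh full-offset match: shift, so the new offset becomes rep1
     * (mirrors `offset_2 = offset_1; offset_1 = offset;` above). */
    static void rep_push(RepHistory *r, uint32_t offset) {
        r->offset_2 = r->offset_1;
        r->offset_1 = offset;
    }

    /* On a match against offset_2: swap, so the offset just used becomes
     * rep1 (mirrors the `tmpOff` swap in the immediate-repcode loop). */
    static void rep_swap(RepHistory *r) {
        uint32_t const tmp = r->offset_2;
        r->offset_2 = r->offset_1;
        r->offset_1 = tmp;
    }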
*/ + hashLong[hl1] = (U32)(ip1 - base); + } + + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); + +_match_stored: + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Complementary insertion */ + /* done after iLimit test, as candidates could be > iend-8 */ + { U32 const indexToInsert = curr+2; + hashLong[ZSTD_hashPtr(base+indexToInsert, hBitsL, 8)] = indexToInsert; + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + hashSmall[ZSTD_hashPtr(base+indexToInsert, hBitsS, mls)] = indexToInsert; + hashSmall[ZSTD_hashPtr(ip-1, hBitsS, mls)] = (U32)(ip-1-base); + } + + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); + ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, rLength); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } + } + } +} + + +FORCE_INLINE_TEMPLATE +size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, - U32 const mls /* template */, ZSTD_dictMode_e const dictMode) + U32 const mls /* template */) { ZSTD_compressionParameters const* cParams = &ms->cParams; U32* const hashLong = ms->hashTable; @@ -72,54 +278,30 @@ size_t ZSTD_compressBlock_doubleFast_generic( U32 offsetSaved = 0; const ZSTD_matchState_t* const dms = ms->dictMatchState; - const ZSTD_compressionParameters* const dictCParams = - dictMode == ZSTD_dictMatchState ? - &dms->cParams : NULL; - const U32* const dictHashLong = dictMode == ZSTD_dictMatchState ? - dms->hashTable : NULL; - const U32* const dictHashSmall = dictMode == ZSTD_dictMatchState ? - dms->chainTable : NULL; - const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ? - dms->window.dictLimit : 0; - const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ? - dms->window.base : NULL; - const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ? - dictBase + dictStartIndex : NULL; - const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ? - dms->window.nextSrc : NULL; - const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ? - prefixLowestIndex - (U32)(dictEnd - dictBase) : - 0; - const U32 dictHBitsL = dictMode == ZSTD_dictMatchState ? - dictCParams->hashLog : hBitsL; - const U32 dictHBitsS = dictMode == ZSTD_dictMatchState ? 
- dictCParams->chainLog : hBitsS; + const ZSTD_compressionParameters* const dictCParams = &dms->cParams; + const U32* const dictHashLong = dms->hashTable; + const U32* const dictHashSmall = dms->chainTable; + const U32 dictStartIndex = dms->window.dictLimit; + const BYTE* const dictBase = dms->window.base; + const BYTE* const dictStart = dictBase + dictStartIndex; + const BYTE* const dictEnd = dms->window.nextSrc; + const U32 dictIndexDelta = prefixLowestIndex - (U32)(dictEnd - dictBase); + const U32 dictHBitsL = dictCParams->hashLog; + const U32 dictHBitsS = dictCParams->chainLog; const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictStart)); - DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_generic"); - - assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState); + DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_dictMatchState_generic"); /* if a dictionary is attached, it must be within window range */ - if (dictMode == ZSTD_dictMatchState) { - assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); - } + assert(ms->window.dictLimit + (1U << cParams->windowLog) >= endIndex); /* init */ ip += (dictAndPrefixLength == 0); - if (dictMode == ZSTD_noDict) { - U32 const curr = (U32)(ip - base); - U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); - U32 const maxRep = curr - windowLow; - if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; - } - if (dictMode == ZSTD_dictMatchState) { - /* dictMatchState repCode checks don't currently handle repCode == 0 - * disabling. */ - assert(offset_1 <= dictAndPrefixLength); - assert(offset_2 <= dictAndPrefixLength); - } + + /* dictMatchState repCode checks don't currently handle repCode == 0 + * disabling. */ + assert(offset_1 <= dictAndPrefixLength); + assert(offset_2 <= dictAndPrefixLength); /* Main Search Loop */ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ @@ -135,29 +317,18 @@ size_t ZSTD_compressBlock_doubleFast_generic( const BYTE* matchLong = base + matchIndexL; const BYTE* match = base + matchIndexS; const U32 repIndex = curr + 1 - offset_1; - const BYTE* repMatch = (dictMode == ZSTD_dictMatchState - && repIndex < prefixLowestIndex) ? + const BYTE* repMatch = (repIndex < prefixLowestIndex) ? dictBase + (repIndex - dictIndexDelta) : base + repIndex; hashLong[h2] = hashSmall[h] = curr; /* update hash tables */ - /* check dictMatchState repcode */ - if (dictMode == ZSTD_dictMatchState - && ((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) + /* check repcode */ + if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */) && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); - goto _match_stored; - } - - /* check noDict repcode */ - if ( dictMode == ZSTD_noDict - && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) { - mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; - ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); goto _match_stored; } @@ -169,7 +340,7 @@ size_t ZSTD_compressBlock_doubleFast_generic( while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ goto _match_found; } - } else if (dictMode == ZSTD_dictMatchState) { + } else { /* check dictMatchState long match */ U32 const dictMatchIndexL = dictHashLong[dictHL]; const BYTE* dictMatchL = dictBase + dictMatchIndexL; @@ -187,7 +358,7 @@ size_t ZSTD_compressBlock_doubleFast_generic( if (MEM_read32(match) == MEM_read32(ip)) { goto _search_next_long; } - } else if (dictMode == ZSTD_dictMatchState) { + } else { /* check dictMatchState short match */ U32 const dictMatchIndexS = dictHashSmall[dictHS]; match = dictBase + dictMatchIndexS; @@ -220,7 +391,7 @@ _search_next_long: while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */ goto _match_found; } - } else if (dictMode == ZSTD_dictMatchState) { + } else { /* check dict long +1 match */ U32 const dictMatchIndexL3 = dictHashLong[dictHLNext]; const BYTE* dictMatchL3 = dictBase + dictMatchIndexL3; @@ -234,7 +405,7 @@ _search_next_long: } } } /* if no long +1 match, explore the short match we found */ - if (dictMode == ZSTD_dictMatchState && matchIndexS < prefixLowestIndex) { + if (matchIndexS < prefixLowestIndex) { mLength = ZSTD_count_2segments(ip+4, match+4, iend, dictEnd, prefixLowest) + 4; offset = (U32)(curr - matchIndexS); while (((ip>anchor) & (match>dictStart)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ @@ -244,13 +415,11 @@ _search_next_long: while (((ip>anchor) & (match>prefixLowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ } - /* fall-through */ - _match_found: offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); _match_stored: /* match found */ @@ -268,43 +437,27 @@ _match_stored: } /* check immediate repcode */ - if (dictMode == ZSTD_dictMatchState) { - while (ip <= ilimit) { - U32 const current2 = (U32)(ip-base); - U32 const repIndex2 = current2 - offset_2; - const BYTE* repMatch2 = dictMode == ZSTD_dictMatchState - && repIndex2 < prefixLowestIndex ? - dictBase + repIndex2 - dictIndexDelta : - base + repIndex2; - if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) - && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { - const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? 
dictEnd : iend; - size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; - U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; - ip += repLength2; - anchor = ip; - continue; - } - break; - } } - - if (dictMode == ZSTD_noDict) { - while ( (ip <= ilimit) - && ( (offset_2>0) - & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */ - hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); - hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); - ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH); - ip += rLength; + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ? + dictBase + repIndex2 - dictIndexDelta : + base + repIndex2; + if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */) + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; anchor = ip; - continue; /* faster when present ... (?) 
*/ - } } } + continue; + } + break; + } + } } /* while (ip < ilimit) */ /* save reps for next block */ @@ -315,6 +468,24 @@ _match_stored: return (size_t)(iend - anchor); } +#define ZSTD_GEN_DFAST_FN(dictMode, mls) \ + static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \ + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ + void const* src, size_t srcSize) \ + { \ + return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \ + } + +ZSTD_GEN_DFAST_FN(noDict, 4) +ZSTD_GEN_DFAST_FN(noDict, 5) +ZSTD_GEN_DFAST_FN(noDict, 6) +ZSTD_GEN_DFAST_FN(noDict, 7) + +ZSTD_GEN_DFAST_FN(dictMatchState, 4) +ZSTD_GEN_DFAST_FN(dictMatchState, 5) +ZSTD_GEN_DFAST_FN(dictMatchState, 6) +ZSTD_GEN_DFAST_FN(dictMatchState, 7) + size_t ZSTD_compressBlock_doubleFast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], @@ -325,13 +496,13 @@ size_t ZSTD_compressBlock_doubleFast( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast_noDict_7(ms, seqStore, rep, src, srcSize); } } @@ -345,13 +516,13 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState); + return ZSTD_compressBlock_doubleFast_dictMatchState_7(ms, seqStore, rep, src, srcSize); } } @@ -387,7 +558,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( /* if extDict is invalidated due to maxDistance, switch to "regular" variant */ if (prefixStartIndex == dictStartIndex) - return ZSTD_compressBlock_doubleFast_generic(ms, seqStore, rep, src, srcSize, mls, ZSTD_noDict); + return ZSTD_compressBlock_doubleFast(ms, seqStore, rep, src, srcSize); /* Search Loop */ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ @@ -409,12 +580,12 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( hashSmall[hSmall] = hashLong[hLong] = curr; /* update hash table */ if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */ - & (offset_1 < curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */ + & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 
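ZSTD_GEN_DFAST_FN generates one concrete function per (dictMode, mls) pair by token pasting, so the FORCE_INLINE_TEMPLATE body is instantiated with mls as a compile-time constant and the switch happens once per block instead of inside the hot loop. The same pattern in miniature, with a hypothetical kernel:

    /* A generic body, templated on a compile-time constant. */
    static inline int kernel_generic(const char *s, int n, int mls) {
        int hits = 0, i;
        for (i = 0; i + mls <= n; i++)   /* mls constant-folds per instance */
            hits += (s[i] == s[i + mls - 1]);
        return hits;
    }

    /* Token-pasting generator, mirroring ZSTD_GEN_DFAST_FN. */
    #define GEN_KERNEL(mls) \
        static int kernel_##mls(const char *s, int n) \
        { return kernel_generic(s, n, mls); }

    GEN_KERNEL(4)
    GEN_KERNEL(5)

    /* Dispatch once at the outer call, like ZSTD_compressBlock_doubleFast. */
    static int kernel(const char *s, int n, int mls) {
        switch (mls) {
        default:
        case 4: return kernel_4(s, n);
        case 5: return kernel_5(s, n);
        }
    }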
*/ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); } else { if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend; @@ -425,7 +596,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) { size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); @@ -450,7 +621,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( } offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); } else { ip += ((ip-anchor) >> kSearchStrength) + 1; @@ -477,12 +648,12 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( U32 const repIndex2 = current2 - offset_2; const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */ - & (offset_2 < current2 - dictStartIndex)) + & (offset_2 <= current2 - dictStartIndex)) && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); + ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2); hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; ip += repLength2; @@ -500,6 +671,10 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic( return (size_t)(iend - anchor); } +ZSTD_GEN_DFAST_FN(extDict, 4) +ZSTD_GEN_DFAST_FN(extDict, 5) +ZSTD_GEN_DFAST_FN(extDict, 6) +ZSTD_GEN_DFAST_FN(extDict, 7) size_t ZSTD_compressBlock_doubleFast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], @@ -510,12 +685,12 @@ size_t ZSTD_compressBlock_doubleFast_extDict( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_doubleFast_extDict_4(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_doubleFast_extDict_5(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_doubleFast_extDict_6(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_doubleFast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_doubleFast_extDict_7(ms, seqStore, rep, src, srcSize); } } diff --git a/thirdparty/zstd/compress/zstd_fast.c b/thirdparty/zstd/compress/zstd_fast.c index 4edc04dccd..802fc31579 100644 --- a/thirdparty/zstd/compress/zstd_fast.c +++ b/thirdparty/zstd/compress/zstd_fast.c @@ -43,145 +43,294 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms, } +/** + * If you squint hard enough (and ignore repcodes), the search operation at any + * given position is broken into 4 stages: + * + * 1. Hash (map position to hash value via input read) + * 2. Lookup (map hash val to index via hashtable read) + * 3. Load (map index to value at that position via input read) + * 4. Compare + * + * Each of these steps involves a memory read at an address which is computed + * from the previous step. This means these steps must be sequenced and their + * latencies are cumulative. + * + * Rather than do 1->2->3->4 sequentially for a single position before moving + * onto the next, this implementation interleaves these operations across the + * next few positions: + * + * R = Repcode Read & Compare + * H = Hash + * T = Table Lookup + * M = Match Read & Compare + * + * Pos | Time --> + * ----+------------------- + * N | ... M + * N+1 | ... TM + * N+2 | R H T M + * N+3 | H TM + * N+4 | R H T M + * N+5 | H ... + * N+6 | R ... + * + * This is very much analogous to the pipelining of execution in a CPU. And just + * like a CPU, we have to dump the pipeline when we find a match (i.e., take a + * branch). + * + * When this happens, we throw away our current state, and do the following prep + * to re-enter the loop: + * + * Pos | Time --> + * ----+------------------- + * N | H T + * N+1 | H + * + * This is also the work we do at the beginning to enter the loop initially. 
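Concretely, the pipelining described in this comment keeps the table lookup for one position and the hash for the next in flight while the current position is being compared. A scalar sketch of the rotation, with illustrative hashing and without the repcode and stride handling of the real loop:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t read32(const uint8_t *p) {
        uint32_t v; memcpy(&v, p, 4); return v;
    }

    /* Illustrative 4-byte multiplicative hash (not zstd's constants). */
    static size_t hash4(const uint8_t *p, uint32_t bits) {
        return (size_t)((read32(p) * 2654435761U) >> (32 - bits));
    }

    /* Return the first 4-byte match candidate, keeping the table lookup for
     * ip+1 and the hash for ip+2 in flight while comparing at ip. The table
     * must hold valid positions (e.g. zero-initialized with position 0 as a
     * sentinel), and ilimit must leave read headroom, mirroring the
     * `iend - HASH_READ_SIZE` slack above. */
    static const uint8_t *pipelined_find(const uint8_t *base, const uint8_t *ip,
                                         const uint8_t *ilimit,
                                         uint32_t *table, uint32_t bits) {
        size_t h0 = hash4(ip, bits);
        size_t h1 = hash4(ip + 1, bits);
        uint32_t idx = table[h0];
        while (ip + 2 < ilimit) {
            table[h0] = (uint32_t)(ip - base);       /* write back entry for ip */
            if (read32(base + idx) == read32(ip))    /* M: compare at ip */
                return base + idx;                   /* match: dump the pipeline */
            idx = table[h1];                         /* T: lookup for ip+1 */
            h0  = h1;
            h1  = hash4(ip + 2, bits);               /* H: hash two ahead */
            ip++;                                    /* rotate positions */
        }
        return NULL;
    }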
+ */ FORCE_INLINE_TEMPLATE size_t -ZSTD_compressBlock_fast_generic( +ZSTD_compressBlock_fast_noDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize, - U32 const mls) + U32 const mls, U32 const hasStep) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; U32 const hlog = cParams->hashLog; /* support stepSize of 0 */ - size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1; + size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2; const BYTE* const base = ms->window.base; const BYTE* const istart = (const BYTE*)src; - /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */ - const BYTE* ip0 = istart; - const BYTE* ip1; - const BYTE* anchor = istart; const U32 endIndex = (U32)((size_t)(istart - base) + srcSize); const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog); const BYTE* const prefixStart = base + prefixStartIndex; const BYTE* const iend = istart + srcSize; const BYTE* const ilimit = iend - HASH_READ_SIZE; - U32 offset_1=rep[0], offset_2=rep[1]; + + const BYTE* anchor = istart; + const BYTE* ip0 = istart; + const BYTE* ip1; + const BYTE* ip2; + const BYTE* ip3; + U32 current0; + + U32 rep_offset1 = rep[0]; + U32 rep_offset2 = rep[1]; U32 offsetSaved = 0; - /* init */ + size_t hash0; /* hash for ip0 */ + size_t hash1; /* hash for ip1 */ + U32 idx; /* match idx for ip0 */ + U32 mval; /* src value at match idx */ + + U32 offcode; + const BYTE* match0; + size_t mLength; + + /* ip0 and ip1 are always adjacent. The targetLength skipping and + * uncompressibility acceleration is applied to every other position, + * matching the behavior of #1562. step therefore represents the gap + * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */ + size_t step; + const BYTE* nextStep; + const size_t kStepIncr = (1 << (kSearchStrength - 1)); + DEBUGLOG(5, "ZSTD_compressBlock_fast_generic"); ip0 += (ip0 == prefixStart); - ip1 = ip0 + 1; { U32 const curr = (U32)(ip0 - base); U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog); U32 const maxRep = curr - windowLow; - if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; - if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + if (rep_offset2 > maxRep) offsetSaved = rep_offset2, rep_offset2 = 0; + if (rep_offset1 > maxRep) offsetSaved = rep_offset1, rep_offset1 = 0; } - /* Main Search Loop */ -#ifdef __INTEL_COMPILER - /* From intel 'The vector pragma indicates that the loop should be - * vectorized if it is legal to do so'. 
Can be used together with - * #pragma ivdep (but have opted to exclude that because intel - * warns against using it).*/ - #pragma vector always -#endif - while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */ - size_t mLength; - BYTE const* ip2 = ip0 + 2; - size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls); - U32 const val0 = MEM_read32(ip0); - size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls); - U32 const val1 = MEM_read32(ip1); - U32 const current0 = (U32)(ip0-base); - U32 const current1 = (U32)(ip1-base); - U32 const matchIndex0 = hashTable[h0]; - U32 const matchIndex1 = hashTable[h1]; - BYTE const* repMatch = ip2 - offset_1; - const BYTE* match0 = base + matchIndex0; - const BYTE* match1 = base + matchIndex1; - U32 offcode; - -#if defined(__aarch64__) - PREFETCH_L1(ip0+256); -#endif - - hashTable[h0] = current0; /* update hash table */ - hashTable[h1] = current1; /* update hash table */ - - assert(ip0 + 1 == ip1); - - if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) { - mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0; - ip0 = ip2 - mLength; - match0 = repMatch - mLength; + /* start each op */ +_start: /* Requires: ip0 */ + + step = stepSize; + nextStep = ip0 + kStepIncr; + + /* calculate positions, ip0 - anchor == 0, so we skip step calc */ + ip1 = ip0 + 1; + ip2 = ip0 + step; + ip3 = ip2 + 1; + + if (ip3 >= ilimit) { + goto _cleanup; + } + + hash0 = ZSTD_hashPtr(ip0, hlog, mls); + hash1 = ZSTD_hashPtr(ip1, hlog, mls); + + idx = hashTable[hash0]; + + do { + /* load repcode match for ip[2]*/ + const U32 rval = MEM_read32(ip2 - rep_offset1); + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + /* check repcode at ip[2] */ + if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) { + ip0 = ip2; + match0 = ip0 - rep_offset1; + mLength = ip0[-1] == match0[-1]; + ip0 -= mLength; + match0 -= mLength; + offcode = STORE_REPCODE_1; mLength += 4; - offcode = 0; goto _match; } - if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) { - /* found a regular match */ - goto _offset; + + /* load match for ip[0] */ + if (idx >= prefixStartIndex) { + mval = MEM_read32(base + idx); + } else { + mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */ } - if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) { - /* found a regular match after one literal */ - ip0 = ip1; - match0 = match1; + + /* check match at ip[0] */ + if (MEM_read32(ip0) == mval) { + /* found a match! */ goto _offset; } - { size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize; - assert(step >= 2); - ip0 += step; - ip1 += step; - continue; + + /* lookup ip[1] */ + idx = hashTable[hash1]; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip3; + + /* write back hash table entry */ + current0 = (U32)(ip0 - base); + hashTable[hash0] = current0; + + /* load match for ip[0] */ + if (idx >= prefixStartIndex) { + mval = MEM_read32(base + idx); + } else { + mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. 
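The step/nextStep pair above implements uncompressibility acceleration: every kStepIncr positions traversed without a match, the stride between successive position pairs grows by one, so pathological inputs are skimmed rather than fully scanned. Schematically, assuming zstd's kSearchStrength of 8:

    #include <stddef.h>

    /* Schematic stride growth; kStepIncr == 1 << (kSearchStrength - 1)
     * == 128 in this file, assuming kSearchStrength == 8. */
    static size_t stride_after(size_t bytesTraversed, size_t initialStep) {
        size_t const kStepIncr = 1 << 7;
        return initialStep + bytesTraversed / kStepIncr;  /* +1 per kStepIncr */
    }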
*/ } -_offset: /* Requires: ip0, match0 */ - /* Compute the offset code */ - offset_2 = offset_1; - offset_1 = (U32)(ip0-match0); - offcode = offset_1 + ZSTD_REP_MOVE; - mLength = 4; - /* Count the backwards match length */ - while (((ip0>anchor) & (match0>prefixStart)) - && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */ -_match: /* Requires: ip0, match0, offcode */ - /* Count the forward length */ - mLength += ZSTD_count(ip0+mLength, match0+mLength, iend); - ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH); - /* match found */ - ip0 += mLength; - anchor = ip0; + /* check match at ip[0] */ + if (MEM_read32(ip0) == mval) { + /* found a match! */ + goto _offset; + } - if (ip0 <= ilimit) { - /* Fill Table */ - assert(base+current0+2 > istart); /* check base overflow */ - hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ - hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); - - if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */ - while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) { - /* store sequence */ - size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4; - { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ - hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); - ip0 += rLength; - ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH); - anchor = ip0; - continue; /* faster when present (confirmed on gcc-8) ... (?) */ - } } } - ip1 = ip0 + 1; - } + /* lookup ip[1] */ + idx = hashTable[hash1]; + + /* hash ip[2] */ + hash0 = hash1; + hash1 = ZSTD_hashPtr(ip2, hlog, mls); + + /* advance to next positions */ + ip0 = ip1; + ip1 = ip2; + ip2 = ip0 + step; + ip3 = ip1 + step; + + /* calculate step */ + if (ip2 >= nextStep) { + step++; + PREFETCH_L1(ip1 + 64); + PREFETCH_L1(ip1 + 128); + nextStep += kStepIncr; + } + } while (ip3 < ilimit); + +_cleanup: + /* Note that there are probably still a couple positions we could search. + * However, it seems to be a meaningful performance hit to try to search + * them. So let's not. */ /* save reps for next block */ - rep[0] = offset_1 ? offset_1 : offsetSaved; - rep[1] = offset_2 ? offset_2 : offsetSaved; + rep[0] = rep_offset1 ? rep_offset1 : offsetSaved; + rep[1] = rep_offset2 ? rep_offset2 : offsetSaved; /* Return the last literals size */ return (size_t)(iend - anchor); + +_offset: /* Requires: ip0, idx */ + + /* Compute the offset code. */ + match0 = base + idx; + rep_offset2 = rep_offset1; + rep_offset1 = (U32)(ip0-match0); + offcode = STORE_OFFSET(rep_offset1); + mLength = 4; + + /* Count the backwards match length. */ + while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) { + ip0--; + match0--; + mLength++; + } + +_match: /* Requires: ip0, match0, offcode */ + + /* Count the forward length. */ + mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend); + + ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength); + + ip0 += mLength; + anchor = ip0; + + /* write next hash table entry */ + if (ip1 < ip0) { + hashTable[hash1] = (U32)(ip1 - base); + } + + /* Fill table and check for immediate repcode. 
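After the 4-byte seed match, the code extends it backwards while earlier bytes agree (the "catch up" loops) and forwards via ZSTD_count. A self-contained, byte-at-a-time sketch of that two-way extension (the real ZSTD_count compares word-at-a-time):

    #include <stddef.h>
    #include <stdint.h>

    /* Extend a seed match of `seed` bytes at (ip, match) in both directions.
     * Returns the full length and rewinds *ipPtr/*matchPtr to the new start. */
    static size_t extend_match(const uint8_t **ipPtr, const uint8_t **matchPtr,
                               const uint8_t *anchor, const uint8_t *lowest,
                               const uint8_t *iend, size_t seed) {
        const uint8_t *ip = *ipPtr, *match = *matchPtr;
        size_t len = seed;
        /* backwards: the "catch up" loop */
        while (ip > anchor && match > lowest && ip[-1] == match[-1]) {
            ip--; match--; len++;
        }
        /* forwards: what ZSTD_count computes */
        while (ip + len < iend && ip[len] == match[len])
            len++;
        *ipPtr = ip; *matchPtr = match;
        return len;
    }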
*/ + if (ip0 <= ilimit) { + /* Fill Table */ + assert(base+current0+2 > istart); /* check base overflow */ + hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base); + + if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */ + while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4; + { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */ + hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base); + ip0 += rLength; + ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength); + anchor = ip0; + continue; /* faster when present (confirmed on gcc-8) ... (?) */ + } } } + + goto _start; } +#define ZSTD_GEN_FAST_FN(dictMode, mls, step) \ + static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step( \ + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \ + void const* src, size_t srcSize) \ + { \ + return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \ + } + +ZSTD_GEN_FAST_FN(noDict, 4, 1) +ZSTD_GEN_FAST_FN(noDict, 5, 1) +ZSTD_GEN_FAST_FN(noDict, 6, 1) +ZSTD_GEN_FAST_FN(noDict, 7, 1) + +ZSTD_GEN_FAST_FN(noDict, 4, 0) +ZSTD_GEN_FAST_FN(noDict, 5, 0) +ZSTD_GEN_FAST_FN(noDict, 6, 0) +ZSTD_GEN_FAST_FN(noDict, 7, 0) size_t ZSTD_compressBlock_fast( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], @@ -189,24 +338,40 @@ size_t ZSTD_compressBlock_fast( { U32 const mls = ms->cParams.minMatch; assert(ms->dictMatchState == NULL); - switch(mls) - { - default: /* includes case 3 */ - case 4 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4); - case 5 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5); - case 6 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6); - case 7 : - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7); + if (ms->cParams.targetLength > 1) { + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize); + case 5 : + return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize); + case 6 : + return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize); + case 7 : + return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize); + } + } else { + switch(mls) + { + default: /* includes case 3 */ + case 4 : + return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize); + case 5 : + return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize); + case 6 : + return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize); + case 7 : + return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize); + } + } } FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_fast_dictMatchState_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize, U32 const mls) + void const* src, size_t srcSize, U32 const mls, U32 const hasStep) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; @@ -242,6 +407,8 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( assert(endIndex - prefixStartIndex <= maxDistance); 
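ZSTD_GEN_FAST_FN adds a second specialization axis, hasStep: ZSTD_compressBlock_fast selects a _1 variant only when cParams.targetLength > 1, so the common path compiles with a fixed stride and pays nothing for the acceleration parameter. The stride selection each variant folds away, isolated:

    #include <stddef.h>

    /* Mirrors `stepSize = hasStep ? (targetLength + !targetLength + 1) : 2`
     * above; hasStep is a compile-time constant in each generated variant. */
    static size_t pick_step(unsigned targetLength, int hasStep) {
        return hasStep ? (size_t)targetLength + !targetLength + 1 : 2;
    }

Note that for targetLength <= 1 both branches yield 2, which is why the dispatcher only reaches for the _1 variants when targetLength > 1.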
(void)maxDistance; (void)endIndex; /* these variables are not used when assert() is disabled */ + (void)hasStep; /* not currently specialized on whether it's accelerated */ + /* ensure there will be no underflow * when translating a dict index into a local index */ assert(prefixStartIndex >= (U32)(dictEnd - dictBase)); @@ -272,7 +439,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend; mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength); } else if ( (matchIndex <= prefixStartIndex) ) { size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls); U32 const dictMatchIndex = dictHashTable[dictHash]; @@ -292,7 +459,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( } /* catch up */ offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); } } else if (MEM_read32(match) != MEM_read32(ip)) { /* it's not a match, and we're not going to check the dictionary */ @@ -307,7 +474,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); } /* match found */ @@ -332,7 +499,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? 
dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH); + ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2); hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; ip += repLength2; anchor = ip; @@ -351,6 +518,12 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic( return (size_t)(iend - anchor); } + +ZSTD_GEN_FAST_FN(dictMatchState, 4, 0) +ZSTD_GEN_FAST_FN(dictMatchState, 5, 0) +ZSTD_GEN_FAST_FN(dictMatchState, 6, 0) +ZSTD_GEN_FAST_FN(dictMatchState, 7, 0) + size_t ZSTD_compressBlock_fast_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], void const* src, size_t srcSize) @@ -361,20 +534,20 @@ size_t ZSTD_compressBlock_fast_dictMatchState( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize); } } static size_t ZSTD_compressBlock_fast_extDict_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - void const* src, size_t srcSize, U32 const mls) + void const* src, size_t srcSize, U32 const mls, U32 const hasStep) { const ZSTD_compressionParameters* const cParams = &ms->cParams; U32* const hashTable = ms->hashTable; @@ -398,11 +571,13 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( const BYTE* const ilimit = iend - 8; U32 offset_1=rep[0], offset_2=rep[1]; + (void)hasStep; /* not currently specialized on whether it's accelerated */ + DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1); /* switch to "regular" variant if extDict is invalidated due to maxDistance */ if (prefixStartIndex == dictStartIndex) - return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls); + return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize); /* Search Loop */ while (ip < ilimit) { /* < instead of <=, because (ip+1) */ @@ -418,12 +593,12 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( DEBUGLOG(7, "offset_1 = %u , curr = %u", offset_1, curr); if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */ - & (offset_1 < curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */ + & (offset_1 <= curr+1 - dictStartIndex) ) /* note: we are searching at curr+1 */ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? 
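In dictMatchState and extDict modes a match may begin in the dictionary segment and run on into the current prefix, which is why lengths here come from ZSTD_count_2segments. A byte-at-a-time sketch of that two-segment count, under the same segment layout:

    #include <stddef.h>
    #include <stdint.h>

    /* Count matching bytes between ip and match, where match may cross from
     * an external segment (ending at mEnd) into the prefix (starting at
     * prefixStart); ip is bounded by iEnd throughout. */
    static size_t count_2segments(const uint8_t *ip, const uint8_t *match,
                                  const uint8_t *iEnd, const uint8_t *mEnd,
                                  const uint8_t *prefixStart) {
        size_t len = 0;
        /* segment 1: bounded by the end of the external segment */
        while (match + len < mEnd && ip + len < iEnd && ip[len] == match[len])
            len++;
        if (match + len != mEnd)
            return len;                  /* mismatch before the boundary */
        /* segment 2: continue against the prefix */
        match = prefixStart;
        while (ip + len < iEnd && ip[len] == *match) { len++; match++; }
        return len;
    }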
dictEnd : iend; size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4; ip++; - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength); ip += rLength; anchor = ip; } else { @@ -439,7 +614,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4; while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ offset_2 = offset_1; offset_1 = offset; /* update offset history */ - ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength); ip += mLength; anchor = ip; } } @@ -453,12 +628,12 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( U32 const current2 = (U32)(ip-base); U32 const repIndex2 = current2 - offset_2; const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2; - if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 < curr - dictStartIndex)) /* intentional overflow */ + if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 <= curr - dictStartIndex)) /* intentional overflow */ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend; size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4; { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH); + ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2); hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2; ip += repLength2; anchor = ip; @@ -475,6 +650,10 @@ static size_t ZSTD_compressBlock_fast_extDict_generic( return (size_t)(iend - anchor); } +ZSTD_GEN_FAST_FN(extDict, 4, 0) +ZSTD_GEN_FAST_FN(extDict, 5, 0) +ZSTD_GEN_FAST_FN(extDict, 6, 0) +ZSTD_GEN_FAST_FN(extDict, 7, 0) size_t ZSTD_compressBlock_fast_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], @@ -485,12 +664,12 @@ size_t ZSTD_compressBlock_fast_extDict( { default: /* includes case 3 */ case 4 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4); + return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize); case 5 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5); + return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize); case 6 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6); + return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize); case 7 : - return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7); + return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize); } } diff --git a/thirdparty/zstd/compress/zstd_lazy.c b/thirdparty/zstd/compress/zstd_lazy.c index 3d523e8472..2e38dcb46d 100644 --- a/thirdparty/zstd/compress/zstd_lazy.c +++ b/thirdparty/zstd/compress/zstd_lazy.c @@ -61,7 +61,7 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms, * assumption : curr >= btlow == (curr - btmask) * doesn't fail */ static void -ZSTD_insertDUBT1(ZSTD_matchState_t* ms, +ZSTD_insertDUBT1(const 
ZSTD_matchState_t* ms, U32 curr, const BYTE* inputEnd, U32 nbCompares, U32 btLow, const ZSTD_dictMode_e dictMode) @@ -93,7 +93,7 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms, assert(curr >= btLow); assert(ip < iend); /* condition for ZSTD_count */ - while (nbCompares-- && (matchIndex > windowLow)) { + for (; nbCompares && (matchIndex > windowLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ assert(matchIndex < curr); @@ -151,7 +151,7 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms, static size_t ZSTD_DUBT_findBetterDictMatch ( - ZSTD_matchState_t* ms, + const ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, size_t* offsetPtr, size_t bestLength, @@ -185,7 +185,7 @@ ZSTD_DUBT_findBetterDictMatch ( (void)dictMode; assert(dictMode == ZSTD_dictMatchState); - while (nbCompares-- && (dictMatchIndex > dictLowLimit)) { + for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) { U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match = dictBase + dictMatchIndex; @@ -197,8 +197,8 @@ ZSTD_DUBT_findBetterDictMatch ( U32 matchIndex = dictMatchIndex + dictIndexDelta; if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) { DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)", - curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex); - bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex; + curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, STORE_OFFSET(curr - matchIndex), dictMatchIndex, matchIndex); + bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex); } if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */ break; /* drop, to guarantee consistency (miss a little bit of compression) */ @@ -218,7 +218,7 @@ ZSTD_DUBT_findBetterDictMatch ( } if (bestLength >= MINMATCH) { - U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; + U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)", curr, (U32)bestLength, (U32)*offsetPtr, mIndex); } @@ -309,7 +309,7 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, matchIndex = hashTable[h]; hashTable[h] = curr; /* Update Hash Table */ - while (nbCompares-- && (matchIndex > windowLow)) { + for (; nbCompares && (matchIndex > windowLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match; @@ -328,7 +328,7 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) - bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex; + bestLength = matchLength, *offsetPtr = STORE_OFFSET(curr - matchIndex); if (ip+matchLength == iend) { /* equal : no way to 
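The ZSTD_REP_MOVE arithmetic that used to be spelled out at each call site is replaced throughout by the STORE_OFFSET()/STORED_OFFSET()/STORE_REPCODE_1 helpers, which pack real offsets into the sequence-store code space above the repcodes. Assuming the zstd 1.5.x definitions (asserts omitted), the mapping is:

    /* Repcodes 1..3 occupy stored values 0..2; real offsets sit above them. */
    #define ZSTD_REP_NUM      3
    #define ZSTD_REP_MOVE     (ZSTD_REP_NUM - 1)
    #define STORE_REPCODE(r)  ((r) - 1)              /* 1 <= r <= 3 */
    #define STORE_REPCODE_1   STORE_REPCODE(1)       /* == 0 */
    #define STORE_OFFSET(o)   ((o) + ZSTD_REP_MOVE)  /* o > 0 */
    #define STORED_OFFSET(c)  ((c) - ZSTD_REP_MOVE)  /* inverse, c > ZSTD_REP_MOVE */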
know if inf or sup */ if (dictMode == ZSTD_dictMatchState) { nbCompares = 0; /* in addition to avoiding checking any @@ -357,6 +357,7 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, *smallerPtr = *largerPtr = 0; + assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */ if (dictMode == ZSTD_dictMatchState && nbCompares) { bestLength = ZSTD_DUBT_findBetterDictMatch( ms, ip, iend, @@ -367,7 +368,7 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */ ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ if (bestLength >= MINMATCH) { - U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex; + U32 const mIndex = curr - (U32)STORED_OFFSET(*offsetPtr); (void)mIndex; DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)", curr, (U32)bestLength, (U32)*offsetPtr, mIndex); } @@ -390,54 +391,6 @@ ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode); } - -static size_t -ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); - case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); - case 7 : - case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); - } -} - - -static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); - case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); - case 7 : - case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); - } -} - - -static size_t ZSTD_BtFindBestMatch_extDict_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); - case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); - case 7 : - case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); - } -} - /*********************************** * Dedicated dict search ***********************************/ @@ -450,7 +403,7 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B U32* const chainTable = ms->chainTable; U32 const chainSize = 1 << ms->cParams.chainLog; U32 idx = ms->nextToUpdate; - U32 const minChain = chainSize < target ? target - chainSize : idx; + U32 const minChain = chainSize < target - idx ? 
target - chainSize : idx; U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG; U32 const cacheSize = bucketSize - 1; U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize; @@ -464,7 +417,7 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG; U32* const tmpHashTable = hashTable; U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog); - U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog; + U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog; U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx; U32 hashIdx; @@ -608,7 +561,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE; + *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta)); if (ip+currentMl == iLimit) { /* best possible, avoids read overflow on next attempt */ return ml; @@ -645,7 +598,7 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE; + *offsetPtr = STORE_OFFSET(curr - (matchIndex + ddsIndexDelta)); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } } @@ -692,7 +645,7 @@ U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) { /* inlining is important to hardwire a hot branch (template emulation) */ FORCE_INLINE_TEMPLATE -size_t ZSTD_HcFindBestMatch_generic ( +size_t ZSTD_HcFindBestMatch( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, @@ -750,7 +703,7 @@ size_t ZSTD_HcFindBestMatch_generic ( /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE; + *offsetPtr = STORE_OFFSET(curr - matchIndex); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } @@ -758,6 +711,7 @@ size_t ZSTD_HcFindBestMatch_generic ( matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); } + assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. 
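ZSTD_HcFindBestMatch walks a hash chain: a head table maps each hash to the newest position, and the chain table links every position to the previous one sharing its hash, with the walk bounded by nbAttempts; note the for-loop form, matching the change above that keeps nbCompares/nbAttempts from post-decrementing below zero. A minimal version of the traversal:

    #include <stddef.h>
    #include <stdint.h>

    /* Walk a hash chain for the best match at ip. chainTable[i & chainMask]
     * holds the previous position with the same hash as position i. */
    static size_t chain_search(const uint8_t *base, const uint8_t *ip,
                               const uint8_t *iEnd,
                               uint32_t head, const uint32_t *chainTable,
                               uint32_t chainMask, uint32_t lowLimit,
                               uint32_t nbAttempts, uint32_t *bestIdx) {
        size_t best = 0;
        uint32_t idx = head;
        for (; nbAttempts && idx > lowLimit; --nbAttempts) {
            const uint8_t *m = base + idx;
            size_t len = 0;
            while (ip + len < iEnd && ip[len] == m[len]) len++;
            if (len > best) { best = len; *bestIdx = idx; }
            idx = chainTable[idx & chainMask];   /* NEXT_IN_CHAIN */
        }
        return best;
    }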
*/ if (dictMode == ZSTD_dedicatedDictSearch) { ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms, ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); @@ -784,7 +738,8 @@ size_t ZSTD_HcFindBestMatch_generic ( /* save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE; + assert(curr > matchIndex + dmsIndexDelta); + *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta)); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } @@ -797,310 +752,80 @@ size_t ZSTD_HcFindBestMatch_generic ( return ml; } - -FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict); - } -} - - -static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState); - } -} - - -static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch); - } -} - - -FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict); - case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict); - case 7 : - case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict); - } -} - /* ********************************* * (SIMD) Row-based matchfinder ***********************************/ /* Constants for row-based hash */ -#define ZSTD_ROW_HASH_TAG_OFFSET 1 /* byte offset of hashes in the match state's tagTable from the beginning of a row */ -#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ +#define ZSTD_ROW_HASH_TAG_OFFSET 16 /* byte offset of hashes in the match state's tagTable from the beginning of a row */ +#define ZSTD_ROW_HASH_TAG_BITS 8 /* nb bits to use for the tag */ #define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1) +#define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */ #define ZSTD_ROW_HASH_CACHE_MASK 
(ZSTD_ROW_HASH_CACHE_SIZE - 1) -typedef U32 ZSTD_VecMask; /* Clarifies when we are interacting with a U32 representing a mask of matches */ - -#if !defined(ZSTD_NO_INTRINSICS) && defined(__SSE2__) /* SIMD SSE version */ - -#include <emmintrin.h> -typedef __m128i ZSTD_Vec128; - -/* Returns a 128-bit container with 128-bits from src */ -static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) { - return _mm_loadu_si128((ZSTD_Vec128 const*)src); -} - -/* Returns a ZSTD_Vec128 with the byte "val" packed 16 times */ -static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) { - return _mm_set1_epi8((char)val); -} - -/* Do byte-by-byte comparison result of x and y. Then collapse 128-bit resultant mask - * into a 32-bit mask that is the MSB of each byte. - * */ -static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) { - return (ZSTD_VecMask)_mm_movemask_epi8(_mm_cmpeq_epi8(x, y)); -} - -typedef struct { - __m128i fst; - __m128i snd; -} ZSTD_Vec256; - -static ZSTD_Vec256 ZSTD_Vec256_read(const void* const ptr) { - ZSTD_Vec256 v; - v.fst = ZSTD_Vec128_read(ptr); - v.snd = ZSTD_Vec128_read((ZSTD_Vec128 const*)ptr + 1); - return v; -} - -static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) { - ZSTD_Vec256 v; - v.fst = ZSTD_Vec128_set8(val); - v.snd = ZSTD_Vec128_set8(val); - return v; -} - -static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) { - ZSTD_VecMask fstMask; - ZSTD_VecMask sndMask; - fstMask = ZSTD_Vec128_cmpMask8(x.fst, y.fst); - sndMask = ZSTD_Vec128_cmpMask8(x.snd, y.snd); - return fstMask | (sndMask << 16); -} - -#elif !defined(ZSTD_NO_INTRINSICS) && defined(__ARM_NEON) /* SIMD ARM NEON Version */ - -#include <arm_neon.h> -typedef uint8x16_t ZSTD_Vec128; - -static ZSTD_Vec128 ZSTD_Vec128_read(const void* const src) { - return vld1q_u8((const BYTE* const)src); -} - -static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) { - return vdupq_n_u8(val); -} - -/* Mimics '_mm_movemask_epi8()' from SSE */ -static U32 ZSTD_vmovmaskq_u8(ZSTD_Vec128 val) { - /* Shift out everything but the MSB bits in each byte */ - uint16x8_t highBits = vreinterpretq_u16_u8(vshrq_n_u8(val, 7)); - /* Merge the even lanes together with vsra (right shift and add) */ - uint32x4_t paired16 = vreinterpretq_u32_u16(vsraq_n_u16(highBits, highBits, 7)); - uint64x2_t paired32 = vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14)); - uint8x16_t paired64 = vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28)); - /* Extract the low 8 bits from each lane, merge */ - return vgetq_lane_u8(paired64, 0) | ((U32)vgetq_lane_u8(paired64, 8) << 8); -} - -static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) { - return (ZSTD_VecMask)ZSTD_vmovmaskq_u8(vceqq_u8(x, y)); -} - -typedef struct { - uint8x16_t fst; - uint8x16_t snd; -} ZSTD_Vec256; - -static ZSTD_Vec256 ZSTD_Vec256_read(const void* const ptr) { - ZSTD_Vec256 v; - v.fst = ZSTD_Vec128_read(ptr); - v.snd = ZSTD_Vec128_read((ZSTD_Vec128 const*)ptr + 1); - return v; -} - -static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) { - ZSTD_Vec256 v; - v.fst = ZSTD_Vec128_set8(val); - v.snd = ZSTD_Vec128_set8(val); - return v; -} - -static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) { - ZSTD_VecMask fstMask; - ZSTD_VecMask sndMask; - fstMask = ZSTD_Vec128_cmpMask8(x.fst, y.fst); - sndMask = ZSTD_Vec128_cmpMask8(x.snd, y.snd); - return fstMask | (sndMask << 16); -} - -#else /* Scalar fallback version */ - -#define VEC128_NB_SIZE_T (16 / sizeof(size_t)) -typedef struct { - size_t vec[VEC128_NB_SIZE_T]; -} ZSTD_Vec128; - -static ZSTD_Vec128 
ZSTD_Vec128_read(const void* const src) { - ZSTD_Vec128 ret; - ZSTD_memcpy(ret.vec, src, VEC128_NB_SIZE_T*sizeof(size_t)); - return ret; -} - -static ZSTD_Vec128 ZSTD_Vec128_set8(BYTE val) { - ZSTD_Vec128 ret = { {0} }; - int startBit = sizeof(size_t) * 8 - 8; - for (;startBit >= 0; startBit -= 8) { - unsigned j = 0; - for (;j < VEC128_NB_SIZE_T; ++j) { - ret.vec[j] |= ((size_t)val << startBit); - } - } - return ret; -} - -/* Compare x to y, byte by byte, generating a "matches" bitfield */ -static ZSTD_VecMask ZSTD_Vec128_cmpMask8(ZSTD_Vec128 x, ZSTD_Vec128 y) { - ZSTD_VecMask res = 0; - unsigned i = 0; - unsigned l = 0; - for (; i < VEC128_NB_SIZE_T; ++i) { - const size_t cmp1 = x.vec[i]; - const size_t cmp2 = y.vec[i]; - unsigned j = 0; - for (; j < sizeof(size_t); ++j, ++l) { - if (((cmp1 >> j*8) & 0xFF) == ((cmp2 >> j*8) & 0xFF)) { - res |= ((U32)1 << (j+i*sizeof(size_t))); - } - } - } - return res; -} - -#define VEC256_NB_SIZE_T 2*VEC128_NB_SIZE_T -typedef struct { - size_t vec[VEC256_NB_SIZE_T]; -} ZSTD_Vec256; - -static ZSTD_Vec256 ZSTD_Vec256_read(const void* const src) { - ZSTD_Vec256 ret; - ZSTD_memcpy(ret.vec, src, VEC256_NB_SIZE_T*sizeof(size_t)); - return ret; -} - -static ZSTD_Vec256 ZSTD_Vec256_set8(BYTE val) { - ZSTD_Vec256 ret = { {0} }; - int startBit = sizeof(size_t) * 8 - 8; - for (;startBit >= 0; startBit -= 8) { - unsigned j = 0; - for (;j < VEC256_NB_SIZE_T; ++j) { - ret.vec[j] |= ((size_t)val << startBit); - } - } - return ret; -} - -/* Compare x to y, byte by byte, generating a "matches" bitfield */ -static ZSTD_VecMask ZSTD_Vec256_cmpMask8(ZSTD_Vec256 x, ZSTD_Vec256 y) { - ZSTD_VecMask res = 0; - unsigned i = 0; - unsigned l = 0; - for (; i < VEC256_NB_SIZE_T; ++i) { - const size_t cmp1 = x.vec[i]; - const size_t cmp2 = y.vec[i]; - unsigned j = 0; - for (; j < sizeof(size_t); ++j, ++l) { - if (((cmp1 >> j*8) & 0xFF) == ((cmp2 >> j*8) & 0xFF)) { - res |= ((U32)1 << (j+i*sizeof(size_t))); - } - } - } - return res; -} - -#endif /* !defined(ZSTD_NO_INTRINSICS) && defined(__SSE2__) */ +typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */ /* ZSTD_VecMask_next(): * Starting from the LSB, returns the idx of the next non-zero bit. * Basically counting the nb of trailing zeroes. */ static U32 ZSTD_VecMask_next(ZSTD_VecMask val) { -# if defined(_MSC_VER) /* Visual */ - unsigned long r=0; - return _BitScanForward(&r, val) ? 
(U32)r : 0; -# elif defined(__GNUC__) && (__GNUC__ >= 3) - return (U32)__builtin_ctz(val); + assert(val != 0); +# if defined(_MSC_VER) && defined(_WIN64) + if (val != 0) { + unsigned long r; + _BitScanForward64(&r, val); + return (U32)(r); + } else { + /* Should not reach this code path */ + __assume(0); + } +# elif (defined(__GNUC__) && ((__GNUC__ > 3) || ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4)))) + if (sizeof(size_t) == 4) { + U32 mostSignificantWord = (U32)(val >> 32); + U32 leastSignificantWord = (U32)val; + if (leastSignificantWord == 0) { + return 32 + (U32)__builtin_ctz(mostSignificantWord); + } else { + return (U32)__builtin_ctz(leastSignificantWord); + } + } else { + return (U32)__builtin_ctzll(val); + } # else - /* Software ctz version: http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightMultLookup */ - static const U32 multiplyDeBruijnBitPosition[32] = - { - 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8, - 31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 - }; - return multiplyDeBruijnBitPosition[((U32)((v & -(int)v) * 0x077CB531U)) >> 27]; + /* Software ctz version: http://aggregate.org/MAGIC/#Trailing%20Zero%20Count + * and: https://stackoverflow.com/questions/2709430/count-number-of-bits-in-a-64-bit-long-big-integer + */ + val = ~val & (val - 1ULL); /* Lowest set bit mask */ + val = val - ((val >> 1) & 0x5555555555555555); + val = (val & 0x3333333333333333ULL) + ((val >> 2) & 0x3333333333333333ULL); + return (U32)((((val + (val >> 4)) & 0xF0F0F0F0F0F0F0FULL) * 0x101010101010101ULL) >> 56); # endif } -/* ZSTD_VecMask_rotateRight(): - * Rotates a bitfield to the right by "rotation" bits. - * If the rotation is greater than totalBits, the returned mask is 0. +/* ZSTD_rotateRight_*(): + * Rotates a bitfield to the right by "count" bits. 
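
The three rotate helpers that follow all use the same branch-free idiom. A minimal standalone sketch of it (the function name and demo values are illustrative, not the zstd API): masking both shift amounts keeps them in range, so the expression stays defined even for a rotation count of 0, where a naive `value << (64 - count)` would shift by the full width, which is undefined behavior in C.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* UB-free right rotation: (0U - count) & 0x3F equals (64 - count) % 64,
 * so both shifts stay in [0, 63] even when count == 0. */
static uint64_t rotr64(uint64_t value, unsigned count)
{
    count &= 0x3F;
    return (value >> count) | (value << ((0U - count) & 0x3F));
}

int main(void)
{
    assert(rotr64(0x1ULL, 1) == 0x8000000000000000ULL);
    assert(rotr64(0xABCDULL, 0) == 0xABCDULL);             /* count 0 is a no-op */
    assert(rotr64(rotr64(0x123456789ABCDEF0ULL, 13), 51)
           == 0x123456789ABCDEF0ULL);                      /* 13 + 51 == 64 */
    printf("rotr64 ok\n");
    return 0;
}
```

Modern compilers recognize this shape and emit a single rotate instruction, which is presumably what the "fickle pattern recognition" comments in the patch are guarding.
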
+ * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts */ -FORCE_INLINE_TEMPLATE ZSTD_VecMask -ZSTD_VecMask_rotateRight(ZSTD_VecMask mask, U32 const rotation, U32 const totalBits) { - if (rotation == 0) - return mask; - switch (totalBits) { - default: - assert(0); - case 16: - return (mask >> rotation) | (U16)(mask << (16 - rotation)); - case 32: - return (mask >> rotation) | (U32)(mask << (32 - rotation)); - } +FORCE_INLINE_TEMPLATE +U64 ZSTD_rotateRight_U64(U64 const value, U32 count) { + assert(count < 64); + count &= 0x3F; /* for fickle pattern recognition */ + return (value >> count) | (U64)(value << ((0U - count) & 0x3F)); +} + +FORCE_INLINE_TEMPLATE +U32 ZSTD_rotateRight_U32(U32 const value, U32 count) { + assert(count < 32); + count &= 0x1F; /* for fickle pattern recognition */ + return (value >> count) | (U32)(value << ((0U - count) & 0x1F)); +} + +FORCE_INLINE_TEMPLATE +U16 ZSTD_rotateRight_U16(U16 const value, U32 count) { + assert(count < 16); + count &= 0x0F; /* for fickle pattern recognition */ + return (value >> count) | (U16)(value << ((0U - count) & 0x0F)); } /* ZSTD_row_nextIndex(): @@ -1126,20 +851,24 @@ MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) { */ FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, U16 const* tagTable, U32 const relRow, U32 const rowLog) { PREFETCH_L1(hashTable + relRow); - if (rowLog == 5) { + if (rowLog >= 5) { PREFETCH_L1(hashTable + relRow + 16); + /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */ } PREFETCH_L1(tagTable + relRow); - assert(rowLog == 4 || rowLog == 5); + if (rowLog == 6) { + PREFETCH_L1(tagTable + relRow + 32); + } + assert(rowLog == 4 || rowLog == 5 || rowLog == 6); assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */ - assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on a multiple of 32 or 64 bytes */ + assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */ } /* ZSTD_row_fillHashCache(): * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries, * but not beyond iLimit. */ -static void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base, +FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base, U32 const rowLog, U32 const mls, U32 idx, const BYTE* const iLimit) { @@ -1179,35 +908,65 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTab } } -/* ZSTD_row_update_internal(): - * Inserts the byte at ip into the appropriate position in the hash table. - * Determines the relative row, and the position within the {16, 32} entry row to insert at. +/* ZSTD_row_update_internalImpl(): + * Updates the hash table with positions starting from updateStartIdx until updateEndIdx. 
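
For context on what `ZSTD_row_update_internalImpl()` is filling: each row behaves as a small circular buffer whose head moves as entries are inserted, via the `ZSTD_row_nextIndex()` call used below. A toy standalone model of one row (the struct layout, the backward-moving head, and the demo data are illustrative assumptions, not the real table layout):

```c
#include <stdint.h>
#include <stdio.h>

#define ROW_ENTRIES 16u  /* one hash row; zstd uses 16/32/64 depending on rowLog */

typedef struct {
    uint8_t  head;                 /* index of the most recently written slot */
    uint8_t  tags[ROW_ENTRIES];    /* 1-byte tag per entry */
    uint32_t idx[ROW_ENTRIES];     /* position associated with each tag */
} Row;

/* Insert the newest entry by moving the head backwards (mod row size):
 * the slot just before the head is the oldest and gets overwritten. */
static void row_insert(Row* r, uint8_t tag, uint32_t position)
{
    r->head = (uint8_t)((r->head - 1) & (ROW_ENTRIES - 1));
    r->tags[r->head] = tag;
    r->idx[r->head]  = position;
}

int main(void)
{
    Row r = {0};
    uint32_t p;
    for (p = 1; p <= 20; p++)
        row_insert(&r, (uint8_t)(p * 37), p);  /* 20 inserts, 16 slots: 4 oldest evicted */
    printf("head=%u newest=%u\n", r.head, r.idx[r.head]);  /* newest == 20 */
    return 0;
}
```
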
*/ -FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip, - U32 const mls, U32 const rowLog, - U32 const rowMask, U32 const useCache) +FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms, + U32 updateStartIdx, U32 const updateEndIdx, + U32 const mls, U32 const rowLog, + U32 const rowMask, U32 const useCache) { U32* const hashTable = ms->hashTable; U16* const tagTable = ms->tagTable; U32 const hashLog = ms->rowHashLog; const BYTE* const base = ms->window.base; - const U32 target = (U32)(ip - base); - U32 idx = ms->nextToUpdate; - DEBUGLOG(6, "ZSTD_row_update_internal(): nextToUpdate=%u, current=%u", idx, target); - for (; idx < target; ++idx) { - U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, idx, hashLog, rowLog, mls) - : (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls); + DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx); + for (; updateStartIdx < updateEndIdx; ++updateStartIdx) { + U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls) + : (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls); U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog; U32* const row = hashTable + relRow; BYTE* tagRow = (BYTE*)(tagTable + relRow); /* Though tagTable is laid out as a table of U16, each tag is only 1 byte. Explicit cast allows us to get exact desired position within each row */ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask); - assert(hash == ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls)); + assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls)); ((BYTE*)tagRow)[pos + ZSTD_ROW_HASH_TAG_OFFSET] = hash & ZSTD_ROW_HASH_TAG_MASK; - row[pos] = idx; + row[pos] = updateStartIdx; } +} + +/* ZSTD_row_update_internal(): + * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate. + * Skips sections of long matches as is necessary. + */ +FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip, + U32 const mls, U32 const rowLog, + U32 const rowMask, U32 const useCache) +{ + U32 idx = ms->nextToUpdate; + const BYTE* const base = ms->window.base; + const U32 target = (U32)(ip - base); + const U32 kSkipThreshold = 384; + const U32 kMaxMatchStartPositionsToUpdate = 96; + const U32 kMaxMatchEndPositionsToUpdate = 32; + + if (useCache) { + /* Only skip positions when using hash cache, i.e. + * if we are loading a dict, don't skip anything. + * If we decide to skip, then we only update a set number + * of positions at the beginning and end of the match. + */ + if (UNLIKELY(target - idx > kSkipThreshold)) { + U32 const bound = idx + kMaxMatchStartPositionsToUpdate; + ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache); + idx = target - kMaxMatchEndPositionsToUpdate; + ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1); + } + } + assert(target >= idx); + ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache); ms->nextToUpdate = target; } @@ -1216,7 +975,7 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const * processing. */ void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) { - const U32 rowLog = ms->cParams.searchLog < 5 ? 
4 : 5; + const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6); const U32 rowMask = (1u << rowLog) - 1; const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */); @@ -1224,26 +983,131 @@ void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) { ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* dont use cache */); } +#if defined(ZSTD_ARCH_X86_SSE2) +FORCE_INLINE_TEMPLATE ZSTD_VecMask +ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head) +{ + const __m128i comparisonMask = _mm_set1_epi8((char)tag); + int matches[4] = {0}; + int i; + assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4); + for (i=0; i<nbChunks; i++) { + const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i)); + const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask); + matches[i] = _mm_movemask_epi8(equalMask); + } + if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head); + if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head); + assert(nbChunks == 4); + return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head); +} +#endif + /* Returns a ZSTD_VecMask (U32) that has the nth bit set to 1 if the newly-computed "tag" matches * the hash at the nth position in a row of the tagTable. - */ -FORCE_INLINE_TEMPLATE -ZSTD_VecMask ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries) { - ZSTD_VecMask matches = 0; - if (rowEntries == 16) { - ZSTD_Vec128 hashes = ZSTD_Vec128_read(tagRow + ZSTD_ROW_HASH_TAG_OFFSET); - ZSTD_Vec128 expandedTags = ZSTD_Vec128_set8(tag); - matches = ZSTD_Vec128_cmpMask8(hashes, expandedTags); - } else if (rowEntries == 32) { - ZSTD_Vec256 hashes = ZSTD_Vec256_read(tagRow + ZSTD_ROW_HASH_TAG_OFFSET); - ZSTD_Vec256 expandedTags = ZSTD_Vec256_set8(tag); - matches = ZSTD_Vec256_cmpMask8(hashes, expandedTags); - } else { - assert(0); + * Each row is a circular buffer beginning at the value of "head". 
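
The core of `ZSTD_row_getSSEMask()` above is the classic compare-and-movemask trick: one output bit per byte lane, set where the lane equals the tag. A self-contained sketch of that building block (SSE2 only; the function name and demo data are illustrative):

```c
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t byte_eq_mask16(const uint8_t* src, uint8_t tag)
{
    const __m128i splat = _mm_set1_epi8((char)tag);            /* tag in all 16 lanes */
    const __m128i chunk = _mm_loadu_si128((const __m128i*)src);
    const __m128i eq    = _mm_cmpeq_epi8(chunk, splat);        /* 0xFF where equal */
    return (uint16_t)_mm_movemask_epi8(eq);                    /* MSB of lane n -> bit n */
}

int main(void)
{
    uint8_t row[16];
    memset(row, 0x11, sizeof(row));
    row[0] = row[5] = row[15] = 0xAB;
    printf("mask=0x%04x\n", byte_eq_mask16(row, 0xAB));  /* bits 0, 5, 15 -> 0x8021 */
    return 0;
}
```
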
So we must rotate the "matches" bitfield + * to match up with the actual layout of the entries within the hashTable */ +FORCE_INLINE_TEMPLATE ZSTD_VecMask +ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 head, const U32 rowEntries) +{ + const BYTE* const src = tagRow + ZSTD_ROW_HASH_TAG_OFFSET; + assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64); + assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES); + +#if defined(ZSTD_ARCH_X86_SSE2) + + return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, head); + +#else /* SW or NEON-LE */ + +# if defined(ZSTD_ARCH_ARM_NEON) + /* This NEON path only works for little endian - otherwise use SWAR below */ + if (MEM_isLittleEndian()) { + if (rowEntries == 16) { + const uint8x16_t chunk = vld1q_u8(src); + const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag))); + const uint16x8_t t0 = vshlq_n_u16(equalMask, 7); + const uint32x4_t t1 = vreinterpretq_u32_u16(vsriq_n_u16(t0, t0, 14)); + const uint64x2_t t2 = vreinterpretq_u64_u32(vshrq_n_u32(t1, 14)); + const uint8x16_t t3 = vreinterpretq_u8_u64(vsraq_n_u64(t2, t2, 28)); + const U16 hi = (U16)vgetq_lane_u8(t3, 8); + const U16 lo = (U16)vgetq_lane_u8(t3, 0); + return ZSTD_rotateRight_U16((hi << 8) | lo, head); + } else if (rowEntries == 32) { + const uint16x8x2_t chunk = vld2q_u16((const U16*)(const void*)src); + const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]); + const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]); + const uint8x16_t equalMask0 = vceqq_u8(chunk0, vdupq_n_u8(tag)); + const uint8x16_t equalMask1 = vceqq_u8(chunk1, vdupq_n_u8(tag)); + const int8x8_t pack0 = vqmovn_s16(vreinterpretq_s16_u8(equalMask0)); + const int8x8_t pack1 = vqmovn_s16(vreinterpretq_s16_u8(equalMask1)); + const uint8x8_t t0 = vreinterpret_u8_s8(pack0); + const uint8x8_t t1 = vreinterpret_u8_s8(pack1); + const uint8x8_t t2 = vsri_n_u8(t1, t0, 2); + const uint8x8x2_t t3 = vuzp_u8(t2, t0); + const uint8x8_t t4 = vsri_n_u8(t3.val[1], t3.val[0], 4); + const U32 matches = vget_lane_u32(vreinterpret_u32_u8(t4), 0); + return ZSTD_rotateRight_U32(matches, head); + } else { /* rowEntries == 64 */ + const uint8x16x4_t chunk = vld4q_u8(src); + const uint8x16_t dup = vdupq_n_u8(tag); + const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup); + const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup); + const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup); + const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup); + + const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1); + const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1); + const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2); + const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4); + const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4); + const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0); + return ZSTD_rotateRight_U64(matches, head); + } } - /* Each row is a circular buffer beginning at the value of "head". 
So we must rotate the "matches" bitfield - to match up with the actual layout of the entries within the hashTable */ - return ZSTD_VecMask_rotateRight(matches, head, rowEntries); +# endif /* ZSTD_ARCH_ARM_NEON */ + /* SWAR */ + { const size_t chunkSize = sizeof(size_t); + const size_t shiftAmount = ((chunkSize * 8) - chunkSize); + const size_t xFF = ~((size_t)0); + const size_t x01 = xFF / 0xFF; + const size_t x80 = x01 << 7; + const size_t splatChar = tag * x01; + ZSTD_VecMask matches = 0; + int i = rowEntries - chunkSize; + assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8)); + if (MEM_isLittleEndian()) { /* runtime check so have two loops */ + const size_t extractMagic = (xFF / 0x7F) >> chunkSize; + do { + size_t chunk = MEM_readST(&src[i]); + chunk ^= splatChar; + chunk = (((chunk | x80) - x01) | chunk) & x80; + matches <<= chunkSize; + matches |= (chunk * extractMagic) >> shiftAmount; + i -= chunkSize; + } while (i >= 0); + } else { /* big endian: reverse bits during extraction */ + const size_t msb = xFF ^ (xFF >> 1); + const size_t extractMagic = (msb / 0x1FF) | msb; + do { + size_t chunk = MEM_readST(&src[i]); + chunk ^= splatChar; + chunk = (((chunk | x80) - x01) | chunk) & x80; + matches <<= chunkSize; + matches |= ((chunk >> 7) * extractMagic) >> shiftAmount; + i -= chunkSize; + } while (i >= 0); + } + matches = ~matches; + if (rowEntries == 16) { + return ZSTD_rotateRight_U16((U16)matches, head); + } else if (rowEntries == 32) { + return ZSTD_rotateRight_U32((U32)matches, head); + } else { + return ZSTD_rotateRight_U64((U64)matches, head); + } + } +#endif } /* The high-level approach of the SIMD row based match finder is as follows: @@ -1262,7 +1126,7 @@ ZSTD_VecMask ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, con * - Pick the longest match. */ FORCE_INLINE_TEMPLATE -size_t ZSTD_RowFindBestMatch_generic ( +size_t ZSTD_RowFindBestMatch( ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iLimit, size_t* offsetPtr, @@ -1293,11 +1157,13 @@ size_t ZSTD_RowFindBestMatch_generic ( /* DMS/DDS variables that may be referenced laster */ const ZSTD_matchState_t* const dms = ms->dictMatchState; - size_t ddsIdx; - U32 ddsExtraAttempts; /* cctx hash tables are limited in searches, but allow extra searches into DDS */ - U32 dmsTag; - U32* dmsRow; - BYTE* dmsTagRow; + + /* Initialize the following variables to satisfy static analyzer */ + size_t ddsIdx = 0; + U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */ + U32 dmsTag = 0; + U32* dmsRow = NULL; + BYTE* dmsTagRow = NULL; if (dictMode == ZSTD_dedicatedDictSearch) { const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG; @@ -1329,7 +1195,7 @@ size_t ZSTD_RowFindBestMatch_generic ( U32* const row = hashTable + relRow; BYTE* tagRow = (BYTE*)(tagTable + relRow); U32 const head = *tagRow & rowMask; - U32 matchBuffer[32 /* maximum nb entries per row */]; + U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; size_t numMatches = 0; size_t currMatch = 0; ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, head, rowEntries); @@ -1379,12 +1245,13 @@ size_t ZSTD_RowFindBestMatch_generic ( /* Save best solution */ if (currentMl > ml) { ml = currentMl; - *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE; + *offsetPtr = STORE_OFFSET(curr - matchIndex); if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */ } } } + assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. 
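
The SWAR fallback in the block above deserves a gloss: after XORing the splatted tag into a word, equal bytes become zero, and `(((chunk | x80) - x01) | chunk) & x80` leaves 0x80 exactly in the nonzero bytes, which is why the code finishes with `matches = ~matches`. A standalone sketch using the same bit trick, with a simple loop in place of the multiply-based flag compaction (names and demo data are illustrative):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned byte_eq_mask8(const uint8_t src[8], uint8_t tag)
{
    const uint64_t x01 = 0x0101010101010101ULL;
    const uint64_t x80 = 0x8080808080808080ULL;
    uint64_t chunk;
    unsigned mask = 0, i;
    memcpy(&chunk, src, 8);
    chunk ^= (uint64_t)tag * x01;                        /* bytes equal to tag become 0x00 */
    chunk = ~((((chunk | x80) - x01) | chunk) & x80);    /* now bit 7 is set exactly in those bytes */
    for (i = 0; i < 8; i++)                              /* compact one flag bit per byte */
        mask |= (unsigned)((chunk >> (8*i + 7)) & 1) << i;
    return mask;
}

int main(void)
{
    const uint8_t row[8] = { 0x42, 0x00, 0x42, 0x99, 0x42, 0x42, 0x01, 0x42 };
    printf("mask=0x%02x\n", byte_eq_mask8(row, 0x42));  /* bytes 0,2,4,5,7 -> 0xB5 */
    return 0;
}
```
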
*/ if (dictMode == ZSTD_dedicatedDictSearch) { ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms, ip, iLimit, prefixStart, curr, dictLimit, ddsIdx); @@ -1397,7 +1264,7 @@ size_t ZSTD_RowFindBestMatch_generic ( const U32 dmsIndexDelta = dictLimit - dmsSize; { U32 const head = *dmsTagRow & rowMask; - U32 matchBuffer[32 /* maximum nb row entries */]; + U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES]; size_t numMatches = 0; size_t currMatch = 0; ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, head, rowEntries); @@ -1426,7 +1293,8 @@ size_t ZSTD_RowFindBestMatch_generic ( if (currentMl > ml) { ml = currentMl; - *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE; + assert(curr > matchIndex + dmsIndexDelta); + *offsetPtr = STORE_OFFSET(curr - (matchIndex + dmsIndexDelta)); if (ip+currentMl == iLimit) break; } } @@ -1435,84 +1303,175 @@ size_t ZSTD_RowFindBestMatch_generic ( return ml; } -/* Inlining is important to hardwire a hot branch (template emulation) */ -FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_selectMLS ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - const ZSTD_dictMode_e dictMode, size_t* offsetPtr, const U32 rowLog) -{ - switch(ms->cParams.minMatch) - { - default : /* includes case 3 */ - case 4 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, dictMode, rowLog); - case 5 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, dictMode, rowLog); - case 7 : - case 6 : return ZSTD_RowFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, dictMode, rowLog); - } -} -FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_selectRowLog ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5); - switch(cappedSearchLog) - { - default : - case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_noDict, offsetPtr, 4); - case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_noDict, offsetPtr, 5); +typedef size_t (*searchMax_f)( + ZSTD_matchState_t* ms, + const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); + +/** + * This struct contains the functions necessary for lazy to search. + * Currently, that is only searchMax. However, it is still valuable to have the + * VTable because this makes it easier to add more functions to the VTable later. + * + * TODO: The start of the search function involves loading and calculating a + * bunch of constants from the ZSTD_matchState_t. These computations could be + * done in an initialization function, and saved somewhere in the match state. + * Then we could pass a pointer to the saved state instead of the match state, + * and avoid duplicate computations. + * + * TODO: Move the match re-winding into searchMax. This improves compression + * ratio, and unlocks further simplifications with the next TODO. + * + * TODO: Try moving the repcode search into searchMax. After the re-winding + * and repcode search are in searchMax, there is no more logic in the match + * finder loop that requires knowledge about the dictMode. So we should be + * able to avoid force inlining it, and we can join the extDict loop with + * the single segment loop. It should go in searchMax instead of its own + * function to avoid having multiple virtual function calls per search. 
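
The VTable machinery that follows is template emulation in C: macros stamp out one specialized search function per compile-time parameter combination, and a table of function pointers picks the right one once, outside the hot loop. A toy version of the same pattern (all names illustrative):

```c
#include <stdio.h>

typedef int (*search_fn)(int pos);
typedef struct { search_fn searchMax; } VTable;

#define GEN_SEARCH(mls)                                          \
    static int search_##mls(int pos) { return pos * (mls); }    \
    static const VTable vtable_##mls = { search_##mls };

GEN_SEARCH(4)
GEN_SEARCH(5)
GEN_SEARCH(6)

static const VTable* select_vtable(int mls)
{
    static const VTable* const table[3] = { &vtable_4, &vtable_5, &vtable_6 };
    return table[mls - 4];   /* caller guarantees 4 <= mls <= 6, as zstd clamps */
}

int main(void)
{
    const VTable* vt = select_vtable(5);
    printf("%d\n", vt->searchMax(10));  /* 50 */
    return 0;
}
```

The trade-off the TODOs above discuss is exactly this: each extra vtable dimension multiplies the number of generated functions, but keeps every per-position branch out of the search loop.
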
+ */ +typedef struct { + searchMax_f searchMax; +} ZSTD_LazyVTable; + +#define GEN_ZSTD_BT_VTABLE(dictMode, mls) \ + static size_t ZSTD_BtFindBestMatch_##dictMode##_##mls( \ + ZSTD_matchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offsetPtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ + } \ + static const ZSTD_LazyVTable ZSTD_BtVTable_##dictMode##_##mls = { \ + ZSTD_BtFindBestMatch_##dictMode##_##mls \ + }; + +#define GEN_ZSTD_HC_VTABLE(dictMode, mls) \ + static size_t ZSTD_HcFindBestMatch_##dictMode##_##mls( \ + ZSTD_matchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offsetPtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \ + } \ + static const ZSTD_LazyVTable ZSTD_HcVTable_##dictMode##_##mls = { \ + ZSTD_HcFindBestMatch_##dictMode##_##mls \ + }; + +#define GEN_ZSTD_ROW_VTABLE(dictMode, mls, rowLog) \ + static size_t ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog( \ + ZSTD_matchState_t* ms, \ + const BYTE* ip, const BYTE* const iLimit, \ + size_t* offsetPtr) \ + { \ + assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \ + assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \ + return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \ + } \ + static const ZSTD_LazyVTable ZSTD_RowVTable_##dictMode##_##mls##_##rowLog = { \ + ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog \ + }; + +#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \ + X(dictMode, mls, 4) \ + X(dictMode, mls, 5) \ + X(dictMode, mls, 6) + +#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \ + ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \ + ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \ + ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6) + +#define ZSTD_FOR_EACH_MLS(X, dictMode) \ + X(dictMode, 4) \ + X(dictMode, 5) \ + X(dictMode, 6) + +#define ZSTD_FOR_EACH_DICT_MODE(X, ...) 
\ + X(__VA_ARGS__, noDict) \ + X(__VA_ARGS__, extDict) \ + X(__VA_ARGS__, dictMatchState) \ + X(__VA_ARGS__, dedicatedDictSearch) + +/* Generate Row VTables for each combination of (dictMode, mls, rowLog) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_VTABLE) +/* Generate Binary Tree VTables for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_VTABLE) +/* Generate Hash Chain VTables for each combination of (dictMode, mls) */ +ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_VTABLE) + +#define GEN_ZSTD_BT_VTABLE_ARRAY(dictMode) \ + { \ + &ZSTD_BtVTable_##dictMode##_4, \ + &ZSTD_BtVTable_##dictMode##_5, \ + &ZSTD_BtVTable_##dictMode##_6 \ } -} -FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_dictMatchState_selectRowLog( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5); - switch(cappedSearchLog) - { - default : - case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dictMatchState, offsetPtr, 4); - case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dictMatchState, offsetPtr, 5); +#define GEN_ZSTD_HC_VTABLE_ARRAY(dictMode) \ + { \ + &ZSTD_HcVTable_##dictMode##_4, \ + &ZSTD_HcVTable_##dictMode##_5, \ + &ZSTD_HcVTable_##dictMode##_6 \ } -} -FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_dedicatedDictSearch_selectRowLog( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5); - switch(cappedSearchLog) - { - default : - case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dedicatedDictSearch, offsetPtr, 4); - case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_dedicatedDictSearch, offsetPtr, 5); +#define GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, mls) \ + { \ + &ZSTD_RowVTable_##dictMode##_##mls##_4, \ + &ZSTD_RowVTable_##dictMode##_##mls##_5, \ + &ZSTD_RowVTable_##dictMode##_##mls##_6 \ } -} -FORCE_INLINE_TEMPLATE size_t ZSTD_RowFindBestMatch_extDict_selectRowLog ( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* const iLimit, - size_t* offsetPtr) -{ - const U32 cappedSearchLog = MIN(ms->cParams.searchLog, 5); - switch(cappedSearchLog) - { - default : - case 4 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_extDict, offsetPtr, 4); - case 5 : return ZSTD_RowFindBestMatch_selectMLS(ms, ip, iLimit, ZSTD_extDict, offsetPtr, 5); +#define GEN_ZSTD_ROW_VTABLE_ARRAY(dictMode) \ + { \ + GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 4), \ + GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 5), \ + GEN_ZSTD_ROW_VTABLE_ARRAY_(dictMode, 6) \ } -} +#define GEN_ZSTD_VTABLE_ARRAY(X) \ + { \ + X(noDict), \ + X(extDict), \ + X(dictMatchState), \ + X(dedicatedDictSearch) \ + } /* ******************************* * Common parser - lazy strategy *********************************/ typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e; +/** + * This table is indexed first by the four ZSTD_dictMode_e values, and then + * by the two searchMethod_e values. NULLs are placed for configurations + * that should never occur (extDict modes go to the other implementation + * below and there is no DDSS for binary tree search yet). 
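
`ZSTD_selectLazyVTable()` below can index fixed-size arrays only because both `mls` and `rowLog` are first clamped into [4, 6]; for the row matchfinder those row logs correspond to 16-, 32-, and 64-entry rows. A small model of the clamping (the helper name is illustrative; zstd itself uses its `BOUNDED()` macro, as seen earlier in this hunk):

```c
#include <stdio.h>

static unsigned bounded(unsigned lo, unsigned v, unsigned hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
    unsigned searchLog;
    for (searchLog = 1; searchLog <= 8; searchLog++) {
        unsigned const rowLog = bounded(4, searchLog, 6);
        printf("searchLog=%u -> rowLog=%u -> %u entries/row\n",
               searchLog, rowLog, 1u << rowLog);
    }
    return 0;
}
```
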
+ */ + +static ZSTD_LazyVTable const* +ZSTD_selectLazyVTable(ZSTD_matchState_t const* ms, searchMethod_e searchMethod, ZSTD_dictMode_e dictMode) +{ + /* Fill the Hc/Bt VTable arrays with the right functions for the (dictMode, mls) combination. */ + ZSTD_LazyVTable const* const hcVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_HC_VTABLE_ARRAY); + ZSTD_LazyVTable const* const btVTables[4][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_BT_VTABLE_ARRAY); + /* Fill the Row VTable array with the right functions for the (dictMode, mls, rowLog) combination. */ + ZSTD_LazyVTable const* const rowVTables[4][3][3] = GEN_ZSTD_VTABLE_ARRAY(GEN_ZSTD_ROW_VTABLE_ARRAY); + + U32 const mls = MAX(4, MIN(6, ms->cParams.minMatch)); + U32 const rowLog = MAX(4, MIN(6, ms->cParams.searchLog)); + switch (searchMethod) { + case search_hashChain: + return hcVTables[dictMode][mls - 4]; + case search_binaryTree: + return btVTables[dictMode][mls - 4]; + case search_rowHash: + return rowVTables[dictMode][mls - 4][rowLog - 4]; + default: + return NULL; + } +} + FORCE_INLINE_TEMPLATE size_t ZSTD_compressBlock_lazy_generic( ZSTD_matchState_t* ms, seqStore_t* seqStore, @@ -1525,46 +1484,12 @@ ZSTD_compressBlock_lazy_generic( const BYTE* ip = istart; const BYTE* anchor = istart; const BYTE* const iend = istart + srcSize; - const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8; + const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8; const BYTE* const base = ms->window.base; const U32 prefixLowestIndex = ms->window.dictLimit; const BYTE* const prefixLowest = base + prefixLowestIndex; - const U32 rowLog = ms->cParams.searchLog < 5 ? 4 : 5; - typedef size_t (*searchMax_f)( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); - - /** - * This table is indexed first by the four ZSTD_dictMode_e values, and then - * by the two searchMethod_e values. NULLs are placed for configurations - * that should never occur (extDict modes go to the other implementation - * below and there is no DDSS for binary tree search yet). 
- */ - const searchMax_f searchFuncs[4][3] = { - { - ZSTD_HcFindBestMatch_selectMLS, - ZSTD_BtFindBestMatch_selectMLS, - ZSTD_RowFindBestMatch_selectRowLog - }, - { - NULL, - NULL, - NULL - }, - { - ZSTD_HcFindBestMatch_dictMatchState_selectMLS, - ZSTD_BtFindBestMatch_dictMatchState_selectMLS, - ZSTD_RowFindBestMatch_dictMatchState_selectRowLog - }, - { - ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS, - NULL, - ZSTD_RowFindBestMatch_dedicatedDictSearch_selectRowLog - } - }; - - searchMax_f const searchMax = searchFuncs[dictMode][(int)searchMethod]; + searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, dictMode)->searchMax; U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0; const int isDMS = dictMode == ZSTD_dictMatchState; @@ -1599,6 +1524,7 @@ ZSTD_compressBlock_lazy_generic( } if (searchMethod == search_rowHash) { + const U32 rowLog = MAX(4, MIN(6, ms->cParams.searchLog)); ZSTD_row_fillHashCache(ms, base, rowLog, MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */), ms->nextToUpdate, ilimit); @@ -1613,8 +1539,9 @@ ZSTD_compressBlock_lazy_generic( #endif while (ip < ilimit) { size_t matchLength=0; - size_t offset=0; + size_t offcode=STORE_REPCODE_1; const BYTE* start=ip+1; + DEBUGLOG(7, "search baseline (depth 0)"); /* check repCode */ if (isDxS) { @@ -1640,7 +1567,7 @@ ZSTD_compressBlock_lazy_generic( { size_t offsetFound = 999999999; size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); if (ml2 > matchLength) - matchLength = ml2, start = ip, offset=offsetFound; + matchLength = ml2, start = ip, offcode=offsetFound; } if (matchLength < 4) { @@ -1651,14 +1578,15 @@ ZSTD_compressBlock_lazy_generic( /* let's try to find a better solution */ if (depth>=1) while (ip<ilimit) { + DEBUGLOG(7, "search depth 1"); ip ++; if ( (dictMode == ZSTD_noDict) - && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offset = 0, start = ip; + matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; } if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; @@ -1670,30 +1598,31 @@ ZSTD_compressBlock_lazy_generic( const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? 
dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offset = 0, start = ip; + matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; } } { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; + matchLength = ml2, offcode = offset2, start = ip; continue; /* search a better one */ } } /* let's find an even better one */ if ((depth==2) && (ip<ilimit)) { + DEBUGLOG(7, "search depth 2"); ip ++; if ( (dictMode == ZSTD_noDict) - && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + && (offcode) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4; int const gain2 = (int)(mlRep * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offset = 0, start = ip; + matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; } if (isDxS) { const U32 repIndex = (U32)(ip - base) - offset_1; @@ -1705,46 +1634,45 @@ ZSTD_compressBlock_lazy_generic( const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend; size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4; int const gain2 = (int)(mlRep * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); if ((mlRep >= 4) && (gain2 > gain1)) - matchLength = mlRep, offset = 0, start = ip; + matchLength = mlRep, offcode = STORE_REPCODE_1, start = ip; } } { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; + matchLength = ml2, offcode = offset2, start = ip; continue; } } } break; /* nothing found : store previous solution */ } /* NOTE: - * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior. - * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which - * overflows the pointer, which is undefined behavior. + * Pay attention that `start[-value]` can lead to strange undefined behavior + * notably if `value` is unsigned, resulting in a large positive `-value`. 
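
A note on the `offcode` values flowing through this function since the patch: per the comment in `zstd_opt.c` further down, the stored scale puts repcodes 1-3 at 0-2 and a real offset `o` at `o + 2`, with `STORED_TO_OFFBASE()` adding 1 on top. The actual `STORE_OFFSET()` and `STORED_OFFSET()` macros live in `zstd_compress_internal.h`, which is outside this hunk, so treat the following as a reconstructed model rather than the upstream definitions:

```c
#include <assert.h>
#include <stdio.h>

#define REP_MOVE 2u  /* ZSTD_REP_NUM - 1: number of repcodes minus one */

static unsigned store_repcode(unsigned r)     { assert(r >= 1 && r <= 3); return r - 1; }
static unsigned store_offset(unsigned o)      { assert(o > 0); return o + REP_MOVE; }
static int      stored_is_offset(unsigned s)  { return s > REP_MOVE; }
static unsigned stored_offset(unsigned s)     { assert(stored_is_offset(s)); return s - REP_MOVE; }
static unsigned stored_to_offbase(unsigned s) { return s + 1; }

int main(void)
{
    assert(store_repcode(1) == 0);                    /* repcodes occupy 0..2 */
    assert(store_offset(100) == 102);                 /* real offsets start at 3 */
    assert(stored_offset(store_offset(100)) == 100);  /* round-trips */
    printf("offbase(rep1)=%u, offbase(off100)=%u\n",
           stored_to_offbase(store_repcode(1)),       /* 1 */
           stored_to_offbase(store_offset(100)));     /* 103 */
    return 0;
}
```
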
*/ /* catch up */ - if (offset) { + if (STORED_IS_OFFSET(offcode)) { if (dictMode == ZSTD_noDict) { - while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest)) - && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */ + while ( ((start > anchor) & (start - STORED_OFFSET(offcode) > prefixLowest)) + && (start[-1] == (start-STORED_OFFSET(offcode))[-1]) ) /* only search for offset within prefix */ { start--; matchLength++; } } if (isDxS) { - U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); + U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode)); const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex; const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ } - offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode); } /* store sequence */ _storeSequence: - { size_t const litLength = start - anchor; - ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH); + { size_t const litLength = (size_t)(start - anchor); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength); anchor = ip = start + matchLength; } @@ -1760,8 +1688,8 @@ _storeSequence: && (MEM_read32(repMatch) == MEM_read32(ip)) ) { const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4; - offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); + offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength); ip += matchLength; anchor = ip; continue; @@ -1775,8 +1703,8 @@ _storeSequence: && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) { /* store sequence */ matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; - offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); + offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap repcodes */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ @@ -1955,15 +1883,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const U32 windowLog = ms->cParams.windowLog; const U32 rowLog = ms->cParams.searchLog < 5 ? 
4 : 5; - typedef size_t (*searchMax_f)( - ZSTD_matchState_t* ms, - const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr); - const searchMax_f searchFuncs[3] = { - ZSTD_HcFindBestMatch_extDict_selectMLS, - ZSTD_BtFindBestMatch_extDict_selectMLS, - ZSTD_RowFindBestMatch_extDict_selectRowLog - }; - searchMax_f searchMax = searchFuncs[(int)searchMethod]; + searchMax_f const searchMax = ZSTD_selectLazyVTable(ms, searchMethod, ZSTD_extDict)->searchMax; U32 offset_1 = rep[0], offset_2 = rep[1]; DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod); @@ -1985,7 +1905,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( #endif while (ip < ilimit) { size_t matchLength=0; - size_t offset=0; + size_t offcode=STORE_REPCODE_1; const BYTE* start=ip+1; U32 curr = (U32)(ip-base); @@ -1995,7 +1915,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */ - & (offset_1 < curr+1 - windowLow) ) /* note: we are searching at curr+1 */ + & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */ if (MEM_read32(ip+1) == MEM_read32(repMatch)) { /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; @@ -2007,10 +1927,10 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( { size_t offsetFound = 999999999; size_t const ml2 = searchMax(ms, ip, iend, &offsetFound); if (ml2 > matchLength) - matchLength = ml2, start = ip, offset=offsetFound; + matchLength = ml2, start = ip, offcode=offsetFound; } - if (matchLength < 4) { + if (matchLength < 4) { ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */ continue; } @@ -2021,30 +1941,30 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( ip ++; curr++; /* check repCode */ - if (offset) { + if (offcode) { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ - & (offset_1 < curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ + & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 3); - int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); if ((repLength >= 4) && (gain2 > gain1)) - matchLength = repLength, offset = 0, start = ip; + matchLength = repLength, offcode = STORE_REPCODE_1, start = ip; } } /* search match, depth 1 */ { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 4); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; + matchLength = ml2, offcode = offset2, start = ip; continue; /* search a better one */ } } @@ -2053,48 +1973,48 @@ size_t ZSTD_compressBlock_lazy_extDict_generic( ip ++; curr++; /* check repCode */ - if (offset) { + if (offcode) { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog); const U32 repIndex = (U32)(curr - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ - & (offset_1 < curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ + & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; int const gain2 = (int)(repLength * 4); - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 1); if ((repLength >= 4) && (gain2 > gain1)) - matchLength = repLength, offset = 0, start = ip; + matchLength = repLength, offcode = STORE_REPCODE_1, start = ip; } } /* search match, depth 2 */ { size_t offset2=999999999; size_t const ml2 = searchMax(ms, ip, iend, &offset2); - int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ - int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offset2))); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)STORED_TO_OFFBASE(offcode)) + 7); if ((ml2 >= 4) && (gain2 > gain1)) { - matchLength = ml2, offset = offset2, start = ip; + matchLength = ml2, offcode = offset2, start = ip; continue; } } } break; /* nothing found : store previous solution */ } /* catch up */ - if (offset) { - U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); + if (STORED_IS_OFFSET(offcode)) { + U32 const matchIndex = (U32)((size_t)(start-base) - STORED_OFFSET(offcode)); const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; const BYTE* const mStart = (matchIndex < dictLimit) ? 
dictStart : prefixStart; while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ - offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + offset_2 = offset_1; offset_1 = (U32)STORED_OFFSET(offcode); } /* store sequence */ _storeSequence: - { size_t const litLength = start - anchor; - ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH); + { size_t const litLength = (size_t)(start - anchor); + ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offcode, matchLength); anchor = ip = start + matchLength; } @@ -2106,13 +2026,13 @@ _storeSequence: const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */ - & (offset_2 < repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ + & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */ if (MEM_read32(ip) == MEM_read32(repMatch)) { /* repcode detected we should take it */ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4; - offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */ - ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH); + offcode = offset_2; offset_2 = offset_1; offset_1 = (U32)offcode; /* swap offset history */ + ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, matchLength); ip += matchLength; anchor = ip; continue; /* faster when present ... (?) */ diff --git a/thirdparty/zstd/compress/zstd_ldm.c b/thirdparty/zstd/compress/zstd_ldm.c index fa4ebeabd7..f662b2546e 100644 --- a/thirdparty/zstd/compress/zstd_ldm.c +++ b/thirdparty/zstd/compress/zstd_ldm.c @@ -159,12 +159,12 @@ size_t ZSTD_ldm_getTableSize(ldmParams_t params) size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog); size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize) + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t)); - return params.enableLdm ? totalSize : 0; + return params.enableLdm == ZSTD_ps_enable ? totalSize : 0; } size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize) { - return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0; + return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0; } /** ZSTD_ldm_getBucket() : @@ -478,7 +478,7 @@ static size_t ZSTD_ldm_generateSequences_internal( */ if (anchor > ip + hashed) { ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength); - /* Continue the outter loop at anchor (ip + hashed == anchor). */ + /* Continue the outer loop at anchor (ip + hashed == anchor). 
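
Also visible in this hunk: `enableLdm` is no longer a boolean but a `ZSTD_paramSwitch_e`, so it must now be compared against `ZSTD_ps_enable` explicitly. The point of the tri-state is that an "auto" setting can be resolved per context; a minimal model of the pattern (the enum constants' values and the window-size heuristic are assumptions for illustration):

```c
#include <stdio.h>

typedef enum { ps_auto = 0, ps_enable = 1, ps_disable = 2 } ParamSwitch;

static int resolve_ldm(ParamSwitch requested, unsigned windowLog)
{
    if (requested != ps_auto)
        return requested == ps_enable;
    return windowLog >= 27;   /* stand-in heuristic: large windows benefit from LDM */
}

int main(void)
{
    printf("auto/wlog24   -> %d\n", resolve_ldm(ps_auto, 24));   /* 0 */
    printf("auto/wlog27   -> %d\n", resolve_ldm(ps_auto, 27));   /* 1 */
    printf("enable/wlog24 -> %d\n", resolve_ldm(ps_enable, 24)); /* 1 */
    return 0;
}
```
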
*/ ip = anchor - hashed; break; } @@ -579,7 +579,9 @@ size_t ZSTD_ldm_generateSequences( return 0; } -void ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) { +void +ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch) +{ while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) { rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos; if (srcSize <= seq->litLength) { @@ -657,7 +659,7 @@ void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) { size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + ZSTD_paramSwitch_e useRowMatchFinder, void const* src, size_t srcSize) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -709,8 +711,8 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, rep[0] = sequence.offset; /* Store the sequence */ ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend, - sequence.offset + ZSTD_REP_MOVE, - sequence.matchLength - MINMATCH); + STORE_OFFSET(sequence.offset), + sequence.matchLength); ip += sequence.matchLength; } } diff --git a/thirdparty/zstd/compress/zstd_ldm.h b/thirdparty/zstd/compress/zstd_ldm.h index 393466fa9f..4e68dbf52e 100644 --- a/thirdparty/zstd/compress/zstd_ldm.h +++ b/thirdparty/zstd/compress/zstd_ldm.h @@ -66,7 +66,7 @@ size_t ZSTD_ldm_generateSequences( */ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore, ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], - ZSTD_useRowMatchFinderMode_e useRowMatchFinder, + ZSTD_paramSwitch_e useRowMatchFinder, void const* src, size_t srcSize); /** diff --git a/thirdparty/zstd/compress/zstd_ldm_geartab.h b/thirdparty/zstd/compress/zstd_ldm_geartab.h index e5c24d856b..647f865be2 100644 --- a/thirdparty/zstd/compress/zstd_ldm_geartab.h +++ b/thirdparty/zstd/compress/zstd_ldm_geartab.h @@ -11,7 +11,10 @@ #ifndef ZSTD_LDM_GEARTAB_H #define ZSTD_LDM_GEARTAB_H -static U64 ZSTD_ldm_gearTab[256] = { +#include "../common/compiler.h" /* UNUSED_ATTR */ +#include "../common/mem.h" /* U64 */ + +static UNUSED_ATTR const U64 ZSTD_ldm_gearTab[256] = { 0xf5b8f72c5f77775c, 0x84935f266b7ac412, 0xb647ada9ca730ccc, 0xb065bb4b114fb1de, 0x34584e7e8c3a9fd0, 0x4e97e17c6ae26b05, 0x3a03d743bc99a604, 0xcecd042422c4044f, 0x76de76c58524259e, diff --git a/thirdparty/zstd/compress/zstd_opt.c b/thirdparty/zstd/compress/zstd_opt.c index 402a7e5c76..1b1ddad428 100644 --- a/thirdparty/zstd/compress/zstd_opt.c +++ b/thirdparty/zstd/compress/zstd_opt.c @@ -14,7 +14,6 @@ #define ZSTD_LITFREQ_ADD 2 /* scaling factor for litFreq, so that frequencies adapt faster to new stats */ -#define ZSTD_FREQ_DIV 4 /* log factor when using previous stats to init next stats */ #define ZSTD_MAX_PRICE (1<<30) #define ZSTD_PREDEF_THRESHOLD 1024 /* if srcSize < ZSTD_PREDEF_THRESHOLD, symbols' cost is assumed static, directly determined by pre-defined distributions */ @@ -24,11 +23,11 @@ * Price functions for optimal parser ***************************************/ -#if 0 /* approximation at bit level */ +#if 0 /* approximation at bit level (for tests) */ # define BITCOST_ACCURACY 0 # define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY) -# define WEIGHT(stat) ((void)opt, ZSTD_bitWeight(stat)) -#elif 0 /* fractional bit accuracy */ +# define WEIGHT(stat, opt) ((void)opt, ZSTD_bitWeight(stat)) +#elif 0 /* fractional bit accuracy (for tests) */ # define BITCOST_ACCURACY 8 # define BITCOST_MULTIPLIER (1 << 
BITCOST_ACCURACY) # define WEIGHT(stat,opt) ((void)opt, ZSTD_fracWeight(stat)) @@ -66,7 +65,7 @@ MEM_STATIC double ZSTD_fCost(U32 price) static int ZSTD_compressedLiterals(optState_t const* const optPtr) { - return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed; + return optPtr->literalCompressionMode != ZSTD_ps_disable; } static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) @@ -79,25 +78,46 @@ static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel) } -/* ZSTD_downscaleStat() : - * reduce all elements in table by a factor 2^(ZSTD_FREQ_DIV+malus) - * return the resulting sum of elements */ -static U32 ZSTD_downscaleStat(unsigned* table, U32 lastEltIndex, int malus) +static U32 sum_u32(const unsigned table[], size_t nbElts) +{ + size_t n; + U32 total = 0; + for (n=0; n<nbElts; n++) { + total += table[n]; + } + return total; +} + +static U32 ZSTD_downscaleStats(unsigned* table, U32 lastEltIndex, U32 shift) { U32 s, sum=0; - DEBUGLOG(5, "ZSTD_downscaleStat (nbElts=%u)", (unsigned)lastEltIndex+1); - assert(ZSTD_FREQ_DIV+malus > 0 && ZSTD_FREQ_DIV+malus < 31); + DEBUGLOG(5, "ZSTD_downscaleStats (nbElts=%u, shift=%u)", (unsigned)lastEltIndex+1, (unsigned)shift); + assert(shift < 30); for (s=0; s<lastEltIndex+1; s++) { - table[s] = 1 + (table[s] >> (ZSTD_FREQ_DIV+malus)); + table[s] = 1 + (table[s] >> shift); sum += table[s]; } return sum; } +/* ZSTD_scaleStats() : + * reduce all elements in table is sum too large + * return the resulting sum of elements */ +static U32 ZSTD_scaleStats(unsigned* table, U32 lastEltIndex, U32 logTarget) +{ + U32 const prevsum = sum_u32(table, lastEltIndex+1); + U32 const factor = prevsum >> logTarget; + DEBUGLOG(5, "ZSTD_scaleStats (nbElts=%u, target=%u)", (unsigned)lastEltIndex+1, (unsigned)logTarget); + assert(logTarget < 30); + if (factor <= 1) return prevsum; + return ZSTD_downscaleStats(table, lastEltIndex, ZSTD_highbit32(factor)); +} + /* ZSTD_rescaleFreqs() : * if first block (detected by optPtr->litLengthSum == 0) : init statistics * take hints from dictionary if there is one - * or init from zero, using src for literals stats, or flat 1 for match symbols + * and init from zero if there is none, + * using src for literals stats, and baseline stats for sequence symbols * otherwise downscale existing stats, to be used as seed for next block. */ static void @@ -126,7 +146,7 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, optPtr->litSum = 0; for (lit=0; lit<=MaxLit; lit++) { U32 const scaleLog = 11; /* scale to 2K */ - U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit); + U32 const bitCost = HUF_getNbBitsFromCTable(optPtr->symbolCosts->huf.CTable, lit); assert(bitCost <= scaleLog); optPtr->litFreq[lit] = bitCost ? 
1 << (scaleLog-bitCost) : 1 /*minimum to calculate cost*/; optPtr->litSum += optPtr->litFreq[lit]; @@ -174,14 +194,19 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, if (compressedLiterals) { unsigned lit = MaxLit; HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */ - optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); + optPtr->litSum = ZSTD_downscaleStats(optPtr->litFreq, MaxLit, 8); } - { unsigned ll; - for (ll=0; ll<=MaxLL; ll++) - optPtr->litLengthFreq[ll] = 1; + { unsigned const baseLLfreqs[MaxLL+1] = { + 4, 2, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1 + }; + ZSTD_memcpy(optPtr->litLengthFreq, baseLLfreqs, sizeof(baseLLfreqs)); + optPtr->litLengthSum = sum_u32(baseLLfreqs, MaxLL+1); } - optPtr->litLengthSum = MaxLL+1; { unsigned ml; for (ml=0; ml<=MaxML; ml++) @@ -189,21 +214,26 @@ ZSTD_rescaleFreqs(optState_t* const optPtr, } optPtr->matchLengthSum = MaxML+1; - { unsigned of; - for (of=0; of<=MaxOff; of++) - optPtr->offCodeFreq[of] = 1; + { unsigned const baseOFCfreqs[MaxOff+1] = { + 6, 2, 1, 1, 2, 3, 4, 4, + 4, 3, 2, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1 + }; + ZSTD_memcpy(optPtr->offCodeFreq, baseOFCfreqs, sizeof(baseOFCfreqs)); + optPtr->offCodeSum = sum_u32(baseOFCfreqs, MaxOff+1); } - optPtr->offCodeSum = MaxOff+1; + } } else { /* new block : re-use previous statistics, scaled down */ if (compressedLiterals) - optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1); - optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0); - optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0); - optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0); + optPtr->litSum = ZSTD_scaleStats(optPtr->litFreq, MaxLit, 12); + optPtr->litLengthSum = ZSTD_scaleStats(optPtr->litLengthFreq, MaxLL, 11); + optPtr->matchLengthSum = ZSTD_scaleStats(optPtr->matchLengthFreq, MaxML, 11); + optPtr->offCodeSum = ZSTD_scaleStats(optPtr->offCodeFreq, MaxOff, 11); } ZSTD_setBasePrices(optPtr, optLevel); @@ -239,7 +269,16 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength, * cost of literalLength symbol */ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optPtr, int optLevel) { - if (optPtr->priceType == zop_predef) return WEIGHT(litLength, optLevel); + assert(litLength <= ZSTD_BLOCKSIZE_MAX); + if (optPtr->priceType == zop_predef) + return WEIGHT(litLength, optLevel); + /* We can't compute the litLength price for sizes >= ZSTD_BLOCKSIZE_MAX + * because it isn't representable in the zstd format. So instead just + * call it 1 bit more than ZSTD_BLOCKSIZE_MAX - 1. In this case the block + * would be all literals. + */ + if (litLength == ZSTD_BLOCKSIZE_MAX) + return BITCOST_MULTIPLIER + ZSTD_litLengthPrice(ZSTD_BLOCKSIZE_MAX - 1, optPtr, optLevel); /* dynamic statistics */ { U32 const llCode = ZSTD_LLcode(litLength); @@ -252,15 +291,17 @@ static U32 ZSTD_litLengthPrice(U32 const litLength, const optState_t* const optP /* ZSTD_getMatchPrice() : * Provides the cost of the match part (offset + matchLength) of a sequence * Must be combined with ZSTD_fullLiteralsCost() to get the full cost of a sequence. 
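
The pricing around `WEIGHT()` works in fixed-point fractions of a bit (a `BITCOST_ACCURACY` of 8 means 1/256-bit units): a symbol's cost is roughly `log2(total) - log2(stat)`, with the log split into an integer part from `highbit32` and a linear fractional correction. A self-contained sketch of that computation, close in spirit to `ZSTD_fracWeight` but not the verbatim upstream code:

```c
#include <stdio.h>

#define BITCOST_ACCURACY   8
#define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)

static unsigned highbit32(unsigned v) { unsigned r = 0; while (v >>= 1) r++; return r; }

/* weight(stat) ~ log2(stat) in fixed point; price = weight(total) - weight(stat) */
static unsigned fracWeight(unsigned rawStat)
{
    unsigned const stat     = rawStat + 1;                        /* avoid log2(0) */
    unsigned const hb       = highbit32(stat);
    unsigned const intBits  = hb * BITCOST_MULTIPLIER;
    unsigned const fracBits = (stat << BITCOST_ACCURACY) >> hb;   /* in [256, 512) */
    return intBits + fracBits;
}

int main(void)
{
    /* A symbol seen 8x out of 1024 should cost about log2(1024/8) = 7 bits. */
    unsigned const price = fracWeight(1024) - fracWeight(8);
    printf("price = %u (units of 1/256 bit) ~ %.2f bits\n",
           price, (double)price / BITCOST_MULTIPLIER);
    return 0;
}
```
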
- * optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) */ + * @offcode : expects a scale where 0,1,2 are repcodes 1-3, and 3+ are real_offsets+2 + * @optLevel: when <2, favors small offset for decompression speed (improved cache efficiency) + */ FORCE_INLINE_TEMPLATE U32 -ZSTD_getMatchPrice(U32 const offset, +ZSTD_getMatchPrice(U32 const offcode, U32 const matchLength, const optState_t* const optPtr, int const optLevel) { U32 price; - U32 const offCode = ZSTD_highbit32(offset+1); + U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offcode)); U32 const mlBase = matchLength - MINMATCH; assert(matchLength >= MINMATCH); @@ -303,8 +344,8 @@ static void ZSTD_updateStats(optState_t* const optPtr, optPtr->litLengthSum++; } - /* match offset code (0-2=>repCode; 3+=>offset+2) */ - { U32 const offCode = ZSTD_highbit32(offsetCode+1); + /* offset code : expected to follow storeSeq() numeric representation */ + { U32 const offCode = ZSTD_highbit32(STORED_TO_OFFBASE(offsetCode)); assert(offCode <= MaxOff); optPtr->offCodeFreq[offCode]++; optPtr->offCodeSum++; @@ -338,7 +379,7 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length) /* Update hashTable3 up to ip (excluded) Assumption : always within prefix (i.e. not within extDict) */ -static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, +static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms, U32* nextToUpdate3, const BYTE* const ip) { @@ -364,11 +405,13 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_matchState_t* ms, * Binary Tree search ***************************************/ /** ZSTD_insertBt1() : add one or multiple positions to tree. - * ip : assumed <= iend-8 . + * @param ip assumed <= iend-8 . + * @param target The target of ZSTD_updateTree_internal() - we are filling to this position * @return : nb of positions added */ static U32 ZSTD_insertBt1( - ZSTD_matchState_t* ms, + const ZSTD_matchState_t* ms, const BYTE* const ip, const BYTE* const iend, + U32 const target, U32 const mls, const int extDict) { const ZSTD_compressionParameters* const cParams = &ms->cParams; @@ -391,7 +434,10 @@ static U32 ZSTD_insertBt1( U32* smallerPtr = bt + 2*(curr&btMask); U32* largerPtr = smallerPtr + 1; U32 dummy32; /* to be nullified at the end */ - U32 const windowLow = ms->window.lowLimit; + /* windowLow is based on target because + * we only need positions that will be in the window at the end of the tree update. 
+ */ + U32 const windowLow = ZSTD_getLowestMatchIndex(ms, target, cParams->windowLog); U32 matchEndIdx = curr+8+1; size_t bestLength = 8; U32 nbCompares = 1U << cParams->searchLog; @@ -404,11 +450,12 @@ static U32 ZSTD_insertBt1( DEBUGLOG(8, "ZSTD_insertBt1 (%u)", curr); + assert(curr <= target); assert(ip <= iend-8); /* required for h calculation */ hashTable[h] = curr; /* Update Hash Table */ assert(windowLow > 0); - while (nbCompares-- && (matchIndex >= windowLow)) { + for (; nbCompares && (matchIndex >= windowLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ assert(matchIndex < curr); @@ -492,7 +539,7 @@ void ZSTD_updateTree_internal( idx, target, dictMode); while(idx < target) { - U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, mls, dictMode == ZSTD_extDict); + U32 const forward = ZSTD_insertBt1(ms, base+idx, iend, target, mls, dictMode == ZSTD_extDict); assert(idx < (U32)(idx + forward)); idx += forward; } @@ -597,7 +644,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( DEBUGLOG(8, "found repCode %u (ll0:%u, offset:%u) of length %u", repCode, ll0, repOffset, repLen); bestLength = repLen; - matches[mnum].off = repCode - ll0; + matches[mnum].off = STORE_REPCODE(repCode - ll0 + 1); /* expect value between 1 and 3 */ matches[mnum].len = (U32)repLen; mnum++; if ( (repLen > sufficient_len) @@ -626,7 +673,7 @@ U32 ZSTD_insertBtAndGetAllMatches ( bestLength = mlen; assert(curr > matchIndex3); assert(mnum==0); /* no prior solution */ - matches[0].off = (curr - matchIndex3) + ZSTD_REP_MOVE; + matches[0].off = STORE_OFFSET(curr - matchIndex3); matches[0].len = (U32)mlen; mnum = 1; if ( (mlen > sufficient_len) | @@ -635,11 +682,11 @@ U32 ZSTD_insertBtAndGetAllMatches ( return 1; } } } /* no dictMatchState lookup: dicts don't have a populated HC3 table */ - } + } /* if (mls == 3) */ hashTable[h] = curr; /* Update Hash Table */ - while (nbCompares-- && (matchIndex >= matchLow)) { + for (; nbCompares && (matchIndex >= matchLow); --nbCompares) { U32* const nextPtr = bt + 2*(matchIndex & btMask); const BYTE* match; size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ @@ -660,20 +707,19 @@ U32 ZSTD_insertBtAndGetAllMatches ( if (matchLength > bestLength) { DEBUGLOG(8, "found match of length %u at distance %u (offCode=%u)", - (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE); + (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex)); assert(matchEndIdx > matchIndex); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; - matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE; + matches[mnum].off = STORE_OFFSET(curr - matchIndex); matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { if (dictMode == ZSTD_dictMatchState) nbCompares = 0; /* break should also skip searching dms */ break; /* drop, to preserve bt consistency (miss a little bit of compression) */ - } - } + } } if (match[matchLength] < ip[matchLength]) { /* match smaller than current */ @@ -692,12 +738,13 @@ U32 ZSTD_insertBtAndGetAllMatches ( *smallerPtr = *largerPtr = 0; + assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. 
*/ if (dictMode == ZSTD_dictMatchState && nbCompares) { size_t const dmsH = ZSTD_hashPtr(ip, dmsHashLog, mls); U32 dictMatchIndex = dms->hashTable[dmsH]; const U32* const dmsBt = dms->chainTable; commonLengthSmaller = commonLengthLarger = 0; - while (nbCompares-- && (dictMatchIndex > dmsLowLimit)) { + for (; nbCompares && (dictMatchIndex > dmsLowLimit); --nbCompares) { const U32* const nextPtr = dmsBt + 2*(dictMatchIndex & dmsBtMask); size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ const BYTE* match = dmsBase + dictMatchIndex; @@ -708,18 +755,17 @@ U32 ZSTD_insertBtAndGetAllMatches ( if (matchLength > bestLength) { matchIndex = dictMatchIndex + dmsIndexDelta; DEBUGLOG(8, "found dms match of length %u at distance %u (offCode=%u)", - (U32)matchLength, curr - matchIndex, curr - matchIndex + ZSTD_REP_MOVE); + (U32)matchLength, curr - matchIndex, STORE_OFFSET(curr - matchIndex)); if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; bestLength = matchLength; - matches[mnum].off = (curr - matchIndex) + ZSTD_REP_MOVE; + matches[mnum].off = STORE_OFFSET(curr - matchIndex); matches[mnum].len = (U32)matchLength; mnum++; if ( (matchLength > ZSTD_OPT_NUM) | (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */) { break; /* drop, to guarantee consistency (miss a little bit of compression) */ - } - } + } } if (dictMatchIndex <= dmsBtLow) { break; } /* beyond tree size, stop the search */ if (match[matchLength] < ip[matchLength]) { @@ -729,39 +775,91 @@ U32 ZSTD_insertBtAndGetAllMatches ( /* match is larger than current */ commonLengthLarger = matchLength; dictMatchIndex = nextPtr[0]; - } - } - } + } } } /* if (dictMode == ZSTD_dictMatchState) */ assert(matchEndIdx > curr+8); ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */ return mnum; } - -FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches ( - ZSTD_match_t* matches, /* store result (match found, increasing size) in this table */ - ZSTD_matchState_t* ms, - U32* nextToUpdate3, - const BYTE* ip, const BYTE* const iHighLimit, const ZSTD_dictMode_e dictMode, - const U32 rep[ZSTD_REP_NUM], - U32 const ll0, - U32 const lengthToBeat) +typedef U32 (*ZSTD_getAllMatchesFn)( + ZSTD_match_t*, + ZSTD_matchState_t*, + U32*, + const BYTE*, + const BYTE*, + const U32 rep[ZSTD_REP_NUM], + U32 const ll0, + U32 const lengthToBeat); + +FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal( + ZSTD_match_t* matches, + ZSTD_matchState_t* ms, + U32* nextToUpdate3, + const BYTE* ip, + const BYTE* const iHighLimit, + const U32 rep[ZSTD_REP_NUM], + U32 const ll0, + U32 const lengthToBeat, + const ZSTD_dictMode_e dictMode, + const U32 mls) { - const ZSTD_compressionParameters* const cParams = &ms->cParams; - U32 const matchLengthSearch = cParams->minMatch; - DEBUGLOG(8, "ZSTD_BtGetAllMatches"); - if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */ - ZSTD_updateTree_internal(ms, ip, iHighLimit, matchLengthSearch, dictMode); - switch(matchLengthSearch) - { - case 3 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 3); - default : - case 4 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 4); - case 5 : return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 5); - case 7 : - case 6 : return ZSTD_insertBtAndGetAllMatches(matches, ms, 
nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, 6); + assert(BOUNDED(3, ms->cParams.minMatch, 6) == mls); + DEBUGLOG(8, "ZSTD_BtGetAllMatches(dictMode=%d, mls=%u)", (int)dictMode, mls); + if (ip < ms->window.base + ms->nextToUpdate) + return 0; /* skipped area */ + ZSTD_updateTree_internal(ms, ip, iHighLimit, mls, dictMode); + return ZSTD_insertBtAndGetAllMatches(matches, ms, nextToUpdate3, ip, iHighLimit, dictMode, rep, ll0, lengthToBeat, mls); +} + +#define ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls) ZSTD_btGetAllMatches_##dictMode##_##mls + +#define GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, mls) \ + static U32 ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, mls)( \ + ZSTD_match_t* matches, \ + ZSTD_matchState_t* ms, \ + U32* nextToUpdate3, \ + const BYTE* ip, \ + const BYTE* const iHighLimit, \ + const U32 rep[ZSTD_REP_NUM], \ + U32 const ll0, \ + U32 const lengthToBeat) \ + { \ + return ZSTD_btGetAllMatches_internal( \ + matches, ms, nextToUpdate3, ip, iHighLimit, \ + rep, ll0, lengthToBeat, ZSTD_##dictMode, mls); \ + } + +#define GEN_ZSTD_BT_GET_ALL_MATCHES(dictMode) \ + GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 3) \ + GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 4) \ + GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 5) \ + GEN_ZSTD_BT_GET_ALL_MATCHES_(dictMode, 6) + +GEN_ZSTD_BT_GET_ALL_MATCHES(noDict) +GEN_ZSTD_BT_GET_ALL_MATCHES(extDict) +GEN_ZSTD_BT_GET_ALL_MATCHES(dictMatchState) + +#define ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMode) \ + { \ + ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 3), \ + ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 4), \ + ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 5), \ + ZSTD_BT_GET_ALL_MATCHES_FN(dictMode, 6) \ } + +static ZSTD_getAllMatchesFn +ZSTD_selectBtGetAllMatches(ZSTD_matchState_t const* ms, ZSTD_dictMode_e const dictMode) +{ + ZSTD_getAllMatchesFn const getAllMatchesFns[3][4] = { + ZSTD_BT_GET_ALL_MATCHES_ARRAY(noDict), + ZSTD_BT_GET_ALL_MATCHES_ARRAY(extDict), + ZSTD_BT_GET_ALL_MATCHES_ARRAY(dictMatchState) + }; + U32 const mls = BOUNDED(3, ms->cParams.minMatch, 6); + assert((U32)dictMode < 3); + assert(mls - 3 < 4); + return getAllMatchesFns[(int)dictMode][mls - 3]; } /************************* @@ -770,16 +868,18 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_BtGetAllMatches ( /* Struct containing info needed to make decision about ldm inclusion */ typedef struct { - rawSeqStore_t seqStore; /* External match candidates store for this block */ - U32 startPosInBlock; /* Start position of the current match candidate */ - U32 endPosInBlock; /* End position of the current match candidate */ - U32 offset; /* Offset of the match candidate */ + rawSeqStore_t seqStore; /* External match candidates store for this block */ + U32 startPosInBlock; /* Start position of the current match candidate */ + U32 endPosInBlock; /* End position of the current match candidate */ + U32 offset; /* Offset of the match candidate */ } ZSTD_optLdm_t; /* ZSTD_optLdm_skipRawSeqStoreBytes(): - * Moves forward in rawSeqStore by nbBytes, which will update the fields 'pos' and 'posInSequence'. + * Moves forward in @rawSeqStore by @nbBytes, + * which will update the fields 'pos' and 'posInSequence'. 
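 * (Worked example, editorial illustration rather than upstream text: with a
 * current sequence of {litLength == 5, matchLength == 8}, 13 bytes total,
 * and posInSequence == 3, skipping 4 bytes leaves posInSequence == 7, i.e.
 * 2 bytes into the match; skipping 12 bytes consumes the whole sequence, so
 * 'pos' advances by one and posInSequence becomes 2 in the next sequence.)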
*/ -static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) { +static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) +{ U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes); while (currPos && rawSeqStore->pos < rawSeqStore->size) { rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos]; @@ -800,8 +900,10 @@ static void ZSTD_optLdm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t * Calculates the beginning and end of the next match in the current block. * Updates 'pos' and 'posInSequence' of the ldmSeqStore. */ -static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock, - U32 blockBytesRemaining) { +static void +ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 currPosInBlock, + U32 blockBytesRemaining) +{ rawSeq currSeq; U32 currBlockEndPos; U32 literalsBytesRemaining; @@ -813,8 +915,8 @@ static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 cu optLdm->endPosInBlock = UINT_MAX; return; } - /* Calculate appropriate bytes left in matchLength and litLength after adjusting - based on ldmSeqStore->posInSequence */ + /* Calculate appropriate bytes left in matchLength and litLength + * after adjusting based on ldmSeqStore->posInSequence */ currSeq = optLdm->seqStore.seq[optLdm->seqStore.pos]; assert(optLdm->seqStore.posInSequence <= currSeq.litLength + currSeq.matchLength); currBlockEndPos = currPosInBlock + blockBytesRemaining; @@ -850,15 +952,16 @@ static void ZSTD_opt_getNextMatchAndUpdateSeqStore(ZSTD_optLdm_t* optLdm, U32 cu } /* ZSTD_optLdm_maybeAddMatch(): - * Adds a match if it's long enough, based on it's 'matchStartPosInBlock' - * and 'matchEndPosInBlock', into 'matches'. Maintains the correct ordering of 'matches' + * Adds a match if it's long enough, + * based on its 'matchStartPosInBlock' and 'matchEndPosInBlock', + * into 'matches'. Maintains the correct ordering of 'matches'. */ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, - ZSTD_optLdm_t* optLdm, U32 currPosInBlock) { - U32 posDiff = currPosInBlock - optLdm->startPosInBlock; + const ZSTD_optLdm_t* optLdm, U32 currPosInBlock) +{ + U32 const posDiff = currPosInBlock - optLdm->startPosInBlock; /* Note: ZSTD_match_t actually contains offCode and matchLength (before subtracting MINMATCH) */ - U32 candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; - U32 candidateOffCode = optLdm->offset + ZSTD_REP_MOVE; + U32 const candidateMatchLength = optLdm->endPosInBlock - optLdm->startPosInBlock - posDiff; /* Ensure that current block position is not outside of the match */ if (currPosInBlock < optLdm->startPosInBlock @@ -868,6 +971,7 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, } if (*nbMatches == 0 || ((candidateMatchLength > matches[*nbMatches-1].len) && *nbMatches < ZSTD_OPT_NUM)) { + U32 const candidateOffCode = STORE_OFFSET(optLdm->offset); DEBUGLOG(6, "ZSTD_optLdm_maybeAddMatch(): Adding ldm candidate match (offCode: %u matchLength %u) at block position=%u", candidateOffCode, candidateMatchLength, currPosInBlock); matches[*nbMatches].len = candidateMatchLength; @@ -879,8 +983,11 @@ static void ZSTD_optLdm_maybeAddMatch(ZSTD_match_t* matches, U32* nbMatches, /* ZSTD_optLdm_processMatchCandidate(): * Wrapper function to update ldm seq store and call ldm functions as necessary.
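 * In outline (editorial summary of the body below, not upstream text): if
 * the parser has moved past endPosInBlock, resynchronize by skipping the
 * overshoot via ZSTD_optLdm_skipRawSeqStoreBytes(), fetch the next
 * candidate with ZSTD_opt_getNextMatchAndUpdateSeqStore(), then offer the
 * current candidate to the parser through ZSTD_optLdm_maybeAddMatch().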
*/ -static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_t* matches, U32* nbMatches, - U32 currPosInBlock, U32 remainingBytes) { +static void +ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, + ZSTD_match_t* matches, U32* nbMatches, + U32 currPosInBlock, U32 remainingBytes) +{ if (optLdm->seqStore.size == 0 || optLdm->seqStore.pos >= optLdm->seqStore.size) { return; } @@ -891,19 +998,19 @@ static void ZSTD_optLdm_processMatchCandidate(ZSTD_optLdm_t* optLdm, ZSTD_match_ * at the end of a match from the ldm seq store, and will often be some bytes * over beyond matchEndPosInBlock. As such, we need to correct for these "overshoots" */ - U32 posOvershoot = currPosInBlock - optLdm->endPosInBlock; + U32 const posOvershoot = currPosInBlock - optLdm->endPosInBlock; ZSTD_optLdm_skipRawSeqStoreBytes(&optLdm->seqStore, posOvershoot); - } + } ZSTD_opt_getNextMatchAndUpdateSeqStore(optLdm, currPosInBlock, remainingBytes); } ZSTD_optLdm_maybeAddMatch(matches, nbMatches, optLdm, currPosInBlock); } + /*-******************************* * Optimal parser *********************************/ - static U32 ZSTD_totalLen(ZSTD_optimal_t sol) { return sol.litlen + sol.mlen; @@ -944,6 +1051,8 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, const BYTE* const prefixStart = base + ms->window.dictLimit; const ZSTD_compressionParameters* const cParams = &ms->cParams; + ZSTD_getAllMatchesFn getAllMatches = ZSTD_selectBtGetAllMatches(ms, dictMode); + U32 const sufficient_len = MIN(cParams->targetLength, ZSTD_OPT_NUM -1); U32 const minMatch = (cParams->minMatch == 3) ? 3 : 4; U32 nextToUpdate3 = ms->nextToUpdate; @@ -971,7 +1080,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, /* find first match */ { U32 const litlen = (U32)(ip - anchor); U32 const ll0 = !litlen; - U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, ip, iend, dictMode, rep, ll0, minMatch); + U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, ip, iend, rep, ll0, minMatch); ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, (U32)(ip-istart), (U32)(iend - ip)); if (!nbMatches) { ip++; continue; } @@ -985,18 +1094,18 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, * in every price. We include the literal length to avoid negative * prices when we subtract the previous literal length. 
*/ - opt[0].price = ZSTD_litLengthPrice(litlen, optStatePtr, optLevel); + opt[0].price = (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel); /* large match -> immediate encoding */ { U32 const maxML = matches[nbMatches-1].len; - U32 const maxOffset = matches[nbMatches-1].off; + U32 const maxOffcode = matches[nbMatches-1].off; DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series", - nbMatches, maxML, maxOffset, (U32)(ip-prefixStart)); + nbMatches, maxML, maxOffcode, (U32)(ip-prefixStart)); if (maxML > sufficient_len) { lastSequence.litlen = litlen; lastSequence.mlen = maxML; - lastSequence.off = maxOffset; + lastSequence.off = maxOffcode; DEBUGLOG(6, "large match (%u>%u), immediate encoding", maxML, sufficient_len); cur = 0; @@ -1005,24 +1114,25 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, } } /* set prices for first matches starting position == 0 */ - { U32 const literalsPrice = opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel); + assert(opt[0].price >= 0); + { U32 const literalsPrice = (U32)opt[0].price + ZSTD_litLengthPrice(0, optStatePtr, optLevel); U32 pos; U32 matchNb; for (pos = 1; pos < minMatch; pos++) { opt[pos].price = ZSTD_MAX_PRICE; /* mlen, litlen and price will be fixed during forward scanning */ } for (matchNb = 0; matchNb < nbMatches; matchNb++) { - U32 const offset = matches[matchNb].off; + U32 const offcode = matches[matchNb].off; U32 const end = matches[matchNb].len; for ( ; pos <= end ; pos++ ) { - U32 const matchPrice = ZSTD_getMatchPrice(offset, pos, optStatePtr, optLevel); + U32 const matchPrice = ZSTD_getMatchPrice(offcode, pos, optStatePtr, optLevel); U32 const sequencePrice = literalsPrice + matchPrice; DEBUGLOG(7, "rPos:%u => set initial price : %.2f", pos, ZSTD_fCost(sequencePrice)); opt[pos].mlen = pos; - opt[pos].off = offset; + opt[pos].off = offcode; opt[pos].litlen = litlen; - opt[pos].price = sequencePrice; + opt[pos].price = (int)sequencePrice; } } last_pos = pos-1; } @@ -1037,9 +1147,9 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, /* Fix current position with one literal if cheaper */ { U32 const litlen = (opt[cur-1].mlen == 0) ? opt[cur-1].litlen + 1 : 1; int const price = opt[cur-1].price - + ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel) - + ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) - - ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel); + + (int)ZSTD_rawLiteralsCost(ip+cur-1, 1, optStatePtr, optLevel) + + (int)ZSTD_litLengthPrice(litlen, optStatePtr, optLevel) + - (int)ZSTD_litLengthPrice(litlen-1, optStatePtr, optLevel); assert(price < 1000000000); /* overflow check */ if (price <= opt[cur].price) { DEBUGLOG(7, "cPos:%zi==rPos:%u : better price (%.2f<=%.2f) using literal (ll==%u) (hist:%u,%u,%u)", @@ -1065,7 +1175,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, assert(cur >= opt[cur].mlen); if (opt[cur].mlen != 0) { U32 const prev = cur - opt[cur].mlen; - repcodes_t newReps = ZSTD_updateRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0); + repcodes_t const newReps = ZSTD_newRep(opt[prev].rep, opt[cur].off, opt[cur].litlen==0); ZSTD_memcpy(opt[cur].rep, &newReps, sizeof(repcodes_t)); } else { ZSTD_memcpy(opt[cur].rep, opt[cur - 1].rep, sizeof(repcodes_t)); @@ -1082,11 +1192,12 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, continue; /* skip unpromising positions; about ~+6% speed, -0.01 ratio */ } + assert(opt[cur].price >= 0); { U32 const ll0 = (opt[cur].mlen != 0); U32 const litlen = (opt[cur].mlen == 0) ? 
opt[cur].litlen : 0; - U32 const previousPrice = opt[cur].price; + U32 const previousPrice = (U32)opt[cur].price; U32 const basePrice = previousPrice + ZSTD_litLengthPrice(0, optStatePtr, optLevel); - U32 nbMatches = ZSTD_BtGetAllMatches(matches, ms, &nextToUpdate3, inr, iend, dictMode, opt[cur].rep, ll0, minMatch); + U32 nbMatches = getAllMatches(matches, ms, &nextToUpdate3, inr, iend, opt[cur].rep, ll0, minMatch); U32 matchNb; ZSTD_optLdm_processMatchCandidate(&optLdm, matches, &nbMatches, @@ -1124,7 +1235,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, for (mlen = lastML; mlen >= startML; mlen--) { /* scan downward */ U32 const pos = cur + mlen; - int const price = basePrice + ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); + int const price = (int)basePrice + (int)ZSTD_getMatchPrice(offset, mlen, optStatePtr, optLevel); if ((pos > last_pos) || (price < opt[pos].price)) { DEBUGLOG(7, "rPos:%u (ml=%2u) => new better price (%.2f<%.2f)", @@ -1154,7 +1265,7 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */ * update them while traversing the sequences. */ if (lastSequence.mlen != 0) { - repcodes_t reps = ZSTD_updateRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0); + repcodes_t const reps = ZSTD_newRep(opt[cur].rep, lastSequence.off, lastSequence.litlen==0); ZSTD_memcpy(rep, &reps, sizeof(reps)); } else { ZSTD_memcpy(rep, opt[cur].rep, sizeof(repcodes_t)); @@ -1198,7 +1309,7 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */ assert(anchor + llen <= iend); ZSTD_updateStats(optStatePtr, llen, anchor, offCode, mlen); - ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen-MINMATCH); + ZSTD_storeSeq(seqStore, llen, anchor, iend, offCode, mlen); anchor += advance; ip = anchor; } } @@ -1210,38 +1321,30 @@ _shortestPath: /* cur, last_pos, best_mlen, best_off have to be set */ return (size_t)(iend - anchor); } +static size_t ZSTD_compressBlock_opt0( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /* optLevel */, dictMode); +} + +static size_t ZSTD_compressBlock_opt2( + ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], + const void* src, size_t srcSize, const ZSTD_dictMode_e dictMode) +{ + return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /* optLevel */, dictMode); +} size_t ZSTD_compressBlock_btopt( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btopt"); - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_noDict); + return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_noDict); } -/* used in 2-pass strategy */ -static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus) -{ - U32 s, sum=0; - assert(ZSTD_FREQ_DIV+bonus >= 0); - for (s=0; s<lastEltIndex+1; s++) { - table[s] <<= ZSTD_FREQ_DIV+bonus; - table[s]--; - sum += table[s]; - } - return sum; -} -/* used in 2-pass strategy */ -MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr) -{ - if (ZSTD_compressedLiterals(optPtr)) - optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0); - optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0); - optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0); - optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 
0); -} /* ZSTD_initStats_ultra(): * make a first compression pass, just to seed stats with more accurate starting values. @@ -1263,7 +1366,7 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms, assert(ms->window.dictLimit == ms->window.lowLimit); /* no dictionary */ assert(ms->window.dictLimit - ms->nextToUpdate <= 1); /* no prefix (note: intentional overflow, defined as 2-complement) */ - ZSTD_compressBlock_opt_generic(ms, seqStore, tmpRep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); /* generate stats into ms->opt*/ + ZSTD_compressBlock_opt2(ms, seqStore, tmpRep, src, srcSize, ZSTD_noDict); /* generate stats into ms->opt*/ /* invalidate first scan from history */ ZSTD_resetSeqStore(seqStore); @@ -1272,8 +1375,6 @@ ZSTD_initStats_ultra(ZSTD_matchState_t* ms, ms->window.lowLimit = ms->window.dictLimit; ms->nextToUpdate = ms->window.dictLimit; - /* re-inforce weight of collected statistics */ - ZSTD_upscaleStats(&ms->opt); } size_t ZSTD_compressBlock_btultra( @@ -1281,7 +1382,7 @@ size_t ZSTD_compressBlock_btultra( const void* src, size_t srcSize) { DEBUGLOG(5, "ZSTD_compressBlock_btultra (srcSize=%zu)", srcSize); - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict); } size_t ZSTD_compressBlock_btultra2( @@ -1309,35 +1410,35 @@ size_t ZSTD_compressBlock_btultra2( ZSTD_initStats_ultra(ms, seqStore, rep, src, srcSize); } - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_noDict); + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_noDict); } size_t ZSTD_compressBlock_btopt_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_dictMatchState); + return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_btultra_dictMatchState( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_dictMatchState); + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_dictMatchState); } size_t ZSTD_compressBlock_btopt_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 0 /*optLevel*/, ZSTD_extDict); + return ZSTD_compressBlock_opt0(ms, seqStore, rep, src, srcSize, ZSTD_extDict); } size_t ZSTD_compressBlock_btultra_extDict( ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], const void* src, size_t srcSize) { - return ZSTD_compressBlock_opt_generic(ms, seqStore, rep, src, srcSize, 2 /*optLevel*/, ZSTD_extDict); + return ZSTD_compressBlock_opt2(ms, seqStore, rep, src, srcSize, ZSTD_extDict); } /* note : no btultra2 variant for extDict nor dictMatchState, diff --git a/thirdparty/zstd/compress/zstdmt_compress.c b/thirdparty/zstd/compress/zstdmt_compress.c index 22aa3e1245..6bc14b035e 100644 --- a/thirdparty/zstd/compress/zstdmt_compress.c +++ b/thirdparty/zstd/compress/zstdmt_compress.c @@ -102,9 +102,8 @@ typedef struct ZSTDMT_bufferPool_s { buffer_t bTable[1]; /* variable size */ } ZSTDMT_bufferPool; -static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem) +static ZSTDMT_bufferPool* 
ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem) { - unsigned const maxNbBuffers = 2*nbWorkers + 3; ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc( sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem); if (bufPool==NULL) return NULL; @@ -160,9 +159,8 @@ static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const } -static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers) +static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers) { - unsigned const maxNbBuffers = 2*nbWorkers + 3; if (srcBufPool==NULL) return NULL; if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */ return srcBufPool; @@ -171,7 +169,7 @@ static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, size_t const bSize = srcBufPool->bufferSize; /* forward parameters */ ZSTDMT_bufferPool* newBufPool; ZSTDMT_freeBufferPool(srcBufPool); - newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); + newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem); if (newBufPool==NULL) return newBufPool; ZSTDMT_setBufferSize(newBufPool, bSize); return newBufPool; @@ -263,6 +261,16 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf) ZSTD_customFree(buf.start, bufPool->cMem); } +/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released. + * The 3 additional buffers are as follows: + * 1 buffer for input loading + * 1 buffer for "next input" when submitting current one + * 1 buffer stuck in queue */ +#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) 2*nbWorkers + 3 + +/* After a worker releases its rawSeqStore, it is immediately ready for reuse. + * So we only need one seq buffer per worker. 
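 * (Illustration, not upstream text: with nbWorkers == 4 the output pool
 * therefore holds BUF_POOL_MAX_NB_BUFFERS(4) == 2*4 + 3 == 11 buffers,
 * while the seq pool below needs only SEQ_POOL_MAX_NB_BUFFERS(4) == 4.)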
*/ +#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) nbWorkers /* ===== Seq Pool Wrapper ====== */ @@ -316,7 +324,7 @@ static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq) static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem) { - ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem); + ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem); if (seqPool == NULL) return NULL; ZSTDMT_setNbSeq(seqPool, 0); return seqPool; @@ -329,7 +337,7 @@ static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool) static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers) { - return ZSTDMT_expandBufferPool(pool, nbWorkers); + return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers)); } @@ -467,7 +475,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState, ZSTD_dictContentType_e dictContentType) { /* Adjust parameters */ - if (params.ldmParams.enableLdm) { + if (params.ldmParams.enableLdm == ZSTD_ps_enable) { DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10); ZSTD_ldm_adjustParameters(¶ms.ldmParams, ¶ms.cParams); assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog); @@ -478,7 +486,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState, serialState->nextJobID = 0; if (params.fParams.checksumFlag) XXH64_reset(&serialState->xxhState, 0); - if (params.ldmParams.enableLdm) { + if (params.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_customMem cMem = params.customMem; unsigned const hashLog = params.ldmParams.hashLog; size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t); @@ -564,7 +572,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState, /* A future job may error and skip our job */ if (serialState->nextJobID == jobID) { /* It is now our turn, do any processing necessary */ - if (serialState->params.ldmParams.enableLdm) { + if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) { size_t error; assert(seqStore.seq != NULL && seqStore.pos == 0 && seqStore.size == 0 && seqStore.capacity > 0); @@ -594,7 +602,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState, if (seqStore.size > 0) { size_t const err = ZSTD_referenceExternalSequences( jobCCtx, seqStore.seq, seqStore.size); - assert(serialState->params.ldmParams.enableLdm); + assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable); assert(!ZSTD_isError(err)); (void)err; } @@ -672,7 +680,7 @@ static void ZSTDMT_compressionJob(void* jobDescription) if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation)); job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */ } - if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL) + if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL) JOB_ERROR(ERROR(memory_allocation)); /* Don't compute the checksum for chunks, since we compute it externally, @@ -680,7 +688,7 @@ static void ZSTDMT_compressionJob(void* jobDescription) */ if (job->jobID != 0) jobParams.fParams.checksumFlag = 0; /* Don't run LDM for the chunks, since we handle it externally */ - jobParams.ldmParams.enableLdm = 0; + jobParams.ldmParams.enableLdm = ZSTD_ps_disable; /* Correct nbWorkers to 0. */ jobParams.nbWorkers = 0; @@ -807,6 +815,15 @@ typedef struct { static const roundBuff_t kNullRoundBuff = {NULL, 0, 0}; #define RSYNC_LENGTH 32 +/* Don't create chunks smaller than the zstd block size. 
+ * This stops us from regressing compression ratio too much, + * and ensures our output fits in ZSTD_compressBound(). + * + * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then + * ZSTD_COMPRESSBOUND() will need to be updated. + */ +#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX +#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG) typedef struct { U64 hash; @@ -927,7 +944,7 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem); assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */ mtctx->jobIDMask = nbJobs - 1; - mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem); + mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem); mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem); mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem); initError = ZSTDMT_serialState_init(&mtctx->serial); @@ -1030,7 +1047,7 @@ static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers) { if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation); FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , ""); - mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers); + mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, BUF_POOL_MAX_NB_BUFFERS(nbWorkers)); if (mtctx->bufPool == NULL) return ERROR(memory_allocation); mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers); if (mtctx->cctxPool == NULL) return ERROR(memory_allocation); @@ -1135,7 +1152,7 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx) static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params) { unsigned jobLog; - if (params->ldmParams.enableLdm) { + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* In Long Range Mode, the windowLog is typically oversized. * In which case, it's preferable to determine the jobSize * based on cycleLog instead. */ @@ -1179,7 +1196,7 @@ static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params) int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy); int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog); assert(0 <= overlapRLog && overlapRLog <= 8); - if (params->ldmParams.enableLdm) { + if (params->ldmParams.enableLdm == ZSTD_ps_enable) { /* In Long Range Mode, the windowLog is typically oversized. * In which case, it's preferable to determine the jobSize * based on chainLog instead. @@ -1252,6 +1269,9 @@ size_t ZSTDMT_initCStream_internal( /* Aim for the targetsectionSize as the average job size. */ U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10); U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10); + /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our + * expected job size is at least 4x larger. */ + assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2); DEBUGLOG(4, "rsyncLog = %u", rsyncBits); mtctx->rsync.hash = 0; mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1; @@ -1263,7 +1283,7 @@ size_t ZSTDMT_initCStream_internal( ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize)); { /* If ldm is enabled we need windowSize space. */ - size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0; + size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? 
(1U << mtctx->params.cParams.windowLog) : 0; /* Two buffers of slack, plus extra space for the overlap * This is the minimum slack that LDM works with. One extra because * flush might waste up to targetSectionSize-1 bytes. Another extra @@ -1538,17 +1558,21 @@ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx) static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range) { BYTE const* const bufferStart = (BYTE const*)buffer.start; - BYTE const* const bufferEnd = bufferStart + buffer.capacity; BYTE const* const rangeStart = (BYTE const*)range.start; - BYTE const* const rangeEnd = range.size != 0 ? rangeStart + range.size : rangeStart; if (rangeStart == NULL || bufferStart == NULL) return 0; - /* Empty ranges cannot overlap */ - if (bufferStart == bufferEnd || rangeStart == rangeEnd) - return 0; - return bufferStart < rangeEnd && rangeStart < bufferEnd; + { + BYTE const* const bufferEnd = bufferStart + buffer.capacity; + BYTE const* const rangeEnd = rangeStart + range.size; + + /* Empty ranges cannot overlap */ + if (bufferStart == bufferEnd || rangeStart == rangeEnd) + return 0; + + return bufferStart < rangeEnd && rangeStart < bufferEnd; + } } static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) @@ -1575,7 +1599,7 @@ static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window) static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer) { - if (mtctx->params.ldmParams.enableLdm) { + if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) { ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex; DEBUGLOG(5, "ZSTDMT_waitForLdmComplete"); DEBUGLOG(5, "source [0x%zx, 0x%zx)", @@ -1678,6 +1702,11 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) if (!mtctx->params.rsyncable) /* Rsync is disabled. */ return syncPoint; + if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE) + /* We don't emit synchronization points if it would produce too small blocks. + * We don't have enough input to find a synchronization point, so don't look. + */ + return syncPoint; if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH) /* Not enough to compute the hash. * We will miss any synchronization points in this RSYNC_LENGTH byte @@ -1688,10 +1717,28 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) */ return syncPoint; /* Initialize the loop variables. */ - if (mtctx->inBuff.filled >= RSYNC_LENGTH) { - /* We have enough bytes buffered to initialize the hash. + if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) { + /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions + * because they can't possibly be a sync point. So we can start + * part way through the input buffer. + */ + pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled; + if (pos >= RSYNC_LENGTH) { + prev = istart + pos - RSYNC_LENGTH; + hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); + } else { + assert(mtctx->inBuff.filled >= RSYNC_LENGTH); + prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; + hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos)); + hash = ZSTD_rollingHash_append(hash, istart, pos); + } + } else { + /* We have enough bytes buffered to initialize the hash, + * and have processed enough bytes to find a sync point. * Start scanning at the beginning of the input.
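 * (Worked example for the filled < RSYNC_MIN_BLOCK_SIZE branch above, not
 * upstream text: RSYNC_MIN_BLOCK_LOG == ZSTD_BLOCKSIZELOG_MAX, which is 17,
 * so RSYNC_MIN_BLOCK_SIZE == 131072; with filled == 102400 bytes buffered,
 * scanning starts at pos == 131072 - 102400 == 28672 into the new input,
 * since no earlier position can terminate a block of at least
 * RSYNC_MIN_BLOCK_SIZE bytes.)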
*/ + assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE); + assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH); pos = 0; prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH; hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH); @@ -1705,16 +1752,6 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) syncPoint.flush = 1; return syncPoint; } - } else { - /* We don't have enough bytes buffered to initialize the hash, but - * we know we have at least RSYNC_LENGTH bytes total. - * Start scanning after the first RSYNC_LENGTH bytes less the bytes - * already buffered. - */ - pos = RSYNC_LENGTH - mtctx->inBuff.filled; - prev = (BYTE const*)mtctx->inBuff.buffer.start - pos; - hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled); - hash = ZSTD_rollingHash_append(hash, istart, pos); } /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll * through the input. If we hit a synchronization point, then cut the @@ -1726,8 +1763,9 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input) */ for (; pos < syncPoint.toLoad; ++pos) { BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH]; - /* if (pos >= RSYNC_LENGTH) assert(ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); */ + assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash); hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower); + assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE); if ((hash & hitMask) == hitMask) { syncPoint.toLoad = pos + 1; syncPoint.flush = 1; diff --git a/thirdparty/zstd/compress/zstdmt_compress.h b/thirdparty/zstd/compress/zstdmt_compress.h index 2fee2ec745..271eb1ac71 100644 --- a/thirdparty/zstd/compress/zstdmt_compress.h +++ b/thirdparty/zstd/compress/zstdmt_compress.h @@ -65,8 +65,11 @@ size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx); * Private use only. Init streaming operation. * expects params to be valid. * must receive dict, or cdict, or none, but not both. + * mtctx can be freshly constructed or reused from a prior compression. + * If mtctx is reused, memory allocations from the prior compression may not be freed, + * even if they are not needed for the current compression. * @return : 0, or an error code */ -size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs, +size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* mtctx, const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType, const ZSTD_CDict* cdict, ZSTD_CCtx_params params, unsigned long long pledgedSrcSize); diff --git a/thirdparty/zstd/decompress/huf_decompress.c b/thirdparty/zstd/decompress/huf_decompress.c index b93c9a003b..2027188255 100644 --- a/thirdparty/zstd/decompress/huf_decompress.c +++ b/thirdparty/zstd/decompress/huf_decompress.c @@ -22,6 +22,13 @@ #define HUF_STATIC_LINKING_ONLY #include "../common/huf.h" #include "../common/error_private.h" +#include "../common/zstd_internal.h" + +/* ************************************************************** +* Constants +****************************************************************/ + +#define HUF_DECODER_FAST_TABLELOG 11 /* ************************************************************** * Macros @@ -36,6 +43,30 @@ #error "Cannot force the use of the X1 and X2 decoders at the same time!" 
#endif +#if ZSTD_ENABLE_ASM_X86_64_BMI2 && DYNAMIC_BMI2 +# define HUF_ASM_X86_64_BMI2_ATTRS BMI2_TARGET_ATTRIBUTE +#else +# define HUF_ASM_X86_64_BMI2_ATTRS +#endif + +#ifdef __cplusplus +# define HUF_EXTERN_C extern "C" +#else +# define HUF_EXTERN_C +#endif +#define HUF_ASM_DECL HUF_EXTERN_C + +#if DYNAMIC_BMI2 || (ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)) +# define HUF_NEED_BMI2_FUNCTION 1 +#else +# define HUF_NEED_BMI2_FUNCTION 0 +#endif + +#if !(ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__)) +# define HUF_NEED_DEFAULT_FUNCTION 1 +#else +# define HUF_NEED_DEFAULT_FUNCTION 0 +#endif /* ************************************************************** * Error Management @@ -65,7 +96,7 @@ return fn##_body(dst, dstSize, cSrc, cSrcSize, DTable); \ } \ \ - static TARGET_ATTRIBUTE("bmi2") size_t fn##_bmi2( \ + static BMI2_TARGET_ATTRIBUTE size_t fn##_bmi2( \ void* dst, size_t dstSize, \ const void* cSrc, size_t cSrcSize, \ const HUF_DTable* DTable) \ @@ -107,13 +138,147 @@ static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) return dtd; } +#if ZSTD_ENABLE_ASM_X86_64_BMI2 + +static size_t HUF_initDStream(BYTE const* ip) { + BYTE const lastByte = ip[7]; + size_t const bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; + size_t const value = MEM_readLEST(ip) | 1; + assert(bitsConsumed <= 8); + return value << bitsConsumed; +} +typedef struct { + BYTE const* ip[4]; + BYTE* op[4]; + U64 bits[4]; + void const* dt; + BYTE const* ilimit; + BYTE* oend; + BYTE const* iend[4]; +} HUF_DecompressAsmArgs; + +/** + * Initializes args for the asm decoding loop. + * @returns 0 on success + * 1 if the fallback implementation should be used. + * Or an error code on failure. + */ +static size_t HUF_DecompressAsmArgs_init(HUF_DecompressAsmArgs* args, void* dst, size_t dstSize, void const* src, size_t srcSize, const HUF_DTable* DTable) +{ + void const* dt = DTable + 1; + U32 const dtLog = HUF_getDTableDesc(DTable).tableLog; + + const BYTE* const ilimit = (const BYTE*)src + 6 + 8; + + BYTE* const oend = (BYTE*)dst + dstSize; + + /* The following condition is false on x32 platform, + * but HUF_asm is not compatible with this ABI */ + if (!(MEM_isLittleEndian() && !MEM_32bits())) return 1; + + /* strict minimum : jump table + 1 byte per stream */ + if (srcSize < 10) + return ERROR(corruption_detected); + + /* Must have at least 8 bytes per stream because we don't handle initializing smaller bit containers. + * If table log is not correct at this point, fallback to the old decoder. + * On small inputs we don't have enough data to trigger the fast loop, so use the old decoder. + */ + if (dtLog != HUF_DECODER_FAST_TABLELOG) + return 1; + + /* Read the jump table. */ + { + const BYTE* const istart = (const BYTE*)src; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = srcSize - (length1 + length2 + length3 + 6); + args->iend[0] = istart + 6; /* jumpTable */ + args->iend[1] = args->iend[0] + length1; + args->iend[2] = args->iend[1] + length2; + args->iend[3] = args->iend[2] + length3; + + /* HUF_initDStream() requires this, and this small of an input + * won't benefit from the ASM loop anyways. + * length1 must be >= 16 so that ip[0] >= ilimit before the loop + * starts. 
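 * (Worked example, not upstream text: srcSize == 100 with jump-table
 * lengths length1/length2/length3 == 30/25/20 gives length4 == 100 -
 * (30+25+20+6) == 19; all four lengths pass the checks below, so the fast
 * loop may run.)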
+ */ + if (length1 < 16 || length2 < 8 || length3 < 8 || length4 < 8) + return 1; + if (length4 > srcSize) return ERROR(corruption_detected); /* overflow */ + } + /* ip[] contains the position that is currently loaded into bits[]. */ + args->ip[0] = args->iend[1] - sizeof(U64); + args->ip[1] = args->iend[2] - sizeof(U64); + args->ip[2] = args->iend[3] - sizeof(U64); + args->ip[3] = (BYTE const*)src + srcSize - sizeof(U64); + + /* op[] contains the output pointers. */ + args->op[0] = (BYTE*)dst; + args->op[1] = args->op[0] + (dstSize+3)/4; + args->op[2] = args->op[1] + (dstSize+3)/4; + args->op[3] = args->op[2] + (dstSize+3)/4; + + /* No point in calling the ASM loop for tiny outputs. */ + if (args->op[3] >= oend) + return 1; + + /* bits[] is the bit container. + * It is read from the MSB down to the LSB. + * It is shifted left as it is read, and zeros are + * shifted in. After the lowest valid bit a 1 is + * set, so that CountTrailingZeros(bits[]) can be used + * to count how many bits we've consumed. + */ + args->bits[0] = HUF_initDStream(args->ip[0]); + args->bits[1] = HUF_initDStream(args->ip[1]); + args->bits[2] = HUF_initDStream(args->ip[2]); + args->bits[3] = HUF_initDStream(args->ip[3]); + + /* If ip[] >= ilimit, it is guaranteed to be safe to + * reload bits[]. It may be beyond its section, but is + * guaranteed to be valid (>= istart). + */ + args->ilimit = ilimit; + + args->oend = oend; + args->dt = dt; + + return 0; +} + +static size_t HUF_initRemainingDStream(BIT_DStream_t* bit, HUF_DecompressAsmArgs const* args, int stream, BYTE* segmentEnd) +{ + /* Validate that we haven't overwritten. */ + if (args->op[stream] > segmentEnd) + return ERROR(corruption_detected); + /* Validate that we haven't read beyond iend[]. + * Note that ip[] may be < iend[] because the MSB is + * the next bit to read, and we may have consumed 100% + * of the stream, so down to iend[i] - 8 is valid. + */ + if (args->ip[stream] < args->iend[stream] - 8) + return ERROR(corruption_detected); + + /* Construct the BIT_DStream_t. */ + bit->bitContainer = MEM_readLE64(args->ip[stream]); + bit->bitsConsumed = ZSTD_countTrailingZeros((size_t)args->bits[stream]); + bit->start = (const char*)args->iend[0]; + bit->limitPtr = bit->start + sizeof(size_t); + bit->ptr = (const char*)args->ip[stream]; + + return 0; +} +#endif + #ifndef HUF_FORCE_DECOMPRESS_X2 /*-***************************/ /* single-symbol decoding */ /*-***************************/ -typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decoding */ +typedef struct { BYTE nbBits; BYTE byte; } HUF_DEltX1; /* single-symbol decoding */ /** * Packs 4 HUF_DEltX1 structs into a U64. This is used to lay down 4 entries at @@ -122,14 +287,44 @@ typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX1; /* single-symbol decodi static U64 HUF_DEltX1_set4(BYTE symbol, BYTE nbBits) { U64 D4; if (MEM_isLittleEndian()) { - D4 = symbol + (nbBits << 8); - } else { D4 = (symbol << 8) + nbBits; + } else { + D4 = symbol + (nbBits << 8); } D4 *= 0x0001000100010001ULL; return D4; } +/** + * Increases the tableLog to targetTableLog and rescales the stats. + * If tableLog > targetTableLog this is a no-op. + * @returns New tableLog + */ +static U32 HUF_rescaleStats(BYTE* huffWeight, U32* rankVal, U32 nbSymbols, U32 tableLog, U32 targetTableLog) +{ + if (tableLog > targetTableLog) + return tableLog; + if (tableLog < targetTableLog) { + U32 const scale = targetTableLog - tableLog; + U32 s; + /* Increase the weight for all non-zero probability symbols by scale.
*/ + for (s = 0; s < nbSymbols; ++s) { + huffWeight[s] += (BYTE)((huffWeight[s] == 0) ? 0 : scale); + } + /* Update rankVal to reflect the new weights. + * All weights except 0 get moved to weight + scale. + * Weights [1, scale] are empty. + */ + for (s = targetTableLog; s > scale; --s) { + rankVal[s] = rankVal[s - scale]; + } + for (s = scale; s > 0; --s) { + rankVal[s] = 0; + } + } + return targetTableLog; +} + typedef struct { U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; U32 rankStart[HUF_TABLELOG_ABSOLUTEMAX + 1]; @@ -162,8 +357,12 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr iSize = HUF_readStats_wksp(wksp->huffWeight, HUF_SYMBOLVALUE_MAX + 1, wksp->rankVal, &nbSymbols, &tableLog, src, srcSize, wksp->statsWksp, sizeof(wksp->statsWksp), bmi2); if (HUF_isError(iSize)) return iSize; + /* Table header */ { DTableDesc dtd = HUF_getDTableDesc(DTable); + U32 const maxTableLog = dtd.maxTableLog + 1; + U32 const targetTableLog = MIN(maxTableLog, HUF_DECODER_FAST_TABLELOG); + tableLog = HUF_rescaleStats(wksp->huffWeight, wksp->rankVal, nbSymbols, tableLog, targetTableLog); if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ dtd.tableType = 0; dtd.tableLog = (BYTE)tableLog; @@ -207,7 +406,7 @@ size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t sr /* fill DTable * We fill all entries of each weight in order. - * That way length is a constant for each iteration of the outter loop. + * That way length is a constant for each iteration of the outer loop. * We can switch based on the length to a different inner loop which is * optimized for that particular case. */ @@ -304,11 +503,15 @@ HUF_decodeStreamX1(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, cons BYTE* const pStart = p; /* up to 4 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) { - HUF_DECODE_SYMBOLX1_2(p, bitDPtr); - HUF_DECODE_SYMBOLX1_1(p, bitDPtr); - HUF_DECODE_SYMBOLX1_2(p, bitDPtr); - HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + if ((pEnd - p) > 3) { + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-3)) { + HUF_DECODE_SYMBOLX1_2(p, bitDPtr); + HUF_DECODE_SYMBOLX1_1(p, bitDPtr); + HUF_DECODE_SYMBOLX1_2(p, bitDPtr); + HUF_DECODE_SYMBOLX1_0(p, bitDPtr); + } + } else { + BIT_reloadDStream(bitDPtr); } /* [0-3] symbols remaining */ @@ -388,33 +591,36 @@ HUF_decompress4X1_usingDTable_internal_body( U32 endSignal = 1; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); /* up to 16 symbols per loop (4 symbols per stream) in 64-bit mode */ - for ( ; (endSignal) & (op4 < olimit) ; ) { - HUF_DECODE_SYMBOLX1_2(op1, &bitD1); - HUF_DECODE_SYMBOLX1_2(op2, &bitD2); - HUF_DECODE_SYMBOLX1_2(op3, &bitD3); - HUF_DECODE_SYMBOLX1_2(op4, &bitD4); - HUF_DECODE_SYMBOLX1_1(op1, &bitD1); - HUF_DECODE_SYMBOLX1_1(op2, &bitD2); - HUF_DECODE_SYMBOLX1_1(op3, &bitD3); - HUF_DECODE_SYMBOLX1_1(op4, &bitD4); - HUF_DECODE_SYMBOLX1_2(op1, &bitD1); - HUF_DECODE_SYMBOLX1_2(op2, &bitD2); - HUF_DECODE_SYMBOLX1_2(op3, &bitD3); - HUF_DECODE_SYMBOLX1_2(op4, &bitD4); - HUF_DECODE_SYMBOLX1_0(op1, &bitD1); - HUF_DECODE_SYMBOLX1_0(op2, &bitD2); - 
HUF_DECODE_SYMBOLX1_0(op3, &bitD3); - HUF_DECODE_SYMBOLX1_0(op4, &bitD4); - endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; + if ((size_t)(oend - op4) >= sizeof(size_t)) { + for ( ; (endSignal) & (op4 < olimit) ; ) { + HUF_DECODE_SYMBOLX1_2(op1, &bitD1); + HUF_DECODE_SYMBOLX1_2(op2, &bitD2); + HUF_DECODE_SYMBOLX1_2(op3, &bitD3); + HUF_DECODE_SYMBOLX1_2(op4, &bitD4); + HUF_DECODE_SYMBOLX1_1(op1, &bitD1); + HUF_DECODE_SYMBOLX1_1(op2, &bitD2); + HUF_DECODE_SYMBOLX1_1(op3, &bitD3); + HUF_DECODE_SYMBOLX1_1(op4, &bitD4); + HUF_DECODE_SYMBOLX1_2(op1, &bitD1); + HUF_DECODE_SYMBOLX1_2(op2, &bitD2); + HUF_DECODE_SYMBOLX1_2(op3, &bitD3); + HUF_DECODE_SYMBOLX1_2(op4, &bitD4); + HUF_DECODE_SYMBOLX1_0(op1, &bitD1); + HUF_DECODE_SYMBOLX1_0(op2, &bitD2); + HUF_DECODE_SYMBOLX1_0(op3, &bitD3); + HUF_DECODE_SYMBOLX1_0(op4, &bitD4); + endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; + } } /* check corruption */ @@ -440,6 +646,79 @@ HUF_decompress4X1_usingDTable_internal_body( } } +#if HUF_NEED_BMI2_FUNCTION +static BMI2_TARGET_ATTRIBUTE +size_t HUF_decompress4X1_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); +} +#endif + +#if HUF_NEED_DEFAULT_FUNCTION +static +size_t HUF_decompress4X1_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X1_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); +} +#endif + +#if ZSTD_ENABLE_ASM_X86_64_BMI2 + +HUF_ASM_DECL void HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN; + +static HUF_ASM_X86_64_BMI2_ATTRS +size_t +HUF_decompress4X1_usingDTable_internal_bmi2_asm( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + void const* dt = DTable + 1; + const BYTE* const iend = (const BYTE*)cSrc + 6; + BYTE* const oend = (BYTE*)dst + dstSize; + HUF_DecompressAsmArgs args; + { + size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + FORWARD_IF_ERROR(ret, "Failed to init asm args"); + if (ret != 0) + return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + } + + assert(args.ip[0] >= args.ilimit); + HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop(&args); + + /* Our loop guarantees that ip[] >= ilimit and that we haven't + * overwritten any op[]. + */ + assert(args.ip[0] >= iend); + assert(args.ip[1] >= iend); + assert(args.ip[2] >= iend); + assert(args.ip[3] >= iend); + assert(args.op[3] <= oend); + (void)iend; + + /* finish bit streams one by one. 
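 * (Illustration, not upstream text: dstSize == 10 gives segmentSize ==
 * (10+3)/4 == 3, so the four streams decode 3, 3, 3 and 1 byte(s)
 * respectively, the last segmentEnd being clamped to oend.)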
*/ + { + size_t const segmentSize = (dstSize+3) / 4; + BYTE* segmentEnd = (BYTE*)dst; + int i; + for (i = 0; i < 4; ++i) { + BIT_DStream_t bit; + if (segmentSize <= (size_t)(oend - segmentEnd)) + segmentEnd += segmentSize; + else + segmentEnd = oend; + FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption"); + /* Decompress and validate that we've produced exactly the expected length. */ + args.op[i] += HUF_decodeStreamX1(args.op[i], &bit, segmentEnd, (HUF_DEltX1 const*)dt, HUF_DECODER_FAST_TABLELOG); + if (args.op[i] != segmentEnd) return ERROR(corruption_detected); + } + } + + /* decoded size */ + return dstSize; +} +#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */ typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, const void *cSrc, @@ -447,8 +726,28 @@ typedef size_t (*HUF_decompress_usingDTable_t)(void *dst, size_t dstSize, const HUF_DTable *DTable); HUF_DGEN(HUF_decompress1X1_usingDTable_internal) -HUF_DGEN(HUF_decompress4X1_usingDTable_internal) +static size_t HUF_decompress4X1_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable, int bmi2) +{ +#if DYNAMIC_BMI2 + if (bmi2) { +# if ZSTD_ENABLE_ASM_X86_64_BMI2 + return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); +# else + return HUF_decompress4X1_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); +# endif + } +#else + (void)bmi2; +#endif + +#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) + return HUF_decompress4X1_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); +#else + return HUF_decompress4X1_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable); +#endif +} size_t HUF_decompress1X1_usingDTable( @@ -518,106 +817,226 @@ size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, /* *************************/ typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX2; /* double-symbols decoding */ -typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; +typedef struct { BYTE symbol; } sortedSymbol_t; typedef U32 rankValCol_t[HUF_TABLELOG_MAX + 1]; typedef rankValCol_t rankVal_t[HUF_TABLELOG_MAX]; +/** + * Constructs a HUF_DEltX2 in a U32. + */ +static U32 HUF_buildDEltX2U32(U32 symbol, U32 nbBits, U32 baseSeq, int level) +{ + U32 seq; + DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, sequence) == 0); + DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, nbBits) == 2); + DEBUG_STATIC_ASSERT(offsetof(HUF_DEltX2, length) == 3); + DEBUG_STATIC_ASSERT(sizeof(HUF_DEltX2) == sizeof(U32)); + if (MEM_isLittleEndian()) { + seq = level == 1 ? symbol : (baseSeq + (symbol << 8)); + return seq + (nbBits << 16) + ((U32)level << 24); + } else { + seq = level == 1 ? (symbol << 8) : ((baseSeq << 8) + symbol); + return (seq << 16) + (nbBits << 8) + (U32)level; + } +} -/* HUF_fillDTableX2Level2() : - * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ -static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 sizeLog, const U32 consumed, - const U32* rankValOrigin, const int minWeight, - const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, - U32 nbBitsBaseline, U16 baseSeq, U32* wksp, size_t wkspSize) +/** + * Constructs a HUF_DEltX2. 
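+ * Packs the same fields as HUF_buildDEltX2U32 above; per the
+ * DEBUG_STATIC_ASSERTs there, the little-endian layout of the U32 is:
+ *   bits  0-15: sequence  // 1 or 2 literal bytes
+ *   bits 16-23: nbBits
+ *   bits 24-31: length    // == level, i.e. how many bytes the entry emits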
+ */ +static HUF_DEltX2 HUF_buildDEltX2(U32 symbol, U32 nbBits, U32 baseSeq, int level) { HUF_DEltX2 DElt; - U32* rankVal = wksp; + U32 const val = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); + DEBUG_STATIC_ASSERT(sizeof(DElt) == sizeof(val)); + ZSTD_memcpy(&DElt, &val, sizeof(val)); + return DElt; +} - assert(wkspSize >= HUF_TABLELOG_MAX + 1); - (void)wkspSize; - /* get pre-calculated rankVal */ - ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1)); +/** + * Constructs 2 HUF_DEltX2s and packs them into a U64. + */ +static U64 HUF_buildDEltX2U64(U32 symbol, U32 nbBits, U16 baseSeq, int level) +{ + U32 DElt = HUF_buildDEltX2U32(symbol, nbBits, baseSeq, level); + return (U64)DElt + ((U64)DElt << 32); +} - /* fill skipped values */ - if (minWeight>1) { - U32 i, skipSize = rankVal[minWeight]; - MEM_writeLE16(&(DElt.sequence), baseSeq); - DElt.nbBits = (BYTE)(consumed); - DElt.length = 1; - for (i = 0; i < skipSize; i++) - DTable[i] = DElt; +/** + * Fills the DTable rank with all the symbols from [begin, end) that are each + * nbBits long. + * + * @param DTableRank The start of the rank in the DTable. + * @param begin The first symbol to fill (inclusive). + * @param end The last symbol to fill (exclusive). + * @param nbBits Each symbol is nbBits long. + * @param tableLog The table log. + * @param baseSeq If level == 1 { 0 } else { the first level symbol } + * @param level The level in the table. Must be 1 or 2. + */ +static void HUF_fillDTableX2ForWeight( + HUF_DEltX2* DTableRank, + sortedSymbol_t const* begin, sortedSymbol_t const* end, + U32 nbBits, U32 tableLog, + U16 baseSeq, int const level) +{ + U32 const length = 1U << ((tableLog - nbBits) & 0x1F /* quiet static-analyzer */); + const sortedSymbol_t* ptr; + assert(level >= 1 && level <= 2); + switch (length) { + case 1: + for (ptr = begin; ptr != end; ++ptr) { + HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); + *DTableRank++ = DElt; + } + break; + case 2: + for (ptr = begin; ptr != end; ++ptr) { + HUF_DEltX2 const DElt = HUF_buildDEltX2(ptr->symbol, nbBits, baseSeq, level); + DTableRank[0] = DElt; + DTableRank[1] = DElt; + DTableRank += 2; + } + break; + case 4: + for (ptr = begin; ptr != end; ++ptr) { + U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); + DTableRank += 4; + } + break; + case 8: + for (ptr = begin; ptr != end; ++ptr) { + U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2)); + DTableRank += 8; + } + break; + default: + for (ptr = begin; ptr != end; ++ptr) { + U64 const DEltX2 = HUF_buildDEltX2U64(ptr->symbol, nbBits, baseSeq, level); + HUF_DEltX2* const DTableRankEnd = DTableRank + length; + for (; DTableRank != DTableRankEnd; DTableRank += 8) { + ZSTD_memcpy(DTableRank + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 2, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 4, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTableRank + 6, &DEltX2, sizeof(DEltX2)); + } + } + break; } +} - /* fill DTable */ - { U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */ - const U32 symbol = sortedSymbols[s].symbol; - const U32 weight = sortedSymbols[s].weight; - 
const U32 nbBits = nbBitsBaseline - weight; - const U32 length = 1 << (sizeLog-nbBits); - const U32 start = rankVal[weight]; - U32 i = start; - const U32 end = start + length; - - MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8))); - DElt.nbBits = (BYTE)(nbBits + consumed); - DElt.length = 2; - do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */ +/* HUF_fillDTableX2Level2() : + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ +static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 consumedBits, + const U32* rankVal, const int minWeight, const int maxWeight1, + const sortedSymbol_t* sortedSymbols, U32 const* rankStart, + U32 nbBitsBaseline, U16 baseSeq) +{ + /* Fill skipped values (all positions up to rankVal[minWeight]). + * These positions only get a single symbol because the combined weight + * is too large. + */ + if (minWeight>1) { + U32 const length = 1U << ((targetLog - consumedBits) & 0x1F /* quiet static-analyzer */); + U64 const DEltX2 = HUF_buildDEltX2U64(baseSeq, consumedBits, /* baseSeq */ 0, /* level */ 1); + int const skipSize = rankVal[minWeight]; + assert(length > 1); + assert((U32)skipSize < length); + switch (length) { + case 2: + assert(skipSize == 1); + ZSTD_memcpy(DTable, &DEltX2, sizeof(DEltX2)); + break; + case 4: + assert(skipSize <= 4); + ZSTD_memcpy(DTable + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + 2, &DEltX2, sizeof(DEltX2)); + break; + default: + { + int i; + for (i = 0; i < skipSize; i += 8) { + ZSTD_memcpy(DTable + i + 0, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + i + 2, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + i + 4, &DEltX2, sizeof(DEltX2)); + ZSTD_memcpy(DTable + i + 6, &DEltX2, sizeof(DEltX2)); + } + } + } + } - rankVal[weight] += length; - } } + /* Fill each of the second-level symbols by weight. */ + { + int w; + for (w = minWeight; w < maxWeight1; ++w) { + int const begin = rankStart[w]; + int const end = rankStart[w+1]; + U32 const nbBits = nbBitsBaseline - w; + U32 const totalBits = nbBits + consumedBits; + HUF_fillDTableX2ForWeight( + DTable + rankVal[w], + sortedSymbols + begin, sortedSymbols + end, + totalBits, targetLog, + baseSeq, /* level */ 2); + } + } } - static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, - const sortedSymbol_t* sortedList, const U32 sortedListSize, + const sortedSymbol_t* sortedList, const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, - const U32 nbBitsBaseline, U32* wksp, size_t wkspSize) + const U32 nbBitsBaseline) { - U32* rankVal = wksp; + U32* const rankVal = rankValOrigin[0]; const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ const U32 minBits = nbBitsBaseline - maxWeight; - U32 s; - - assert(wkspSize >= HUF_TABLELOG_MAX + 1); - wksp += HUF_TABLELOG_MAX + 1; - wkspSize -= HUF_TABLELOG_MAX + 1; - - ZSTD_memcpy(rankVal, rankValOrigin, sizeof(U32) * (HUF_TABLELOG_MAX + 1)); - - /* fill DTable */ - for (s=0; s<sortedListSize; s++) { - const U16 symbol = sortedList[s].symbol; - const U32 weight = sortedList[s].weight; - const U32 nbBits = nbBitsBaseline - weight; - const U32 start = rankVal[weight]; - const U32 length = 1 << (targetLog-nbBits); - - if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */ - U32 sortedRank; + int w; + int const wEnd = (int)maxWeight + 1; + + /* Fill DTable in order of weight.
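+ * For weight w, nbBits = nbBitsBaseline - w, so each symbol of weight w
+ * occupies 1 << (targetLog - nbBits) consecutive cells; e.g. with
+ * targetLog == 12, a symbol coded in 9 bits fills 2^3 == 8 cells.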
*/ + for (w = 1; w < wEnd; ++w) { + int const begin = (int)rankStart[w]; + int const end = (int)rankStart[w+1]; + U32 const nbBits = nbBitsBaseline - w; + + if (targetLog-nbBits >= minBits) { + /* Enough room for a second symbol. */ + int start = rankVal[w]; + U32 const length = 1U << ((targetLog - nbBits) & 0x1F /* quiet static-analyzer */); int minWeight = nbBits + scaleLog; + int s; if (minWeight < 1) minWeight = 1; - sortedRank = rankStart[minWeight]; - HUF_fillDTableX2Level2(DTable+start, targetLog-nbBits, nbBits, - rankValOrigin[nbBits], minWeight, - sortedList+sortedRank, sortedListSize-sortedRank, - nbBitsBaseline, symbol, wksp, wkspSize); + /* Fill the DTable for every symbol of weight w. + * These symbols get at least 1 second symbol. + */ + for (s = begin; s != end; ++s) { + HUF_fillDTableX2Level2( + DTable + start, targetLog, nbBits, + rankValOrigin[nbBits], minWeight, wEnd, + sortedList, rankStart, + nbBitsBaseline, sortedList[s].symbol); + start += length; + } } else { - HUF_DEltX2 DElt; - MEM_writeLE16(&(DElt.sequence), symbol); - DElt.nbBits = (BYTE)(nbBits); - DElt.length = 1; - { U32 const end = start + length; - U32 u; - for (u = start; u < end; u++) DTable[u] = DElt; - } } - rankVal[weight] += length; + /* Only a single symbol. */ + HUF_fillDTableX2ForWeight( + DTable + rankVal[w], + sortedList + begin, sortedList + end, + nbBits, targetLog, + /* baseSeq */ 0, /* level */ 1); + } } } typedef struct { rankValCol_t rankVal[HUF_TABLELOG_MAX]; U32 rankStats[HUF_TABLELOG_MAX + 1]; - U32 rankStart0[HUF_TABLELOG_MAX + 2]; + U32 rankStart0[HUF_TABLELOG_MAX + 3]; sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1]; BYTE weightList[HUF_SYMBOLVALUE_MAX + 1]; U32 calleeWksp[HUF_READ_STATS_WORKSPACE_SIZE_U32]; @@ -627,9 +1046,16 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize) { - U32 tableLog, maxW, sizeOfSort, nbSymbols; + return HUF_readDTableX2_wksp_bmi2(DTable, src, srcSize, workSpace, wkspSize, /* bmi2 */ 0); +} + +size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, + const void* src, size_t srcSize, + void* workSpace, size_t wkspSize, int bmi2) +{ + U32 tableLog, maxW, nbSymbols; DTableDesc dtd = HUF_getDTableDesc(DTable); - U32 const maxTableLog = dtd.maxTableLog; + U32 maxTableLog = dtd.maxTableLog; size_t iSize; void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; @@ -647,11 +1073,12 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); /* ZSTD_memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
*/ - iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), /* bmi2 */ 0); + iSize = HUF_readStats_wksp(wksp->weightList, HUF_SYMBOLVALUE_MAX + 1, wksp->rankStats, &nbSymbols, &tableLog, src, srcSize, wksp->calleeWksp, sizeof(wksp->calleeWksp), bmi2); if (HUF_isError(iSize)) return iSize; /* check result */ if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ + if (tableLog <= HUF_DECODER_FAST_TABLELOG && maxTableLog > HUF_DECODER_FAST_TABLELOG) maxTableLog = HUF_DECODER_FAST_TABLELOG; /* find maxWeight */ for (maxW = tableLog; wksp->rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ @@ -664,7 +1091,7 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, rankStart[w] = curr; } rankStart[0] = nextRankStart; /* put all 0w symbols at the end of sorted list*/ - sizeOfSort = nextRankStart; + rankStart[maxW+1] = nextRankStart; } /* sort symbols by weight */ @@ -673,7 +1100,6 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, U32 const w = wksp->weightList[s]; U32 const r = rankStart[w]++; wksp->sortedSymbol[r].symbol = (BYTE)s; - wksp->sortedSymbol[r].weight = (BYTE)w; } rankStart[0] = 0; /* forget 0w symbols; this is beginning of weight(1) */ } @@ -698,10 +1124,9 @@ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, } } } } HUF_fillDTableX2(dt, maxTableLog, - wksp->sortedSymbol, sizeOfSort, + wksp->sortedSymbol, wksp->rankStart0, wksp->rankVal, maxW, - tableLog+1, - wksp->calleeWksp, sizeof(wksp->calleeWksp) / sizeof(U32)); + tableLog+1); dtd.tableLog = (BYTE)maxTableLog; dtd.tableType = 1; @@ -714,7 +1139,7 @@ FORCE_INLINE_TEMPLATE U32 HUF_decodeSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) { size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - ZSTD_memcpy(op, dt+val, 2); + ZSTD_memcpy(op, &dt[val].sequence, 2); BIT_skipBits(DStream, dt[val].nbBits); return dt[val].length; } @@ -723,15 +1148,17 @@ FORCE_INLINE_TEMPLATE U32 HUF_decodeLastSymbolX2(void* op, BIT_DStream_t* DStream, const HUF_DEltX2* dt, const U32 dtLog) { size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ - ZSTD_memcpy(op, dt+val, 1); - if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); - else { + ZSTD_memcpy(op, &dt[val].sequence, 1); + if (dt[val].length==1) { + BIT_skipBits(DStream, dt[val].nbBits); + } else { if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { BIT_skipBits(DStream, dt[val].nbBits); if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) /* ugly hack; works only because it's the last symbol. 
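(Over-consumption is possible because the last entry's nbBits may exceed the bits actually remaining; clamping bitsConsumed to the container size is safe since no further reads follow.)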
Note : can't easily extract nbBits from just this symbol */ DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); - } } + } + } return 1; } @@ -753,19 +1180,37 @@ HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, BYTE* const pStart = p; /* up to 8 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_1(p, bitDPtr); - HUF_DECODE_SYMBOLX2_2(p, bitDPtr); - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + if ((size_t)(pEnd - p) >= sizeof(bitDPtr->bitContainer)) { + if (dtLog <= 11 && MEM_64bits()) { + /* up to 10 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-9)) { + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + } else { + /* up to 8 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + } + } else { + BIT_reloadDStream(bitDPtr); } /* closer to end : up to 2 symbols at a time */ - while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + if ((size_t)(pEnd - p) >= 2) { + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); - while (p <= pEnd-2) - HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + while (p <= pEnd-2) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + } if (p < pEnd) p += HUF_decodeLastSymbolX2(p, bitDPtr, dt, dtLog); @@ -799,7 +1244,6 @@ HUF_decompress1X2_usingDTable_internal_body( /* decoded size */ return dstSize; } - FORCE_INLINE_TEMPLATE size_t HUF_decompress4X2_usingDTable_internal_body( void* dst, size_t dstSize, @@ -841,57 +1285,60 @@ HUF_decompress4X2_usingDTable_internal_body( U32 const dtLog = dtd.tableLog; if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD4, istart4, length4) ); /* 16-32 symbols per loop (4-8 symbols per stream) */ - for ( ; (endSignal) & (op4 < olimit); ) { + if ((size_t)(oend - op4) >= sizeof(size_t)) { + for ( ; (endSignal) & (op4 < olimit); ) { #if defined(__clang__) && (defined(__x86_64__) || defined(__i386__)) - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_1(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_0(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_1(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_0(op2, &bitD2); - endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_1(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_0(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_1(op4, &bitD4); - 
HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_0(op4, &bitD4); - endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; - endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + endSignal &= BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished; + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + endSignal &= BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished; + endSignal &= BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished; #else - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_1(op1, &bitD1); - HUF_DECODE_SYMBOLX2_1(op2, &bitD2); - HUF_DECODE_SYMBOLX2_1(op3, &bitD3); - HUF_DECODE_SYMBOLX2_1(op4, &bitD4); - HUF_DECODE_SYMBOLX2_2(op1, &bitD1); - HUF_DECODE_SYMBOLX2_2(op2, &bitD2); - HUF_DECODE_SYMBOLX2_2(op3, &bitD3); - HUF_DECODE_SYMBOLX2_2(op4, &bitD4); - HUF_DECODE_SYMBOLX2_0(op1, &bitD1); - HUF_DECODE_SYMBOLX2_0(op2, &bitD2); - HUF_DECODE_SYMBOLX2_0(op3, &bitD3); - HUF_DECODE_SYMBOLX2_0(op4, &bitD4); - endSignal = (U32)LIKELY( - (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished) - & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished) - & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished) - & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished)); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + endSignal = (U32)LIKELY((U32) + (BIT_reloadDStreamFast(&bitD1) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD2) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD3) == BIT_DStream_unfinished) + & (BIT_reloadDStreamFast(&bitD4) == BIT_DStream_unfinished)); #endif + } } /* check corruption */ @@ -915,8 +1362,99 @@ HUF_decompress4X2_usingDTable_internal_body( } } +#if HUF_NEED_BMI2_FUNCTION +static BMI2_TARGET_ATTRIBUTE +size_t HUF_decompress4X2_usingDTable_internal_bmi2(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X2_usingDTable_internal_body(dst, dstSize, cSrc, cSrcSize, DTable); +} +#endif + +#if HUF_NEED_DEFAULT_FUNCTION +static +size_t HUF_decompress4X2_usingDTable_internal_default(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable) { + return HUF_decompress4X2_usingDTable_internal_body(dst, 
dstSize, cSrc, cSrcSize, DTable); +} +#endif + +#if ZSTD_ENABLE_ASM_X86_64_BMI2 + +HUF_ASM_DECL void HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(HUF_DecompressAsmArgs* args) ZSTDLIB_HIDDEN; + +static HUF_ASM_X86_64_BMI2_ATTRS size_t +HUF_decompress4X2_usingDTable_internal_bmi2_asm( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) { + void const* dt = DTable + 1; + const BYTE* const iend = (const BYTE*)cSrc + 6; + BYTE* const oend = (BYTE*)dst + dstSize; + HUF_DecompressAsmArgs args; + { + size_t const ret = HUF_DecompressAsmArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); + FORWARD_IF_ERROR(ret, "Failed to init asm args"); + if (ret != 0) + return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); + } + + assert(args.ip[0] >= args.ilimit); + HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop(&args); + + /* note : op4 already verified within main loop */ + assert(args.ip[0] >= iend); + assert(args.ip[1] >= iend); + assert(args.ip[2] >= iend); + assert(args.ip[3] >= iend); + assert(args.op[3] <= oend); + (void)iend; + + /* finish bitStreams one by one */ + { + size_t const segmentSize = (dstSize+3) / 4; + BYTE* segmentEnd = (BYTE*)dst; + int i; + for (i = 0; i < 4; ++i) { + BIT_DStream_t bit; + if (segmentSize <= (size_t)(oend - segmentEnd)) + segmentEnd += segmentSize; + else + segmentEnd = oend; + FORWARD_IF_ERROR(HUF_initRemainingDStream(&bit, &args, i, segmentEnd), "corruption"); + args.op[i] += HUF_decodeStreamX2(args.op[i], &bit, segmentEnd, (HUF_DEltX2 const*)dt, HUF_DECODER_FAST_TABLELOG); + if (args.op[i] != segmentEnd) + return ERROR(corruption_detected); + } + } + + /* decoded size */ + return dstSize; +} +#endif /* ZSTD_ENABLE_ASM_X86_64_BMI2 */ + +static size_t HUF_decompress4X2_usingDTable_internal(void* dst, size_t dstSize, void const* cSrc, + size_t cSrcSize, HUF_DTable const* DTable, int bmi2) +{ +#if DYNAMIC_BMI2 + if (bmi2) { +# if ZSTD_ENABLE_ASM_X86_64_BMI2 + return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); +# else + return HUF_decompress4X2_usingDTable_internal_bmi2(dst, dstSize, cSrc, cSrcSize, DTable); +# endif + } +#else + (void)bmi2; +#endif + +#if ZSTD_ENABLE_ASM_X86_64_BMI2 && defined(__BMI2__) + return HUF_decompress4X2_usingDTable_internal_bmi2_asm(dst, dstSize, cSrc, cSrcSize, DTable); +#else + return HUF_decompress4X2_usingDTable_internal_default(dst, dstSize, cSrc, cSrcSize, DTable); +#endif +} + HUF_DGEN(HUF_decompress1X2_usingDTable_internal) -HUF_DGEN(HUF_decompress4X2_usingDTable_internal) size_t HUF_decompress1X2_usingDTable( void* dst, size_t dstSize, @@ -1025,25 +1563,25 @@ size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, #if !defined(HUF_FORCE_DECOMPRESS_X1) && !defined(HUF_FORCE_DECOMPRESS_X2) typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; -static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = +static const algo_time_t algoTime[16 /* Quantization */][2 /* single, double */] = { /* single, double, quad */ - {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ - {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ - {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ - {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ - {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ - {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ - {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ - {{ 897,128}, {1515, 75}, {2622, 68}}, 
/* Q == 7 : 44-50% */ - {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ - {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ - {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ - {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ - {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ - {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ - {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ - {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ + {{0,0}, {1,1}}, /* Q==0 : impossible */ + {{0,0}, {1,1}}, /* Q==1 : impossible */ + {{ 150,216}, { 381,119}}, /* Q == 2 : 12-18% */ + {{ 170,205}, { 514,112}}, /* Q == 3 : 18-25% */ + {{ 177,199}, { 539,110}}, /* Q == 4 : 25-32% */ + {{ 197,194}, { 644,107}}, /* Q == 5 : 32-38% */ + {{ 221,192}, { 735,107}}, /* Q == 6 : 38-44% */ + {{ 256,189}, { 881,106}}, /* Q == 7 : 44-50% */ + {{ 359,188}, {1167,109}}, /* Q == 8 : 50-56% */ + {{ 582,187}, {1570,114}}, /* Q == 9 : 56-62% */ + {{ 688,187}, {1712,122}}, /* Q ==10 : 62-69% */ + {{ 825,186}, {1965,136}}, /* Q ==11 : 69-75% */ + {{ 976,185}, {2131,150}}, /* Q ==12 : 75-81% */ + {{1180,186}, {2070,175}}, /* Q ==13 : 81-87% */ + {{1377,185}, {1731,202}}, /* Q ==14 : 87-93% */ + {{1412,185}, {1695,202}}, /* Q ==15 : 93-99% */ }; #endif @@ -1070,7 +1608,7 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) U32 const D256 = (U32)(dstSize >> 8); U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); - DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, to reduce cache eviction */ + DTime1 += DTime1 >> 5; /* small advantage to algorithm using less memory, to reduce cache eviction */ return DTime1 < DTime0; } #endif diff --git a/thirdparty/zstd/decompress/huf_decompress_amd64.S b/thirdparty/zstd/decompress/huf_decompress_amd64.S new file mode 100644 index 0000000000..49589cb611 --- /dev/null +++ b/thirdparty/zstd/decompress/huf_decompress_amd64.S @@ -0,0 +1,585 @@ +/* + * Copyright (c) Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under both the BSD-style license (found in the + * LICENSE file in the root directory of this source tree) and the GPLv2 (found + * in the COPYING file in the root directory of this source tree). + * You may select, at your option, one of the above-listed licenses. + */ + +#include "../common/portability_macros.h" + +/* Stack marking + * ref: https://wiki.gentoo.org/wiki/Hardened/GNU_stack_quickstart + */ +#if defined(__ELF__) && defined(__GNUC__) +.section .note.GNU-stack,"",%progbits +#endif + +#if ZSTD_ENABLE_ASM_X86_64_BMI2 + +/* Calling convention: + * + * %rdi contains the first argument: HUF_DecompressAsmArgs*. + * %rbp isn't maintained (no frame pointer). + * %rsp contains the stack pointer that grows down. + * No red-zone is assumed, only addresses >= %rsp are used. + * All register contents are preserved. + * + * TODO: Support Windows calling convention. 
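+ *
+ * The loop state is passed through a single HUF_DecompressAsmArgs*. A sketch
+ * of the layout implied by the offsets used below (field names illustrative;
+ * the authoritative definition lives in the C sources):
+ *   struct HUF_DecompressAsmArgs {
+ *       const BYTE* ip[4];   // offsets 0..24:  per-stream input pointers
+ *       BYTE*       op[4];   // offsets 32..56: per-stream output pointers
+ *       U64         bits[4]; // offsets 64..88: per-stream bit containers
+ *       const void* dtable;  // offset 96
+ *       const BYTE* ilimit;  // offset 104
+ *       BYTE*       oend;    // offset 112
+ *   };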
+ */ + +ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop) +ZSTD_HIDE_ASM_FUNCTION(_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop) +.global HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop +.global HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop +.global _HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop +.global _HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop +.text + +/* Sets up register mappings for clarity. + * op[], bits[], dtable & ip[0] each get their own register. + * ip[1,2,3] & olimit alias var[]. + * %rax is a scratch register. + */ + +#define op0 rsi +#define op1 rbx +#define op2 rcx +#define op3 rdi + +#define ip0 r8 +#define ip1 r9 +#define ip2 r10 +#define ip3 r11 + +#define bits0 rbp +#define bits1 rdx +#define bits2 r12 +#define bits3 r13 +#define dtable r14 +#define olimit r15 + +/* var[] aliases ip[1,2,3] & olimit + * ip[1,2,3] are saved every iteration. + * olimit is only used in compute_olimit. + */ +#define var0 r15 +#define var1 r9 +#define var2 r10 +#define var3 r11 + +/* 32-bit var registers */ +#define vard0 r15d +#define vard1 r9d +#define vard2 r10d +#define vard3 r11d + +/* Calls X(N) for each stream 0, 1, 2, 3. */ +#define FOR_EACH_STREAM(X) \ + X(0); \ + X(1); \ + X(2); \ + X(3) + +/* Calls X(N, idx) for each stream 0, 1, 2, 3. */ +#define FOR_EACH_STREAM_WITH_INDEX(X, idx) \ + X(0, idx); \ + X(1, idx); \ + X(2, idx); \ + X(3, idx) + +/* Define both _HUF_* & HUF_* symbols because MacOS + * C symbols are prefixed with '_' & Linux symbols aren't. + */ +_HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: +HUF_decompress4X1_usingDTable_internal_bmi2_asm_loop: + /* Save all registers - even if they are callee saved for simplicity. 
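+ * Each iteration of the loop below decodes 5 symbols per stream (20 total)
+ * and reloads at most 7 input bytes per stream, which is why the olimit
+ * computation divides the output slack by 5 and the input slack by 7.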
*/ + push %rax + push %rbx + push %rcx + push %rdx + push %rbp + push %rsi + push %rdi + push %r8 + push %r9 + push %r10 + push %r11 + push %r12 + push %r13 + push %r14 + push %r15 + + /* Read HUF_DecompressAsmArgs* args from %rax */ + movq %rdi, %rax + movq 0(%rax), %ip0 + movq 8(%rax), %ip1 + movq 16(%rax), %ip2 + movq 24(%rax), %ip3 + movq 32(%rax), %op0 + movq 40(%rax), %op1 + movq 48(%rax), %op2 + movq 56(%rax), %op3 + movq 64(%rax), %bits0 + movq 72(%rax), %bits1 + movq 80(%rax), %bits2 + movq 88(%rax), %bits3 + movq 96(%rax), %dtable + push %rax /* argument */ + push 104(%rax) /* ilimit */ + push 112(%rax) /* oend */ + push %olimit /* olimit space */ + + subq $24, %rsp + +.L_4X1_compute_olimit: + /* Computes how many iterations we can do safely + * %r15, %rax may be clobbered + * rbx, rdx must be saved + * op3 & ip0 mustn't be clobbered + */ + movq %rbx, 0(%rsp) + movq %rdx, 8(%rsp) + + movq 32(%rsp), %rax /* rax = oend */ + subq %op3, %rax /* rax = oend - op3 */ + + /* r15 = (oend - op3) / 5 */ + movabsq $-3689348814741910323, %rdx + mulq %rdx + movq %rdx, %r15 + shrq $2, %r15 + + movq %ip0, %rax /* rax = ip0 */ + movq 40(%rsp), %rdx /* rdx = ilimit */ + subq %rdx, %rax /* rax = ip0 - ilimit */ + movq %rax, %rbx /* rbx = ip0 - ilimit */ + + /* rdx = (ip0 - ilimit) / 7 */ + movabsq $2635249153387078803, %rdx + mulq %rdx + subq %rdx, %rbx + shrq %rbx + addq %rbx, %rdx + shrq $2, %rdx + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + /* r15 = r15 * 5 */ + leaq (%r15, %r15, 4), %r15 + + /* olimit = op3 + r15 */ + addq %op3, %olimit + + movq 8(%rsp), %rdx + movq 0(%rsp), %rbx + + /* If (op3 + 20 > olimit) */ + movq %op3, %rax /* rax = op3 */ + addq $20, %rax /* rax = op3 + 20 */ + cmpq %rax, %olimit /* op3 + 20 > olimit */ + jb .L_4X1_exit + + /* If (ip1 < ip0) go to exit */ + cmpq %ip0, %ip1 + jb .L_4X1_exit + + /* If (ip2 < ip1) go to exit */ + cmpq %ip1, %ip2 + jb .L_4X1_exit + + /* If (ip3 < ip2) go to exit */ + cmpq %ip2, %ip3 + jb .L_4X1_exit + +/* Reads top 11 bits from bits[n] + * Loads dt[bits[n]] into var[n] + */ +#define GET_NEXT_DELT(n) \ + movq $53, %var##n; \ + shrxq %var##n, %bits##n, %var##n; \ + movzwl (%dtable,%var##n,2),%vard##n + +/* var[n] must contain the DTable entry computed with GET_NEXT_DELT + * Moves var[n] to %rax + * bits[n] <<= var[n] & 63 + * op[n][idx] = %rax >> 8 + * %ah is a way to access bits [8, 16) of %rax + */ +#define DECODE_FROM_DELT(n, idx) \ + movq %var##n, %rax; \ + shlxq %var##n, %bits##n, %bits##n; \ + movb %ah, idx(%op##n) + +/* Assumes GET_NEXT_DELT has been called. 
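+ * For reference: each X1 table entry is 2 bytes, and the 16-bit load in
+ * GET_NEXT_DELT yields, on little-endian (mirroring HUF_DEltX1):
+ *   byte 0: nbBits  // shift count used by shlxq (only the low 6 bits matter)
+ *   byte 1: symbol  // extracted via %ah in DECODE_FROM_DELT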
+ * Calls DECODE_FROM_DELT then GET_NEXT_DELT + */ +#define DECODE_AND_GET_NEXT(n, idx) \ + DECODE_FROM_DELT(n, idx); \ + GET_NEXT_DELT(n) \ + +/* // ctz & nbBytes is stored in bits[n] + * // nbBits is stored in %rax + * ctz = CTZ[bits[n]] + * nbBits = ctz & 7 + * nbBytes = ctz >> 3 + * op[n] += 5 + * ip[n] -= nbBytes + * // Note: x86-64 is little-endian ==> no bswap + * bits[n] = MEM_readST(ip[n]) | 1 + * bits[n] <<= nbBits + */ +#define RELOAD_BITS(n) \ + bsfq %bits##n, %bits##n; \ + movq %bits##n, %rax; \ + andq $7, %rax; \ + shrq $3, %bits##n; \ + leaq 5(%op##n), %op##n; \ + subq %bits##n, %ip##n; \ + movq (%ip##n), %bits##n; \ + orq $1, %bits##n; \ + shlx %rax, %bits##n, %bits##n + + /* Store clobbered variables on the stack */ + movq %olimit, 24(%rsp) + movq %ip1, 0(%rsp) + movq %ip2, 8(%rsp) + movq %ip3, 16(%rsp) + + /* Call GET_NEXT_DELT for each stream */ + FOR_EACH_STREAM(GET_NEXT_DELT) + + .p2align 6 + +.L_4X1_loop_body: + /* Decode 5 symbols in each of the 4 streams (20 total) + * Must have called GET_NEXT_DELT for each stream + */ + FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 0) + FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 1) + FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 2) + FOR_EACH_STREAM_WITH_INDEX(DECODE_AND_GET_NEXT, 3) + FOR_EACH_STREAM_WITH_INDEX(DECODE_FROM_DELT, 4) + + /* Load ip[1,2,3] from stack (var[] aliases them) + * ip[] is needed for RELOAD_BITS + * Each will be stored back to the stack after RELOAD + */ + movq 0(%rsp), %ip1 + movq 8(%rsp), %ip2 + movq 16(%rsp), %ip3 + + /* Reload each stream & fetch the next table entry + * to prepare for the next iteration + */ + RELOAD_BITS(0) + GET_NEXT_DELT(0) + + RELOAD_BITS(1) + movq %ip1, 0(%rsp) + GET_NEXT_DELT(1) + + RELOAD_BITS(2) + movq %ip2, 8(%rsp) + GET_NEXT_DELT(2) + + RELOAD_BITS(3) + movq %ip3, 16(%rsp) + GET_NEXT_DELT(3) + + /* If op3 < olimit: continue the loop */ + cmp %op3, 24(%rsp) + ja .L_4X1_loop_body + + /* Reload ip[1,2,3] from stack */ + movq 0(%rsp), %ip1 + movq 8(%rsp), %ip2 + movq 16(%rsp), %ip3 + + /* Re-compute olimit */ + jmp .L_4X1_compute_olimit + +#undef GET_NEXT_DELT +#undef DECODE_FROM_DELT +#undef DECODE +#undef RELOAD_BITS +.L_4X1_exit: + addq $24, %rsp + + /* Restore stack (oend & olimit) */ + pop %rax /* olimit */ + pop %rax /* oend */ + pop %rax /* ilimit */ + pop %rax /* arg */ + + /* Save ip / op / bits */ + movq %ip0, 0(%rax) + movq %ip1, 8(%rax) + movq %ip2, 16(%rax) + movq %ip3, 24(%rax) + movq %op0, 32(%rax) + movq %op1, 40(%rax) + movq %op2, 48(%rax) + movq %op3, 56(%rax) + movq %bits0, 64(%rax) + movq %bits1, 72(%rax) + movq %bits2, 80(%rax) + movq %bits3, 88(%rax) + + /* Restore registers */ + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %r11 + pop %r10 + pop %r9 + pop %r8 + pop %rdi + pop %rsi + pop %rbp + pop %rdx + pop %rcx + pop %rbx + pop %rax + ret + +_HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: +HUF_decompress4X2_usingDTable_internal_bmi2_asm_loop: + /* Save all registers - even if they are callee saved for simplicity. 
*/ + push %rax + push %rbx + push %rcx + push %rdx + push %rbp + push %rsi + push %rdi + push %r8 + push %r9 + push %r10 + push %r11 + push %r12 + push %r13 + push %r14 + push %r15 + + movq %rdi, %rax + movq 0(%rax), %ip0 + movq 8(%rax), %ip1 + movq 16(%rax), %ip2 + movq 24(%rax), %ip3 + movq 32(%rax), %op0 + movq 40(%rax), %op1 + movq 48(%rax), %op2 + movq 56(%rax), %op3 + movq 64(%rax), %bits0 + movq 72(%rax), %bits1 + movq 80(%rax), %bits2 + movq 88(%rax), %bits3 + movq 96(%rax), %dtable + push %rax /* argument */ + push %rax /* olimit */ + push 104(%rax) /* ilimit */ + + movq 112(%rax), %rax + push %rax /* oend3 */ + + movq %op3, %rax + push %rax /* oend2 */ + + movq %op2, %rax + push %rax /* oend1 */ + + movq %op1, %rax + push %rax /* oend0 */ + + /* Scratch space */ + subq $8, %rsp + +.L_4X2_compute_olimit: + /* Computes how many iterations we can do safely + * %r15, %rax may be clobbered + * rdx must be saved + * op[1,2,3,4] & ip0 mustn't be clobbered + */ + movq %rdx, 0(%rsp) + + /* We can consume up to 7 input bytes each iteration. */ + movq %ip0, %rax /* rax = ip0 */ + movq 40(%rsp), %rdx /* rdx = ilimit */ + subq %rdx, %rax /* rax = ip0 - ilimit */ + movq %rax, %r15 /* r15 = ip0 - ilimit */ + + /* rdx = rax / 7 */ + movabsq $2635249153387078803, %rdx + mulq %rdx + subq %rdx, %r15 + shrq %r15 + addq %r15, %rdx + shrq $2, %rdx + + /* r15 = (ip0 - ilimit) / 7 */ + movq %rdx, %r15 + + movabsq $-3689348814741910323, %rdx + movq 8(%rsp), %rax /* rax = oend0 */ + subq %op0, %rax /* rax = oend0 - op0 */ + mulq %rdx + shrq $3, %rdx /* rdx = rax / 10 */ + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + movabsq $-3689348814741910323, %rdx + movq 16(%rsp), %rax /* rax = oend1 */ + subq %op1, %rax /* rax = oend1 - op1 */ + mulq %rdx + shrq $3, %rdx /* rdx = rax / 10 */ + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + movabsq $-3689348814741910323, %rdx + movq 24(%rsp), %rax /* rax = oend2 */ + subq %op2, %rax /* rax = oend2 - op2 */ + mulq %rdx + shrq $3, %rdx /* rdx = rax / 10 */ + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + movabsq $-3689348814741910323, %rdx + movq 32(%rsp), %rax /* rax = oend3 */ + subq %op3, %rax /* rax = oend3 - op3 */ + mulq %rdx + shrq $3, %rdx /* rdx = rax / 10 */ + + /* r15 = min(%rdx, %r15) */ + cmpq %rdx, %r15 + cmova %rdx, %r15 + + /* olimit = op3 + 5 * r15 */ + movq %r15, %rax + leaq (%op3, %rax, 4), %olimit + addq %rax, %olimit + + movq 0(%rsp), %rdx + + /* If (op3 + 10 > olimit) */ + movq %op3, %rax /* rax = op3 */ + addq $10, %rax /* rax = op3 + 10 */ + cmpq %rax, %olimit /* op3 + 10 > olimit */ + jb .L_4X2_exit + + /* If (ip1 < ip0) go to exit */ + cmpq %ip0, %ip1 + jb .L_4X2_exit + + /* If (ip2 < ip1) go to exit */ + cmpq %ip1, %ip2 + jb .L_4X2_exit + + /* If (ip3 < ip2) go to exit */ + cmpq %ip2, %ip3 + jb .L_4X2_exit + +#define DECODE(n, idx) \ + movq %bits##n, %rax; \ + shrq $53, %rax; \ + movzwl 0(%dtable,%rax,4),%r8d; \ + movzbl 2(%dtable,%rax,4),%r15d; \ + movzbl 3(%dtable,%rax,4),%eax; \ + movw %r8w, (%op##n); \ + shlxq %r15, %bits##n, %bits##n; \ + addq %rax, %op##n + +#define RELOAD_BITS(n) \ + bsfq %bits##n, %bits##n; \ + movq %bits##n, %rax; \ + shrq $3, %bits##n; \ + andq $7, %rax; \ + subq %bits##n, %ip##n; \ + movq (%ip##n), %bits##n; \ + orq $1, %bits##n; \ + shlxq %rax, %bits##n, %bits##n + + + movq %olimit, 48(%rsp) + + .p2align 6 + +.L_4X2_loop_body: + /* We clobber r8, so store it on the stack */ + movq %r8, 0(%rsp) + + /* Decode 5 symbols from each of the 4 
streams (20 symbols total). */ + FOR_EACH_STREAM_WITH_INDEX(DECODE, 0) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 1) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 2) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 3) + FOR_EACH_STREAM_WITH_INDEX(DECODE, 4) + + /* Reload r8 */ + movq 0(%rsp), %r8 + + FOR_EACH_STREAM(RELOAD_BITS) + + cmp %op3, 48(%rsp) + ja .L_4X2_loop_body + jmp .L_4X2_compute_olimit + +#undef DECODE +#undef RELOAD_BITS +.L_4X2_exit: + addq $8, %rsp + /* Restore stack (oend & olimit) */ + pop %rax /* oend0 */ + pop %rax /* oend1 */ + pop %rax /* oend2 */ + pop %rax /* oend3 */ + pop %rax /* ilimit */ + pop %rax /* olimit */ + pop %rax /* arg */ + + /* Save ip / op / bits */ + movq %ip0, 0(%rax) + movq %ip1, 8(%rax) + movq %ip2, 16(%rax) + movq %ip3, 24(%rax) + movq %op0, 32(%rax) + movq %op1, 40(%rax) + movq %op2, 48(%rax) + movq %op3, 56(%rax) + movq %bits0, 64(%rax) + movq %bits1, 72(%rax) + movq %bits2, 80(%rax) + movq %bits3, 88(%rax) + + /* Restore registers */ + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %r11 + pop %r10 + pop %r9 + pop %r8 + pop %rdi + pop %rsi + pop %rbp + pop %rdx + pop %rcx + pop %rbx + pop %rax + ret + +#endif diff --git a/thirdparty/zstd/decompress/zstd_decompress.c b/thirdparty/zstd/decompress/zstd_decompress.c index 910bc034c0..0031e98cfb 100644 --- a/thirdparty/zstd/decompress/zstd_decompress.c +++ b/thirdparty/zstd/decompress/zstd_decompress.c @@ -56,7 +56,6 @@ * Dependencies *********************************************************/ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memmove, ZSTD_memset */ -#include "../common/cpu.h" /* bmi2 */ #include "../common/mem.h" /* low level memory routines */ #define FSE_STATIC_LINKING_ONLY #include "../common/fse.h" @@ -177,12 +176,15 @@ static const ZSTD_DDict* ZSTD_DDictHashSet_getDDict(ZSTD_DDictHashSet* hashSet, static ZSTD_DDictHashSet* ZSTD_createDDictHashSet(ZSTD_customMem customMem) { ZSTD_DDictHashSet* ret = (ZSTD_DDictHashSet*)ZSTD_customMalloc(sizeof(ZSTD_DDictHashSet), customMem); DEBUGLOG(4, "Allocating new hash set"); + if (!ret) + return NULL; ret->ddictPtrTable = (const ZSTD_DDict**)ZSTD_customCalloc(DDICT_HASHSET_TABLE_BASE_SIZE * sizeof(ZSTD_DDict*), customMem); - ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE; - ret->ddictPtrCount = 0; - if (!ret || !ret->ddictPtrTable) { + if (!ret->ddictPtrTable) { + ZSTD_customFree(ret, customMem); return NULL; } + ret->ddictPtrTableSize = DDICT_HASHSET_TABLE_BASE_SIZE; + ret->ddictPtrCount = 0; return ret; } @@ -255,11 +257,15 @@ static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx) dctx->inBuffSize = 0; dctx->outBuffSize = 0; dctx->streamStage = zdss_init; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) dctx->legacyContext = NULL; dctx->previousLegacyVersion = 0; +#endif dctx->noForwardProgress = 0; dctx->oversizedDuration = 0; - dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid()); +#if DYNAMIC_BMI2 + dctx->bmi2 = ZSTD_cpuSupportsBmi2(); +#endif dctx->ddictSet = NULL; ZSTD_DCtx_resetParameters(dctx); #ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION @@ -280,8 +286,7 @@ ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize) return dctx; } -ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) -{ +static ZSTD_DCtx* ZSTD_createDCtx_internal(ZSTD_customMem customMem) { if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL; { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_customMalloc(sizeof(*dctx), customMem); @@ -292,10 +297,15 @@ ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) } } +ZSTD_DCtx* 
ZSTD_createDCtx_advanced(ZSTD_customMem customMem) +{ + return ZSTD_createDCtx_internal(customMem); +} + ZSTD_DCtx* ZSTD_createDCtx(void) { DEBUGLOG(3, "ZSTD_createDCtx"); - return ZSTD_createDCtx_advanced(ZSTD_defaultCMem); + return ZSTD_createDCtx_internal(ZSTD_defaultCMem); } static void ZSTD_clearDict(ZSTD_DCtx* dctx) @@ -380,6 +390,19 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size) return 0; } +/*! ZSTD_isSkippableFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. + */ +unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size) +{ + if (size < ZSTD_FRAMEIDSIZE) return 0; + { U32 const magic = MEM_readLE32(buffer); + if ((magic & ZSTD_MAGIC_SKIPPABLE_MASK) == ZSTD_MAGIC_SKIPPABLE_START) return 1; + } + return 0; +} + /** ZSTD_frameHeaderSize_internal() : * srcSize must be large enough to reach header size fields. * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless. @@ -466,7 +489,9 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s } switch(dictIDSizeCode) { - default: assert(0); /* impossible */ + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; case 0 : break; case 1 : dictID = ip[pos]; pos++; break; case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break; @@ -474,7 +499,9 @@ size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, s } switch(fcsID) { - default: assert(0); /* impossible */ + default: + assert(0); /* impossible */ + ZSTD_FALLTHROUGH; case 0 : if (singleSegment) frameContentSize = ip[pos]; break; case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; case 2 : frameContentSize = MEM_readLE32(ip+pos); break; @@ -503,7 +530,6 @@ size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t src return ZSTD_getFrameHeader_advanced(zfhPtr, src, srcSize, ZSTD_f_zstd1); } - /** ZSTD_getFrameContentSize() : * compatible with legacy mode * @return : decompressed size of the single frame pointed to by `src` if known, otherwise @@ -544,6 +570,37 @@ static size_t readSkippableFrameSize(void const* src, size_t srcSize) } } +/*! ZSTD_readSkippableFrame() : + * Retrieves a zstd skippable frame containing data given by src, and writes it to the dst buffer. + * + * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, + * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested + * in the magicVariant. + * + * Returns an error if the destination buffer is not large enough, or if the frame is not skippable. + * + * @return : number of bytes written or a ZSTD error.
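+ *
+ * A hedged usage sketch (buffer size illustrative):
+ *   unsigned variant;
+ *   char payload[1024];
+ *   size_t const n = ZSTD_readSkippableFrame(payload, sizeof(payload),
+ *                                            &variant, src, srcSize);
+ *   if (!ZSTD_isError(n)) { ... n bytes of user data, variant in [0, 15] ... }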
+ */ +ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant, + const void* src, size_t srcSize) +{ + U32 const magicNumber = MEM_readLE32(src); + size_t skippableFrameSize = readSkippableFrameSize(src, srcSize); + size_t skippableContentSize = skippableFrameSize - ZSTD_SKIPPABLEHEADERSIZE; + + /* check input validity */ + RETURN_ERROR_IF(!ZSTD_isSkippableFrame(src, srcSize), frameParameter_unsupported, ""); + RETURN_ERROR_IF(skippableFrameSize < ZSTD_SKIPPABLEHEADERSIZE || skippableFrameSize > srcSize, srcSize_wrong, ""); + RETURN_ERROR_IF(skippableContentSize > dstCapacity, dstSize_tooSmall, ""); + + /* deliver payload */ + if (skippableContentSize > 0 && dst != NULL) + ZSTD_memcpy(dst, (const BYTE *)src + ZSTD_SKIPPABLEHEADERSIZE, skippableContentSize); + if (magicVariant != NULL) + *magicVariant = magicNumber - ZSTD_MAGIC_SKIPPABLE_START; + return skippableContentSize; +} + /** ZSTD_findDecompressedSize() : * compatible with legacy mode * `srcSize` must be the exact length of some number of ZSTD compressed and/or @@ -858,7 +915,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, switch(blockProperties.blockType) { case bt_compressed: - decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1); + decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1, not_streaming); break; case bt_raw : decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize); @@ -1009,7 +1066,7 @@ static ZSTD_DDict const* ZSTD_getDDict(ZSTD_DCtx* dctx) switch (dctx->dictUses) { default: assert(0 /* Impossible */); - /* fall-through */ + ZSTD_FALLTHROUGH; case ZSTD_dont_use: ZSTD_clearDict(dctx); return NULL; @@ -1031,7 +1088,7 @@ size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t sr { #if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1) size_t regenSize; - ZSTD_DCtx* const dctx = ZSTD_createDCtx(); + ZSTD_DCtx* const dctx = ZSTD_createDCtx_internal(ZSTD_defaultCMem); RETURN_ERROR_IF(dctx==NULL, memory_allocation, "NULL pointer!"); regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize); ZSTD_freeDCtx(dctx); @@ -1065,7 +1122,7 @@ static size_t ZSTD_nextSrcSizeToDecompressWithInputSize(ZSTD_DCtx* dctx, size_t return dctx->expected; if (dctx->bType != bt_raw) return dctx->expected; - return MIN(MAX(inputSize, 1), dctx->expected); + return BOUNDED(1, inputSize, dctx->expected); } ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { @@ -1073,7 +1130,9 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { { default: /* should not happen */ assert(0); + ZSTD_FALLTHROUGH; case ZSTDds_getFrameHeaderSize: + ZSTD_FALLTHROUGH; case ZSTDds_decodeFrameHeader: return ZSTDnit_frameHeader; case ZSTDds_decodeBlockHeader: @@ -1085,6 +1144,7 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { case ZSTDds_checkChecksum: return ZSTDnit_checksum; case ZSTDds_decodeSkippableHeader: + ZSTD_FALLTHROUGH; case ZSTDds_skipFrame: return ZSTDnit_skippableFrame; } @@ -1168,7 +1228,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c { case bt_compressed: DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed"); - rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1); + rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1, is_streaming); dctx->expected = 0; /* Streaming not supported */ break; case bt_raw : @@ -1493,7 +1553,7 @@ 
size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, ZSTD_DStream* ZSTD_createDStream(void) { DEBUGLOG(3, "ZSTD_createDStream"); - return ZSTD_createDStream_advanced(ZSTD_defaultCMem); + return ZSTD_createDCtx_internal(ZSTD_defaultCMem); } ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) @@ -1503,7 +1563,7 @@ ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize) ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) { - return ZSTD_createDCtx_advanced(customMem); + return ZSTD_createDCtx_internal(customMem); } size_t ZSTD_freeDStream(ZSTD_DStream* zds) @@ -1763,7 +1823,8 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx) size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize) { size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX); - unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2); + /* space is needed to store the litbuffer after the output of a given block without stomping the extDict of a previous run, as well as to cover both windows against wildcopy*/ + unsigned long long const neededRBSize = windowSize + blockSize + ZSTD_BLOCKSIZE_MAX + (WILDCOPY_OVERLENGTH * 2); unsigned long long const neededSize = MIN(frameContentSize, neededRBSize); size_t const minRBSize = (size_t) neededSize; RETURN_ERROR_IF((unsigned long long)minRBSize != neededSize, @@ -1897,10 +1958,12 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB DEBUGLOG(5, "stage zdss_init => transparent reset "); zds->streamStage = zdss_loadHeader; zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) zds->legacyVersion = 0; +#endif zds->hostageByte = 0; zds->expectedOutBuffer = *output; - /* fall-through */ + ZSTD_FALLTHROUGH; case zdss_loadHeader : DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip)); @@ -2038,7 +2101,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB zds->outBuffSize = neededOutBuffSize; } } } zds->streamStage = zdss_read; - /* fall-through */ + ZSTD_FALLTHROUGH; case zdss_read: DEBUGLOG(5, "stage zdss_read"); @@ -2057,7 +2120,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB } } if (ip==iend) { someMoreWork = 0; break; } /* no more input */ zds->streamStage = zdss_load; - /* fall-through */ + ZSTD_FALLTHROUGH; case zdss_load: { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds); diff --git a/thirdparty/zstd/decompress/zstd_decompress_block.c b/thirdparty/zstd/decompress/zstd_decompress_block.c index 349dcdc333..2e44d30d2f 100644 --- a/thirdparty/zstd/decompress/zstd_decompress_block.c +++ b/thirdparty/zstd/decompress/zstd_decompress_block.c @@ -69,15 +69,56 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, } } +/* Allocate buffer for literals, either overlapping current dst, or split between dst and litExtraBuffer, or stored entirely within litExtraBuffer */ +static void ZSTD_allocateLiteralsBuffer(ZSTD_DCtx* dctx, void* const dst, const size_t dstCapacity, const size_t litSize, + const streaming_operation streaming, const size_t expectedWriteSize, const unsigned splitImmediately) +{ + if (streaming == not_streaming && dstCapacity > ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH + litSize + WILDCOPY_OVERLENGTH) + { + /* room for litbuffer to fit without read faulting */ + dctx->litBuffer = (BYTE*)dst + ZSTD_BLOCKSIZE_MAX + 
WILDCOPY_OVERLENGTH; + dctx->litBufferEnd = dctx->litBuffer + litSize; + dctx->litBufferLocation = ZSTD_in_dst; + } + else if (litSize > ZSTD_LITBUFFEREXTRASIZE) + { + /* won't fit in litExtraBuffer, so it will be split between end of dst and extra buffer */ + if (splitImmediately) { + dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; + dctx->litBufferEnd = dctx->litBuffer + litSize - ZSTD_LITBUFFEREXTRASIZE; + } + else { + /* initially this will be stored entirely in dst during Huffman decoding, it will be partially shifted to litExtraBuffer afterwards */ + dctx->litBuffer = (BYTE*)dst + expectedWriteSize - litSize; + dctx->litBufferEnd = (BYTE*)dst + expectedWriteSize; + } + dctx->litBufferLocation = ZSTD_split; + } + else + { + /* fits entirely within litExtraBuffer, so no split is necessary */ + dctx->litBuffer = dctx->litExtraBuffer; + dctx->litBufferEnd = dctx->litBuffer + litSize; + dctx->litBufferLocation = ZSTD_not_in_dst; + } +} /* Hidden declaration for fullbench */ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, - const void* src, size_t srcSize); + const void* src, size_t srcSize, + void* dst, size_t dstCapacity, const streaming_operation streaming); /*! ZSTD_decodeLiteralsBlock() : + * Where it is possible to do so without being stomped by the output during decompression, the literals block will be stored + * in the dstBuffer. If there is room to do so, it will be stored in full in the excess dst space after where the current + * block will be output. Otherwise it will be stored at the end of the current dst blockspace, with a small portion being + * stored in dctx->litExtraBuffer to help keep it "ahead" of the current output write.
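+ *
+ * Rough decision order (mirroring ZSTD_allocateLiteralsBuffer above):
+ *   1. not streaming and dstCapacity exceeds ZSTD_BLOCKSIZE_MAX + litSize
+ *      + 2*WILDCOPY_OVERLENGTH            -> ZSTD_in_dst (all of it in dst)
+ *   2. litSize > ZSTD_LITBUFFEREXTRASIZE  -> ZSTD_split (tail in litExtraBuffer)
+ *   3. otherwise                          -> ZSTD_not_in_dst (litExtraBuffer only)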
+ * * @return : nb of bytes read from src (< srcSize ) * note : symbol not declared but exposed for fullbench */ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, - const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ + const void* src, size_t srcSize, /* note : srcSize < BLOCKSIZE */ + void* dst, size_t dstCapacity, const streaming_operation streaming) { DEBUGLOG(5, "ZSTD_decodeLiteralsBlock"); RETURN_ERROR_IF(srcSize < MIN_CBLOCK_SIZE, corruption_detected, ""); @@ -90,7 +131,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, case set_repeat: DEBUGLOG(5, "set_repeat flag : re-using stats from previous compressed literals block"); RETURN_ERROR_IF(dctx->litEntropy==0, dictionary_corrupted, ""); - /* fall-through */ + ZSTD_FALLTHROUGH; case set_compressed: RETURN_ERROR_IF(srcSize < 5, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3"); @@ -99,6 +140,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); size_t hufSuccess; + size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); switch(lhlCode) { case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -121,8 +163,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, litCSize = (lhc >> 22) + ((size_t)istart[4] << 10); break; } + RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); RETURN_ERROR_IF(litCSize + lhSize > srcSize, corruption_detected, ""); + RETURN_ERROR_IF(expectedWriteSize < litSize , dstSize_tooSmall, ""); + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 0); /* prefetch huffman table if cold */ if (dctx->ddictIsCold && (litSize > 768 /* heuristic */)) { @@ -133,11 +178,11 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, if (singleStream) { hufSuccess = HUF_decompress1X_usingDTable_bmi2( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, dctx->bmi2); + dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx)); } else { hufSuccess = HUF_decompress4X_usingDTable_bmi2( dctx->litBuffer, litSize, istart+lhSize, litCSize, - dctx->HUFptr, dctx->bmi2); + dctx->HUFptr, ZSTD_DCtx_get_bmi2(dctx)); } } else { if (singleStream) { @@ -150,15 +195,22 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, hufSuccess = HUF_decompress1X1_DCtx_wksp_bmi2( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), dctx->bmi2); + sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx)); #endif } else { hufSuccess = HUF_decompress4X_hufOnly_wksp_bmi2( dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->workspace, - sizeof(dctx->workspace), dctx->bmi2); + sizeof(dctx->workspace), ZSTD_DCtx_get_bmi2(dctx)); } } + if (dctx->litBufferLocation == ZSTD_split) + { + ZSTD_memcpy(dctx->litExtraBuffer, dctx->litBufferEnd - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); + ZSTD_memmove(dctx->litBuffer + ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH, dctx->litBuffer, litSize - ZSTD_LITBUFFEREXTRASIZE); + dctx->litBuffer += ZSTD_LITBUFFEREXTRASIZE - WILDCOPY_OVERLENGTH; + dctx->litBufferEnd -= WILDCOPY_OVERLENGTH; + } RETURN_ERROR_IF(HUF_isError(hufSuccess), corruption_detected, ""); @@ -166,13 +218,13 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, dctx->litSize = litSize; dctx->litEntropy = 1; if (litEncType==set_compressed) dctx->HUFptr = 
dctx->entropy.hufTable; - ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return litCSize + lhSize; } case set_basic: { size_t litSize, lhSize; U32 const lhlCode = ((istart[0]) >> 2) & 3; + size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -189,23 +241,36 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, break; } + RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); + RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ RETURN_ERROR_IF(litSize+lhSize > srcSize, corruption_detected, ""); - ZSTD_memcpy(dctx->litBuffer, istart+lhSize, litSize); + if (dctx->litBufferLocation == ZSTD_split) + { + ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize - ZSTD_LITBUFFEREXTRASIZE); + ZSTD_memcpy(dctx->litExtraBuffer, istart + lhSize + litSize - ZSTD_LITBUFFEREXTRASIZE, ZSTD_LITBUFFEREXTRASIZE); + } + else + { + ZSTD_memcpy(dctx->litBuffer, istart + lhSize, litSize); + } dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; - ZSTD_memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); return lhSize+litSize; } /* direct reference into compressed stream */ dctx->litPtr = istart+lhSize; dctx->litSize = litSize; + dctx->litBufferEnd = dctx->litPtr + litSize; + dctx->litBufferLocation = ZSTD_not_in_dst; return lhSize+litSize; } case set_rle: { U32 const lhlCode = ((istart[0]) >> 2) & 3; size_t litSize, lhSize; + size_t expectedWriteSize = MIN(ZSTD_BLOCKSIZE_MAX, dstCapacity); switch(lhlCode) { case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ @@ -222,8 +287,19 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, RETURN_ERROR_IF(srcSize<4, corruption_detected, "srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4"); break; } + RETURN_ERROR_IF(litSize > 0 && dst == NULL, dstSize_tooSmall, "NULL not handled"); RETURN_ERROR_IF(litSize > ZSTD_BLOCKSIZE_MAX, corruption_detected, ""); - ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); + RETURN_ERROR_IF(expectedWriteSize < litSize, dstSize_tooSmall, ""); + ZSTD_allocateLiteralsBuffer(dctx, dst, dstCapacity, litSize, streaming, expectedWriteSize, 1); + if (dctx->litBufferLocation == ZSTD_split) + { + ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize - ZSTD_LITBUFFEREXTRASIZE); + ZSTD_memset(dctx->litExtraBuffer, istart[lhSize], ZSTD_LITBUFFEREXTRASIZE); + } + else + { + ZSTD_memset(dctx->litBuffer, istart[lhSize], litSize); + } dctx->litPtr = dctx->litBuffer; dctx->litSize = litSize; return lhSize+1; @@ -343,7 +419,7 @@ static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = { }; /* ML_defaultDTable */ -static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits) +static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U8 nbAddBits) { void* ptr = dt; ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr; @@ -355,7 +431,7 @@ static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddB cell->nbBits = 0; cell->nextState = 0; assert(nbAddBits < 255); - cell->nbAdditionalBits = (BYTE)nbAddBits; + cell->nbAdditionalBits = nbAddBits; cell->baseValue = baseValue; } @@ -367,7 +443,7 @@ static void 
ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddB FORCE_INLINE_TEMPLATE void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, - const U32* baseValue, const U32* nbAdditionalBits, + const U32* baseValue, const U8* nbAdditionalBits, unsigned tableLog, void* wksp, size_t wkspSize) { ZSTD_seqSymbol* const tableDecode = dt+1; @@ -478,7 +554,7 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) ); tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize); assert(nbAdditionalBits[symbol] < 255); - tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol]; + tableDecode[u].nbAdditionalBits = nbAdditionalBits[symbol]; tableDecode[u].baseValue = baseValue[symbol]; } } @@ -487,7 +563,7 @@ void ZSTD_buildFSETable_body(ZSTD_seqSymbol* dt, /* Avoids the FORCE_INLINE of the _body() function. */ static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, - const U32* baseValue, const U32* nbAdditionalBits, + const U32* baseValue, const U8* nbAdditionalBits, unsigned tableLog, void* wksp, size_t wkspSize) { ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue, @@ -495,9 +571,9 @@ static void ZSTD_buildFSETable_body_default(ZSTD_seqSymbol* dt, } #if DYNAMIC_BMI2 -TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt, +BMI2_TARGET_ATTRIBUTE static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, - const U32* baseValue, const U32* nbAdditionalBits, + const U32* baseValue, const U8* nbAdditionalBits, unsigned tableLog, void* wksp, size_t wkspSize) { ZSTD_buildFSETable_body(dt, normalizedCounter, maxSymbolValue, @@ -507,7 +583,7 @@ TARGET_ATTRIBUTE("bmi2") static void ZSTD_buildFSETable_body_bmi2(ZSTD_seqSymbol void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, - const U32* baseValue, const U32* nbAdditionalBits, + const U32* baseValue, const U8* nbAdditionalBits, unsigned tableLog, void* wksp, size_t wkspSize, int bmi2) { #if DYNAMIC_BMI2 @@ -529,7 +605,7 @@ void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr, symbolEncodingType_e type, unsigned max, U32 maxLog, const void* src, size_t srcSize, - const U32* baseValue, const U32* nbAdditionalBits, + const U32* baseValue, const U8* nbAdditionalBits, const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable, int ddictIsCold, int nbSeq, U32* wksp, size_t wkspSize, int bmi2) @@ -541,7 +617,7 @@ static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymb RETURN_ERROR_IF((*(const BYTE*)src) > max, corruption_detected, ""); { U32 const symbol = *(const BYTE*)src; U32 const baseline = baseValue[symbol]; - U32 const nbBits = nbAdditionalBits[symbol]; + U8 const nbBits = nbAdditionalBits[symbol]; ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits); } *DTablePtr = DTableSpace; @@ -620,7 +696,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, LL_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(dctx->workspace), - dctx->bmi2); + ZSTD_DCtx_get_bmi2(dctx)); RETURN_ERROR_IF(ZSTD_isError(llhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += llhSize; } @@ -632,7 +708,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, 
OF_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(dctx->workspace), - dctx->bmi2); + ZSTD_DCtx_get_bmi2(dctx)); RETURN_ERROR_IF(ZSTD_isError(ofhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += ofhSize; } @@ -644,7 +720,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, ML_defaultDTable, dctx->fseEntropy, dctx->ddictIsCold, nbSeq, dctx->workspace, sizeof(dctx->workspace), - dctx->bmi2); + ZSTD_DCtx_get_bmi2(dctx)); RETURN_ERROR_IF(ZSTD_isError(mlhSize), corruption_detected, "ZSTD_buildSeqTable failed"); ip += mlhSize; } @@ -713,7 +789,7 @@ HINT_INLINE void ZSTD_overlapCopy8(BYTE** op, BYTE const** ip, size_t offset) { * - ZSTD_overlap_src_before_dst: The src and dst may overlap and may be any distance apart. * The src buffer must be before the dst buffer. */ -static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { +static void ZSTD_safecopy(BYTE* op, const BYTE* const oend_w, BYTE const* ip, ptrdiff_t length, ZSTD_overlap_e ovtype) { ptrdiff_t const diff = op - ip; BYTE* const oend = op + length; @@ -729,6 +805,7 @@ static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_ /* Copy 8 bytes and ensure the offset >= 8 when there can be overlap. */ assert(length >= 8); ZSTD_overlapCopy8(&op, &ip, diff); + length -= 8; assert(op - ip >= 8); assert(op <= oend); } @@ -743,12 +820,35 @@ static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_ assert(oend > oend_w); ZSTD_wildcopy(op, ip, oend_w - op, ovtype); ip += oend_w - op; - op = oend_w; + op += oend_w - op; } /* Handle the leftovers. */ while (op < oend) *op++ = *ip++; } +/* ZSTD_safecopyDstBeforeSrc(): + * This version allows overlap with dst before src, or handles the non-overlap case with dst after src. + * Kept separate from the more common ZSTD_safecopy case to avoid a performance impact on that common path. */ +static void ZSTD_safecopyDstBeforeSrc(BYTE* op, BYTE const* ip, ptrdiff_t length) { + ptrdiff_t const diff = op - ip; + BYTE* const oend = op + length; + + if (length < 8 || diff > -8) { + /* Handle short lengths, close overlaps, and dst not before src. */ + while (op < oend) *op++ = *ip++; + return; + } + + if (op <= oend - WILDCOPY_OVERLENGTH && diff < -WILDCOPY_VECLEN) { + ZSTD_wildcopy(op, ip, oend - WILDCOPY_OVERLENGTH - op, ZSTD_no_overlap); + ip += oend - WILDCOPY_OVERLENGTH - op; + op += oend - WILDCOPY_OVERLENGTH - op; + } + + /* Handle the leftovers. */ + while (op < oend) *op++ = *ip++; +} + /* ZSTD_execSequenceEnd(): * This version handles cases that are near the end of the output buffer. It requires * more careful checks to make sure there is no overflow.
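[Editor's note] `ZSTD_safecopyDstBeforeSrc` is new in this sync: it covers the case where leftover literals parked at the tail of `dst` must be moved down to the current write position, i.e. dst precedes src inside one buffer, where `memcpy` would be undefined and `ZSTD_wildcopy` needs the `diff < -WILDCOPY_VECLEN` margin checked above. A tiny self-contained demo of the safe forward byte copy (hypothetical standalone code, not the zstd implementation):

```c
#include <stdio.h>

int main(void)
{
    char buf[32] = "........LITERALS"; /* literals parked at offset 8 */
    char* src = buf + 8;               /* end-of-dst literal area     */
    char* dst = buf + 2;               /* current output position     */
    /* The regions overlap at [8,10), so memcpy would be undefined here.
     * A forward byte loop is safe when dst < src: every write lands
     * strictly below the next read. */
    for (size_t i = 0; i < 8; i++) dst[i] = src[i];
    printf("%.10s\n", buf);            /* prints "..LITERALS" */
    return 0;
}
```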
By separating out these hard @@ -759,9 +859,9 @@ static void ZSTD_safecopy(BYTE* op, BYTE* const oend_w, BYTE const* ip, ptrdiff_ */ FORCE_NOINLINE size_t ZSTD_execSequenceEnd(BYTE* op, - BYTE* const oend, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; @@ -784,27 +884,76 @@ size_t ZSTD_execSequenceEnd(BYTE* op, if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { /* offset beyond prefix */ RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); - match = dictEnd - (prefixStart-match); + match = dictEnd - (prefixStart - match); if (match + sequence.matchLength <= dictEnd) { ZSTD_memmove(oLitEnd, match, sequence.matchLength); return sequenceLength; } /* span extDict & currentPrefixSegment */ { size_t const length1 = dictEnd - match; - ZSTD_memmove(oLitEnd, match, length1); - op = oLitEnd + length1; - sequence.matchLength -= length1; - match = prefixStart; - } } + ZSTD_memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } + } + ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); + return sequenceLength; +} + +/* ZSTD_execSequenceEndSplitLitBuffer(): + * This version is intended for cases where the litBuffer is still split. It is kept separate to avoid a performance impact in the common case.
+ */ +FORCE_NOINLINE +size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op, + BYTE* const oend, const BYTE* const oend_w, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + + /* bounds checks : careful of address space overflow in 32-bit mode */ + RETURN_ERROR_IF(sequenceLength > (size_t)(oend - op), dstSize_tooSmall, "last match must fit within dstBuffer"); + RETURN_ERROR_IF(sequence.litLength > (size_t)(litLimit - *litPtr), corruption_detected, "try to read beyond literal buffer"); + assert(op < op + sequenceLength); + assert(oLitEnd < op + sequenceLength); + + /* copy literals */ + RETURN_ERROR_IF(op > *litPtr && op < *litPtr + sequence.litLength, dstSize_tooSmall, "output should not catch up to and overwrite literal buffer"); + ZSTD_safecopyDstBeforeSrc(op, *litPtr, sequence.litLength); + op = oLitEnd; + *litPtr = iLitEnd; + + /* copy Match */ + if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { + /* offset beyond prefix */ + RETURN_ERROR_IF(sequence.offset > (size_t)(oLitEnd - virtualStart), corruption_detected, ""); + match = dictEnd - (prefixStart - match); + if (match + sequence.matchLength <= dictEnd) { + ZSTD_memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + ZSTD_memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } + } ZSTD_safecopy(op, oend_w, match, sequence.matchLength, ZSTD_overlap_src_before_dst); return sequenceLength; } HINT_INLINE size_t ZSTD_execSequence(BYTE* op, - BYTE* const oend, seq_t sequence, - const BYTE** litPtr, const BYTE* const litLimit, - const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) { BYTE* const oLitEnd = op + sequence.litLength; size_t const sequenceLength = sequence.litLength + sequence.matchLength; @@ -821,10 +970,102 @@ size_t ZSTD_execSequence(BYTE* op, * - 32-bit mode and the match length overflows */ if (UNLIKELY( + iLitEnd > litLimit || + oMatchEnd > oend_w || + (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) + return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + + /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ + assert(op <= oLitEnd /* No overflow */); + assert(oLitEnd < oMatchEnd /* Non-zero match & no overflow */); + assert(oMatchEnd <= oend /* No underflow */); + assert(iLitEnd <= litLimit /* Literal length is in bounds */); + assert(oLitEnd <= oend_w /* Can wildcopy literals */); + assert(oMatchEnd <= oend_w /* Can wildcopy matches */); + + /* Copy Literals: + * Split out litLength <= 16 since it is nearly always true. +1.6% on gcc-9. + * We likely don't need the full 32-byte wildcopy. 
+ */ + assert(WILDCOPY_OVERLENGTH >= 16); + ZSTD_copy16(op, (*litPtr)); + if (UNLIKELY(sequence.litLength > 16)) { + ZSTD_wildcopy(op + 16, (*litPtr) + 16, sequence.litLength - 16, ZSTD_no_overlap); + } + op = oLitEnd; + *litPtr = iLitEnd; /* update for next sequence */ + + /* Copy Match */ + if (sequence.offset > (size_t)(oLitEnd - prefixStart)) { + /* offset beyond prefix -> go into extDict */ + RETURN_ERROR_IF(UNLIKELY(sequence.offset > (size_t)(oLitEnd - virtualStart)), corruption_detected, ""); + match = dictEnd + (match - prefixStart); + if (match + sequence.matchLength <= dictEnd) { + ZSTD_memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + ZSTD_memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = prefixStart; + } + } + /* Match within prefix of 1 or more bytes */ + assert(op <= oMatchEnd); + assert(oMatchEnd <= oend_w); + assert(match >= prefixStart); + assert(sequence.matchLength >= 1); + + /* Nearly all offsets are >= WILDCOPY_VECLEN bytes, which means we can use wildcopy + * without overlap checking. + */ + if (LIKELY(sequence.offset >= WILDCOPY_VECLEN)) { + /* We bet on a full wildcopy for matches, since we expect matches to be + * longer than literals (in general). In silesia, ~10% of matches are longer + * than 16 bytes. + */ + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength, ZSTD_no_overlap); + return sequenceLength; + } + assert(sequence.offset < WILDCOPY_VECLEN); + + /* Copy 8 bytes and spread the offset to be >= 8. */ + ZSTD_overlapCopy8(&op, &match, sequence.offset); + + /* If the match length is > 8 bytes, then continue with the wildcopy. */ + if (sequence.matchLength > 8) { + assert(op < oMatchEnd); + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength - 8, ZSTD_overlap_src_before_dst); + } + return sequenceLength; +} + +HINT_INLINE +size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op, + BYTE* const oend, const BYTE* const oend_w, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const prefixStart, const BYTE* const virtualStart, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + assert(op != NULL /* Precondition */); + assert(oend_w < oend /* No underflow */); + /* Handle edge cases in a slow path: + * - Read beyond end of literals + * - Match end is within WILDCOPY_OVERLIMIT of oend + * - 32-bit mode and the match length overflows + */ + if (UNLIKELY( iLitEnd > litLimit || oMatchEnd > oend_w || (MEM_32bits() && (size_t)(oend - op) < sequenceLength + WILDCOPY_OVERLENGTH))) - return ZSTD_execSequenceEnd(op, oend, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); + return ZSTD_execSequenceEndSplitLitBuffer(op, oend, oend_w, sequence, litPtr, litLimit, prefixStart, virtualStart, dictEnd); /* Assumptions (everything else goes into ZSTD_execSequenceEnd()) */ assert(op <= oLitEnd /* No overflow */); @@ -892,6 +1133,7 @@ size_t ZSTD_execSequence(BYTE* op, return sequenceLength; } + static void ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt) { @@ -905,20 +1147,10 @@ ZSTD_initFseState(ZSTD_fseState* DStatePtr, 
BIT_DStream_t* bitD, const ZSTD_seqS } FORCE_INLINE_TEMPLATE void -ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD) -{ - ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state]; - U32 const nbBits = DInfo.nbBits; - size_t const lowBits = BIT_readBits(bitD, nbBits); - DStatePtr->state = DInfo.nextState + lowBits; -} - -FORCE_INLINE_TEMPLATE void -ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, ZSTD_seqSymbol const DInfo) +ZSTD_updateFseStateWithDInfo(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, U16 nextState, U32 nbBits) { - U32 const nbBits = DInfo.nbBits; size_t const lowBits = BIT_readBits(bitD, nbBits); - DStatePtr->state = DInfo.nextState + lowBits; + DStatePtr->state = nextState + lowBits; } /* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum @@ -937,102 +1169,100 @@ FORCE_INLINE_TEMPLATE seq_t ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets) { seq_t seq; - ZSTD_seqSymbol const llDInfo = seqState->stateLL.table[seqState->stateLL.state]; - ZSTD_seqSymbol const mlDInfo = seqState->stateML.table[seqState->stateML.state]; - ZSTD_seqSymbol const ofDInfo = seqState->stateOffb.table[seqState->stateOffb.state]; - U32 const llBase = llDInfo.baseValue; - U32 const mlBase = mlDInfo.baseValue; - U32 const ofBase = ofDInfo.baseValue; - BYTE const llBits = llDInfo.nbAdditionalBits; - BYTE const mlBits = mlDInfo.nbAdditionalBits; - BYTE const ofBits = ofDInfo.nbAdditionalBits; - BYTE const totalBits = llBits+mlBits+ofBits; - - /* sequence */ - { size_t offset; - if (ofBits > 1) { - ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); - ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); - assert(ofBits <= MaxOff); - if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { - U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); - offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); - BIT_reloadDStream(&seqState->DStream); - if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); - assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ - } else { - offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); - } - seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset; - } else { - U32 const ll0 = (llBase == 0); - if (LIKELY((ofBits == 0))) { - if (LIKELY(!ll0)) - offset = seqState->prevOffset[0]; - else { - offset = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset; + const ZSTD_seqSymbol* const llDInfo = seqState->stateLL.table + seqState->stateLL.state; + const ZSTD_seqSymbol* const mlDInfo = seqState->stateML.table + seqState->stateML.state; + const ZSTD_seqSymbol* const ofDInfo = seqState->stateOffb.table + seqState->stateOffb.state; + seq.matchLength = mlDInfo->baseValue; + seq.litLength = llDInfo->baseValue; + { U32 const ofBase = ofDInfo->baseValue; + BYTE const llBits = llDInfo->nbAdditionalBits; + BYTE const mlBits = mlDInfo->nbAdditionalBits; + BYTE const ofBits = ofDInfo->nbAdditionalBits; + BYTE const totalBits = llBits+mlBits+ofBits; + + U16 const llNext = llDInfo->nextState; + U16 const mlNext = mlDInfo->nextState; + U16 const ofNext = ofDInfo->nextState; + U32 const llnbBits = llDInfo->nbBits; + U32 
const mlnbBits = mlDInfo->nbBits; + U32 const ofnbBits = ofDInfo->nbBits; + /* + * As gcc has better branch and block analyzers, sometimes it is only + * valuable to mark likelyness for clang, it gives around 3-4% of + * performance. + */ + + /* sequence */ + { size_t offset; + #if defined(__clang__) + if (LIKELY(ofBits > 1)) { + #else + if (ofBits > 1) { + #endif + ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1); + ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5); + assert(ofBits <= MaxOff); + if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) { + U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed); + offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); + BIT_reloadDStream(&seqState->DStream); + if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); + assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */ + } else { + offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); } + seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; } else { - offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); - { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; - temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ - if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; - seqState->prevOffset[1] = seqState->prevOffset[0]; - seqState->prevOffset[0] = offset = temp; - } } } - seq.offset = offset; - } - - seq.matchLength = mlBase; - if (mlBits > 0) - seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); - - if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) - BIT_reloadDStream(&seqState->DStream); - if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) - BIT_reloadDStream(&seqState->DStream); - /* Ensure there are enough bits to read the rest of data in 64-bit mode. */ - ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); - - seq.litLength = llBase; - if (llBits > 0) - seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); - - if (MEM_32bits()) - BIT_reloadDStream(&seqState->DStream); - - DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", - (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); - - /* ANS state update - * gcc-9.0.0 does 2.5% worse with ZSTD_updateFseStateWithDInfo(). - * clang-9.2.0 does 7% worse with ZSTD_updateFseState(). - * Naturally it seems like ZSTD_updateFseStateWithDInfo() should be the - * better option, so it is the default for other compilers. But, if you - * measure that it is worse, please put up a pull request. 
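[Editor's note] A worked example may help for the 32-bit long-offset branch above: when `ofBits` exceeds what the 32-bit accumulator can serve in one read, the high `ofBits - extraBits` bits are read first and shifted up, the stream is reloaded, and the low `extraBits` bits (at most `LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5`) are added. A toy reassembly with a faked bit source; only the arithmetic mirrors the real code:

```c
#include <stdio.h>

int main(void)
{
    unsigned long long const field = 0x2ABCDEF1ULL;  /* a 30-bit offset field */
    unsigned const ofBits = 30, readable = 27;       /* 27 bits left before reload */
    unsigned const extraBits = ofBits - (ofBits < readable ? ofBits : readable); /* 3 */
    unsigned long long const high = field >> extraBits;              /* first read   */
    unsigned long long const low  = field & ((1u << extraBits) - 1); /* after reload */
    unsigned long long const offset = (high << extraBits) + low;
    printf("extraBits=%u ok=%d\n", extraBits, offset == field);      /* extraBits=3 ok=1 */
    return 0;
}
```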
- */ - { -#if defined(__GNUC__) && !defined(__clang__) - const int kUseUpdateFseState = 1; -#else - const int kUseUpdateFseState = 0; -#endif - if (kUseUpdateFseState) { - ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ - ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ - ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ - } else { - ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llDInfo); /* <= 9 bits */ - ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlDInfo); /* <= 9 bits */ - if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ - ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofDInfo); /* <= 8 bits */ + U32 const ll0 = (llDInfo->baseValue == 0); + if (LIKELY((ofBits == 0))) { + offset = seqState->prevOffset[ll0]; + seqState->prevOffset[1] = seqState->prevOffset[!ll0]; + seqState->prevOffset[0] = offset; + } else { + offset = ofBase + ll0 + BIT_readBitsFast(&seqState->DStream, 1); + { size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ + if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset = temp; + } } } + seq.offset = offset; } + + #if defined(__clang__) + if (UNLIKELY(mlBits > 0)) + #else + if (mlBits > 0) + #endif + seq.matchLength += BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/); + + if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32)) + BIT_reloadDStream(&seqState->DStream); + if (MEM_64bits() && UNLIKELY(totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog))) + BIT_reloadDStream(&seqState->DStream); + /* Ensure there are enough bits to read the rest of data in 64-bit mode. 
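[Editor's note] The rewritten repeat-offset path above replaces the old nested branches with an index trick: when `ofBits == 0`, `ll0` (literal length equal to zero) redirects the "repeat last offset" code to the second-most-recent offset, as the zstd format requires. Extracted into a runnable toy with example history values:

```c
#include <stdio.h>

int main(void)
{
    size_t rep[3] = { 100, 200, 300 };  /* prevOffset history, example values */
    int const ll0 = 1;                  /* this sequence has litLength == 0   */
    size_t const offset = rep[ll0];     /* picks rep[1] == 200                */
    rep[1] = rep[!ll0];                 /* old rep[0] slides down             */
    rep[0] = offset;
    printf("offset=%zu history={%zu,%zu,%zu}\n", offset, rep[0], rep[1], rep[2]);
    return 0;                           /* offset=200 history={200,100,300}   */
}
```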
*/ + ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64); + + #if defined(__clang__) + if (UNLIKELY(llBits > 0)) + #else + if (llBits > 0) + #endif + seq.litLength += BIT_readBitsFast(&seqState->DStream, llBits/*>0*/); + + if (MEM_32bits()) + BIT_reloadDStream(&seqState->DStream); + + DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u", + (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset); + + ZSTD_updateFseStateWithDInfo(&seqState->stateLL, &seqState->DStream, llNext, llnbBits); /* <= 9 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateML, &seqState->DStream, mlNext, mlnbBits); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + ZSTD_updateFseStateWithDInfo(&seqState->stateOffb, &seqState->DStream, ofNext, ofnbBits); /* <= 8 bits */ } return seq; @@ -1085,9 +1315,11 @@ MEM_STATIC void ZSTD_assertValidSequence( #endif #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG + + FORCE_INLINE_TEMPLATE size_t DONT_VECTORIZE -ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, +ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, const ZSTD_longOffset_e isLongOffset, @@ -1099,11 +1331,11 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, BYTE* const oend = ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; - const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const vBase = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); - DEBUGLOG(5, "ZSTD_decompressSequences_body"); + DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer"); (void)frame; /* Regen sequences */ @@ -1124,55 +1356,237 @@ ZSTD_decompressSequences_body( ZSTD_DCtx* dctx, BIT_DStream_endOfBuffer < BIT_DStream_completed && BIT_DStream_completed < BIT_DStream_overflow); + /* decompress without overrunning litPtr begins */ + { + seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + /* Align the decompression loop to 32 + 16 bytes. + * + * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression + * speed swings based on the alignment of the decompression loop. This + * performance swing is caused by parts of the decompression loop falling + * out of the DSB. The entire decompression loop should fit in the DSB, + * when it can't we get much worse performance. You can measure if you've + * hit the good case or the bad case with this perf command for some + * compressed file test.zst: + * + * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \ + * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst + * + * If you see most cycles served out of the MITE you've hit the bad case. + * If you see most cycles served out of the DSB you've hit the good case. + * If it is pretty even then you may be in an okay case. + * + * This issue has been reproduced on the following CPUs: + * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9 + * Use Instruments->Counters to get DSB/MITE cycles. + * I never got performance swings, but I was able to + * go from the good case of mostly DSB to half of the + * cycles served from MITE. 
+ * - Coffeelake: Intel i9-9900k + * - Coffeelake: Intel i7-9700k + * + * I haven't been able to reproduce the instability or DSB misses on any + * of the following CPUs: + * - Haswell + * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz + * - Skylake + * + * Alignment is done for each of the three major decompression loops: + * - ZSTD_decompressSequences_bodySplitLitBuffer - presplit section of the literal buffer + * - ZSTD_decompressSequences_bodySplitLitBuffer - postsplit section of the literal buffer + * - ZSTD_decompressSequences_body + * Alignment choices are made to minimize large swings on bad cases and influence on performance + * from changes external to this code, rather than to overoptimize on the current commit. + * + * If you are seeing performance instability, this script can help test. + * It tests on 4 commits in zstd where I saw performance change. + * + * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4 + */ #if defined(__GNUC__) && defined(__x86_64__) - /* Align the decompression loop to 32 + 16 bytes. - * - * zstd compiled with gcc-9 on an Intel i9-9900k shows 10% decompression - * speed swings based on the alignment of the decompression loop. This - * performance swing is caused by parts of the decompression loop falling - * out of the DSB. The entire decompression loop should fit in the DSB, - * when it can't we get much worse performance. You can measure if you've - * hit the good case or the bad case with this perf command for some - * compressed file test.zst: - * - * perf stat -e cycles -e instructions -e idq.all_dsb_cycles_any_uops \ - * -e idq.all_mite_cycles_any_uops -- ./zstd -tq test.zst - * - * If you see most cycles served out of the MITE you've hit the bad case. - * If you see most cycles served out of the DSB you've hit the good case. - * If it is pretty even then you may be in an okay case. - * - * This issue has been reproduced on the following CPUs: - * - Kabylake: Macbook Pro (15-inch, 2019) 2.4 GHz Intel Core i9 - * Use Instruments->Counters to get DSB/MITE cycles. - * I never got performance swings, but I was able to - * go from the good case of mostly DSB to half of the - * cycles served from MITE. - * - Coffeelake: Intel i9-9900k - * - Coffeelake: Intel i7-9700k - * - * I haven't been able to reproduce the instability or DSB misses on any - * of the following CPUS: - * - Haswell - * - Broadwell: Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GH - * - Skylake - * - * If you are seeing performance stability this script can help test.
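[Editor's note] The loop that follows runs in two phases: sequences consume literals parked at the tail of `dst` until one would cross the split point, then the leftovers are drained and decoding switches to `litExtraBuffer`. A self-contained toy of just that literal bookkeeping (hypothetical and heavily simplified; real sequences also copy matches):

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
    char dst[24];
    memset(dst, '.', sizeof dst);
    memcpy(dst + 16, "ABCDEFGH", 8);         /* phase-1 literals at the dst tail */
    const char extra[4] = {'i','j','k','l'}; /* stands in for litExtraBuffer     */

    const char* litPtr = dst + 16;
    const char* litEnd = dst + 24;           /* the split point                  */
    char* op = dst;                          /* current output write position    */
    size_t lens[4] = { 3, 3, 4, 2 };         /* per-sequence litLength, example  */

    for (size_t s = 0; s < 4; s++) {
        if (litPtr + lens[s] > litEnd) {     /* would cross the split: drain, switch */
            size_t const left = (size_t)(litEnd - litPtr);
            memmove(op, litPtr, left); op += left; lens[s] -= left;
            litPtr = extra; litEnd = extra + 4;
        }
        memmove(op, litPtr, lens[s]); op += lens[s]; litPtr += lens[s];
    }
    printf("%.12s\n", dst);                  /* prints "ABCDEFGHijkl" */
    return 0;
}
```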
- * - * https://gist.github.com/terrelln/9889fc06a423fd5ca6e99351564473f4 - */ - __asm__(".p2align 6"); - __asm__("nop"); - __asm__(".p2align 5"); - __asm__("nop"); -# if __GNUC__ >= 9 - /* better for gcc-9 and gcc-10, worse for clang and gcc-8 */ - __asm__(".p2align 3"); + __asm__(".p2align 6"); +# if __GNUC__ >= 7 + /* good for gcc-7, gcc-9, and gcc-11 */ + __asm__("nop"); + __asm__(".p2align 5"); + __asm__("nop"); + __asm__(".p2align 4"); +# if __GNUC__ == 8 || __GNUC__ == 10 + /* good for gcc-8 and gcc-10 */ + __asm__("nop"); + __asm__(".p2align 3"); +# endif +# endif +#endif + + /* Handle the initial state where litBuffer is currently split between dst and litExtraBuffer */ + for (; litPtr + sequence.litLength <= dctx->litBufferEnd; ) { + size_t const oneSeqSize = ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence.litLength - WILDCOPY_OVERLENGTH, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); +#endif + if (UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + op += oneSeqSize; + if (UNLIKELY(!--nbSeq)) + break; + BIT_reloadDStream(&(seqState.DStream)); + sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + } + + /* If there are more sequences, they will need to read literals from litExtraBuffer; copy over the remainder from dst and update litPtr and litEnd */ + if (nbSeq > 0) { + const size_t leftoverLit = dctx->litBufferEnd - litPtr; + if (leftoverLit) + { + RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); + ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); + sequence.litLength -= leftoverLit; + op += leftoverLit; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + { + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); +#endif + if (UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + op += oneSeqSize; + if (--nbSeq) + BIT_reloadDStream(&(seqState.DStream)); + } + } + } + + if (nbSeq > 0) /* there is remaining lit from extra buffer */ + { + +#if defined(__GNUC__) && defined(__x86_64__) + __asm__(".p2align 6"); + __asm__("nop"); +# if __GNUC__ != 7 + /* worse for gcc-7 better for gcc-8, gcc-9, and gcc-10 and clang */ + __asm__(".p2align 4"); + __asm__("nop"); + __asm__(".p2align 3"); +# elif __GNUC__ >= 11 + __asm__(".p2align 3"); +# else + __asm__(".p2align 5"); + __asm__("nop"); + __asm__(".p2align 3"); +# endif +#endif + + for (; ; ) { + seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litBufferEnd, prefixStart, vBase, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequence, prefixStart, vBase); +#endif + if 
(UNLIKELY(ZSTD_isError(oneSeqSize))) + return oneSeqSize; + DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize); + op += oneSeqSize; + if (UNLIKELY(!--nbSeq)) + break; + BIT_reloadDStream(&(seqState.DStream)); + } + } + + /* check if reached exact end */ + DEBUGLOG(5, "ZSTD_decompressSequences_bodySplitLitBuffer: after decode loop, remaining nbSeq : %i", nbSeq); + RETURN_ERROR_IF(nbSeq, corruption_detected, ""); + RETURN_ERROR_IF(BIT_reloadDStream(&seqState.DStream) < BIT_DStream_completed, corruption_detected, ""); + /* save reps for next block */ + { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); } + } + + /* last literal segment */ + if (dctx->litBufferLocation == ZSTD_split) /* split hasn't been reached yet, first get dst then copy litExtraBuffer */ + { + size_t const lastLLSize = litBufferEnd - litPtr; + RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + } + { size_t const lastLLSize = litBufferEnd - litPtr; + RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } + } + + return op-ostart; +} + +FORCE_INLINE_TEMPLATE size_t +DONT_VECTORIZE +ZSTD_decompressSequences_body(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + const BYTE* ip = (const BYTE*)seqStart; + const BYTE* const iend = ip + seqSize; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? 
ostart + maxDstSize : dctx->litBuffer; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const prefixStart = (const BYTE*)(dctx->prefixStart); + const BYTE* const vBase = (const BYTE*)(dctx->virtualStart); + const BYTE* const dictEnd = (const BYTE*)(dctx->dictEnd); + DEBUGLOG(5, "ZSTD_decompressSequences_body"); + (void)frame; + + /* Regen sequences */ + if (nbSeq) { + seqState_t seqState; + dctx->fseEntropy = 1; + { U32 i; for (i = 0; i < ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; } + RETURN_ERROR_IF( + ERR_isError(BIT_initDStream(&seqState.DStream, ip, iend - ip)), + corruption_detected, ""); + ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + assert(dst != NULL); + + ZSTD_STATIC_ASSERT( + BIT_DStream_unfinished < BIT_DStream_completed && + BIT_DStream_endOfBuffer < BIT_DStream_completed && + BIT_DStream_completed < BIT_DStream_overflow); + +#if defined(__GNUC__) && defined(__x86_64__) + __asm__(".p2align 6"); + __asm__("nop"); +# if __GNUC__ >= 7 + __asm__(".p2align 5"); + __asm__("nop"); + __asm__(".p2align 3"); # else - __asm__(".p2align 4"); + __asm__(".p2align 4"); + __asm__("nop"); + __asm__(".p2align 3"); # endif #endif + for ( ; ; ) { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset); size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, prefixStart, vBase, dictEnd); @@ -1218,6 +1632,16 @@ ZSTD_decompressSequences_default(ZSTD_DCtx* dctx, { return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } + +static size_t +ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT @@ -1250,10 +1674,10 @@ ZSTD_decompressSequencesLong_body( const BYTE* ip = (const BYTE*)seqStart; const BYTE* const iend = ip + seqSize; BYTE* const ostart = (BYTE*)dst; - BYTE* const oend = ostart + maxDstSize; + BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? 
dctx->litBuffer : ostart + maxDstSize; BYTE* op = ostart; const BYTE* litPtr = dctx->litPtr; - const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* const prefixStart = (const BYTE*) (dctx->prefixStart); const BYTE* const dictStart = (const BYTE*) (dctx->virtualStart); const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); @@ -1289,32 +1713,94 @@ ZSTD_decompressSequencesLong_body( } RETURN_ERROR_IF(seqNb<seqAdvance, corruption_detected, ""); - /* decode and decompress */ - for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) { - seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset); - size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd); + /* decompress without stomping litBuffer */ + for (; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb < nbSeq); seqNb++) { + seq_t sequence = ZSTD_decodeSequence(&seqState, isLongOffset); + size_t oneSeqSize; + + if (dctx->litBufferLocation == ZSTD_split && litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength > dctx->litBufferEnd) + { + /* lit buffer is reaching split point, empty out the first buffer and transition to litExtraBuffer */ + const size_t leftoverLit = dctx->litBufferEnd - litPtr; + if (leftoverLit) + { + RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); + ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength -= leftoverLit; + op += leftoverLit; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + oneSeqSize = ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) - assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb-ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); #endif - if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; - prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); - sequences[seqNb & STORED_SEQS_MASK] = sequence; - op += oneSeqSize; + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb & STORED_SEQS_MASK] = sequence; + op += oneSeqSize; + } + else + { + /* lit buffer is either wholly contained in first or second split, or not split at all*/ + oneSeqSize = dctx->litBufferLocation == ZSTD_split ? 
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK].litLength - WILDCOPY_OVERLENGTH, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : + ZSTD_execSequence(op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[(seqNb - ADVANCED_SEQS) & STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + + prefetchPos = ZSTD_prefetchMatch(prefetchPos, sequence, prefixStart, dictEnd); + sequences[seqNb & STORED_SEQS_MASK] = sequence; + op += oneSeqSize; + } } RETURN_ERROR_IF(seqNb<nbSeq, corruption_detected, ""); /* finish queue */ seqNb -= seqAdvance; for ( ; seqNb<nbSeq ; seqNb++) { - size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequences[seqNb&STORED_SEQS_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd); + seq_t *sequence = &(sequences[seqNb&STORED_SEQS_MASK]); + if (dctx->litBufferLocation == ZSTD_split && litPtr + sequence->litLength > dctx->litBufferEnd) + { + const size_t leftoverLit = dctx->litBufferEnd - litPtr; + if (leftoverLit) + { + RETURN_ERROR_IF(leftoverLit > (size_t)(oend - op), dstSize_tooSmall, "remaining lit must fit within dstBuffer"); + ZSTD_safecopyDstBeforeSrc(op, litPtr, leftoverLit); + sequence->litLength -= leftoverLit; + op += leftoverLit; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + dctx->litBufferLocation = ZSTD_not_in_dst; + { + size_t const oneSeqSize = ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); #if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) - assert(!ZSTD_isError(oneSeqSize)); - if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); #endif - if (ZSTD_isError(oneSeqSize)) return oneSeqSize; - op += oneSeqSize; + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } + } + else + { + size_t const oneSeqSize = dctx->litBufferLocation == ZSTD_split ? 
+ ZSTD_execSequenceSplitLitBuffer(op, oend, litPtr + sequence->litLength - WILDCOPY_OVERLENGTH, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd) : + ZSTD_execSequence(op, oend, *sequence, &litPtr, litBufferEnd, prefixStart, dictStart, dictEnd); +#if defined(FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION) && defined(FUZZING_ASSERT_VALID_SEQUENCE) + assert(!ZSTD_isError(oneSeqSize)); + if (frame) ZSTD_assertValidSequence(dctx, op, oend, sequences[seqNb&STORED_SEQS_MASK], prefixStart, dictStart); +#endif + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } } /* save reps for next block */ @@ -1322,10 +1808,21 @@ ZSTD_decompressSequencesLong_body( } /* last literal segment */ - { size_t const lastLLSize = litEnd - litPtr; + if (dctx->litBufferLocation == ZSTD_split) /* first deplete literal buffer in dst, then copy litExtraBuffer */ + { + size_t const lastLLSize = litBufferEnd - litPtr; + RETURN_ERROR_IF(lastLLSize > (size_t)(oend - op), dstSize_tooSmall, ""); + if (op != NULL) { + ZSTD_memmove(op, litPtr, lastLLSize); + op += lastLLSize; + } + litPtr = dctx->litExtraBuffer; + litBufferEnd = dctx->litExtraBuffer + ZSTD_LITBUFFEREXTRASIZE; + } + { size_t const lastLLSize = litBufferEnd - litPtr; RETURN_ERROR_IF(lastLLSize > (size_t)(oend-op), dstSize_tooSmall, ""); if (op != NULL) { - ZSTD_memcpy(op, litPtr, lastLLSize); + ZSTD_memmove(op, litPtr, lastLLSize); op += lastLLSize; } } @@ -1349,7 +1846,7 @@ ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx, #if DYNAMIC_BMI2 #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t DONT_VECTORIZE ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, @@ -1359,10 +1856,20 @@ ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx, { return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } +static BMI2_TARGET_ATTRIBUTE size_t +DONT_VECTORIZE +ZSTD_decompressSequencesSplitLitBuffer_bmi2(ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + return ZSTD_decompressSequences_bodySplitLitBuffer(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT -static TARGET_ATTRIBUTE("bmi2") size_t +static BMI2_TARGET_ATTRIBUTE size_t ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, const void* seqStart, size_t seqSize, int nbSeq, @@ -1391,11 +1898,25 @@ ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, { DEBUGLOG(5, "ZSTD_decompressSequences"); #if DYNAMIC_BMI2 - if (dctx->bmi2) { + if (ZSTD_DCtx_get_bmi2(dctx)) { return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif - return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); +} +static size_t +ZSTD_decompressSequencesSplitLitBuffer(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize, int nbSeq, + const ZSTD_longOffset_e isLongOffset, + const int frame) +{ + DEBUGLOG(5, "ZSTD_decompressSequencesSplitLitBuffer"); +#if DYNAMIC_BMI2 + if (ZSTD_DCtx_get_bmi2(dctx)) { + return ZSTD_decompressSequencesSplitLitBuffer_bmi2(dctx, 
dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); + } +#endif + return ZSTD_decompressSequencesSplitLitBuffer_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif /* ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG */ @@ -1415,7 +1936,7 @@ ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx, { DEBUGLOG(5, "ZSTD_decompressSequencesLong"); #if DYNAMIC_BMI2 - if (dctx->bmi2) { + if (ZSTD_DCtx_get_bmi2(dctx)) { return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset, frame); } #endif @@ -1456,7 +1977,7 @@ ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable) size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, const int frame) + const void* src, size_t srcSize, const int frame, const streaming_operation streaming) { /* blockType == blockCompressed */ const BYTE* ip = (const BYTE*)src; /* isLongOffset must be true if there are long offsets. @@ -1471,7 +1992,7 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, RETURN_ERROR_IF(srcSize >= ZSTD_BLOCKSIZE_MAX, srcSize_wrong, ""); /* Decode literals section */ - { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); + { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize, dst, dstCapacity, streaming); DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; @@ -1519,7 +2040,10 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_LONG /* else */ - return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); + if (dctx->litBufferLocation == ZSTD_split) + return ZSTD_decompressSequencesSplitLitBuffer(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); + else + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset, frame); #endif } } @@ -1542,7 +2066,7 @@ size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, { size_t dSize; ZSTD_checkContinuity(dctx, dst, dstCapacity); - dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0); + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0, not_streaming); dctx->previousDstEnd = (char*)dst + dSize; return dSize; } diff --git a/thirdparty/zstd/decompress/zstd_decompress_block.h b/thirdparty/zstd/decompress/zstd_decompress_block.h index 049a0cd84c..c61a9d0c4b 100644 --- a/thirdparty/zstd/decompress/zstd_decompress_block.h +++ b/thirdparty/zstd/decompress/zstd_decompress_block.h @@ -33,6 +33,12 @@ */ + /* Streaming state is used to inform allocation of the literal buffer */ +typedef enum { + not_streaming = 0, + is_streaming = 1 +} streaming_operation; + /* ZSTD_decompressBlock_internal() : * decompress block, starting at `src`, * into destination buffer `dst`. 
@@ -41,7 +47,7 @@ */ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, - const void* src, size_t srcSize, const int frame); + const void* src, size_t srcSize, const int frame, const streaming_operation streaming); /* ZSTD_buildFSETable() : * generate FSE decoding table for one symbol (ll, ml or off) @@ -54,7 +60,7 @@ size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, */ void ZSTD_buildFSETable(ZSTD_seqSymbol* dt, const short* normalizedCounter, unsigned maxSymbolValue, - const U32* baseValue, const U32* nbAdditionalBits, + const U32* baseValue, const U8* nbAdditionalBits, unsigned tableLog, void* wksp, size_t wkspSize, int bmi2); diff --git a/thirdparty/zstd/decompress/zstd_decompress_internal.h b/thirdparty/zstd/decompress/zstd_decompress_internal.h index ebda0c9031..2b5a53850a 100644 --- a/thirdparty/zstd/decompress/zstd_decompress_internal.h +++ b/thirdparty/zstd/decompress/zstd_decompress_internal.h @@ -20,7 +20,7 @@ * Dependencies *********************************************************/ #include "../common/mem.h" /* BYTE, U16, U32 */ -#include "../common/zstd_internal.h" /* ZSTD_seqSymbol */ +#include "../common/zstd_internal.h" /* constants : MaxLL, MaxML, MaxOff, LLFSELog, etc. */ @@ -40,7 +40,7 @@ static UNUSED_ATTR const U32 OF_base[MaxOff+1] = { 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD }; -static UNUSED_ATTR const U32 OF_bits[MaxOff+1] = { +static UNUSED_ATTR const U8 OF_bits[MaxOff+1] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, @@ -106,6 +106,22 @@ typedef struct { size_t ddictPtrCount; } ZSTD_DDictHashSet; +#ifndef ZSTD_DECODER_INTERNAL_BUFFER +# define ZSTD_DECODER_INTERNAL_BUFFER (1 << 16) +#endif + +#define ZSTD_LBMIN 64 +#define ZSTD_LBMAX (128 << 10) + +/* extra buffer, compensates when dst is not large enough to store litBuffer */ +#define ZSTD_LITBUFFEREXTRASIZE BOUNDED(ZSTD_LBMIN, ZSTD_DECODER_INTERNAL_BUFFER, ZSTD_LBMAX) + +typedef enum { + ZSTD_not_in_dst = 0, /* Stored entirely within litExtraBuffer */ + ZSTD_in_dst = 1, /* Stored entirely within dst (in memory after current output write) */ + ZSTD_split = 2 /* Split between litExtraBuffer and dst */ +} ZSTD_litLocation_e; + struct ZSTD_DCtx_s { const ZSTD_seqSymbol* LLTptr; @@ -136,7 +152,9 @@ struct ZSTD_DCtx_s size_t litSize; size_t rleSize; size_t staticSize; +#if DYNAMIC_BMI2 != 0 int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. 
*/ +#endif /* dictionary */ ZSTD_DDict* ddictLocal; @@ -158,16 +176,21 @@ struct ZSTD_DCtx_s size_t outStart; size_t outEnd; size_t lhSize; +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1) void* legacyContext; U32 previousLegacyVersion; U32 legacyVersion; +#endif U32 hostageByte; int noForwardProgress; ZSTD_bufferMode_e outBufferMode; ZSTD_outBuffer expectedOutBuffer; /* workspace */ - BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH]; + BYTE* litBuffer; + const BYTE* litBufferEnd; + ZSTD_litLocation_e litBufferLocation; + BYTE litExtraBuffer[ZSTD_LITBUFFEREXTRASIZE + WILDCOPY_OVERLENGTH]; /* literal buffer can be split between storage within dst and within this scratch buffer */ BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; size_t oversizedDuration; @@ -183,6 +206,14 @@ struct ZSTD_DCtx_s #endif }; /* typedef'd to ZSTD_DCtx within "zstd.h" */ +MEM_STATIC int ZSTD_DCtx_get_bmi2(const struct ZSTD_DCtx_s *dctx) { +#if DYNAMIC_BMI2 != 0 + return dctx->bmi2; +#else + (void)dctx; + return 0; +#endif +} /*-******************************************************* * Shared internal functions diff --git a/thirdparty/zstd/zstd.h b/thirdparty/zstd/zstd.h index 4651e6c4dc..a88ae7bf8e 100644 --- a/thirdparty/zstd/zstd.h +++ b/thirdparty/zstd/zstd.h @@ -20,19 +20,21 @@ extern "C" { /* ===== ZSTDLIB_API : control library symbols visibility ===== */ -#ifndef ZSTDLIB_VISIBILITY -# if defined(__GNUC__) && (__GNUC__ >= 4) -# define ZSTDLIB_VISIBILITY __attribute__ ((visibility ("default"))) +#ifndef ZSTDLIB_VISIBLE +# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__) +# define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default"))) +# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden"))) # else -# define ZSTDLIB_VISIBILITY +# define ZSTDLIB_VISIBLE +# define ZSTDLIB_HIDDEN # endif #endif #if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) -# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBILITY +# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBLE #elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) -# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ +# define ZSTDLIB_API __declspec(dllimport) ZSTDLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/ #else -# define ZSTDLIB_API ZSTDLIB_VISIBILITY +# define ZSTDLIB_API ZSTDLIB_VISIBLE #endif @@ -72,7 +74,7 @@ extern "C" { /*------ Version ------*/ #define ZSTD_VERSION_MAJOR 1 #define ZSTD_VERSION_MINOR 5 -#define ZSTD_VERSION_RELEASE 0 +#define ZSTD_VERSION_RELEASE 2 #define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE) /*! ZSTD_versionNumber() : @@ -247,7 +249,7 @@ ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, * * It's possible to reset all parameters to "default" using ZSTD_CCtx_reset(). * - * This API supercedes all other "advanced" API entry points in the experimental section. + * This API supersedes all other "advanced" API entry points in the experimental section. * In the future, we expect to remove from experimental API entry points which are redundant with this API. 
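[Editor's note] Since this sync bumps `ZSTD_VERSION_RELEASE` from 0 to 2, a reminder of the encoding visible in the hunk above: `ZSTD_VERSION_NUMBER` is `MAJOR*100*100 + MINOR*100 + RELEASE`, so v1.5.2 encodes as 10502. A small check program against the real `zstd.h` (assumes zstd headers and library are available):

```c
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    /* 1*100*100 + 5*100 + 2 == 10502 for v1.5.2 */
    printf("compile-time: %d\n", ZSTD_VERSION_NUMBER);
    printf("run-time    : %u (%s)\n", ZSTD_versionNumber(), ZSTD_versionString());
    return 0;
}
```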
*/ @@ -417,7 +419,7 @@ typedef enum { * ZSTD_c_stableOutBuffer * ZSTD_c_blockDelimiters * ZSTD_c_validateSequences - * ZSTD_c_splitBlocks + * ZSTD_c_useBlockSplitter * ZSTD_c_useRowMatchFinder * Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them. * note : never ever use experimentalParam? names directly; @@ -932,7 +934,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize); * Advanced dictionary and prefix API (Requires v1.4.0+) * * This API allows dictionaries to be used with ZSTD_compress2(), - * ZSTD_compressStream2(), and ZSTD_decompress(). Dictionaries are sticky, and + * ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and * only reset when the context is reset with ZSTD_reset_parameters or * ZSTD_reset_session_and_parameters. Prefixes are single-use. ******************************************************************************/ @@ -1073,25 +1075,36 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #if defined(ZSTD_STATIC_LINKING_ONLY) && !defined(ZSTD_H_ZSTD_STATIC_LINKING_ONLY) #define ZSTD_H_ZSTD_STATIC_LINKING_ONLY +/* This can be overridden externally to hide static symbols. */ +#ifndef ZSTDLIB_STATIC_API +# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1) +# define ZSTDLIB_STATIC_API __declspec(dllexport) ZSTDLIB_VISIBLE +# elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1) +# define ZSTDLIB_STATIC_API __declspec(dllimport) ZSTDLIB_VISIBLE +# else +# define ZSTDLIB_STATIC_API ZSTDLIB_VISIBLE +# endif +#endif + /* Deprecation warnings : * Should these warnings be a problem, it is generally possible to disable them, * typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual. * Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS.
*/ #ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS -# define ZSTD_DEPRECATED(message) ZSTDLIB_API /* disable deprecation warnings */ +# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API /* disable deprecation warnings */ #else # if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */ -# define ZSTD_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_API +# define ZSTD_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_STATIC_API # elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__) -# define ZSTD_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated(message))) +# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated(message))) # elif defined(__GNUC__) && (__GNUC__ >= 3) -# define ZSTD_DEPRECATED(message) ZSTDLIB_API __attribute__((deprecated)) +# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated)) # elif defined(_MSC_VER) -# define ZSTD_DEPRECATED(message) ZSTDLIB_API __declspec(deprecated(message)) +# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __declspec(deprecated(message)) # else # pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler") -# define ZSTD_DEPRECATED(message) ZSTDLIB_API +# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API # endif #endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */ @@ -1157,9 +1170,6 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict); #define ZSTD_SRCSIZEHINT_MIN 0 #define ZSTD_SRCSIZEHINT_MAX INT_MAX -/* internal */ -#define ZSTD_HASHLOG3_MAX 17 - /* --- Advanced types --- */ @@ -1302,10 +1312,14 @@ typedef enum { } ZSTD_literalCompressionMode_e; typedef enum { - ZSTD_urm_auto = 0, /* Automatically determine whether or not we use row matchfinder */ - ZSTD_urm_disableRowMatchFinder = 1, /* Never use row matchfinder */ - ZSTD_urm_enableRowMatchFinder = 2 /* Always use row matchfinder when applicable */ -} ZSTD_useRowMatchFinderMode_e; + /* Note: This enum controls features which are conditionally beneficial. Zstd typically will make a final + * decision on whether or not to enable the feature (ZSTD_ps_auto), but setting the switch to ZSTD_ps_enable + * or ZSTD_ps_disable allows force-enabling or force-disabling the feature. + */ + ZSTD_ps_auto = 0, /* Let the library automatically determine whether the feature shall be enabled */ + ZSTD_ps_enable = 1, /* Force-enable the feature */ + ZSTD_ps_disable = 2 /* Do not use the feature */ +} ZSTD_paramSwitch_e; /*************************************** * Frame size functions @@ -1332,7 +1346,7 @@ typedef enum { * note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to * read each contained frame header. This is fast as most of the data is skipped, * however it does mean that all frame data must be present and valid. */ -ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize); /*!
ZSTD_decompressBound() : * `src` should point to the start of a series of ZSTD encoded and/or skippable frames @@ -1347,13 +1361,13 @@ ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t * note 3 : when the decompressed size field isn't available, the upper-bound for that frame is calculated by: * upper-bound = # blocks * min(128 KB, Window_Size) */ -ZSTDLIB_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size_t srcSize); /*! ZSTD_frameHeaderSize() : * srcSize must be >= ZSTD_FRAMEHEADERSIZE_PREFIX. * @return : size of the Frame Header, * or an error code (if srcSize is too small) */ -ZSTDLIB_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize); typedef enum { ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */ @@ -1376,7 +1390,7 @@ typedef enum { * @return : number of sequences generated */ -ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, +ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, size_t outSeqsSize, const void* src, size_t srcSize); /*! ZSTD_mergeBlockDelimiters() : @@ -1390,7 +1404,7 @@ ZSTDLIB_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs, * setting of ZSTD_c_blockDelimiters as ZSTD_sf_noBlockDelimiters * @return : number of sequences left after merging */ -ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize); +ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize); /*! ZSTD_compressSequences() : * Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst. @@ -1420,7 +1434,7 @@ ZSTDLIB_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t se * and cannot emit an RLE block that disagrees with the repcode history * @return : final compressed size or a ZSTD error. */ -ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize, +ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize, const ZSTD_Sequence* inSeqs, size_t inSeqsSize, const void* src, size_t srcSize); @@ -1438,9 +1452,29 @@ ZSTDLIB_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size * * @return : number of bytes written or a ZSTD error. */ -ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, +ZSTDLIB_STATIC_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned magicVariant); +/*! ZSTD_readSkippableFrame() : + * Retrieves a zstd skippable frame containing data given by src, and writes it to dst buffer. + * + * The parameter magicVariant will receive the magicVariant that was supplied when the frame was written, + * i.e. magicNumber - ZSTD_MAGIC_SKIPPABLE_START. This can be NULL if the caller is not interested + * in the magicVariant. + * + * Returns an error if destination buffer is not large enough, or if the frame is not skippable. + * + * @return : number of bytes written or a ZSTD error. + */ +ZSTDLIB_API size_t ZSTD_readSkippableFrame(void* dst, size_t dstCapacity, unsigned* magicVariant, + const void* src, size_t srcSize); + +/*! 
ZSTD_isSkippableFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier for a skippable frame. + */ +ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size); + + /*************************************** * Memory management @@ -1469,10 +1503,10 @@ ZSTDLIB_API size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity, * Note 2 : only single-threaded compression is supported. * ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1. */ -ZSTDLIB_API size_t ZSTD_estimateCCtxSize(int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); -ZSTDLIB_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); -ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void); /*! ZSTD_estimateCStreamSize() : * ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one. @@ -1487,20 +1521,20 @@ ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void); * Note : if streaming is init with function ZSTD_init?Stream_usingDict(), * an internal ?Dict will be created, which additional size is not estimated here. * In this case, get total size by adding ZSTD_estimate?DictSize */ -ZSTDLIB_API size_t ZSTD_estimateCStreamSize(int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); -ZSTDLIB_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); -ZSTDLIB_API size_t ZSTD_estimateDStreamSize(size_t windowSize); -ZSTDLIB_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize(size_t windowSize); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize); /*! ZSTD_estimate?DictSize() : * ZSTD_estimateCDictSize() will bet that src size is relatively "small", and content is copied, like ZSTD_createCDict(). * ZSTD_estimateCDictSize_advanced() makes it possible to control compression parameters precisely, like ZSTD_createCDict_advanced(). * Note : dictionaries created by reference (`ZSTD_dlm_byRef`) are logically smaller. */ -ZSTDLIB_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel); -ZSTDLIB_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod); -ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_estimateCDictSize_advanced(size_t dictSize, ZSTD_compressionParameters cParams, ZSTD_dictLoadMethod_e dictLoadMethod); +ZSTDLIB_STATIC_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod); /*! 
ZSTD_initStatic*() : * Initialize an object using a pre-allocated fixed-size buffer. @@ -1523,20 +1557,20 @@ ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e * Limitation 2 : static cctx currently not compatible with multi-threading. * Limitation 3 : static dctx is incompatible with legacy support. */ -ZSTDLIB_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize); -ZSTDLIB_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */ +ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize); +ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_initStaticCStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticCCtx() */ -ZSTDLIB_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize); -ZSTDLIB_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */ +ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_initStaticDCtx(void* workspace, size_t workspaceSize); +ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_initStaticDStream(void* workspace, size_t workspaceSize); /**< same as ZSTD_initStaticDCtx() */ -ZSTDLIB_API const ZSTD_CDict* ZSTD_initStaticCDict( +ZSTDLIB_STATIC_API const ZSTD_CDict* ZSTD_initStaticCDict( void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams); -ZSTDLIB_API const ZSTD_DDict* ZSTD_initStaticDDict( +ZSTDLIB_STATIC_API const ZSTD_DDict* ZSTD_initStaticDDict( void* workspace, size_t workspaceSize, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, @@ -1557,44 +1591,44 @@ __attribute__((__unused__)) #endif ZSTD_customMem const ZSTD_defaultCMem = { NULL, NULL, NULL }; /**< this constant defers to stdlib's functions */ -ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem); +ZSTDLIB_STATIC_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, +ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, ZSTD_compressionParameters cParams, ZSTD_customMem customMem); -/* ! Thread pool : - * These prototypes make it possible to share a thread pool among multiple compression contexts. - * This can limit resources for applications with multiple threads where each one uses - * a threaded compression mode (via ZSTD_c_nbWorkers parameter). - * ZSTD_createThreadPool creates a new thread pool with a given number of threads. - * Note that the lifetime of such pool must exist while being used. - * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value - * to use an internal thread pool). - * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer. +/*! 
Thread pool : + * These prototypes make it possible to share a thread pool among multiple compression contexts. + * This can limit resources for applications with multiple threads where each one uses + * a threaded compression mode (via ZSTD_c_nbWorkers parameter). + * ZSTD_createThreadPool creates a new thread pool with a given number of threads. + * Note that the lifetime of such pool must exist while being used. + * ZSTD_CCtx_refThreadPool assigns a thread pool to a context (use NULL argument value + * to use an internal thread pool). + * ZSTD_freeThreadPool frees a thread pool, accepts NULL pointer. */ typedef struct POOL_ctx_s ZSTD_threadPool; -ZSTDLIB_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads); -ZSTDLIB_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */ -ZSTDLIB_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool); +ZSTDLIB_STATIC_API ZSTD_threadPool* ZSTD_createThreadPool(size_t numThreads); +ZSTDLIB_STATIC_API void ZSTD_freeThreadPool (ZSTD_threadPool* pool); /* accept NULL pointer */ +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool); /* * This API is temporary and is expected to change or disappear in the future! */ -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced2( +ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_advanced2( const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, const ZSTD_CCtx_params* cctxParams, ZSTD_customMem customMem); -ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced( +ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_advanced( const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType, @@ -1611,22 +1645,22 @@ ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced( * As a consequence, `dictBuffer` **must** outlive CDict, * and its content must remain unmodified throughout the lifetime of CDict. * note: equivalent to ZSTD_createCDict_advanced(), with dictLoadMethod==ZSTD_dlm_byRef */ -ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel); +ZSTDLIB_STATIC_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, size_t dictSize, int compressionLevel); /*! ZSTD_getCParams() : * @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. * `estimatedSrcSize` value is optional, select 0 if not known */ -ZSTDLIB_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); /*! ZSTD_getParams() : * same as ZSTD_getCParams(), but @return a full `ZSTD_parameters` object instead of sub-component `ZSTD_compressionParameters`. * All fields of `ZSTD_frameParameters` are set to default : contentSize=1, checksum=0, noDictID=0 */ -ZSTDLIB_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize); /*! ZSTD_checkCParams() : * Ensure param values remain within authorized range. 
* @return 0 on success, or an error code (can be checked with ZSTD_isError()) */ -ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); +ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); /*! ZSTD_adjustCParams() : * optimize params for a given `srcSize` and `dictSize`. @@ -1634,7 +1668,7 @@ ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params); * `dictSize` must be `0` when there is no dictionary. * cPar can be invalid : all parameters will be clamped within valid range in the @return struct. * This function never fails (wide contract) */ -ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize); /*! ZSTD_compress_advanced() : * Note : this function is now DEPRECATED. @@ -1662,18 +1696,18 @@ size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx, /*! ZSTD_CCtx_loadDictionary_byReference() : * Same as ZSTD_CCtx_loadDictionary(), but dictionary content is referenced, instead of being copied into CCtx. * It saves some memory, but also requires that `dict` outlives its usage within `cctx` */ -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_byReference(ZSTD_CCtx* cctx, const void* dict, size_t dictSize); /*! ZSTD_CCtx_loadDictionary_advanced() : * Same as ZSTD_CCtx_loadDictionary(), but gives finer control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_loadDictionary_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); /*! ZSTD_CCtx_refPrefix_advanced() : * Same as ZSTD_CCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); /* === experimental parameters === */ /* these parameters can be used with ZSTD_setParameter() @@ -1712,9 +1746,15 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre * See the comments on that enum for an explanation of the feature. */ #define ZSTD_c_forceAttachDict ZSTD_c_experimentalParam4 -/* Controls how the literals are compressed (default is auto). - * The value must be of type ZSTD_literalCompressionMode_e. - * See ZSTD_literalCompressionMode_e enum definition for details. +/* Controlled with ZSTD_paramSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never compress literals. + * Set to ZSTD_ps_enable to always compress literals. (Note: uncompressed literals + * may still be emitted if huffman is not beneficial to use.) 
+ * + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * literals compression based on the compression parameters - specifically, + * negative compression levels do not use literal compression. */ #define ZSTD_c_literalCompressionMode ZSTD_c_experimentalParam5 @@ -1777,7 +1817,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre * * Note that this means that the CDict tables can no longer be copied into the * CCtx, so the dict attachment mode ZSTD_dictForceCopy will no longer be - * useable. The dictionary can only be attached or reloaded. + * usable. The dictionary can only be attached or reloaded. * * In general, you should expect compression to be faster--sometimes very much * so--and CDict creation to be slightly slower. Eventually, we will probably @@ -1866,23 +1906,26 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre */ #define ZSTD_c_validateSequences ZSTD_c_experimentalParam12 -/* ZSTD_c_splitBlocks - * Default is 0 == disabled. Set to 1 to enable block splitting. +/* ZSTD_c_useBlockSplitter + * Controlled with ZSTD_paramSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never use block splitter. + * Set to ZSTD_ps_enable to always use block splitter. * - * Will attempt to split blocks in order to improve compression ratio at the cost of speed. + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * block splitting based on the compression parameters. */ -#define ZSTD_c_splitBlocks ZSTD_c_experimentalParam13 +#define ZSTD_c_useBlockSplitter ZSTD_c_experimentalParam13 /* ZSTD_c_useRowMatchFinder - * Default is ZSTD_urm_auto. - * Controlled with ZSTD_useRowMatchFinderMode_e enum. + * Controlled with ZSTD_paramSwitch_e enum. + * Default is ZSTD_ps_auto. + * Set to ZSTD_ps_disable to never use row-based matchfinder. + * Set to ZSTD_ps_enable to force usage of row-based matchfinder. * - * By default, in ZSTD_urm_auto, when finalizing the compression parameters, the library - * will decide at runtime whether to use the row-based matchfinder based on support for SIMD - * instructions as well as the windowLog. - * - * Set to ZSTD_urm_disableRowMatchFinder to never use row-based matchfinder. - * Set to ZSTD_urm_enableRowMatchFinder to force usage of row-based matchfinder. + * By default, in ZSTD_ps_auto, the library will decide at runtime whether to use + * the row-based matchfinder based on support for SIMD instructions and the window log. + * Note that this only pertains to compression strategies: greedy, lazy, and lazy2 */ #define ZSTD_c_useRowMatchFinder ZSTD_c_experimentalParam14 @@ -1911,7 +1954,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const void* pre * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value); +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter param, int* value); /*! ZSTD_CCtx_params : @@ -1931,25 +1974,25 @@ ZSTDLIB_API size_t ZSTD_CCtx_getParameter(const ZSTD_CCtx* cctx, ZSTD_cParameter * This can be used with ZSTD_estimateCCtxSize_advanced_usingCCtxParams() * for static allocation of CCtx for single-threaded compression. 
*/ -ZSTDLIB_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void); -ZSTDLIB_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */ +ZSTDLIB_STATIC_API ZSTD_CCtx_params* ZSTD_createCCtxParams(void); +ZSTDLIB_STATIC_API size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params); /* accept NULL pointer */ /*! ZSTD_CCtxParams_reset() : * Reset params to default values. */ -ZSTDLIB_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params); /*! ZSTD_CCtxParams_init() : * Initializes the compression parameters of cctxParams according to * compression level. All other parameters are reset to their default values. */ -ZSTDLIB_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel); /*! ZSTD_CCtxParams_init_advanced() : * Initializes the compression and frame parameters of cctxParams according to * params. All other parameters are reset to their default values. */ -ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params); /*! ZSTD_CCtxParams_setParameter() : Requires v1.4.0+ * Similar to ZSTD_CCtx_setParameter. @@ -1959,14 +2002,14 @@ ZSTDLIB_API size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, Z * @result : a code representing success or failure (which can be tested with * ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* params, ZSTD_cParameter param, int value); /*! ZSTD_CCtxParams_getParameter() : * Similar to ZSTD_CCtx_getParameter. * Get the requested value of one compression parameter, selected by enum ZSTD_cParameter. * @result : 0, or an error code (which can be tested with ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value); +ZSTDLIB_STATIC_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, ZSTD_cParameter param, int* value); /*! ZSTD_CCtx_setParametersUsingCCtxParams() : * Apply a set of ZSTD_CCtx_params to the compression context. @@ -1975,7 +2018,7 @@ ZSTDLIB_API size_t ZSTD_CCtxParams_getParameter(const ZSTD_CCtx_params* params, * if nbWorkers>=1, new parameters will be picked up at next job, * with a few restrictions (windowLog, pledgedSrcSize, nbWorkers, jobSize, and overlapLog are not updated). */ -ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( +ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params); /*! ZSTD_compressStream2_simpleArgs() : @@ -1984,7 +2027,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_setParametersUsingCCtxParams( * This variant might be helpful for binders from dynamic languages * which have troubles handling structures containing memory pointers. */ -ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs ( +ZSTDLIB_STATIC_API size_t ZSTD_compressStream2_simpleArgs ( ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos, @@ -2000,33 +2043,33 @@ ZSTDLIB_API size_t ZSTD_compressStream2_simpleArgs ( * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. 
* Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. * Note 3 : Skippable Frame Identifiers are considered valid. */ -ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size); +ZSTDLIB_STATIC_API unsigned ZSTD_isFrame(const void* buffer, size_t size); /*! ZSTD_createDDict_byReference() : * Create a digested dictionary, ready to start decompression operation without startup delay. * Dictionary content is referenced, and therefore stays in dictBuffer. * It is important that dictBuffer outlives DDict, * it must remain read accessible throughout the lifetime of DDict */ -ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); +ZSTDLIB_STATIC_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize); /*! ZSTD_DCtx_loadDictionary_byReference() : * Same as ZSTD_DCtx_loadDictionary(), * but references `dict` content instead of copying it into `dctx`. * This saves memory if `dict` remains around. * However, it's imperative that `dict` remains accessible (and unmodified) while being used, so it must outlive decompression. */ -ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); /*! ZSTD_DCtx_loadDictionary_advanced() : * Same as ZSTD_DCtx_loadDictionary(), * but gives direct control over * how to load the dictionary (by copy ? by reference ?) * and how to interpret it (automatic ? force raw mode ? full mode only ?). */ -ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType); /*! ZSTD_DCtx_refPrefix_advanced() : * Same as ZSTD_DCtx_refPrefix(), but gives finer control over * how to interpret prefix content (automatic ? force raw mode (default) ? full mode only ?) */ -ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType); /*! ZSTD_DCtx_setMaxWindowSize() : * Refuses allocating internal buffers for frames requiring a window size larger than provided limit. @@ -2035,14 +2078,14 @@ ZSTDLIB_API size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* pre * By default, a decompression context accepts all window sizes <= (1 << ZSTD_WINDOWLOG_LIMIT_DEFAULT) * @return : 0, or an error code (which can be tested using ZSTD_isError()). */ -ZSTDLIB_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize); /*! ZSTD_DCtx_getParameter() : * Get the requested decompression parameter value, selected by enum ZSTD_dParameter, * and store it into int* value. * @return : 0, or an error code (which can be tested with ZSTD_isError()).
*/ -ZSTDLIB_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value); +ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParameter param, int* value); /* ZSTD_d_format * experimental parameter, @@ -2131,7 +2174,7 @@ size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format); * This can be helpful for binders from dynamic languages * which have troubles handling structures containing memory pointers. */ -ZSTDLIB_API size_t ZSTD_decompressStream_simpleArgs ( +ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs ( ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, size_t* dstPos, const void* src, size_t srcSize, size_t* srcPos); @@ -2205,7 +2248,7 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, * This function is DEPRECATED, and equivalent to: * ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only); * ZSTD_CCtx_refCDict(zcs, cdict); - * + * * note : cdict will just be referenced, and must outlive compression session * This prototype will generate compilation warnings. */ @@ -2270,7 +2313,7 @@ typedef struct { * Note : (ingested - consumed) is amount of input data buffered internally, not yet compressed. * Aggregates progression inside active worker threads. */ -ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); +ZSTDLIB_STATIC_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx); /*! ZSTD_toFlushNow() : * Tell how many bytes are ready to be flushed immediately. @@ -2285,7 +2328,7 @@ ZSTDLIB_API ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx * therefore flush speed is limited by production speed of oldest job * irrespective of the speed of concurrent (and newer) jobs. */ -ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); +ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); /*===== Advanced Streaming decompression functions =====*/ @@ -2299,7 +2342,7 @@ ZSTDLIB_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx); * note: no dictionary will be used if dict == NULL or dictSize < 8 * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ -ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /*! * This function is deprecated, and is equivalent to: @@ -2310,7 +2353,7 @@ ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dic * note : ddict is referenced, it must outlive decompression session * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ -ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); +ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); /*! 
* This function is deprecated, and is equivalent to: @@ -2320,7 +2363,7 @@ ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDi * re-use decompression parameters from previous init; saves dictionary loading * Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x */ -ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); +ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); /********************************************************************* @@ -2362,13 +2405,13 @@ ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds); */ /*===== Buffer-less streaming compression functions =====*/ -ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); -ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); -ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ -ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel); +ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */ +ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */ ZSTD_DEPRECATED("use advanced API to access custom parameters") @@ -2465,24 +2508,24 @@ typedef struct { * @return : 0, `zfhPtr` is correctly filled, * >0, `srcSize` is too small, value is wanted `srcSize` amount, * or an error code, which can be tested using ZSTD_isError() */ -ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */ +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */ /*! 
ZSTD_getFrameHeader_advanced() : * same as ZSTD_getFrameHeader(), * with added capability to select a format (like ZSTD_f_zstd1_magicless) */ -ZSTDLIB_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); -ZSTDLIB_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ +ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format); +ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */ -ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); -ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); -ZSTDLIB_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict); -ZSTDLIB_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); -ZSTDLIB_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx); +ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); /* misc */ -ZSTDLIB_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); +ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx); typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e; -ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); +ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); @@ -2519,10 +2562,10 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx); */ /*===== Raw zstd block functions =====*/ -ZSTDLIB_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); -ZSTDLIB_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); -ZSTDLIB_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ +ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx); +ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize); +ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */ #endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
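The most visible user-facing addition in this zstd bump is the skippable-frame read path: ZSTD_readSkippableFrame() and ZSTD_isSkippableFrame() now complement the pre-existing ZSTD_writeSkippableFrame(). A minimal round-trip sketch against the 1.5.2 headers shown above; the buffer sizes and the magicVariant value are arbitrary illustration choices, not anything mandated by the API:

    /* Round-trip application metadata through a zstd skippable frame.
     * Sketch only: error handling is abbreviated. */
    #include <stdio.h>
    #define ZSTD_STATIC_LINKING_ONLY   /* the skippable-frame helpers sit in the static section */
    #include "zstd.h"

    int main(void)
    {
        const char payload[] = "app-specific metadata";
        char frame[128];
        char out[128];
        unsigned magicVariant = 0;

        /* Wrap the payload in a skippable frame; variant 1 (valid range 0..15) is arbitrary. */
        size_t const frameSize = ZSTD_writeSkippableFrame(frame, sizeof(frame),
                                                          payload, sizeof(payload), 1);
        if (ZSTD_isError(frameSize)) return 1;

        /* New in this version: detect the frame and extract its payload again. */
        if (!ZSTD_isSkippableFrame(frame, frameSize)) return 1;
        size_t const readSize = ZSTD_readSkippableFrame(out, sizeof(out), &magicVariant,
                                                        frame, frameSize);
        if (ZSTD_isError(readSize)) return 1;

        printf("variant %u, %zu bytes: %s\n", magicVariant, readSize, out);
        return 0;
    }

Decoders that do not care about the payload still skip such frames transparently, which is why a round trip like this is safe to embed in an otherwise ordinary zstd stream.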
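The update also replaces the one-off ZSTD_useRowMatchFinderMode_e with the generic ZSTD_paramSwitch_e, which drives both ZSTD_c_useRowMatchFinder and the renamed ZSTD_c_useBlockSplitter (formerly ZSTD_c_splitBlocks). A sketch of force-enabling both switches; it assumes ZSTD_STATIC_LINKING_ONLY, since the two parameters remain experimental:

    /* Build a compression context with both paramSwitch-controlled features on.
     * ZSTD_ps_auto (the default) would instead let the library decide at runtime. */
    #define ZSTD_STATIC_LINKING_ONLY
    #include "zstd.h"

    static ZSTD_CCtx* make_tuned_cctx(void)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        if (cctx == NULL) return NULL;

        ZSTD_CCtx_setParameter(cctx, ZSTD_c_useBlockSplitter, ZSTD_ps_enable);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_useRowMatchFinder, ZSTD_ps_enable);

        /* Per the parameter comment above, the row matchfinder only applies to the
         * greedy/lazy/lazy2 strategies, so pick one explicitly. */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, ZSTD_lazy2);
        return cctx;
    }

Code written against the 1.5.0 names (ZSTD_c_splitBlocks, the ZSTD_urm_* values) no longer compiles after this sync and needs the renames above.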
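Finally, since the bundled copy moves from 1.5.0 to 1.5.2, downstream code that wants to call the new entry points defensively can gate on the version macros. A small sketch using only stable API:

    /* Compile-time and run-time version gates around the 1.5.0 -> 1.5.2 bump. */
    #include <stdio.h>
    #include "zstd.h"

    int main(void)
    {
    #if ZSTD_VERSION_NUMBER >= 10502      /* 1*100*100 + 5*100 + 2, per the formula above */
        printf("headers: zstd >= 1.5.2\n");
    #endif
        /* The dynamically linked library may differ from the headers; check it too. */
        if (ZSTD_versionNumber() >= 10502) {
            printf("runtime: zstd %s\n", ZSTD_versionString());
        }
        return 0;
    }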