Diffstat (limited to 'thirdparty/harfbuzz/src/hb-bit-set.hh')
-rw-r--r--  thirdparty/harfbuzz/src/hb-bit-set.hh | 64
1 file changed, 53 insertions, 11 deletions
diff --git a/thirdparty/harfbuzz/src/hb-bit-set.hh b/thirdparty/harfbuzz/src/hb-bit-set.hh
index fcaff9f3be..11a4359dc9 100644
--- a/thirdparty/harfbuzz/src/hb-bit-set.hh
+++ b/thirdparty/harfbuzz/src/hb-bit-set.hh
@@ -97,6 +97,13 @@ struct hb_bit_set_t
     return true;
   }
 
+  void alloc (unsigned sz)
+  {
+    sz >>= (page_t::PAGE_BITS_LOG_2 - 1);
+    pages.alloc (sz);
+    page_map.alloc (sz);
+  }
+
   void reset ()
   {
     successful = true;
@@ -119,6 +126,14 @@ struct hb_bit_set_t
   }
   explicit operator bool () const { return !is_empty (); }
 
+  uint32_t hash () const
+  {
+    uint32_t h = 0;
+    for (auto &map : page_map)
+      h = h * 31 + hb_hash (map.major) + hb_hash (pages[map.index]);
+    return h;
+  }
+
   private:
   void dirty () { population = UINT_MAX; }
   public:
@@ -341,15 +356,14 @@ struct hb_bit_set_t
       return;
     population = other.population;
 
-    /* TODO switch to vector operator =. */
-    hb_memcpy ((void *) pages, (const void *) other.pages, count * pages.item_size);
-    hb_memcpy ((void *) page_map, (const void *) other.page_map, count * page_map.item_size);
+    page_map = other.page_map;
+    pages = other.pages;
   }
 
   bool is_equal (const hb_bit_set_t &other) const
   {
     if (has_population () && other.has_population () &&
-        get_population () != other.get_population ())
+        population != other.population)
       return false;
 
     unsigned int na = pages.length;
@@ -377,7 +391,7 @@ struct hb_bit_set_t
   bool is_subset (const hb_bit_set_t &larger_set) const
   {
     if (has_population () && larger_set.has_population () &&
-        get_population () != larger_set.get_population ())
+        population != larger_set.population)
       return false;
 
     uint32_t spi = 0;
@@ -874,7 +888,19 @@ struct hb_bit_set_t
 
   page_t *page_for (hb_codepoint_t g, bool insert = false)
   {
-    page_map_t map = {get_major (g), pages.length};
+    unsigned major = get_major (g);
+
+    /* The extra page_map length is necessary; can't just rely on vector here,
+     * since the next check would be tricked because a null page also has
+     * major==0, which we can't distinguish from an actualy major==0 page... */
+    if (likely (last_page_lookup < page_map.length))
+    {
+      auto &cached_page = page_map.arrayZ[last_page_lookup];
+      if (cached_page.major == major)
+        return &pages[cached_page.index];
+    }
+
+    page_map_t map = {major, pages.length};
     unsigned int i;
     if (!page_map.bfind (map, &i, HB_NOT_FOUND_STORE_CLOSEST))
     {
@@ -890,15 +916,31 @@ struct hb_bit_set_t
                (page_map.length - 1 - i) * page_map.item_size);
       page_map[i] = map;
     }
+
+    last_page_lookup = i;
     return &pages[page_map[i].index];
   }
   const page_t *page_for (hb_codepoint_t g) const
   {
-    page_map_t key = {get_major (g)};
-    const page_map_t *found = page_map.bsearch (key);
-    if (found)
-      return &pages[found->index];
-    return nullptr;
+    unsigned major = get_major (g);
+
+    /* The extra page_map length is necessary; can't just rely on vector here,
+     * since the next check would be tricked because a null page also has
+     * major==0, which we can't distinguish from an actualy major==0 page... */
+    if (likely (last_page_lookup < page_map.length))
+    {
+      auto &cached_page = page_map.arrayZ[last_page_lookup];
+      if (cached_page.major == major)
+        return &pages[cached_page.index];
+    }
+
+    page_map_t key = {major};
+    unsigned int i;
+    if (!page_map.bfind (key, &i))
+      return nullptr;
+
+    last_page_lookup = i;
+    return &pages[page_map[i].index];
+  }
   page_t &page_at (unsigned int i) { return pages[page_map[i].index]; }
   const page_t &page_at (unsigned int i) const { return pages[page_map[i].index]; }
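The new hash () folds each page-map entry into a running 31-multiplier polynomial hash, combining the entry's major number with the contents of the page it points to, so two sets holding the same pages in the same order hash equally. Below is a minimal standalone sketch of that combining scheme; the PageEntry type and the use of std::hash in place of hb_hash are assumptions for illustration, not HarfBuzz's actual types.

#include <cstdint>
#include <functional>
#include <vector>

/* Sketch of the order-dependent polynomial hash used by hash () above.
 * PageEntry is a hypothetical stand-in for a page_map_t slot plus its page;
 * std::hash stands in for hb_hash. */
struct PageEntry
{
  uint32_t major;               /* high bits shared by codepoints in the page */
  std::vector<uint64_t> bits;   /* the page's bit words */
};

static uint32_t hash_pages (const std::vector<PageEntry> &entries)
{
  uint32_t h = 0;
  for (const auto &e : entries)
  {
    /* Hash the page contents word by word. */
    uint32_t page_hash = 0;
    for (uint64_t word : e.bits)
      page_hash = page_hash * 31 + (uint32_t) std::hash<uint64_t> {} (word);
    /* Same shape as: h = h * 31 + hb_hash (map.major) + hb_hash (pages[map.index]); */
    h = h * 31 + (uint32_t) std::hash<uint32_t> {} (e.major) + page_hash;
  }
  return h;
}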
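Both page_for () overloads now share the same pattern: a single-entry cache (last_page_lookup) remembers the page-map slot hit by the previous lookup and is checked before falling back to binary search, which pays off when consecutive codepoints land on the same page. The following self-contained sketch shows that pattern with simplified stand-in types (PageMapEntry, Page, BitSetSketch) rather than HarfBuzz's page_map_t, page_t and bfind ().

#include <cstdint>
#include <vector>

/* Sketch of the single-entry page-lookup cache added above.
 * PageMapEntry, Page and BitSetSketch are hypothetical stand-ins for
 * hb_bit_set_t's page_map_t, page_t and the page_map.bfind () search. */
struct PageMapEntry { uint32_t major; uint32_t index; };
struct Page { uint64_t bits[8]; };

struct BitSetSketch
{
  std::vector<PageMapEntry> page_map;    /* sorted by major */
  std::vector<Page> pages;
  mutable uint32_t last_page_lookup = 0; /* slot hit by the previous lookup */

  const Page *page_for (uint32_t major) const
  {
    /* Fast path: re-check the cached slot first. The bounds check matters,
     * because a stale index from an earlier, larger map must not be trusted. */
    if (last_page_lookup < page_map.size () &&
        page_map[last_page_lookup].major == major)
      return &pages[page_map[last_page_lookup].index];

    /* Slow path: binary search the sorted page map. */
    uint32_t lo = 0, hi = (uint32_t) page_map.size ();
    while (lo < hi)
    {
      uint32_t mid = (lo + hi) / 2;
      if (page_map[mid].major < major) lo = mid + 1;
      else hi = mid;
    }
    if (lo == page_map.size () || page_map[lo].major != major)
      return nullptr;

    last_page_lookup = lo;   /* remember the hit for the next lookup */
    return &pages[page_map[lo].index];
  }
};

The bounds check in the fast path mirrors the comment in the patch: the cached index alone cannot be trusted, since a stale or zeroed slot would otherwise look like a valid major==0 page.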