Diffstat (limited to 'thirdparty/harfbuzz/src/hb-repacker.hh')
-rw-r--r--  thirdparty/harfbuzz/src/hb-repacker.hh  161
1 file changed, 158 insertions(+), 3 deletions(-)
diff --git a/thirdparty/harfbuzz/src/hb-repacker.hh b/thirdparty/harfbuzz/src/hb-repacker.hh
index 173fe4a2fb..61b142238e 100644
--- a/thirdparty/harfbuzz/src/hb-repacker.hh
+++ b/thirdparty/harfbuzz/src/hb-repacker.hh
@@ -29,10 +29,9 @@
#include "hb-open-type.hh"
#include "hb-map.hh"
-#include "hb-priority-queue.hh"
-#include "hb-serialize.hh"
#include "hb-vector.hh"
#include "graph/graph.hh"
+#include "graph/gsubgpos-graph.hh"
#include "graph/serialize.hh"
using graph::graph_t;
@@ -42,6 +41,143 @@ using graph::graph_t;
* docs/repacker.md
*/
+struct lookup_size_t
+{
+ unsigned lookup_index;
+ size_t size;
+ unsigned num_subtables;
+
+ static int cmp (const void* a, const void* b)
+ {
+ return cmp ((const lookup_size_t*) a,
+ (const lookup_size_t*) b);
+ }
+
+ static int cmp (const lookup_size_t* a, const lookup_size_t* b)
+ {
+ double subtables_per_byte_a = (double) a->num_subtables / (double) a->size;
+ double subtables_per_byte_b = (double) b->num_subtables / (double) b->size;
+ if (subtables_per_byte_a == subtables_per_byte_b) {
+ return b->lookup_index - a->lookup_index;
+ }
+
+ double cmp = subtables_per_byte_b - subtables_per_byte_a;
+ if (cmp < 0) return -1;
+ if (cmp > 0) return 1;
+ return 0;
+ }
+};
+
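For intuition, the comparator above orders lookups by descending subtable density (subtables per byte), breaking ties by lookup index (higher first). A minimal standalone sketch of that ordering, not part of the patch; the indices, sizes, and subtable counts are made up:

#include <cstdlib>
#include <cassert>

static void density_ordering_example ()
{
  // Lookup 0: 2 subtables / 1000 bytes = 0.002 subtables per byte.
  // Lookup 1: 1 subtable  /  200 bytes = 0.005 subtables per byte.
  lookup_size_t entries[] = { {0, 1000, 2}, {1, 200, 1} };
  std::qsort (entries, 2, sizeof (lookup_size_t), lookup_size_t::cmp);

  // The denser lookup sorts first, so the greedy pass in
  // _promote_extensions_if_needed () considers it first when filling
  // the non-extension budget.
  assert (entries[0].lookup_index == 1);
}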
+static inline
+bool _presplit_subtables_if_needed (graph::gsubgpos_graph_context_t& ext_context)
+{
+ // For each lookup, check the size of its subtables and split them as needed
+ // so that no subtable is at risk of overflowing (where splitting is supported
+ // for that subtable type).
+ //
+ // TODO(grieger): de-dup newly added nodes as necessary. Probably just want a full
+ // de-dup pass after this processing is done. Not strictly necessary, since splits
+ // are only done where overflow is likely, so any de-dup would probably be undone
+ // later anyway.
+ for (unsigned lookup_index : ext_context.lookups.keys ())
+ {
+ graph::Lookup* lookup = ext_context.lookups.get(lookup_index);
+ if (!lookup->split_subtables_if_needed (ext_context, lookup_index))
+ return false;
+ }
+
+ return true;
+}
+
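The splitting above is motivated by the 16-bit offsets used throughout GSUB/GPOS: an Offset16 field can address at most 65535 bytes. A sketch of the bound involved; the helper name here is hypothetical and not part of the patch:

#include <cstddef>

// Hypothetical, for illustration only: a subtable whose serialized
// subgraph approaches the Offset16 limit risks overflowing and is a
// candidate for splitting.
constexpr size_t MAX_OFFSET_16 = (1u << 16) - 1; // 65535

static bool subtable_at_risk (size_t subgraph_size)
{
  return subgraph_size > MAX_OFFSET_16;
}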
+/*
+ * Analyze the lookups in a GSUB/GPOS table and decide if any should be promoted
+ * to extension lookups.
+ */
+static inline
+bool _promote_extensions_if_needed (graph::gsubgpos_graph_context_t& ext_context)
+{
+ // Simple Algorithm (v1, current):
+ // 1. Calculate how many bytes each non-extension lookup consumes.
+ // 2. Select up to 64k worth of those to remain as non-extension (greedy, highest subtables per byte first).
+ // 3. Promote the rest.
+ //
+ // Advanced Algorithm (v2, not implemented):
+ // 1. Perform connected component analysis using lookups as roots.
+ // 2. Compute size of each connected component.
+ // 3. Select up to 64k worth of connected components to remain as non-extensions.
+ // (greedy, highest subtables per byte first)
+ // 4. Promote the rest.
+
+ // TODO(garretrieger): support extension demotion, then consider all lookups. Requires advanced algo.
+ // TODO(garretrieger): also support extension promotion during iterative resolution phase, then
+ // we can use a less conservative threshold here.
+ // TODO(grieger): skip this for the 24 bit case.
+ if (!ext_context.lookups) return true;
+
+ hb_vector_t<lookup_size_t> lookup_sizes;
+ lookup_sizes.alloc (ext_context.lookups.get_population ());
+
+ for (unsigned lookup_index : ext_context.lookups.keys ())
+ {
+ const graph::Lookup* lookup = ext_context.lookups.get(lookup_index);
+ hb_set_t visited;
+ lookup_sizes.push (lookup_size_t {
+ lookup_index,
+ ext_context.graph.find_subgraph_size (lookup_index, visited),
+ lookup->number_of_subtables (),
+ });
+ }
+
+ lookup_sizes.qsort ();
+
+ size_t lookup_list_size = ext_context.graph.vertices_[ext_context.lookup_list_index].table_size ();
+ size_t l2_l3_size = lookup_list_size; // Lookup List + Lookups
+ size_t l3_l4_size = 0; // Lookups + SubTables
+ size_t l4_plus_size = 0; // SubTables + their descendants
+
+ // Start by assuming all lookups use extension subtables; this size is removed later
+ // for any lookup that we decide not to make an extension.
+ for (auto p : lookup_sizes)
+ {
+ unsigned subtables_size = p.num_subtables * 8;
+ l3_l4_size += subtables_size;
+ l4_plus_size += subtables_size;
+ }
+
+ bool layers_full = false;
+ for (auto p : lookup_sizes)
+ {
+ const graph::Lookup* lookup = ext_context.lookups.get(p.lookup_index);
+ if (lookup->is_extension (ext_context.table_tag))
+ // Already an extension, so its size is counted by the loop above.
+ continue;
+
+ if (!layers_full)
+ {
+ size_t lookup_size = ext_context.graph.vertices_[p.lookup_index].table_size ();
+ hb_set_t visited;
+ size_t subtables_size = ext_context.graph.find_subgraph_size (p.lookup_index, visited, 1) - lookup_size;
+ size_t remaining_size = p.size - subtables_size - lookup_size;
+
+ l2_l3_size += lookup_size;
+ l3_l4_size += lookup_size + subtables_size;
+ l3_l4_size -= p.num_subtables * 8;
+ l4_plus_size += subtables_size + remaining_size;
+
+ if (l2_l3_size < (1 << 16)
+ && l3_l4_size < (1 << 16)
+ && l4_plus_size < (1 << 16)) continue; // this lookup fits within all layer size limits
+
+ layers_full = true;
+ }
+
+ if (!ext_context.lookups.get(p.lookup_index)->make_extension (ext_context, p.lookup_index))
+ return false;
+ }
+
+ return true;
+}
+
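The p.num_subtables * 8 terms above account for the wrapper each promoted subtable acquires: an OpenType extension subtable (ExtensionSubstFormat1 in GSUB, ExtensionPosFormat1 in GPOS) is a fixed 8 bytes, and its 32-bit offset is what relaxes the usual 16-bit reach. An illustrative layout; this struct is a sketch, not HarfBuzz's actual type:

#include <cstdint>

struct ExtensionFormat1Sketch
{
  uint16_t format;              // always 1
  uint16_t extensionLookupType; // original type of the wrapped subtable
  uint32_t extensionOffset;     // 32-bit offset to the wrapped subtable,
                                // relaxing the usual 16-bit limit
};
static_assert (sizeof (ExtensionFormat1Sketch) == 8, "fixed 8-byte wrapper");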
static inline
bool _try_isolating_subgraphs (const hb_vector_t<graph::overflow_record_t>& overflows,
graph_t& sorted_graph)
@@ -157,7 +293,8 @@ template<typename T>
inline hb_blob_t*
hb_resolve_overflows (const T& packed,
hb_tag_t table_tag,
- unsigned max_rounds = 20) {
+ unsigned max_rounds = 20,
+ bool recalculate_extensions = false) {
graph_t sorted_graph (packed);
sorted_graph.sort_shortest_distance ();
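A hypothetical call site for the extended signature; only the parameters come from the patch, the packed object is assumed to be the serializer's object graph, and the default of false keeps existing callers unchanged:

// Retry packing a GPOS table with subtable splitting and extension
// promotion enabled.
hb_blob_t* blob = hb_resolve_overflows (packed, HB_OT_TAG_GPOS,
                                        20,    /* max_rounds */
                                        true); /* recalculate_extensions */
if (!blob) { /* all rounds failed; handle the packing error */ }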
@@ -167,13 +304,31 @@ hb_resolve_overflows (const T& packed,
return graph::serialize (sorted_graph);
}
+ graph::gsubgpos_graph_context_t ext_context (table_tag, sorted_graph);
if ((table_tag == HB_OT_TAG_GPOS
|| table_tag == HB_OT_TAG_GSUB)
&& will_overflow)
{
+ if (recalculate_extensions)
+ {
+ DEBUG_MSG (SUBSET_REPACK, nullptr, "Splitting subtables if needed.");
+ if (!_presplit_subtables_if_needed (ext_context)) {
+ DEBUG_MSG (SUBSET_REPACK, nullptr, "Subtable splitting failed.");
+ return nullptr;
+ }
+
+ DEBUG_MSG (SUBSET_REPACK, nullptr, "Promoting lookups to extensions if needed.");
+ if (!_promote_extensions_if_needed (ext_context)) {
+ DEBUG_MSG (SUBSET_REPACK, nullptr, "Extensions promotion failed.");
+ return nullptr;
+ }
+ }
+
DEBUG_MSG (SUBSET_REPACK, nullptr, "Assigning spaces to 32 bit subgraphs.");
if (sorted_graph.assign_spaces ())
sorted_graph.sort_shortest_distance ();
+ else
+ sorted_graph.sort_shortest_distance_if_needed ();
}
unsigned round = 0;