Diffstat (limited to 'thirdparty/harfbuzz/src/hb-ot-layout-common.hh')
-rw-r--r-- | thirdparty/harfbuzz/src/hb-ot-layout-common.hh | 350
1 file changed, 215 insertions, 135 deletions
diff --git a/thirdparty/harfbuzz/src/hb-ot-layout-common.hh b/thirdparty/harfbuzz/src/hb-ot-layout-common.hh
index 579abf011a..0f424f5aa6 100644
--- a/thirdparty/harfbuzz/src/hb-ot-layout-common.hh
+++ b/thirdparty/harfbuzz/src/hb-ot-layout-common.hh
@@ -478,7 +478,7 @@ struct IndexArray : Array16Of<Index>
   {
     if (_count)
     {
-      + this->sub_array (start_offset, _count)
+      + this->as_array ().sub_array (start_offset, _count)
       | hb_sink (hb_array (_indexes, *_count))
       ;
     }
@@ -658,7 +658,7 @@ struct FeatureParamsCharacterVariants
   {
     if (char_count)
     {
-      + characters.sub_array (start_offset, char_count)
+      + characters.as_array ().sub_array (start_offset, char_count)
       | hb_sink (hb_array (chars, *char_count))
       ;
     }
@@ -932,7 +932,7 @@ struct RecordArrayOf : SortedArray16Of<Record<Type>>
   {
     if (record_count)
     {
-      + this->sub_array (start_offset, record_count)
+      + this->as_array ().sub_array (start_offset, record_count)
       | hb_map (&Record<Type>::tag)
       | hb_sink (hb_array (record_tags, *record_count))
       ;
@@ -980,18 +980,16 @@ struct RecordListOfFeature : RecordListOf<Feature>
     auto *out = c->serializer->start_embed (*this);
     if (unlikely (!out || !c->serializer->extend_min (out))) return_trace (false);

-    unsigned count = this->len;
-
-    + hb_zip (*this, hb_range (count))
-    | hb_filter (l->feature_index_map, hb_second)
-    | hb_apply ([l, out, this] (const hb_pair_t<const Record<Feature>&, unsigned>& _)
+    + hb_enumerate (*this)
+    | hb_filter (l->feature_index_map, hb_first)
+    | hb_apply ([l, out, this] (const hb_pair_t<unsigned, const Record<Feature>&>& _)
                 {
                   const Feature *f_sub = nullptr;
                   const Feature **f = nullptr;
-                  if (l->feature_substitutes_map->has (_.second, &f))
+                  if (l->feature_substitutes_map->has (_.first, &f))
                     f_sub = *f;

-                  subset_record_array (l, out, this, f_sub) (_.first);
+                  subset_record_array (l, out, this, f_sub) (_.second);
                 })
     ;

@@ -1079,7 +1077,7 @@ struct LangSys
     auto *out = c->serializer->start_embed (*this);
     if (unlikely (!out || !c->serializer->extend_min (out))) return_trace (false);

-    const unsigned *v;
+    const uint32_t *v;
     out->reqFeatureIndex = l->feature_index_map->has (reqFeatureIndex, &v) ? *v : 0xFFFFu;
     if (!l->visitFeatureIndex (featureIndex.len))
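A note on the recurring refactor here and in the hunks below: hb_zip (*this, hb_range (count)) and hb_enumerate (*this) both pair each element with its running index, but the pair order flips — the hb_zip form yields (item, index) while hb_enumerate yields (index, item). That is why every accompanying hb_first/hb_second (and _.first/_.second) accessor is swapped, and why the explicit count locals disappear. A minimal sketch of the equivalence, over a hypothetical items vector rather than any type from this file:

    // hb_enumerate (it) behaves like hb_zip (hb_range (), it):
    // each element arrives as a pair with the index first, the item second.
    hb_vector_t<int> items;
    ...
    + hb_enumerate (hb_iter (items))
    | hb_apply ([] (const auto& _)
                { printf ("%u: %d\n", _.first, (int) _.second); })
    ;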
@@ -1147,7 +1145,6 @@ struct Script
       return;
     }

-    unsigned langsys_count = get_lang_sys_count ();
     if (has_default_lang_sys ())
     {
       //only collect features from non-redundant langsys
@@ -1156,24 +1153,24 @@ struct Script
       d.collect_features (c);
     }

-    for (auto _ : + hb_zip (langSys, hb_range (langsys_count)))
+    for (auto _ : + hb_enumerate (langSys))
     {
-      const LangSys& l = this+_.first.offset;
+      const LangSys& l = this+_.second.offset;
       if (!c->visitLangsys (l.get_feature_count ())) continue;
       if (l.compare (d, c->duplicate_feature_map)) continue;

       l.collect_features (c);
-      c->script_langsys_map->get (script_index)->add (_.second);
+      c->script_langsys_map->get (script_index)->add (_.first);
     }
   }
   else
   {
-    for (auto _ : + hb_zip (langSys, hb_range (langsys_count)))
+    for (auto _ : + hb_enumerate (langSys))
     {
-      const LangSys& l = this+_.first.offset;
+      const LangSys& l = this+_.second.offset;
       if (!c->visitLangsys (l.get_feature_count ())) continue;
       l.collect_features (c);
-      c->script_langsys_map->get (script_index)->add (_.second);
+      c->script_langsys_map->get (script_index)->add (_.first);
     }
   }
 }
@@ -1211,10 +1208,9 @@ struct Script
     const hb_set_t *active_langsys = l->script_langsys_map->get (l->cur_script_index);
     if (active_langsys)
     {
-      unsigned count = langSys.len;
-      + hb_zip (langSys, hb_range (count))
-      | hb_filter (active_langsys, hb_second)
-      | hb_map (hb_first)
+      + hb_enumerate (langSys)
+      | hb_filter (active_langsys, hb_first)
+      | hb_map (hb_second)
       | hb_filter ([=] (const Record<LangSys>& record) {return l->visitLangSys (); })
       | hb_apply (subset_record_array (l, &(out->langSys), this))
       ;
@@ -1250,12 +1246,11 @@ struct RecordListOfScript : RecordListOf<Script>
     auto *out = c->serializer->start_embed (*this);
     if (unlikely (!out || !c->serializer->extend_min (out))) return_trace (false);

-    unsigned count = this->len;
-    for (auto _ : + hb_zip (*this, hb_range (count)))
+    for (auto _ : + hb_enumerate (*this))
     {
       auto snap = c->serializer->snapshot ();
-      l->cur_script_index = _.second;
-      bool ret = _.first.subset (l, this);
+      l->cur_script_index = _.first;
+      bool ret = _.second.subset (l, this);
       if (!ret) c->serializer->revert (snap);
       else out->len++;
     }
@@ -1388,7 +1383,13 @@ struct Lookup
       outMarkFilteringSet = markFilteringSet;
     }

-    return_trace (out->subTable.len);
+    // Always keep the lookup even if it's empty. The rest of layout subsetting depends on lookup
+    // indices being consistent with those computed during planning. So if an empty lookup is
+    // discarded during the subset phase it will invalidate all subsequent lookup indices.
+    // Generally we shouldn't end up with an empty lookup as we pre-prune them during the planning
+    // phase, but it can happen in rare cases such as when during closure subtable is considered
+    // degenerate (see: https://github.com/harfbuzz/harfbuzz/issues/3853)
+    return true;
   }

   template <typename TSubTable>
@@ -1454,10 +1455,9 @@ struct LookupOffsetList : List16OfOffsetTo<TLookup, OffsetType>
     auto *out = c->serializer->start_embed (this);
     if (unlikely (!out || !c->serializer->extend_min (out))) return_trace (false);

-    unsigned count = this->len;
-    + hb_zip (*this, hb_range (count))
-    | hb_filter (l->lookup_index_map, hb_second)
-    | hb_map (hb_first)
+    + hb_enumerate (*this)
+    | hb_filter (l->lookup_index_map, hb_first)
+    | hb_map (hb_second)
     | hb_apply (subset_offset_array (c, *out, this))
     ;
     return_trace (true);
@@ -1491,7 +1491,7 @@ static bool ClassDef_remap_and_serialize (hb_serialize_context_t *c,
     klass_map->set (0, 0);

   unsigned idx = klass_map->has (0) ? 1 : 0;
-  for (const unsigned k: klasses.iter ())
+  for (const unsigned k: klasses)
   {
     if (klass_map->has (k)) continue;
     klass_map->set (k, idx);
@@ -1524,6 +1524,11 @@ struct ClassDefFormat1_3
     return classValue[(unsigned int) (glyph_id - startGlyph)];
   }

+  unsigned get_population () const
+  {
+    return classValue.len;
+  }
+
   template<typename Iterator,
            hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
   bool serialize (hb_serialize_context_t *c,
@@ -1548,7 +1553,7 @@ struct ClassDefFormat1_3
       startGlyph = glyph_min;
       if (unlikely (!classValue.serialize (c, glyph_count))) return_trace (false);
-      for (const hb_pair_t<hb_codepoint_t, unsigned> gid_klass_pair : + it)
+      for (const hb_pair_t<hb_codepoint_t, uint32_t> gid_klass_pair : + it)
       {
         unsigned idx = gid_klass_pair.first - glyph_min;
         classValue[idx] = gid_klass_pair.second;
@@ -1639,11 +1644,10 @@ struct ClassDefFormat1_3

   bool intersects (const hb_set_t *glyphs) const
   {
-    /* TODO Speed up, using hb_set_next()? */
     hb_codepoint_t start = startGlyph;
     hb_codepoint_t end = startGlyph + classValue.len;
     for (hb_codepoint_t iter = startGlyph - 1;
-         hb_set_next (glyphs, &iter) && iter < end;)
+         glyphs->next (&iter) && iter < end;)
       if (classValue[iter - start]) return true;
     return false;
   }
@@ -1654,10 +1658,10 @@ struct ClassDefFormat1_3
     {
       /* Match if there's any glyph that is not listed! */
       hb_codepoint_t g = HB_SET_VALUE_INVALID;
-      if (!hb_set_next (glyphs, &g)) return false;
+      if (!glyphs->next (&g)) return false;
       if (g < startGlyph) return true;
       g = startGlyph + count - 1;
-      if (hb_set_next (glyphs, &g)) return true;
+      if (glyphs->next (&g)) return true;
       /* Fall through. */
     }
     /* TODO Speed up, using set overlap first? */
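The hb_set_next changes in the surrounding hunks swap the public C iteration helper for the equivalent hb_set_t member: both advance a cursor initialized to HB_SET_VALUE_INVALID to the next element and return false once the set is exhausted. A sketch of the two forms side by side (process () is a hypothetical callback, not from this file):

    for (hb_codepoint_t g = HB_SET_VALUE_INVALID; hb_set_next (set, &g);)
      process (g);  // public C-API form being replaced
    for (hb_codepoint_t g = HB_SET_VALUE_INVALID; set->next (&g);)
      process (g);  // member form now used internally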
@@ -1675,12 +1679,12 @@ struct ClassDefFormat1_3
     if (klass == 0)
     {
       unsigned start_glyph = startGlyph;
-      for (unsigned g = HB_SET_VALUE_INVALID;
-           hb_set_next (glyphs, &g) && g < start_glyph;)
+      for (uint32_t g = HB_SET_VALUE_INVALID;
+           glyphs->next (&g) && g < start_glyph;)
        intersect_glyphs->add (g);

-      for (unsigned g = startGlyph + count - 1;
-           hb_set_next (glyphs, &g);)
+      for (uint32_t g = startGlyph + count - 1;
+           glyphs-> next (&g);)
        intersect_glyphs->add (g);

       return;
@@ -1696,7 +1700,7 @@ struct ClassDefFormat1_3
     unsigned start_glyph = startGlyph;
     unsigned end_glyph = start_glyph + count;
     for (unsigned g = startGlyph - 1;
-         hb_set_next (glyphs, &g) && g < end_glyph;)
+         glyphs->next (&g) && g < end_glyph;)
       if (classValue.arrayZ[g - start_glyph] == klass)
        intersect_glyphs->add (g);
 #endif
@@ -1739,6 +1743,14 @@ struct ClassDefFormat2_4
     return rangeRecord.bsearch (glyph_id).value;
   }

+  unsigned get_population () const
+  {
+    typename Types::large_int ret = 0;
+    for (const auto &r : rangeRecord)
+      ret += r.get_population ();
+    return ret > UINT_MAX ? UINT_MAX : (unsigned) ret;
+  }
+
   template<typename Iterator,
            hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
   bool serialize (hb_serialize_context_t *c,
@@ -1802,28 +1814,45 @@ struct ClassDefFormat2_4
   {
     TRACE_SUBSET (this);
     const hb_map_t &glyph_map = *c->plan->glyph_map_gsub;
+    const hb_set_t &glyph_set = *c->plan->glyphset_gsub ();

     hb_sorted_vector_t<hb_pair_t<hb_codepoint_t, hb_codepoint_t>> glyph_and_klass;
     hb_set_t orig_klasses;

-    unsigned num_source_glyphs = c->plan->source->get_num_glyphs ();
-    unsigned count = rangeRecord.len;
-    for (unsigned i = 0; i < count; i++)
+    if (glyph_set.get_population () * hb_bit_storage ((unsigned) rangeRecord.len) / 2
+        < get_population ())
     {
-      unsigned klass = rangeRecord[i].value;
-      if (!klass) continue;
-      hb_codepoint_t start = rangeRecord[i].first;
-      hb_codepoint_t end = hb_min (rangeRecord[i].last + 1, num_source_glyphs);
-      for (hb_codepoint_t g = start; g < end; g++)
+      for (hb_codepoint_t g : glyph_set)
       {
-       hb_codepoint_t new_gid = glyph_map[g];
+       unsigned klass = get_class (g);
+       if (!klass) continue;
+       hb_codepoint_t new_gid = glyph_map[g];
        if (new_gid == HB_MAP_VALUE_INVALID) continue;
-       if (glyph_filter && !glyph_filter->has (g)) continue;
-
+       if (glyph_filter && !glyph_filter->has (g)) continue;
        glyph_and_klass.push (hb_pair (new_gid, klass));
        orig_klasses.add (klass);
       }
     }
+    else
+    {
+      unsigned num_source_glyphs = c->plan->source->get_num_glyphs ();
+      for (auto &range : rangeRecord)
+      {
+       unsigned klass = range.value;
+       if (!klass) continue;
+       hb_codepoint_t start = range.first;
+       hb_codepoint_t end = hb_min (range.last + 1, num_source_glyphs);
+       for (hb_codepoint_t g = start; g < end; g++)
+       {
+         hb_codepoint_t new_gid = glyph_map[g];
+         if (new_gid == HB_MAP_VALUE_INVALID) continue;
+         if (glyph_filter && !glyph_filter->has (g)) continue;
+
+         glyph_and_klass.push (hb_pair (new_gid, klass));
+         orig_klasses.add (klass);
+       }
+      }
+    }

     const hb_set_t& glyphset = *c->plan->glyphset_gsub ();
     unsigned glyph_count = glyph_filter
@@ -1850,10 +1879,9 @@ struct ClassDefFormat2_4
   template <typename set_t>
   bool collect_coverage (set_t *glyphs) const
   {
-    unsigned int count = rangeRecord.len;
-    for (unsigned int i = 0; i < count; i++)
-      if (rangeRecord[i].value)
-       if (unlikely (!rangeRecord[i].collect_coverage (glyphs)))
+    for (auto &range : rangeRecord)
+      if (range.value)
+       if (unlikely (!range.collect_coverage (glyphs)))
          return false;
     return true;
   }
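The new branch in ClassDefFormat2_4::subset above chooses between two enumeration strategies using a cost estimate: classifying every retained glyph costs one binary search over rangeRecord each (roughly hb_bit_storage (len) / 2 comparisons on average), while walking the ranges costs one step per glyph the ClassDef covers, i.e. get_population () steps — hence the new get_population () helpers. As a worked example, with 1,000 retained glyphs and 4,096 ranges, hb_bit_storage gives 13, so the per-glyph estimate is 1000 * 13 / 2 = 6,500; the binary-search path is taken whenever the ClassDef covers more than 6,500 glyphs. The same population-scaled estimate reappears in the intersects and intersected_class_glyphs hunks below.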
@@ -1861,11 +1889,10 @@ struct ClassDefFormat2_4
   template <typename set_t>
   bool collect_class (set_t *glyphs, unsigned int klass) const
   {
-    unsigned int count = rangeRecord.len;
-    for (unsigned int i = 0; i < count; i++)
+    for (auto &range : rangeRecord)
     {
-      if (rangeRecord[i].value == klass)
-       if (unlikely (!rangeRecord[i].collect_coverage (glyphs)))
+      if (range.value == klass)
+       if (unlikely (!range.collect_coverage (glyphs)))
          return false;
     }
     return true;
@@ -1873,32 +1900,32 @@ struct ClassDefFormat2_4

   bool intersects (const hb_set_t *glyphs) const
   {
-    /* TODO Speed up, using hb_set_next() and bsearch()? */
-    unsigned int count = rangeRecord.len;
-    for (unsigned int i = 0; i < count; i++)
+    if (rangeRecord.len > glyphs->get_population () * hb_bit_storage ((unsigned) rangeRecord.len) / 2)
     {
-      const auto& range = rangeRecord[i];
-      if (range.intersects (*glyphs) && range.value)
-       return true;
+      for (hb_codepoint_t g = HB_SET_VALUE_INVALID; glyphs->next (&g);)
+       if (get_class (g))
+         return true;
+      return false;
     }
-    return false;
+
+    return hb_any (+ hb_iter (rangeRecord)
+                   | hb_map ([glyphs] (const RangeRecord<Types> &range) { return range.intersects (*glyphs) && range.value; }));
   }

   bool intersects_class (const hb_set_t *glyphs, uint16_t klass) const
   {
-    unsigned int count = rangeRecord.len;
     if (klass == 0)
     {
       /* Match if there's any glyph that is not listed! */
       hb_codepoint_t g = HB_SET_VALUE_INVALID;
-      for (unsigned int i = 0; i < count; i++)
+      for (auto &range : rangeRecord)
       {
-       if (!hb_set_next (glyphs, &g))
+       if (!glyphs->next (&g))
          break;
-       if (g < rangeRecord[i].first)
+       if (g < range.first)
          return true;
-       g = rangeRecord[i].last;
+       g = range.last;
       }
-      if (g != HB_SET_VALUE_INVALID && hb_set_next (glyphs, &g))
+      if (g != HB_SET_VALUE_INVALID && glyphs->next (&g))
        return true;
       /* Fall through. */
     }
@@ -1910,49 +1937,49 @@ struct ClassDefFormat2_4
   void intersected_class_glyphs (const hb_set_t *glyphs, unsigned klass, hb_set_t *intersect_glyphs) const
   {
-    unsigned count = rangeRecord.len;
     if (klass == 0)
     {
       hb_codepoint_t g = HB_SET_VALUE_INVALID;
-      for (unsigned int i = 0; i < count; i++)
+      for (auto &range : rangeRecord)
       {
-       if (!hb_set_next (glyphs, &g))
+       if (!glyphs->next (&g))
          goto done;
-       while (g < rangeRecord[i].first)
+       while (g < range.first)
        {
          intersect_glyphs->add (g);
-         if (!hb_set_next (glyphs, &g))
+         if (!glyphs->next (&g))
            goto done;
        }
-       g = rangeRecord[i].last;
+       g = range.last;
       }
-      while (hb_set_next (glyphs, &g))
+      while (glyphs->next (&g))
       intersect_glyphs->add (g);
       done:

       return;
     }

-#if 0
-    /* The following implementation is faster asymptotically, but slower
-     * in practice. */
-    if ((count >> 3) > glyphs->get_population ())
+    unsigned count = rangeRecord.len;
+    if (count > glyphs->get_population () * hb_bit_storage (count) * 8)
     {
       for (hb_codepoint_t g = HB_SET_VALUE_INVALID;
-          hb_set_next (glyphs, &g);)
-       if (rangeRecord.as_array ().bfind (g))
+          glyphs->next (&g);)
+      {
+       unsigned i;
+       if (rangeRecord.as_array ().bfind (g, &i) &&
+           rangeRecord.arrayZ[i].value == klass)
          intersect_glyphs->add (g);
+      }
       return;
     }
-#endif

-    for (unsigned int i = 0; i < count; i++)
+    for (auto &range : rangeRecord)
     {
-      if (rangeRecord[i].value != klass) continue;
+      if (range.value != klass) continue;

-      unsigned end = rangeRecord[i].last + 1;
-      for (hb_codepoint_t g = rangeRecord[i].first - 1;
-          hb_set_next (glyphs, &g) && g < end;)
+      unsigned end = range.last + 1;
+      for (hb_codepoint_t g = range.first - 1;
+          glyphs->next (&g) && g < end;)
        intersect_glyphs->add (g);
     }
   }

@@ -1961,25 +1988,24 @@ struct ClassDefFormat2_4
   {
     if (glyphs->is_empty ()) return;

-    unsigned count = rangeRecord.len;
     hb_codepoint_t g = HB_SET_VALUE_INVALID;
-    for (unsigned int i = 0; i < count; i++)
+    for (auto &range : rangeRecord)
     {
-      if (!hb_set_next (glyphs, &g))
+      if (!glyphs->next (&g))
        break;
-      if (g < rangeRecord[i].first)
+      if (g < range.first)
      {
        intersect_classes->add (0);
        break;
      }
-      g = rangeRecord[i].last;
+      g = range.last;
     }
-    if (g != HB_SET_VALUE_INVALID && hb_set_next (glyphs, &g))
+    if (g != HB_SET_VALUE_INVALID && glyphs->next (&g))
      intersect_classes->add (0);

-    for (const auto& record : rangeRecord.iter ())
-      if (record.intersects (*glyphs))
-       intersect_classes->add (record.value);
+    for (const auto& range : rangeRecord)
+      if (range.intersects (*glyphs))
+       intersect_classes->add (range.value);
   }

   protected:
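The intersected_class_glyphs rewrite above promotes the previously #if 0'd binary-search path to live code with two corrections: bfind now returns the index of the matching range so its class can be compared against klass (the disabled version would have collected glyphs from ranges of any class), and the crossover test scales the set population by hb_bit_storage (count) * 8 instead of the old count >> 3 guess, so the asymptotically faster path only runs when the set is small enough for it to win in practice.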
@@ -1994,10 +2020,8 @@ struct ClassDef
 {
   /* Has interface. */
-  static constexpr unsigned SENTINEL = 0;
-  typedef unsigned int value_t;
-  value_t operator [] (hb_codepoint_t k) const { return get (k); }
-  bool has (hb_codepoint_t k) const { return (*this)[k] != SENTINEL; }
+  unsigned operator [] (hb_codepoint_t k) const { return get (k); }
+  bool has (hb_codepoint_t k) const { return (*this)[k]; }
   /* Projection. */
   hb_codepoint_t operator () (hb_codepoint_t k) const { return get (k); }
@@ -2015,6 +2039,19 @@ struct ClassDef
     }
   }

+  unsigned get_population () const
+  {
+    switch (u.format) {
+    case 1: return u.format1.get_population ();
+    case 2: return u.format2.get_population ();
+#ifndef HB_NO_BEYOND_64K
+    case 3: return u.format3.get_population ();
+    case 4: return u.format4.get_population ();
+#endif
+    default:return NOT_COVERED;
+    }
+  }
+
   template<typename Iterator,
            hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
   bool serialize (hb_serialize_context_t *c, Iterator it_with_class_zero)
@@ -2332,7 +2369,7 @@ struct VarRegionList
   {
     unsigned int backward = region_map.backward (r);
     if (backward >= region_count) return_trace (false);
-    memcpy (&axesZ[axisCount * r], &src->axesZ[axisCount * backward], VarRegionAxis::static_size * axisCount);
+    hb_memcpy (&axesZ[axisCount * r], &src->axesZ[axisCount * backward], VarRegionAxis::static_size * axisCount);
   }

   return_trace (true);
@@ -2442,21 +2479,26 @@ struct VarData
     unsigned ri_count = src->regionIndices.len;
     enum delta_size_t { kZero=0, kNonWord, kWord };
     hb_vector_t<delta_size_t> delta_sz;
-    hb_vector_t<unsigned int> ri_map;  /* maps old index to new index */
+    hb_vector_t<unsigned int> ri_map;  /* maps new index to old index */
     delta_sz.resize (ri_count);
     ri_map.resize (ri_count);
     unsigned int new_word_count = 0;
     unsigned int r;

+    const HBUINT8 *src_delta_bytes = src->get_delta_bytes ();
+    unsigned src_row_size = src->get_row_size ();
+    unsigned src_word_count = src->wordCount ();
+    bool src_long_words = src->longWords ();
+
     bool has_long = false;
-    if (src->longWords ())
+    if (src_long_words)
     {
-      for (r = 0; r < ri_count; r++)
+      for (r = 0; r < src_word_count; r++)
      {
        for (unsigned int i = 0; i < inner_map.get_next_value (); i++)
        {
          unsigned int old = inner_map.backward (i);
-         int32_t delta = src->get_item_delta (old, r);
+         int32_t delta = src->get_item_delta_fast (old, r, src_delta_bytes, src_row_size);
          if (delta < -65536 || 65535 < delta)
          {
            has_long = true;
@@ -2470,11 +2512,13 @@ struct VarData
     signed max_threshold = has_long ? +65535 : +127;
     for (r = 0; r < ri_count; r++)
     {
+      bool short_circuit = src_long_words == has_long && src_word_count <= r;
+
       delta_sz[r] = kZero;
       for (unsigned int i = 0; i < inner_map.get_next_value (); i++)
       {
        unsigned int old = inner_map.backward (i);
-       int32_t delta = src->get_item_delta (old, r);
+       int32_t delta = src->get_item_delta_fast (old, r, src_delta_bytes, src_row_size);
        if (delta < min_threshold || max_threshold < delta)
        {
          delta_sz[r] = kWord;
@@ -2482,7 +2526,11 @@ struct VarData
          break;
        }
        else if (delta != 0)
+       {
          delta_sz[r] = kNonWord;
+         if (short_circuit)
+           break;
+       }
       }
     }
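The short_circuit flag added above exploits the source encoding: when column r is stored in the narrow width in the source (r >= src_word_count) and source and destination agree on word size (src_long_words == has_long), no delta in that column can exceed the narrow range, so kWord is impossible and the first non-zero delta settles the classification as kNonWord, letting the inner scan stop early.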
@@ -2492,7 +2540,8 @@ struct VarData
     for (r = 0; r < ri_count; r++)
       if (delta_sz[r])
       {
-       ri_map[r] = (delta_sz[r] == kWord)? word_index++ : non_word_index++;
+       unsigned new_r = (delta_sz[r] == kWord)? word_index++ : non_word_index++;
+       ri_map[new_r] = r;
        new_ri_count++;
       }

@@ -2502,14 +2551,20 @@ struct VarData
     if (unlikely (!c->extend (this))) return_trace (false);

-    for (r = 0; r < ri_count; r++)
-      if (delta_sz[r]) regionIndices[ri_map[r]] = region_map[src->regionIndices[r]];
+    for (r = 0; r < new_ri_count; r++)
+      regionIndices[r] = region_map[src->regionIndices[ri_map[r]]];

-    for (unsigned int i = 0; i < itemCount; i++)
+    HBUINT8 *delta_bytes = get_delta_bytes ();
+    unsigned row_size = get_row_size ();
+    unsigned count = itemCount;
+    for (unsigned int i = 0; i < count; i++)
     {
-      unsigned int old = inner_map.backward (i);
-      for (unsigned int r = 0; r < ri_count; r++)
-       if (delta_sz[r]) set_item_delta (i, ri_map[r], src->get_item_delta (old, r));
+      unsigned int old = inner_map.backward (i);
+      for (unsigned int r = 0; r < new_ri_count; r++)
+       set_item_delta_fast (i, r,
+                            src->get_item_delta_fast (old, ri_map[r],
+                                                      src_delta_bytes, src_row_size),
+                            delta_bytes, row_size);
     }

     return_trace (true);
@@ -2517,12 +2572,15 @@ struct VarData
   void collect_region_refs (hb_set_t &region_indices, const hb_inc_bimap_t &inner_map) const
   {
+    const HBUINT8 *delta_bytes = get_delta_bytes ();
+    unsigned row_size = get_row_size ();
+
     for (unsigned int r = 0; r < regionIndices.len; r++)
     {
-      unsigned int region = regionIndices[r];
+      unsigned int region = regionIndices.arrayZ[r];
       if (region_indices.has (region)) continue;

       for (unsigned int i = 0; i < inner_map.get_next_value (); i++)
-       if (get_item_delta (inner_map.backward (i), r) != 0)
+       if (get_item_delta_fast (inner_map.backward (i), r, delta_bytes, row_size) != 0)
        {
          region_indices.add (region);
          break;
@@ -2537,10 +2595,12 @@ struct VarData
   HBUINT8 *get_delta_bytes ()
   { return &StructAfter<HBUINT8> (regionIndices); }

-  int32_t get_item_delta (unsigned int item, unsigned int region) const
+  int32_t get_item_delta_fast (unsigned int item, unsigned int region,
+                               const HBUINT8 *delta_bytes, unsigned row_size) const
   {
-    if ( item >= itemCount || unlikely (region >= regionIndices.len)) return 0;
-    const HBINT8 *p = (const HBINT8 *) get_delta_bytes () + item * get_row_size ();
+    if (unlikely (item >= itemCount || region >= regionIndices.len)) return 0;
+
+    const HBINT8 *p = (const HBINT8 *) delta_bytes + item * row_size;
     unsigned word_count = wordCount ();
     bool is_long = longWords ();
     if (is_long)
@@ -2558,10 +2618,17 @@ struct VarData
       return (p + HBINT16::static_size * word_count)[region - word_count];
     }
   }
+  int32_t get_item_delta (unsigned int item, unsigned int region) const
+  {
+    return get_item_delta_fast (item, region,
+                                get_delta_bytes (),
+                                get_row_size ());
+  }

-  void set_item_delta (unsigned int item, unsigned int region, int32_t delta)
+  void set_item_delta_fast (unsigned int item, unsigned int region, int32_t delta,
+                            HBUINT8 *delta_bytes, unsigned row_size)
   {
-    HBINT8 *p = (HBINT8 *)get_delta_bytes () + item * get_row_size ();
+    HBINT8 *p = (HBINT8 *) delta_bytes + item * row_size;
     unsigned word_count = wordCount ();
     bool is_long = longWords ();
     if (is_long)
@@ -2579,6 +2646,12 @@ struct VarData
       (p + HBINT16::static_size * word_count)[region - word_count] = delta;
     }
   }
+  void set_item_delta (unsigned int item, unsigned int region, int32_t delta)
+  {
+    set_item_delta_fast (item, region, delta,
+                         get_delta_bytes (),
+                         get_row_size ());
+  }

   bool longWords () const { return wordSizeCount & 0x8000u /* LONG_WORDS */; }
   unsigned wordCount () const { return wordSizeCount & 0x7FFFu /* WORD_DELTA_COUNT_MASK */; }
@@ -2642,6 +2715,14 @@ struct VariationStore
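Two related changes run through the VarData hunks above: ri_map now maps new region-column indices to old ones (the comment is updated to match), so the serialization loops iterate just the new_ri_count surviving columns instead of filtering all ri_count source columns; and the _fast accessor variants take the delta-array base pointer and row stride as parameters, so both are computed once per table instead of once per cell. A simplified sketch of the resulting write loop, with placeholder dst/src objects standing in for the real destination and source tables:

    // Base pointers and strides hoisted out of the loops; ri_map[new] == old.
    for (unsigned i = 0; i < count; i++)              // rows: remapped items
      for (unsigned r = 0; r < new_ri_count; r++)     // cols: surviving regions
        dst.set_item_delta_fast (i, r,
                                 src.get_item_delta_fast (inner_map.backward (i),
                                                          ri_map[r],
                                                          src_delta_bytes, src_row_size),
                                 delta_bytes, row_size);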
     unsigned int inner = index & 0xFFFF;
     return get_delta (outer, inner, coords, coord_count, cache);
   }
+  float get_delta (unsigned int index,
+                   hb_array_t<int> coords,
+                   VarRegionList::cache_t *cache = nullptr) const
+  {
+    return get_delta (index,
+                      coords.arrayZ, coords.length,
+                      cache);
+  }

   bool sanitize (hb_sanitize_context_t *c) const
   {
@@ -2948,7 +3029,7 @@ struct ConditionSet
     // all conditions met
     if (num_kept_cond == 0) return DROP_COND_WITH_VAR;
-
+
     //check if condition_set is unique with variations
     if (c->conditionset_map->has (p))
       //duplicate found, drop the entire record
@@ -3420,17 +3501,16 @@ struct VariationDevice
   {
     TRACE_SERIALIZE (this);
     if (!layout_variation_idx_delta_map) return_trace (nullptr);
-    auto snap = c->snapshot ();
+
+    hb_pair_t<unsigned, int> *v;
+    if (!layout_variation_idx_delta_map->has (varIdx, &v))
+      return_trace (nullptr);
+
+    c->start_zerocopy (this->static_size);
     auto *out = c->embed (this);
     if (unlikely (!out)) return_trace (nullptr);

-    /* TODO Just get() and bail if NO_VARIATION. Needs to setup the map to return that. */
-    if (!layout_variation_idx_delta_map->has (varIdx))
-    {
-      c->revert (snap);
-      return_trace (nullptr);
-    }
-    unsigned new_idx = hb_first (layout_variation_idx_delta_map->get (varIdx));
+    unsigned new_idx = hb_first (*v);
     out->varIdx = new_idx;
     return_trace (out);
   }
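Finally, VariationDevice::copy now calls layout_variation_idx_delta_map->has (varIdx, &v) once, receiving the mapped value through v and bailing out before anything is written; this removes the snapshot/revert pair that previously had to undo a speculative embed. The added start_zerocopy call appears to let the serializer reuse the source bytes for the embedded struct when possible. The hb_array_t overload added to VariationStore::get_delta is a thin convenience wrapper forwarding arrayZ/length to the existing pointer-and-count signature.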