/*
 * Copyright © 2020  Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */
27 #ifndef HB_REPACKER_HH
28 #define HB_REPACKER_HH
30 #include "hb-open-type.hh"
32 #include "hb-vector.hh"
33 #include "graph/graph.hh"
34 #include "graph/gsubgpos-graph.hh"
35 #include "graph/serialize.hh"
/*
 * For a detailed writeup on the overflow resolution algorithm see:
 * docs/repacker.md
 */
// Per-lookup size record used to decide which lookups should be promoted to
// extension lookups. Sorting places the "densest" lookups (most subtables per
// byte of subgraph) first so they are kept as non-extension.
struct lookup_size_t
{
  unsigned lookup_index;   // index of the lookup in the LookupList
  size_t size;             // total size of the lookup's subgraph in bytes
  unsigned num_subtables;  // number of subtables attached to the lookup

  // qsort-compatible comparator; forwards to the typed overload below.
  static int cmp (const void* a, const void* b)
  {
    return cmp ((const lookup_size_t*) a,
                (const lookup_size_t*) b);
  }

  // Orders by descending subtables-per-byte density; ties are broken by
  // descending lookup index.
  static int cmp (const lookup_size_t* a, const lookup_size_t* b)
  {
    double density_a = (double) a->num_subtables / (double) a->size;
    double density_b = (double) b->num_subtables / (double) b->size;

    if (density_a == density_b)
    {
      // Equal density: the higher lookup index sorts first.
      return b->lookup_index - a->lookup_index;
    }

    double delta = density_b - density_a;
    if (delta < 0) return -1;
    if (delta > 0) return 1;
    return 0;
  }
};
72 bool _presplit_subtables_if_needed (graph::gsubgpos_graph_context_t
& ext_context
)
74 // For each lookup this will check the size of subtables and split them as needed
75 // so that no subtable is at risk of overflowing. (where we support splitting for
76 // that subtable type).
78 // TODO(grieger): de-dup newly added nodes as necessary. Probably just want a full de-dup
79 // pass after this processing is done. Not super necessary as splits are
80 // only done where overflow is likely, so de-dup probably will get undone
83 // The loop below can modify the contents of ext_context.lookups if new subtables are added
84 // to a lookup during a split. So save the initial set of lookup indices so the iteration doesn't
85 // risk access free'd memory if ext_context.lookups gets resized.
86 hb_set_t
lookup_indices(ext_context
.lookups
.keys ());
87 for (unsigned lookup_index
: lookup_indices
)
89 graph::Lookup
* lookup
= ext_context
.lookups
.get(lookup_index
);
90 if (!lookup
->split_subtables_if_needed (ext_context
, lookup_index
))
98 * Analyze the lookups in a GSUB/GPOS table and decide if any should be promoted
99 * to extension lookups.
102 bool _promote_extensions_if_needed (graph::gsubgpos_graph_context_t
& ext_context
)
104 // Simple Algorithm (v1, current):
105 // 1. Calculate how many bytes each non-extension lookup consumes.
106 // 2. Select up to 64k of those to remain as non-extension (greedy, highest subtables per byte first)
107 // 3. Promote the rest.
109 // Advanced Algorithm (v2, not implemented):
110 // 1. Perform connected component analysis using lookups as roots.
111 // 2. Compute size of each connected component.
112 // 3. Select up to 64k worth of connected components to remain as non-extensions.
113 // (greedy, highest subtables per byte first)
114 // 4. Promote the rest.
116 // TODO(garretrieger): support extension demotion, then consider all lookups. Requires advanced algo.
117 // TODO(garretrieger): also support extension promotion during iterative resolution phase, then
118 // we can use a less conservative threshold here.
119 // TODO(grieger): skip this for the 24 bit case.
120 if (!ext_context
.lookups
) return true;
122 unsigned total_lookup_table_sizes
= 0;
123 hb_vector_t
<lookup_size_t
> lookup_sizes
;
124 lookup_sizes
.alloc (ext_context
.lookups
.get_population (), true);
126 for (unsigned lookup_index
: ext_context
.lookups
.keys ())
128 const auto& lookup_v
= ext_context
.graph
.vertices_
[lookup_index
];
129 total_lookup_table_sizes
+= lookup_v
.table_size ();
131 const graph::Lookup
* lookup
= ext_context
.lookups
.get(lookup_index
);
133 lookup_sizes
.push (lookup_size_t
{
135 ext_context
.graph
.find_subgraph_size (lookup_index
, visited
),
136 lookup
->number_of_subtables (),
140 lookup_sizes
.qsort ();
142 size_t lookup_list_size
= ext_context
.graph
.vertices_
[ext_context
.lookup_list_index
].table_size ();
143 size_t l2_l3_size
= lookup_list_size
+ total_lookup_table_sizes
; // Lookup List + Lookups
144 size_t l3_l4_size
= total_lookup_table_sizes
; // Lookups + SubTables
145 size_t l4_plus_size
= 0; // SubTables + their descendants
147 // Start by assuming all lookups are using extension subtables, this size will be removed later
148 // if it's decided to not make a lookup extension.
149 for (auto p
: lookup_sizes
)
151 // TODO(garretrieger): this overestimates the extension subtables size because some extension subtables may be
152 // reused. However, we can't correct this until we have connected component analysis in place.
153 unsigned subtables_size
= p
.num_subtables
* 8;
154 l3_l4_size
+= subtables_size
;
155 l4_plus_size
+= subtables_size
;
158 bool layers_full
= false;
159 for (auto p
: lookup_sizes
)
161 const graph::Lookup
* lookup
= ext_context
.lookups
.get(p
.lookup_index
);
162 if (lookup
->is_extension (ext_context
.table_tag
))
163 // already an extension so size is counted by the loop above.
168 size_t lookup_size
= ext_context
.graph
.vertices_
[p
.lookup_index
].table_size ();
170 size_t subtables_size
= ext_context
.graph
.find_subgraph_size (p
.lookup_index
, visited
, 1) - lookup_size
;
171 size_t remaining_size
= p
.size
- subtables_size
- lookup_size
;
173 l3_l4_size
+= subtables_size
;
174 l3_l4_size
-= p
.num_subtables
* 8;
175 l4_plus_size
+= subtables_size
+ remaining_size
;
177 if (l2_l3_size
< (1 << 16)
178 && l3_l4_size
< (1 << 16)
179 && l4_plus_size
< (1 << 16)) continue; // this lookup fits within all layers groups
184 if (!ext_context
.lookups
.get(p
.lookup_index
)->make_extension (ext_context
, p
.lookup_index
))
192 bool _try_isolating_subgraphs (const hb_vector_t
<graph::overflow_record_t
>& overflows
,
193 graph_t
& sorted_graph
)
196 hb_set_t roots_to_isolate
;
198 for (int i
= overflows
.length
- 1; i
>= 0; i
--)
200 const graph::overflow_record_t
& r
= overflows
[i
];
203 unsigned overflow_space
= sorted_graph
.space_for (r
.parent
, &root
);
204 if (!overflow_space
) continue;
205 if (sorted_graph
.num_roots_for_space (overflow_space
) <= 1) continue;
208 space
= overflow_space
;
211 if (space
== overflow_space
)
212 roots_to_isolate
.add(root
);
215 if (!roots_to_isolate
) return false;
217 unsigned maximum_to_move
= hb_max ((sorted_graph
.num_roots_for_space (space
) / 2u), 1u);
218 if (roots_to_isolate
.get_population () > maximum_to_move
) {
219 // Only move at most half of the roots in a space at a time.
220 unsigned extra
= roots_to_isolate
.get_population () - maximum_to_move
;
222 uint32_t root
= HB_SET_VALUE_INVALID
;
223 roots_to_isolate
.previous (&root
);
224 roots_to_isolate
.del (root
);
228 DEBUG_MSG (SUBSET_REPACK
, nullptr,
229 "Overflow in space %u (%u roots). Moving %u roots to space %u.",
231 sorted_graph
.num_roots_for_space (space
),
232 roots_to_isolate
.get_population (),
233 sorted_graph
.next_space ());
235 sorted_graph
.isolate_subgraph (roots_to_isolate
);
236 sorted_graph
.move_to_new_space (roots_to_isolate
);
242 bool _process_overflows (const hb_vector_t
<graph::overflow_record_t
>& overflows
,
243 hb_set_t
& priority_bumped_parents
,
244 graph_t
& sorted_graph
)
246 bool resolution_attempted
= false;
248 // Try resolving the furthest overflows first.
249 for (int i
= overflows
.length
- 1; i
>= 0; i
--)
251 const graph::overflow_record_t
& r
= overflows
[i
];
252 const auto& child
= sorted_graph
.vertices_
[r
.child
];
253 if (child
.is_shared ())
255 // The child object is shared, we may be able to eliminate the overflow
256 // by duplicating it.
257 if (sorted_graph
.duplicate (r
.parent
, r
.child
) == (unsigned) -1) continue;
261 if (child
.is_leaf () && !priority_bumped_parents
.has (r
.parent
))
263 // This object is too far from it's parent, attempt to move it closer.
265 // TODO(garretrieger): initially limiting this to leaf's since they can be
266 // moved closer with fewer consequences. However, this can
267 // likely can be used for non-leafs as well.
268 // TODO(garretrieger): also try lowering priority of the parent. Make it
269 // get placed further up in the ordering, closer to it's children.
270 // this is probably preferable if the total size of the parent object
271 // is < then the total size of the children (and the parent can be moved).
272 // Since in that case moving the parent will cause a smaller increase in
273 // the length of other offsets.
274 if (sorted_graph
.raise_childrens_priority (r
.parent
)) {
275 priority_bumped_parents
.add (r
.parent
);
276 resolution_attempted
= true;
281 // TODO(garretrieger): add additional offset resolution strategies
282 // - Promotion to extension lookups.
283 // - Table splitting.
286 return resolution_attempted
;
290 hb_resolve_graph_overflows (hb_tag_t table_tag
,
291 unsigned max_rounds
,
292 bool recalculate_extensions
,
293 graph_t
& sorted_graph
/* IN/OUT */)
295 sorted_graph
.sort_shortest_distance ();
296 if (sorted_graph
.in_error ())
298 DEBUG_MSG (SUBSET_REPACK
, nullptr, "Sorted graph in error state after initial sort.");
302 bool will_overflow
= graph::will_overflow (sorted_graph
);
306 graph::gsubgpos_graph_context_t
ext_context (table_tag
, sorted_graph
);
307 if ((table_tag
== HB_OT_TAG_GPOS
308 || table_tag
== HB_OT_TAG_GSUB
)
311 if (recalculate_extensions
)
313 DEBUG_MSG (SUBSET_REPACK
, nullptr, "Splitting subtables if needed.");
314 if (!_presplit_subtables_if_needed (ext_context
)) {
315 DEBUG_MSG (SUBSET_REPACK
, nullptr, "Subtable splitting failed.");
319 DEBUG_MSG (SUBSET_REPACK
, nullptr, "Promoting lookups to extensions if needed.");
320 if (!_promote_extensions_if_needed (ext_context
)) {
321 DEBUG_MSG (SUBSET_REPACK
, nullptr, "Extensions promotion failed.");
326 DEBUG_MSG (SUBSET_REPACK
, nullptr, "Assigning spaces to 32 bit subgraphs.");
327 if (sorted_graph
.assign_spaces ())
328 sorted_graph
.sort_shortest_distance ();
330 sorted_graph
.sort_shortest_distance_if_needed ();
334 hb_vector_t
<graph::overflow_record_t
> overflows
;
335 // TODO(garretrieger): select a good limit for max rounds.
336 while (!sorted_graph
.in_error ()
337 && graph::will_overflow (sorted_graph
, &overflows
)
338 && round
< max_rounds
) {
339 DEBUG_MSG (SUBSET_REPACK
, nullptr, "=== Overflow resolution round %u ===", round
);
340 print_overflows (sorted_graph
, overflows
);
342 hb_set_t priority_bumped_parents
;
344 if (!_try_isolating_subgraphs (overflows
, sorted_graph
))
346 // Don't count space isolation towards round limit. Only increment
347 // round counter if space isolation made no changes.
349 if (!_process_overflows (overflows
, priority_bumped_parents
, sorted_graph
))
351 DEBUG_MSG (SUBSET_REPACK
, nullptr, "No resolution available :(");
356 sorted_graph
.sort_shortest_distance ();
359 if (sorted_graph
.in_error ())
361 DEBUG_MSG (SUBSET_REPACK
, nullptr, "Sorted graph in error state.");
365 if (graph::will_overflow (sorted_graph
))
367 DEBUG_MSG (SUBSET_REPACK
, nullptr, "Offset overflow resolution failed.");
375 * Attempts to modify the topological sorting of the provided object graph to
376 * eliminate offset overflows in the links between objects of the graph. If a
377 * non-overflowing ordering is found the updated graph is serialized it into the
378 * provided serialization context.
380 * If necessary the structure of the graph may be modified in ways that do not
381 * affect the functionality of the graph. For example shared objects may be
384 * For a detailed writeup describing how the algorithm operates see:
389 hb_resolve_overflows (const T
& packed
,
391 unsigned max_rounds
= 20,
392 bool recalculate_extensions
= false) {
393 graph_t
sorted_graph (packed
);
394 if (sorted_graph
.in_error ())
396 // Invalid graph definition.
400 if (!sorted_graph
.is_fully_connected ())
402 sorted_graph
.print_orphaned_nodes ();
406 if (sorted_graph
.in_error ())
408 // Allocations failed somewhere
409 DEBUG_MSG (SUBSET_REPACK
, nullptr,
410 "Graph is in error, likely due to a memory allocation error.");
414 if (!hb_resolve_graph_overflows (table_tag
, max_rounds
, recalculate_extensions
, sorted_graph
))
417 return graph::serialize (sorted_graph
);
420 #endif /* HB_REPACKER_HH */