1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
5 //! Implements parallel traversal over the DOM tree.
7 //! This traversal is based on Rayon, and therefore its safety is largely
8 //! verified by the type system.
10 //! The primary trickiness and fine print for the above relates to the
11 //! thread safety of the DOM nodes themselves. Accessing a DOM element
12 //! concurrently on multiple threads is actually mostly "safe", since all
13 //! the mutable state is protected by an AtomicRefCell, and so we'll
//! generally panic if something goes wrong. Still, we try to enforce our
15 //! thread invariants at compile time whenever possible. As such, TNode and
16 //! TElement are not Send, so ordinary style system code cannot accidentally
17 //! share them with other threads. In the parallel traversal, we explicitly
18 //! invoke |unsafe { SendNode::new(n) }| to put nodes in containers that may
19 //! be sent to other threads. This occurs in only a handful of places and is
20 //! easy to grep for. At the time of this writing, there is no other unsafe
21 //! code in the parallel traversal.
23 #![deny(missing_docs)]
25 use crate::context::{StyleContext, ThreadLocalStyleContext};
26 use crate::dom::{OpaqueNode, SendNode, TElement};
27 use crate::scoped_tls::ScopedTLS;
28 use crate::traversal::{DomTraversal, PerLevelTraversalData};
30 use std::collections::VecDeque;
/// The minimum stack size for a thread in the styling pool, in kilobytes.
///
/// Budgeted together with `STACK_SAFETY_MARGIN_KB` below: the margin is the
/// portion of this stack reserved for non-recursive work.
pub const STYLE_THREAD_STACK_SIZE_KB: usize = 256;
/// The stack margin. If we get this deep in the stack, we will skip recursive
/// optimizations to ensure that there is sufficient room for non-recursive work.
///
/// We allocate large safety margins because certain OS calls can use very large
/// amounts of stack space [1]. Reserving a larger-than-necessary stack costs us
/// address space, but if we keep our safety margin big, we will generally avoid
/// committing those extra pages, and only use them in edge cases that would
/// otherwise cause crashes.
///
/// When measured with 128KB stacks and 40KB margin, we could support 53
/// levels of recursion before the limiter kicks in, on x86_64-Linux [2]. When
/// we doubled the stack size, we added it all to the safety margin, so we should
/// be able to get the same amount of recursion.
///
/// [1] https://bugzilla.mozilla.org/show_bug.cgi?id=1395708#c15
/// [2] See Gecko bug 1376883 for more discussion on the measurements.
pub const STACK_SAFETY_MARGIN_KB: usize = 168;
/// A callback to create our thread local context. This needs to be
/// out of line so we don't allocate stack space for the entire struct
/// in the caller if it gets inlined.
///
/// Passed to `ScopedTLS::ensure` (see `distribute_one_chunk` below) to lazily
/// populate each worker thread's slot with a fresh style context.
pub (crate) fn create_thread_local_context<'scope, E>(slot: &mut Option<ThreadLocalStyleContext<E>>)
// NOTE(review): the `where` clause and the fn body braces are elided in this
// chunk; only the slot initialization below is visible.
    *slot = Some(ThreadLocalStyleContext::new());
// Sends one chunk of work to the thread-pool.
//
// The chunk is moved into a FIFO rayon job (`spawn_fifo`) so another worker
// can pick it up; the job lazily obtains its per-thread style context from the
// scoped TLS via `create_thread_local_context`.
//
// NOTE(review): some parameters, the `where` clause, and the tail of the body
// are elided in this chunk; comments annotate only the visible code.
fn distribute_one_chunk<'a, 'scope, E, D>(
    // Nodes to process in the spawned job (sendable wrappers; see module docs).
    items: VecDeque<SendNode<E::ConcreteNode>>,
    // Root of the overall traversal.
    traversal_root: OpaqueNode,
    // Per-level traversal state (carries the current DOM depth).
    traversal_data: PerLevelTraversalData,
    // Rayon scope into which the job is spawned.
    scope: &'a rayon::ScopeFifo<'scope>,
    // Scoped thread-local storage holding one style context per worker thread.
    tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>,
    scope.spawn_fifo(move |scope| {
        gecko_profiler_label!(Layout, StyleComputation);
        // Get (or lazily create) this worker thread's local style context.
        let mut tlc = tls.ensure(create_thread_local_context);
        // Assemble the full context from the shared (read-only) state plus
        // this thread's mutable local state.
        let mut context = StyleContext {
            shared: traversal.shared_context(),
            thread_local: &mut *tlc,
/// Distributes all items into the thread pool, in `work_unit_max` chunks.
///
/// NOTE(review): the `where` clause, loop body arguments, and final call
/// arguments are elided in this chunk; comments annotate only the visible code.
fn distribute_work<'a, 'scope, E, D>(
    mut items: VecDeque<SendNode<E::ConcreteNode>>,
    traversal_root: OpaqueNode,
    work_unit_max: usize,
    traversal_data: PerLevelTraversalData,
    scope: &'a rayon::ScopeFifo<'scope>,
    traversal: &'scope D,
    tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>,
    E: TElement + 'scope,
    // Peel off and spawn chunks until at most `work_unit_max` items remain.
    // `split_off(work_unit_max)` keeps the first `work_unit_max` items and
    // returns the remainder.
    while items.len() > work_unit_max {
        let rest = items.split_off(work_unit_max);
        distribute_one_chunk(
    // Spawn the final (possibly smaller) chunk.
    distribute_one_chunk(
/// Processes `discovered` items, possibly spawning work in other threads as needed.
///
/// Traversal is breadth-first: `discovered` holds the rest of the current DOM
/// depth followed by children discovered for the next depth, and
/// `nodes_remaining_at_current_depth` tracks the boundary between the two.
///
/// NOTE(review): this chunk elides several lines (braces, the `where` clause,
/// part of the spawn condition, and some call arguments); the function also
/// continues past the end of the visible region. Comments annotate only the
/// visible code.
pub fn style_trees<'a, 'scope, E, D>(
    context: &mut StyleContext<E>,
    mut discovered: VecDeque<SendNode<E::ConcreteNode>>,
    traversal_root: OpaqueNode,
    work_unit_max: usize,
    mut traversal_data: PerLevelTraversalData,
    scope: Option<&'a rayon::ScopeFifo<'scope>>,
    traversal: &'scope D,
    tls: &'scope ScopedTLS<'scope, ThreadLocalStyleContext<E>>,
    E: TElement + 'scope,
    // How much work to keep in the local queue before spawning to other
    // threads. The main thread (TLS index 0) and worker threads are tuned by
    // separate prefs.
    let local_queue_size = if tls.current_thread_index() == 0 {
        static_prefs::pref!("layout.css.stylo-local-work-queue.in-main-thread")
        static_prefs::pref!("layout.css.stylo-local-work-queue.in-worker")
    let mut nodes_remaining_at_current_depth = discovered.len();
    while let Some(node) = discovered.pop_front() {
        let mut children_to_process = 0isize;
        // Style the node; the callback enqueues each child that also needs
        // traversal, so `discovered` grows with next-depth work.
        traversal.process_preorder(&traversal_data, context, *node, |n| {
            children_to_process += 1;
            // SAFETY: `SendNode::new` wraps the node for cross-thread
            // transfer; see the thread-safety discussion in the module docs.
            discovered.push_back(unsafe { SendNode::new(n) });
        traversal.handle_postorder_traversal(context, traversal_root, *node, children_to_process);
        nodes_remaining_at_current_depth -= 1;

        // If we have enough children at the next depth in the DOM, spawn them to a different job
        // relatively soon, while keeping always at least `local_queue_size` worth of work for
        // ourselves.
        let discovered_children = discovered.len() - nodes_remaining_at_current_depth;
        if discovered_children >= work_unit_max &&
            discovered.len() >= local_queue_size + work_unit_max &&
            // Keep the larger of "the rest of this depth" and the configured
            // local queue size; everything past that is handed off.
            let kept_work = std::cmp::max(nodes_remaining_at_current_depth, local_queue_size);
            let mut traversal_data_copy = traversal_data.clone();
            // The handed-off nodes are children, i.e. one DOM level deeper.
            traversal_data_copy.current_dom_depth += 1;
            discovered.split_off(kept_work),
        // Once the current depth is exhausted, everything left in the queue
        // belongs to the next depth: advance the depth counter and reset.
        if nodes_remaining_at_current_depth == 0 {
            traversal_data.current_dom_depth += 1;
            nodes_remaining_at_current_depth = discovered.len();