/*
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "qemu/lockable.h"
#if defined(CONFIG_MALLOC_TRIM)
#include <malloc.h>
#endif
/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)
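/* How the counter is used (a recap of the reader side, which lives in
 * rcu_read_lock()/rcu_read_unlock() in include/qemu/rcu.h): a reader copies
 * rcu_gp_ctr into its per-thread rcu_reader.ctr when entering a read-side
 * critical section, and resets it to zero on the outermost unlock.  Because
 * RCU_GP_LOCKED is always set, a nonzero snapshot means "inside a critical
 * section", and the bits above bit 0 record which grace period the reader
 * observed on entry.
 */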
unsigned long rcu_gp_ctr = RCU_GP_LOCKED;

QemuEvent rcu_gp_event;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;
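/* Lock ordering: rcu_sync_lock is always acquired before rcu_registry_lock,
 * both in synchronize_rcu() and in the fork handlers at the end of this file.
 */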
/*
 * Check whether a quiescent state was crossed between the beginning of
 * update_counter_and_wait and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v = atomic_read(ctr);

    return v && (v != rcu_gp_ctr);
}
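/* For instance (a sketch, the values are illustrative): with rcu_gp_ctr == 5
 * (RCU_GP_LOCKED | 2 * RCU_GP_CTR), a reader whose snapshot is 3 entered
 * before the last grace-period bump and is still ongoing; snapshots of 0
 * (outside any critical section) and 5 (entered after the bump, so it cannot
 * hold a stale pointer) are both quiescent.
 */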
/* Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
__thread struct rcu_reader_data rcu_reader;
/* Protected by rcu_registry_lock. */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);
/* Wait for previous parity/grace period to be empty of readers. */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        /* Instead of using atomic_mb_set for index->waiting, and
         * atomic_mb_read for index->ctr, memory barriers are placed
         * manually since writes to different threads are independent.
         * qemu_event_reset has acquire semantics, so no memory barrier
         * is needed here.
         */
        QLIST_FOREACH(index, &registry, node) {
            atomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the loads of
         * index->ctr.  Pairs with smp_mb_placeholder() in rcu_read_unlock(),
         * ensuring that the loads of index->ctr are sequentially consistent.
         */
        smp_mb_global();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for mb_set here, worst of all we
                 * get some extra futex wakeups.
                 */
                atomic_set(&index->waiting, false);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't
         * wait too much time.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least another
         * thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* put back the reader list in the registry */
    QLIST_SWAP(&registry, &qsreaders, node);
}
void synchronize_rcu(void)
{
    QEMU_LOCK_GUARD(&rcu_sync_lock);

    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
     * Pairs with smp_mb_placeholder() in rcu_read_lock().
     */
    smp_mb_global();

    QEMU_LOCK_GUARD(&rcu_registry_lock);
    if (!QLIST_EMPTY(&registry)) {
        /* In either case, the atomic_mb_set below blocks stores that free
         * old RCU-protected pointers.
         */
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, a two-subphase algorithm
             * ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period. */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }
}
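/* Why two subphases on 32-bit (an illustrative note, not from the original
 * comments): with a 32-bit rcu_gp_ctr, an increment-based scheme could wrap
 * around while a reader sleeps inside a critical section, making its stale
 * snapshot equal to the current counter again and fooling rcu_gp_ongoing().
 * Toggling a single parity bit instead means a snapshot can only match the
 * "current" or "previous" parity; waiting for readers after each of the two
 * flips guarantees that every reader which predates synchronize_rcu() has
 * exited, and the counter itself can never overflow.
 */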
#define RCU_CALL_MIN_SIZE        30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;
static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;
    old_tail = atomic_xchg(&tail, &node->next);
    atomic_mb_set(old_tail, node);
}
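/* Enqueue is the classic two-step wait-free MPSC push: the xchg atomically
 * claims the tail slot, then the store links the new node in.  Between the
 * two steps the previous node's next pointer is still NULL; try_dequeue()
 * below tolerates this window by returning NULL so the consumer can wait for
 * the enqueuer to finish.
 */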
static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Test for an empty list, which we do not expect.  Note that for
     * the consumer head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it.
     * The tail is consistent because writing it is the first step in
     * enqueuing.  It is only the next pointers that might be inconsistent.
     */
    if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
        abort();
    }

    /* If the head node has NULL in its next pointer, the value is
     * wrong and we need to wait until its enqueuer finishes the update.
     */
    node = head;
    next = atomic_mb_read(&head->next);
    if (!next) {
        return NULL;
    }

    /* Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry. */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}
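/* The abort() above is a consistency check rather than a benign-race
 * handler: producers enqueue before bumping rcu_call_count, so the consumer
 * can never have more callbacks accounted for than nodes actually linked
 * into the queue.  Seeing a truly empty queue therefore indicates
 * corruption.
 */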
static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = atomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now, we need only process elements that were
         * added before synchronize_rcu() starts.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = atomic_read(&rcu_call_count);
                if (n == 0) {
#if defined(CONFIG_MALLOC_TRIM)
                    malloc_trim(4 * 1024 * 1024);
#endif
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = atomic_read(&rcu_call_count);
        }

        atomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        qemu_mutex_lock_iothread();
        while (n > 0) {
            node = try_dequeue();
            while (node == NULL) {
                qemu_mutex_unlock_iothread();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                qemu_mutex_lock_iothread();
            }

            n--;
            node->func(node);
        }
        qemu_mutex_unlock_iothread();
    }
    abort();
}
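/* Note that the callbacks run with the iothread (big QEMU) lock held, as
 * taken above, so reclaim functions may touch global state without extra
 * locking.
 */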
void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    atomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
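/* Typical usage (a sketch; "struct Foo" and foo_free are hypothetical names):
 *
 *     struct Foo {
 *         struct rcu_head rcu;
 *         ...
 *     };
 *
 *     static void foo_free(struct rcu_head *head)
 *     {
 *         struct Foo *foo = container_of(head, struct Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     call_rcu1(&foo->rcu, foo_free);
 *
 * The rcu_head (and the object embedding it) must stay valid until the
 * callback runs after a full grace period.
 */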
void rcu_register_thread(void)
{
    assert(rcu_reader.ctr == 0);
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}
void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(&rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}
static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to hold the iothread lock, so the call_rcu
     * thread must have been quiescent even after forking; just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}
static int atfork_depth = 1;

void rcu_enable_atfork(void)
{
    atfork_depth++;
}

void rcu_disable_atfork(void)
{
    atfork_depth--;
}
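/* The fork handlers below take both RCU locks around fork() so that neither
 * a grace period nor a registry update is in flight in the child; the child
 * then resets the registry and reinitializes RCU from scratch.  atfork_depth
 * lets rcu_enable_atfork()/rcu_disable_atfork() turn the handlers on and
 * off, e.g. around forks that are immediately followed by exec().
 */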
#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}
static void rcu_init_unlock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}
static void rcu_init_child(void)
{
    if (atfork_depth < 1) {
        return;
    }

    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}
#endif
static void __attribute__((__constructor__)) rcu_init(void)
{
    smp_mb_global_init();
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
#endif
    rcu_init_complete();
}