[qemu/ar7.git] / include/qemu/rcu_queue.h
#ifndef QEMU_RCU_QUEUE_H
#define QEMU_RCU_QUEUE_H

/*
 * rcu_queue.h
 *
 * RCU-friendly versions of the queue.h primitives.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Copyright (c) 2013 Mike D. Day, IBM Corporation.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/queue.h"
#include "qemu/atomic.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * List access methods.
 */
#define QLIST_EMPTY_RCU(head) (atomic_read(&(head)->lh_first) == NULL)
#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))

/*
 * List functions.
 */

/*
 *  The difference between atomic_read/set and atomic_rcu_read/set
 *  is the inclusion of a read/write memory barrier around the volatile
 *  access: the atomic_rcu_* macros include the memory barrier, the
 *  plain atomic macros do not.  Therefore, it is correct to issue a
 *  series of reads or writes to the same element using only the
 *  atomic_* macros, provided the last read or write uses atomic_rcu_*
 *  to introduce a read or write memory barrier as appropriate.
 */
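
/*
 * For illustration, a minimal sketch of that pattern.  The "Foo" type
 * and the foo_publish() helper are hypothetical, not part of this
 * header; updaters are assumed to be serialized by a lock.
 *
 *     struct Foo {
 *         int a, b;
 *     };
 *
 *     static void foo_publish(struct Foo **slot, struct Foo *elm)
 *     {
 *         elm->a = 1;    // plain stores: the element is not yet reachable
 *         elm->b = 2;
 *         // The final store uses atomic_rcu_set(); its write barrier
 *         // orders the initialization above before the pointer store
 *         // that makes the element visible to concurrent readers.
 *         atomic_rcu_set(slot, elm);
 *     }
 */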

/* Upon publication of the listelm->next value, list readers
 * will see the new node when following next pointers from
 * antecedent nodes, but may not see the new node when following
 * prev pointers from subsequent nodes until after the RCU grace
 * period expires.
 * see linux/include/rculist.h __list_add_rcu(new, prev, next)
 */
#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do {    \
    (elm)->field.le_next = (listelm)->field.le_next;        \
    (elm)->field.le_prev = &(listelm)->field.le_next;       \
    atomic_rcu_set(&(listelm)->field.le_next, (elm));       \
    if ((elm)->field.le_next != NULL) {                     \
        (elm)->field.le_next->field.le_prev =               \
            &(elm)->field.le_next;                          \
    }                                                       \
} while (/*CONSTCOND*/0)

/* Upon publication of the listelm->prev->next value, list
 * readers will see the new element when following prev pointers
 * from subsequent elements, but may not see the new element
 * when following next pointers from antecedent elements
 * until after the RCU grace period expires.
 */
#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do {   \
    (elm)->field.le_prev = (listelm)->field.le_prev;        \
    (elm)->field.le_next = (listelm);                       \
    atomic_rcu_set((listelm)->field.le_prev, (elm));        \
    (listelm)->field.le_prev = &(elm)->field.le_next;       \
} while (/*CONSTCOND*/0)

/* Upon publication of the head->first value, list readers
 * will see the new element when following the head, but may
 * not see the new element when following prev pointers from
 * subsequent elements until after the RCU grace period has
 * expired.
 */
#define QLIST_INSERT_HEAD_RCU(head, elm, field) do {    \
    (elm)->field.le_prev = &(head)->lh_first;           \
    (elm)->field.le_next = (head)->lh_first;            \
    atomic_rcu_set((&(head)->lh_first), (elm));         \
    if ((elm)->field.le_next != NULL) {                 \
        (elm)->field.le_next->field.le_prev =           \
            &(elm)->field.le_next;                      \
    }                                                   \
} while (/*CONSTCOND*/0)

/* prior to publication of the elm->prev->next value, some list
 * readers may still see the removed element when following
 * the antecedent's next pointer.
 */
#define QLIST_REMOVE_RCU(elm, field) do {                    \
    if ((elm)->field.le_next != NULL) {                      \
        (elm)->field.le_next->field.le_prev =                \
            (elm)->field.le_prev;                            \
    }                                                        \
    atomic_set((elm)->field.le_prev, (elm)->field.le_next);  \
} while (/*CONSTCOND*/0)

/* List traversal must occur within an RCU critical section.  */
#define QLIST_FOREACH_RCU(var, head, field)                 \
    for ((var) = atomic_rcu_read(&(head)->lh_first);        \
         (var);                                             \
         (var) = atomic_rcu_read(&(var)->field.le_next))

/* List traversal must occur within an RCU critical section.  */
#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var)             \
    for ((var) = (atomic_rcu_read(&(head)->lh_first));                 \
         (var) &&                                                      \
             ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
         (var) = (next_var))
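
/*
 * A usage sketch, assuming the rcu_read_lock()/rcu_read_unlock() and
 * synchronize_rcu() primitives from "qemu/rcu.h" and g_free() from GLib;
 * the "Node" type and both helpers are hypothetical.
 *
 *     typedef struct Node {
 *         int value;
 *         QLIST_ENTRY(Node) entry;
 *     } Node;
 *
 *     static QLIST_HEAD(, Node) list = QLIST_HEAD_INITIALIZER(list);
 *
 *     // Reader: the whole traversal stays inside one RCU critical section.
 *     static int list_sum(void)
 *     {
 *         Node *n;
 *         int sum = 0;
 *
 *         rcu_read_lock();
 *         QLIST_FOREACH_RCU(n, &list, entry) {
 *             sum += n->value;
 *         }
 *         rcu_read_unlock();
 *         return sum;
 *     }
 *
 *     // Updater (serialized against other updaters): unlink the element,
 *     // then wait out a grace period before freeing it, so that no
 *     // reader can still be dereferencing it.
 *     static void list_delete(Node *n)
 *     {
 *         QLIST_REMOVE_RCU(n, entry);
 *         synchronize_rcu();
 *         g_free(n);
 *     }
 */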

/*
 * RCU simple queue
 */

/* Simple queue access methods */
#define QSIMPLEQ_EMPTY_RCU(head)       (atomic_read(&(head)->sqh_first) == NULL)
#define QSIMPLEQ_FIRST_RCU(head)       atomic_rcu_read(&(head)->sqh_first)
#define QSIMPLEQ_NEXT_RCU(elm, field)  atomic_rcu_read(&(elm)->field.sqe_next)

/* Simple queue functions */
#define QSIMPLEQ_INSERT_HEAD_RCU(head, elm, field) do {    \
    (elm)->field.sqe_next = (head)->sqh_first;             \
    if ((elm)->field.sqe_next == NULL) {                   \
        (head)->sqh_last = &(elm)->field.sqe_next;         \
    }                                                      \
    atomic_rcu_set(&(head)->sqh_first, (elm));             \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_INSERT_TAIL_RCU(head, elm, field) do {    \
    (elm)->field.sqe_next = NULL;                          \
    atomic_rcu_set((head)->sqh_last, (elm));               \
    (head)->sqh_last = &(elm)->field.sqe_next;             \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_INSERT_AFTER_RCU(head, listelm, elm, field) do {    \
    (elm)->field.sqe_next = (listelm)->field.sqe_next;               \
    if ((elm)->field.sqe_next == NULL) {                             \
        (head)->sqh_last = &(elm)->field.sqe_next;                   \
    }                                                                \
    atomic_rcu_set(&(listelm)->field.sqe_next, (elm));               \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_REMOVE_HEAD_RCU(head, field) do {                     \
    atomic_set(&(head)->sqh_first, (head)->sqh_first->field.sqe_next); \
    if ((head)->sqh_first == NULL) {                                   \
        (head)->sqh_last = &(head)->sqh_first;                         \
    }                                                                  \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_REMOVE_RCU(head, elm, type, field) do {         \
    if ((head)->sqh_first == (elm)) {                            \
        QSIMPLEQ_REMOVE_HEAD_RCU((head), field);                 \
    } else {                                                     \
        struct type *curr = (head)->sqh_first;                   \
        while (curr->field.sqe_next != (elm)) {                  \
            curr = curr->field.sqe_next;                         \
        }                                                        \
        atomic_set(&curr->field.sqe_next,                        \
                   curr->field.sqe_next->field.sqe_next);        \
        if (curr->field.sqe_next == NULL) {                      \
            (head)->sqh_last = &(curr)->field.sqe_next;          \
        }                                                        \
    }                                                            \
} while (/*CONSTCOND*/0)

#define QSIMPLEQ_FOREACH_RCU(var, head, field)                           \
    for ((var) = atomic_rcu_read(&(head)->sqh_first);                    \
         (var);                                                          \
         (var) = atomic_rcu_read(&(var)->field.sqe_next))

#define QSIMPLEQ_FOREACH_SAFE_RCU(var, head, field, next)                \
    for ((var) = atomic_rcu_read(&(head)->sqh_first);                    \
         (var) && ((next) = atomic_rcu_read(&(var)->field.sqe_next), 1); \
         (var) = (next))
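
/*
 * Illustrative sketch of a FIFO built on these macros.  The "Job" type
 * and helpers are hypothetical, producers are assumed to be serialized
 * by a lock, and rcu_read_lock()/rcu_read_unlock() are assumed to come
 * from "qemu/rcu.h".
 *
 *     typedef struct Job {
 *         void (*fn)(void *opaque);
 *         void *opaque;
 *         QSIMPLEQ_ENTRY(Job) next;
 *     } Job;
 *
 *     static QSIMPLEQ_HEAD(, Job) jobs = QSIMPLEQ_HEAD_INITIALIZER(jobs);
 *
 *     // Producer: the job is fully initialized before the tail insert
 *     // publishes it.
 *     static void job_enqueue(Job *job)
 *     {
 *         QSIMPLEQ_INSERT_TAIL_RCU(&jobs, job, next);
 *     }
 *
 *     // Reader: inspect the queue without blocking producers.
 *     static bool job_pending(void)
 *     {
 *         bool pending;
 *
 *         rcu_read_lock();
 *         pending = !QSIMPLEQ_EMPTY_RCU(&jobs);
 *         rcu_read_unlock();
 *         return pending;
 *     }
 */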

/*
 * RCU tail queue
 */

/* Tail queue access methods */
#define QTAILQ_EMPTY_RCU(head)       (atomic_read(&(head)->tqh_first) == NULL)
#define QTAILQ_FIRST_RCU(head)       atomic_rcu_read(&(head)->tqh_first)
#define QTAILQ_NEXT_RCU(elm, field)  atomic_rcu_read(&(elm)->field.tqe_next)

/* Tail queue functions */
#define QTAILQ_INSERT_HEAD_RCU(head, elm, field) do {           \
    (elm)->field.tqe_next = (head)->tqh_first;                  \
    if ((elm)->field.tqe_next != NULL) {                        \
        (head)->tqh_first->field.tqe_circ.tql_prev =            \
            &(elm)->field.tqe_circ;                             \
    } else {                                                    \
        (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;     \
    }                                                           \
    atomic_rcu_set(&(head)->tqh_first, (elm));                  \
    (elm)->field.tqe_circ.tql_prev = &(head)->tqh_circ;         \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_TAIL_RCU(head, elm, field) do {            \
    (elm)->field.tqe_next = NULL;                                \
    (elm)->field.tqe_circ.tql_prev = (head)->tqh_circ.tql_prev;  \
    atomic_rcu_set(&(head)->tqh_circ.tql_prev->tql_next, (elm)); \
    (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;          \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_AFTER_RCU(head, listelm, elm, field) do {  \
    (elm)->field.tqe_next = (listelm)->field.tqe_next;           \
    if ((elm)->field.tqe_next != NULL) {                         \
        (elm)->field.tqe_next->field.tqe_circ.tql_prev =         \
            &(elm)->field.tqe_circ;                              \
    } else {                                                     \
        (head)->tqh_circ.tql_prev = &(elm)->field.tqe_circ;      \
    }                                                            \
    atomic_rcu_set(&(listelm)->field.tqe_next, (elm));           \
    (elm)->field.tqe_circ.tql_prev = &(listelm)->field.tqe_circ; \
} while (/*CONSTCOND*/0)

#define QTAILQ_INSERT_BEFORE_RCU(listelm, elm, field) do {                \
    (elm)->field.tqe_circ.tql_prev = (listelm)->field.tqe_circ.tql_prev;  \
    (elm)->field.tqe_next = (listelm);                                    \
    atomic_rcu_set(&(listelm)->field.tqe_circ.tql_prev->tql_next, (elm)); \
    (listelm)->field.tqe_circ.tql_prev = &(elm)->field.tqe_circ;          \
} while (/*CONSTCOND*/0)

#define QTAILQ_REMOVE_RCU(head, elm, field) do {                    \
    if (((elm)->field.tqe_next) != NULL) {                          \
        (elm)->field.tqe_next->field.tqe_circ.tql_prev =            \
            (elm)->field.tqe_circ.tql_prev;                         \
    } else {                                                        \
        (head)->tqh_circ.tql_prev = (elm)->field.tqe_circ.tql_prev; \
    }                                                               \
    atomic_set(&(elm)->field.tqe_circ.tql_prev->tql_next,           \
               (elm)->field.tqe_next);                              \
    (elm)->field.tqe_circ.tql_prev = NULL;                          \
} while (/*CONSTCOND*/0)

#define QTAILQ_FOREACH_RCU(var, head, field)                             \
    for ((var) = atomic_rcu_read(&(head)->tqh_first);                    \
         (var);                                                          \
         (var) = atomic_rcu_read(&(var)->field.tqe_next))

#define QTAILQ_FOREACH_SAFE_RCU(var, head, field, next)                  \
    for ((var) = atomic_rcu_read(&(head)->tqh_first);                    \
         (var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1); \
         (var) = (next))
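
/*
 * Illustrative sketch of deferred reclamation.  The "Entry" type and
 * helpers are hypothetical; call_rcu() and struct rcu_head are assumed
 * to come from "qemu/rcu.h" (whose call_rcu() requires the rcu_head to
 * be the first field), and g_free() from GLib.
 *
 *     typedef struct Entry {
 *         struct rcu_head rcu;
 *         int key;
 *         QTAILQ_ENTRY(Entry) link;
 *     } Entry;
 *
 *     static QTAILQ_HEAD(, Entry) table = QTAILQ_HEAD_INITIALIZER(table);
 *
 *     static void entry_free(Entry *e)
 *     {
 *         g_free(e);
 *     }
 *
 *     // Updater: unlink now, free only after a grace period.  Unlike
 *     // synchronize_rcu(), call_rcu() does not block the updater.
 *     static void entry_delete(Entry *e)
 *     {
 *         QTAILQ_REMOVE_RCU(&table, e, link);
 *         call_rcu(e, entry_free, rcu);
 *     }
 */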

/*
 * RCU singly-linked list
 */

/* Singly-linked list access methods */
#define QSLIST_EMPTY_RCU(head)       (atomic_read(&(head)->slh_first) == NULL)
#define QSLIST_FIRST_RCU(head)       atomic_rcu_read(&(head)->slh_first)
#define QSLIST_NEXT_RCU(elm, field)  atomic_rcu_read(&(elm)->field.sle_next)

/* Singly-linked list functions */
#define QSLIST_INSERT_HEAD_RCU(head, elm, field) do {           \
    (elm)->field.sle_next = (head)->slh_first;                  \
    atomic_rcu_set(&(head)->slh_first, (elm));                  \
} while (/*CONSTCOND*/0)

#define QSLIST_INSERT_AFTER_RCU(head, listelm, elm, field) do { \
    (elm)->field.sle_next = (listelm)->field.sle_next;          \
    atomic_rcu_set(&(listelm)->field.sle_next, (elm));          \
} while (/*CONSTCOND*/0)

#define QSLIST_REMOVE_HEAD_RCU(head, field) do {                       \
    atomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next); \
} while (/*CONSTCOND*/0)

#define QSLIST_REMOVE_RCU(head, elm, type, field) do {           \
    if ((head)->slh_first == (elm)) {                            \
        QSLIST_REMOVE_HEAD_RCU((head), field);                   \
    } else {                                                     \
        struct type *curr = (head)->slh_first;                   \
        while (curr->field.sle_next != (elm)) {                  \
            curr = curr->field.sle_next;                         \
        }                                                        \
        atomic_set(&curr->field.sle_next,                        \
                   curr->field.sle_next->field.sle_next);        \
    }                                                            \
} while (/*CONSTCOND*/0)

#define QSLIST_FOREACH_RCU(var, head, field)                             \
    for ((var) = atomic_rcu_read(&(head)->slh_first);                    \
         (var);                                                          \
         (var) = atomic_rcu_read(&(var)->field.sle_next))

#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next)                  \
    for ((var) = atomic_rcu_read(&(head)->slh_first);                    \
         (var) && ((next) = atomic_rcu_read(&(var)->field.sle_next), 1); \
         (var) = (next))
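
/*
 * Illustrative sketch of the _SAFE iterator.  The "Item" type and the
 * items_drop() helper are hypothetical; the updater is assumed to hold
 * the list's write lock and to use synchronize_rcu() and g_free() from
 * "qemu/rcu.h" and GLib respectively.
 *
 *     typedef struct Item {
 *         int key;
 *         QSLIST_ENTRY(Item) next;
 *     } Item;
 *
 *     static QSLIST_HEAD(, Item) items = QSLIST_HEAD_INITIALIZER(items);
 *
 *     // The _SAFE iterator caches the next pointer before the body runs,
 *     // so the current element may be unlinked and freed mid-traversal.
 *     static void items_drop(int key)
 *     {
 *         Item *i, *tmp;
 *
 *         QSLIST_FOREACH_SAFE_RCU(i, &items, next, tmp) {
 *             if (i->key == key) {
 *                 QSLIST_REMOVE_RCU(&items, i, Item, next);
 *                 synchronize_rcu();
 *                 g_free(i);
 *             }
 *         }
 *     }
 */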

#ifdef __cplusplus
}
#endif

#endif /* QEMU_RCU_QUEUE_H */