[qemu.git] / include/qemu/rcu_queue.h
#ifndef QEMU_RCU_QUEUE_H
#define QEMU_RCU_QUEUE_H
/*
 * rcu_queue.h
 *
 * RCU-friendly versions of the queue.h primitives.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Copyright (c) 2013 Mike D. Day, IBM Corporation.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include "qemu/queue.h"
#include "qemu/atomic.h"
#ifdef __cplusplus
extern "C" {
#endif
/*
 * List access methods.
 */
#define QLIST_EMPTY_RCU(head) (atomic_rcu_read(&(head)->lh_first) == NULL)
#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))
/*
 * List functions.
 */
/*
 * The difference between atomic_read/set and atomic_rcu_read/set
 * is the inclusion of a read/write memory barrier in the volatile
 * access.  The atomic_rcu_* macros include the memory barrier; the
 * plain atomic macros do not.  It should therefore be correct to
 * issue a series of reads or writes to the same element using only
 * the atomic_* macros, as long as the last read or write uses
 * atomic_rcu_* to introduce a read or write memory barrier as
 * appropriate.
 */
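/* A minimal sketch of the rule above (illustrative only; 'global_foo' and
 * the fields of the hypothetical element are not part of this header):
 * initialize a not-yet-visible element with plain stores, and make only the
 * final, publishing store an atomic_rcu_set():
 *
 *     struct Foo *foo = g_new0(struct Foo, 1);
 *
 *     foo->a = 1;                       // element not yet reachable, so no
 *     foo->b = 2;                       //   barrier is needed per store
 *     atomic_rcu_set(&global_foo, foo); // publish: the barrier orders the
 *                                       //   stores above before the pointer
 *
 * A reader that loads the pointer with atomic_rcu_read(&global_foo) is then
 * guaranteed to observe the initialized values of 'a' and 'b'.
 */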
/* Upon publication of the listelm->next value, list readers
 * will see the new node when following next pointers from
 * antecedent nodes, but may not see the new node when following
 * prev pointers from subsequent nodes until after the RCU grace
 * period expires.
 * See linux/include/rculist.h __list_add_rcu(new, prev, next).
 */
#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do {    \
    (elm)->field.le_next = (listelm)->field.le_next;        \
    (elm)->field.le_prev = &(listelm)->field.le_next;       \
    atomic_rcu_set(&(listelm)->field.le_next, (elm));       \
    if ((elm)->field.le_next != NULL) {                     \
        (elm)->field.le_next->field.le_prev =               \
            &(elm)->field.le_next;                          \
    }                                                       \
} while (/*CONSTCOND*/0)
/* Upon publication of the listelm->prev->next value, list
 * readers will see the new element when following prev pointers
 * from subsequent elements, but may not see the new element
 * when following next pointers from antecedent elements
 * until after the RCU grace period expires.
 */
#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do {   \
    (elm)->field.le_prev = (listelm)->field.le_prev;        \
    (elm)->field.le_next = (listelm);                       \
    atomic_rcu_set((listelm)->field.le_prev, (elm));        \
    (listelm)->field.le_prev = &(elm)->field.le_next;       \
} while (/*CONSTCOND*/0)
/* Upon publication of the head->first value, list readers
 * will see the new element when following the head, but may
 * not see the new element when following prev pointers from
 * subsequent elements until after the RCU grace period has
 * expired.
 */
#define QLIST_INSERT_HEAD_RCU(head, elm, field) do {    \
    (elm)->field.le_prev = &(head)->lh_first;           \
    (elm)->field.le_next = (head)->lh_first;            \
    atomic_rcu_set((&(head)->lh_first), (elm));         \
    if ((elm)->field.le_next != NULL) {                 \
        (elm)->field.le_next->field.le_prev =           \
            &(elm)->field.le_next;                      \
    }                                                   \
} while (/*CONSTCOND*/0)
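/* Writer-side sketch (illustrative; the element type, list head, and mutex
 * below are hypothetical, with QemuMutex assumed from "qemu/thread.h").
 * Updaters still need mutual exclusion among themselves; RCU only removes
 * the need for reader-side locking:
 *
 *     struct Foo {
 *         int value;
 *         QLIST_ENTRY(Foo) node;
 *     };
 *     static QLIST_HEAD(, Foo) foo_list = QLIST_HEAD_INITIALIZER(foo_list);
 *     static QemuMutex foo_list_lock;
 *
 *     void foo_add(struct Foo *f)
 *     {
 *         qemu_mutex_lock(&foo_list_lock);
 *         QLIST_INSERT_HEAD_RCU(&foo_list, f, node);
 *         qemu_mutex_unlock(&foo_list_lock);
 *     }
 */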
/* Prior to publication of the elm->prev->next value, some list
 * readers may still see the removed element when following
 * the antecedent's next pointer.
 */
#define QLIST_REMOVE_RCU(elm, field) do {           \
    if ((elm)->field.le_next != NULL) {             \
        (elm)->field.le_next->field.le_prev =       \
            (elm)->field.le_prev;                   \
    }                                               \
    *(elm)->field.le_prev = (elm)->field.le_next;   \
} while (/*CONSTCOND*/0)
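/* Removal sketch (illustrative, reusing the hypothetical names from the
 * writer-side sketch above; synchronize_rcu() is assumed to come from
 * "qemu/rcu.h").  Readers may still hold a pointer to the unlinked element,
 * so it must not be freed before a grace period has elapsed:
 *
 *     void foo_del(struct Foo *f)
 *     {
 *         qemu_mutex_lock(&foo_list_lock);
 *         QLIST_REMOVE_RCU(f, node);
 *         qemu_mutex_unlock(&foo_list_lock);
 *         synchronize_rcu();   // wait for all pre-existing readers to finish
 *         g_free(f);           // now no reader can still reference f
 *     }
 */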
/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_RCU(var, head, field)                 \
    for ((var) = atomic_rcu_read(&(head)->lh_first);        \
         (var);                                             \
         (var) = atomic_rcu_read(&(var)->field.le_next))
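/* Reader-side sketch (illustrative, reusing the hypothetical names above;
 * rcu_read_lock()/rcu_read_unlock() are assumed to come from "qemu/rcu.h").
 * Readers take no list lock, only the RCU read-side critical section:
 *
 *     int foo_sum(void)
 *     {
 *         struct Foo *f;
 *         int sum = 0;
 *
 *         rcu_read_lock();
 *         QLIST_FOREACH_RCU(f, &foo_list, node) {
 *             sum += f->value;
 *         }
 *         rcu_read_unlock();
 *         return sum;
 *     }
 */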
/* List traversal must occur within an RCU critical section. */
#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var)             \
    for ((var) = (atomic_rcu_read(&(head)->lh_first));                 \
         (var) &&                                                      \
             ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
         (var) = (next_var))
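/* Sketch of the _SAFE variant (illustrative, reusing the hypothetical names
 * above): the next pointer is loaded before the loop body runs, so the body
 * may unlink the current element.  The element itself must still only be
 * reclaimed after a grace period; g_free_rcu() is assumed from "qemu/rcu.h"
 * and requires a 'struct rcu_head rcu' field added to struct Foo:
 *
 *     struct Foo *f, *next;
 *
 *     qemu_mutex_lock(&foo_list_lock);
 *     QLIST_FOREACH_SAFE_RCU(f, &foo_list, node, next) {
 *         if (f->value < 0) {
 *             QLIST_REMOVE_RCU(f, node);
 *             g_free_rcu(f, rcu);
 *         }
 *     }
 *     qemu_mutex_unlock(&foo_list_lock);
 */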
#ifdef __cplusplus
}
#endif

#endif /* QEMU_RCU_QUEUE_H */