/*
 * [heimdal.git] include/heimqueue.h
 * (web-view scrape residue preserved: commit subject "kdc: set kvno to zero
 * if reply key replaced", blob 5e922cfcbc30314708a6d41deb7486ea47152318)
 */
/*	$NetBSD: queue.h,v 1.38 2004/04/18 14:12:05 lukem Exp $	*/
/*	$Id$ */

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */
35 #ifndef _HEIM_QUEUE_H_
36 #define _HEIM_QUEUE_H_
39 * Singly-linked List definitions.
41 #define HEIM_SLIST_HEAD(name, type) \
42 struct name { \
43 struct type *slh_first; /* first element */ \
46 #define HEIM_SLIST_HEAD_INITIALIZER(head) \
47 { NULL }
49 #define HEIM_SLIST_ENTRY(type) \
50 struct { \
51 struct type *sle_next; /* next element */ \
55 * Singly-linked List functions.
57 #define HEIM_SLIST_INIT(head) do { \
58 (head)->slh_first = NULL; \
59 } while (/*CONSTCOND*/0)
61 #define HEIM_SLIST_INSERT_AFTER(slistelm, elm, field) do { \
62 (elm)->field.sle_next = (slistelm)->field.sle_next; \
63 (slistelm)->field.sle_next = (elm); \
64 } while (/*CONSTCOND*/0)
66 #define HEIM_SLIST_INSERT_HEAD(head, elm, field) do { \
67 (elm)->field.sle_next = (head)->slh_first; \
68 (head)->slh_first = (elm); \
69 } while (/*CONSTCOND*/0)
71 #define HEIM_SLIST_REMOVE_HEAD(head, field) do { \
72 (head)->slh_first = (head)->slh_first->field.sle_next; \
73 } while (/*CONSTCOND*/0)
75 #define HEIM_SLIST_REMOVE(head, elm, type, field) do { \
76 if ((head)->slh_first == (elm)) { \
77 HEIM_SLIST_REMOVE_HEAD((head), field); \
78 } \
79 else { \
80 struct type *curelm = (head)->slh_first; \
81 while(curelm->field.sle_next != (elm)) \
82 curelm = curelm->field.sle_next; \
83 curelm->field.sle_next = \
84 curelm->field.sle_next->field.sle_next; \
85 } \
86 } while (/*CONSTCOND*/0)
88 #define HEIM_SLIST_FOREACH(var, head, field) \
89 for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)
92 * Singly-linked List access methods.
94 #define HEIM_SLIST_EMPTY(head) ((head)->slh_first == NULL)
95 #define HEIM_SLIST_FIRST(head) ((head)->slh_first)
96 #define HEIM_SLIST_NEXT(elm, field) ((elm)->field.sle_next)
99 * Singly-linked List atomic functions.
101 #include "heimbase.h"
103 #define HEIM_SLIST_ATOMIC_HEAD(name, type) \
104 struct name { \
105 heim_base_atomic(struct type *) slh_first; /* first element */ \
108 #define HEIM_SLIST_ATOMIC_ENTRY(type) \
109 struct { \
110 heim_base_atomic(struct type *) sle_next; /* next element */ \
113 #define HEIM_SLIST_ATOMIC_INSERT_HEAD(head, elm, field) do { \
114 (elm)->field.sle_next = \
115 heim_base_exchange_pointer(&(head)->slh_first, (elm)); \
116 } while (/*CONSTCOND*/0)
118 #define HEIM_SLIST_ATOMIC_FOREACH(var, head, field) \
119 for ((var) = heim_base_atomic_load(&(head)->slh_first); \
120 (var) != NULL; \
121 (var) = heim_base_atomic_load(&(var)->field.sle_next))
123 * Tail queue definitions.
125 #define HEIM_TAILQ_HEAD(name, type) \
126 struct name { \
127 struct type *tqh_first; /* first element */ \
128 struct type **tqh_last; /* addr of last next element */ \
131 #define HEIM_TAILQ_HEAD_INITIALIZER(head) \
132 { NULL, &(head).tqh_first }
133 #define HEIM_TAILQ_ENTRY(type) \
134 struct { \
135 struct type *tqe_next; /* next element */ \
136 struct type **tqe_prev; /* address of previous next element */ \
140 * Tail queue functions.
142 #if defined(_KERNEL) && defined(QUEUEDEBUG)
143 #define QUEUEDEBUG_HEIM_TAILQ_INSERT_HEAD(head, elm, field) \
144 if ((head)->tqh_first && \
145 (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
146 panic("HEIM_TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
147 #define QUEUEDEBUG_HEIM_TAILQ_INSERT_TAIL(head, elm, field) \
148 if (*(head)->tqh_last != NULL) \
149 panic("HEIM_TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
150 #define QUEUEDEBUG_HEIM_TAILQ_OP(elm, field) \
151 if ((elm)->field.tqe_next && \
152 (elm)->field.tqe_next->field.tqe_prev != \
153 &(elm)->field.tqe_next) \
154 panic("HEIM_TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
155 if (*(elm)->field.tqe_prev != (elm)) \
156 panic("HEIM_TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
157 #define QUEUEDEBUG_HEIM_TAILQ_PREREMOVE(head, elm, field) \
158 if ((elm)->field.tqe_next == NULL && \
159 (head)->tqh_last != &(elm)->field.tqe_next) \
160 panic("HEIM_TAILQ_PREREMOVE head %p elm %p %s:%d", \
161 (head), (elm), __FILE__, __LINE__);
162 #define QUEUEDEBUG_HEIM_TAILQ_POSTREMOVE(elm, field) \
163 (elm)->field.tqe_next = (void *)1L; \
164 (elm)->field.tqe_prev = (void *)1L;
165 #else
166 #define QUEUEDEBUG_HEIM_TAILQ_INSERT_HEAD(head, elm, field)
167 #define QUEUEDEBUG_HEIM_TAILQ_INSERT_TAIL(head, elm, field)
168 #define QUEUEDEBUG_HEIM_TAILQ_OP(elm, field)
169 #define QUEUEDEBUG_HEIM_TAILQ_PREREMOVE(head, elm, field)
170 #define QUEUEDEBUG_HEIM_TAILQ_POSTREMOVE(elm, field)
171 #endif
173 #define HEIM_TAILQ_INIT(head) do { \
174 (head)->tqh_first = NULL; \
175 (head)->tqh_last = &(head)->tqh_first; \
176 } while (/*CONSTCOND*/0)
178 #define HEIM_TAILQ_INSERT_HEAD(head, elm, field) do { \
179 QUEUEDEBUG_HEIM_TAILQ_INSERT_HEAD((head), (elm), field) \
180 if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
181 (head)->tqh_first->field.tqe_prev = \
182 &(elm)->field.tqe_next; \
183 else \
184 (head)->tqh_last = &(elm)->field.tqe_next; \
185 (head)->tqh_first = (elm); \
186 (elm)->field.tqe_prev = &(head)->tqh_first; \
187 } while (/*CONSTCOND*/0)
189 #define HEIM_TAILQ_INSERT_TAIL(head, elm, field) do { \
190 QUEUEDEBUG_HEIM_TAILQ_INSERT_TAIL((head), (elm), field) \
191 (elm)->field.tqe_next = NULL; \
192 (elm)->field.tqe_prev = (head)->tqh_last; \
193 *(head)->tqh_last = (elm); \
194 (head)->tqh_last = &(elm)->field.tqe_next; \
195 } while (/*CONSTCOND*/0)
197 #define HEIM_TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
198 QUEUEDEBUG_HEIM_TAILQ_OP((listelm), field) \
199 if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
200 (elm)->field.tqe_next->field.tqe_prev = \
201 &(elm)->field.tqe_next; \
202 else \
203 (head)->tqh_last = &(elm)->field.tqe_next; \
204 (listelm)->field.tqe_next = (elm); \
205 (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
206 } while (/*CONSTCOND*/0)
208 #define HEIM_TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
209 QUEUEDEBUG_HEIM_TAILQ_OP((listelm), field) \
210 (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
211 (elm)->field.tqe_next = (listelm); \
212 *(listelm)->field.tqe_prev = (elm); \
213 (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
214 } while (/*CONSTCOND*/0)
216 #define HEIM_TAILQ_REMOVE(head, elm, field) do { \
217 QUEUEDEBUG_HEIM_TAILQ_PREREMOVE((head), (elm), field) \
218 QUEUEDEBUG_HEIM_TAILQ_OP((elm), field) \
219 if (((elm)->field.tqe_next) != NULL) \
220 (elm)->field.tqe_next->field.tqe_prev = \
221 (elm)->field.tqe_prev; \
222 else \
223 (head)->tqh_last = (elm)->field.tqe_prev; \
224 *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
225 QUEUEDEBUG_HEIM_TAILQ_POSTREMOVE((elm), field); \
226 } while (/*CONSTCOND*/0)
228 #define HEIM_TAILQ_FOREACH(var, head, field) \
229 for ((var) = ((head)->tqh_first); \
230 (var); \
231 (var) = ((var)->field.tqe_next))
233 #define HEIM_TAILQ_FOREACH_SAFE(var, head, field, next) \
234 for ((var) = ((head)->tqh_first); \
235 (var) != NULL && ((next) = HEIM_TAILQ_NEXT(var, field), 1); \
236 (var) = (next))
238 #define HEIM_TAILQ_FOREACH_REVERSE(var, head, headname, field) \
239 for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
240 (var); \
241 (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
243 #define HEIM_TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
244 for ((var) = HEIM_TAILQ_LAST((head), headname); \
245 (var) && ((prev) = HEIM_TAILQ_PREV((var), headname, field), 1);\
246 (var) = (prev))
248 #define HEIM_TAILQ_CONCAT(head1, head2, field) do { \
249 if (!HEIM_TAILQ_EMPTY(head2)) { \
250 *(head1)->tqh_last = (head2)->tqh_first; \
251 (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
252 (head1)->tqh_last = (head2)->tqh_last; \
253 HEIM_TAILQ_INIT((head2)); \
255 } while (/*CONSTCOND*/0)
258 * Tail queue access methods.
260 #define HEIM_TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
261 #define HEIM_TAILQ_FIRST(head) ((head)->tqh_first)
262 #define HEIM_TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
264 #define HEIM_TAILQ_LAST(head, headname) \
265 (*(((struct headname *)((head)->tqh_last))->tqh_last))
266 #define HEIM_TAILQ_PREV(elm, headname, field) \
267 (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
270 #endif /* !_HEIM_QUEUE_H_ */