/*	$NetBSD: queue.h,v 1.42 2005/07/13 15:08:24 wiz Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_QUEUE_H
#define	_SYS_QUEUE_H

#include <sys/note.h>

#ifdef __cplusplus
extern "C" {
#endif
/*
 * This file defines five types of data structures: singly-linked lists,
 * lists, simple queues (also provided under the singly-linked tail queue,
 * STAILQ, names), tail queues, and circular queues.
 *
 * A singly-linked list is headed by a single forward pointer.  The
 * elements are singly linked for minimum space and pointer manipulation
 * overhead at the expense of O(n) removal for arbitrary elements.  New
 * elements can be added to the list after an existing element or at the
 * head of the list.  Elements being removed from the head of the list
 * should use the explicit macro for this purpose for optimum
 * efficiency.  A singly-linked list may only be traversed in the forward
 * direction.  Singly-linked lists are ideal for applications with large
 * datasets and few or no removals, or for implementing a LIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header).  The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before
 * or after an existing element or at the head of the list.  A list
 * may only be traversed in the forward direction.
 *
 * A simple queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are singly
 * linked to save space, so elements can only be removed from the
 * head of the list.  New elements can be added to the list after
 * an existing element, at the head of the list, or at the end of the
 * list.  A simple queue may only be traversed in the forward direction.
 *
 * A tail queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or
 * after an existing element, at the head of the list, or at the end of
 * the list.  A tail queue may be traversed in either direction.
 *
 * A circle queue is headed by a pair of pointers, one to the head of the
 * list and the other to the tail of the list.  The elements are doubly
 * linked so that an arbitrary element can be removed without a need to
 * traverse the list.  New elements can be added to the list before or after
 * an existing element, at the head of the list, or at the end of the list.
 * A circle queue may be traversed in either direction, but has a more
 * complex end-of-list detection.
 *
 * For details on the use of these macros, see the queue(3) manual page.
 * Brief usage sketches also follow each group of access methods below.
 */
/*
 * List definitions.
 */
#define	LIST_HEAD(name, type)						\
struct name {								\
	struct type *lh_first;	/* first element */			\
}

#define	LIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	LIST_ENTRY(type)						\
struct {								\
	struct type *le_next;	/* next element */			\
	struct type **le_prev;	/* address of previous next element */	\
}

/*
 * List functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)			\
	if ((head)->lh_first &&						\
	    (head)->lh_first->field.le_prev != &(head)->lh_first)	\
		panic("LIST_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_OP(elm, field)					\
	if ((elm)->field.le_next &&					\
	    (elm)->field.le_next->field.le_prev !=			\
	    &(elm)->field.le_next)					\
		panic("LIST_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
	if (*(elm)->field.le_prev != (elm))				\
		panic("LIST_* back %p %s:%d", (elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)				\
	(elm)->field.le_next = (void *)1L;				\
	(elm)->field.le_prev = (void *)1L;
#else
#define	QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_LIST_OP(elm, field)
#define	QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
#endif

#define	LIST_INIT(head) do {						\
	(head)->lh_first = NULL;					\
	_NOTE(CONSTCOND)						\
} while (0)

#define	LIST_INSERT_AFTER(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)	\
		(listelm)->field.le_next->field.le_prev =		\
		    &(elm)->field.le_next;				\
	(listelm)->field.le_next = (elm);				\
	(elm)->field.le_prev = &(listelm)->field.le_next;		\
	_NOTE(CONSTCOND)						\
} while (0)

#define	LIST_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_LIST_OP((listelm), field)				\
	(elm)->field.le_prev = (listelm)->field.le_prev;		\
	(elm)->field.le_next = (listelm);				\
	*(listelm)->field.le_prev = (elm);				\
	(listelm)->field.le_prev = &(elm)->field.le_next;		\
	_NOTE(CONSTCOND)						\
} while (0)

#define	LIST_INSERT_HEAD(head, elm, field) do {				\
	QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.le_next = (head)->lh_first) != NULL)		\
		(head)->lh_first->field.le_prev = &(elm)->field.le_next;\
	(head)->lh_first = (elm);					\
	(elm)->field.le_prev = &(head)->lh_first;			\
	_NOTE(CONSTCOND)						\
} while (0)

#define	LIST_REMOVE(elm, field) do {					\
	QUEUEDEBUG_LIST_OP((elm), field)				\
	if ((elm)->field.le_next != NULL)				\
		(elm)->field.le_next->field.le_prev =			\
		    (elm)->field.le_prev;				\
	*(elm)->field.le_prev = (elm)->field.le_next;			\
	QUEUEDEBUG_LIST_POSTREMOVE((elm), field)			\
	_NOTE(CONSTCOND)						\
} while (0)

#define	LIST_FOREACH(var, head, field)					\
	for ((var) = ((head)->lh_first);				\
	    (var);							\
	    (var) = ((var)->field.le_next))

/*
 * List access methods.
 */
#define	LIST_EMPTY(head)		((head)->lh_first == NULL)
#define	LIST_FIRST(head)		((head)->lh_first)
#define	LIST_NEXT(elm, field)		((elm)->field.le_next)
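/*
 * Example: a minimal LIST usage sketch.  The structure "entry", its
 * linkage field "entries", and the local variables below are hypothetical
 * names used only for illustration; only the LIST_* macros defined above
 * come from this header.
 *
 *	LIST_HEAD(listhead, entry);
 *	struct entry {
 *		int value;
 *		LIST_ENTRY(entry) entries;
 *	};
 *
 *	void
 *	list_example(void)
 *	{
 *		struct listhead head;
 *		struct entry a, b, c, *np;
 *
 *		LIST_INIT(&head);
 *		LIST_INSERT_HEAD(&head, &a, entries);
 *		LIST_INSERT_AFTER(&a, &b, entries);
 *		LIST_INSERT_BEFORE(&b, &c, entries);
 *		LIST_FOREACH(np, &head, entries)
 *			np->value = 0;
 *		LIST_REMOVE(&c, entries);
 *		if (!LIST_EMPTY(&head))
 *			LIST_FIRST(&head)->value = 1;
 *	}
 */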
/*
 * Singly-linked List definitions.
 */
#define	SLIST_HEAD(name, type)						\
struct name {								\
	struct type *slh_first;	/* first element */			\
}

#define	SLIST_HEAD_INITIALIZER(head)					\
	{ NULL }

#define	SLIST_ENTRY(type)						\
struct {								\
	struct type *sle_next;	/* next element */			\
}

/*
 * Singly-linked List functions.
 */
#define	SLIST_INIT(head) do {						\
	(head)->slh_first = NULL;					\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SLIST_INSERT_AFTER(slistelm, elm, field) do {			\
	(elm)->field.sle_next = (slistelm)->field.sle_next;		\
	(slistelm)->field.sle_next = (elm);				\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SLIST_INSERT_HEAD(head, elm, field) do {			\
	(elm)->field.sle_next = (head)->slh_first;			\
	(head)->slh_first = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SLIST_REMOVE_HEAD(head, field) do {				\
	(head)->slh_first = (head)->slh_first->field.sle_next;		\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SLIST_REMOVE(head, elm, type, field) do {			\
	if ((head)->slh_first == (elm)) {				\
		SLIST_REMOVE_HEAD((head), field);			\
	}								\
	else {								\
		struct type *curelm = (head)->slh_first;		\
		while (curelm->field.sle_next != (elm))			\
			curelm = curelm->field.sle_next;		\
		curelm->field.sle_next =				\
		    curelm->field.sle_next->field.sle_next;		\
	}								\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SLIST_FOREACH(var, head, field)					\
	for ((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next)

/*
 * Singly-linked List access methods.
 */
#define	SLIST_EMPTY(head)	((head)->slh_first == NULL)
#define	SLIST_FIRST(head)	((head)->slh_first)
#define	SLIST_NEXT(elm, field)	((elm)->field.sle_next)
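/*
 * Example: a minimal SLIST usage sketch.  The structure "entry", its
 * linkage field "entries", and the local variables are hypothetical
 * illustrations; only the SLIST_* macros come from this header.
 *
 *	SLIST_HEAD(slisthead, entry);
 *	struct entry {
 *		int value;
 *		SLIST_ENTRY(entry) entries;
 *	};
 *
 *	void
 *	slist_example(void)
 *	{
 *		struct slisthead head = SLIST_HEAD_INITIALIZER(head);
 *		struct entry a, b, *np;
 *
 *		SLIST_INSERT_HEAD(&head, &a, entries);
 *		SLIST_INSERT_AFTER(&a, &b, entries);
 *		SLIST_FOREACH(np, &head, entries)
 *			np->value = 0;
 *		SLIST_REMOVE(&head, &b, entry, entries);
 *		SLIST_REMOVE_HEAD(&head, entries);
 *	}
 */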
/*
 * Singly-linked Tail queue declarations.
 */
#define	STAILQ_HEAD(name, type)						\
struct name {								\
	struct type *stqh_first;	/* first element */		\
	struct type **stqh_last;	/* addr of last next element */	\
}

#define	STAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).stqh_first }

#define	STAILQ_ENTRY(type)						\
struct {								\
	struct type *stqe_next;	/* next element */			\
}

/*
 * Singly-linked Tail queue functions.
 */
#define	STAILQ_INIT(head) do {						\
	(head)->stqh_first = NULL;					\
	(head)->stqh_last = &(head)->stqh_first;			\
	_NOTE(CONSTCOND)						\
} while (0)

#define	STAILQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.stqe_next = (head)->stqh_first) == NULL)	\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(head)->stqh_first = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)

#define	STAILQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.stqe_next = NULL;					\
	*(head)->stqh_last = (elm);					\
	(head)->stqh_last = &(elm)->field.stqe_next;			\
	_NOTE(CONSTCOND)						\
} while (0)

#define	STAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.stqe_next = (listelm)->field.stqe_next)	\
	    == NULL)							\
		(head)->stqh_last = &(elm)->field.stqe_next;		\
	(listelm)->field.stqe_next = (elm);				\
	_NOTE(CONSTCOND)						\
} while (0)

#define	STAILQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->stqh_first = (head)->stqh_first->field.stqe_next)	\
	    == NULL)							\
		(head)->stqh_last = &(head)->stqh_first;		\
	_NOTE(CONSTCOND)						\
} while (0)

#define	STAILQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->stqh_first == (elm)) {				\
		STAILQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->stqh_first;		\
		while (curelm->field.stqe_next != (elm))		\
			curelm = curelm->field.stqe_next;		\
		if ((curelm->field.stqe_next =				\
		    curelm->field.stqe_next->field.stqe_next) == NULL)	\
			(head)->stqh_last = &(curelm)->field.stqe_next;	\
	}								\
	_NOTE(CONSTCOND)						\
} while (0)

#define	STAILQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->stqh_first);				\
	    (var);							\
	    (var) = ((var)->field.stqe_next))

/*
 * Singly-linked Tail queue access methods.
 */
#define	STAILQ_EMPTY(head)	((head)->stqh_first == NULL)
#define	STAILQ_FIRST(head)	((head)->stqh_first)
#define	STAILQ_NEXT(elm, field)	((elm)->field.stqe_next)
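/*
 * Example: a minimal STAILQ usage sketch.  The structure "entry", its
 * linkage field "entries", and the local variables are hypothetical
 * illustrations; only the STAILQ_* macros come from this header.
 *
 *	STAILQ_HEAD(stailhead, entry);
 *	struct entry {
 *		int value;
 *		STAILQ_ENTRY(entry) entries;
 *	};
 *
 *	void
 *	stailq_example(void)
 *	{
 *		struct stailhead head;
 *		struct entry a, b, c, *np;
 *
 *		STAILQ_INIT(&head);
 *		STAILQ_INSERT_HEAD(&head, &a, entries);
 *		STAILQ_INSERT_TAIL(&head, &b, entries);
 *		STAILQ_INSERT_AFTER(&head, &a, &c, entries);
 *		STAILQ_FOREACH(np, &head, entries)
 *			np->value = 0;
 *		STAILQ_REMOVE(&head, &c, entry, entries);
 *		STAILQ_REMOVE_HEAD(&head, entries);
 *	}
 */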
/*
 * Simple queue definitions.
 */
#define	SIMPLEQ_HEAD(name, type)					\
struct name {								\
	struct type *sqh_first;	/* first element */			\
	struct type **sqh_last;	/* addr of last next element */		\
}

#define	SIMPLEQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).sqh_first }

#define	SIMPLEQ_ENTRY(type)						\
struct {								\
	struct type *sqe_next;	/* next element */			\
}

/*
 * Simple queue functions.
 */
#define	SIMPLEQ_INIT(head) do {						\
	(head)->sqh_first = NULL;					\
	(head)->sqh_last = &(head)->sqh_first;				\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SIMPLEQ_INSERT_HEAD(head, elm, field) do {			\
	if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)	\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(head)->sqh_first = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SIMPLEQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.sqe_next = NULL;					\
	*(head)->sqh_last = (elm);					\
	(head)->sqh_last = &(elm)->field.sqe_next;			\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
		(head)->sqh_last = &(elm)->field.sqe_next;		\
	(listelm)->field.sqe_next = (elm);				\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SIMPLEQ_REMOVE_HEAD(head, field) do {				\
	if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
		(head)->sqh_last = &(head)->sqh_first;			\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SIMPLEQ_REMOVE(head, elm, type, field) do {			\
	if ((head)->sqh_first == (elm)) {				\
		SIMPLEQ_REMOVE_HEAD((head), field);			\
	} else {							\
		struct type *curelm = (head)->sqh_first;		\
		while (curelm->field.sqe_next != (elm))			\
			curelm = curelm->field.sqe_next;		\
		if ((curelm->field.sqe_next =				\
		    curelm->field.sqe_next->field.sqe_next) == NULL)	\
			(head)->sqh_last = &(curelm)->field.sqe_next;	\
	}								\
	_NOTE(CONSTCOND)						\
} while (0)

#define	SIMPLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->sqh_first);				\
	    (var);							\
	    (var) = ((var)->field.sqe_next))

/*
 * Simple queue access methods.
 */
#define	SIMPLEQ_EMPTY(head)		((head)->sqh_first == NULL)
#define	SIMPLEQ_FIRST(head)		((head)->sqh_first)
#define	SIMPLEQ_NEXT(elm, field)	((elm)->field.sqe_next)
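/*
 * Example: a minimal SIMPLEQ usage sketch (the SIMPLEQ macros parallel
 * the STAILQ macros above).  The structure "entry", its linkage field
 * "entries", and the local variables are hypothetical illustrations;
 * only the SIMPLEQ_* macros come from this header.
 *
 *	SIMPLEQ_HEAD(simplehead, entry);
 *	struct entry {
 *		int value;
 *		SIMPLEQ_ENTRY(entry) entries;
 *	};
 *
 *	void
 *	simpleq_example(void)
 *	{
 *		struct simplehead head;
 *		struct entry a, b, c, *np;
 *
 *		SIMPLEQ_INIT(&head);
 *		SIMPLEQ_INSERT_HEAD(&head, &a, entries);
 *		SIMPLEQ_INSERT_TAIL(&head, &b, entries);
 *		SIMPLEQ_INSERT_AFTER(&head, &a, &c, entries);
 *		SIMPLEQ_FOREACH(np, &head, entries)
 *			np->value = 0;
 *		SIMPLEQ_REMOVE(&head, &c, entry, entries);
 *		SIMPLEQ_REMOVE_HEAD(&head, entries);
 *	}
 */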
/*
 * Tail queue definitions.
 */
#define	_TAILQ_HEAD(name, type)						\
struct name {								\
	type *tqh_first;	/* first element */			\
	type **tqh_last;	/* addr of last next element */		\
}
#define	TAILQ_HEAD(name, type)	_TAILQ_HEAD(name, struct type)

#define	TAILQ_HEAD_INITIALIZER(head)					\
	{ NULL, &(head).tqh_first }

#define	_TAILQ_ENTRY(type)						\
struct {								\
	type *tqe_next;		/* next element */			\
	type **tqe_prev;	/* address of previous next element */	\
}
#define	TAILQ_ENTRY(type)	_TAILQ_ENTRY(struct type)

/*
 * Tail queue functions.
 */
#if defined(_KERNEL) && defined(QUEUEDEBUG)
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)			\
	if ((head)->tqh_first &&					\
	    (head)->tqh_first->field.tqe_prev != &(head)->tqh_first)	\
		panic("TAILQ_INSERT_HEAD %p %s:%d", (void *)(head),	\
		    __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)			\
	if (*(head)->tqh_last != NULL)					\
		panic("TAILQ_INSERT_TAIL %p %s:%d", (void *)(head),	\
		    __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_OP(elm, field)					\
	if ((elm)->field.tqe_next &&					\
	    (elm)->field.tqe_next->field.tqe_prev !=			\
	    &(elm)->field.tqe_next)					\
		panic("TAILQ_* forw %p %s:%d", (void *)(elm),		\
		    __FILE__, __LINE__);\
	if (*(elm)->field.tqe_prev != (elm))				\
		panic("TAILQ_* back %p %s:%d", (void *)(elm),		\
		    __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)			\
	if ((elm)->field.tqe_next == NULL &&				\
	    (head)->tqh_last != &(elm)->field.tqe_next)			\
		panic("TAILQ_PREREMOVE head %p elm %p %s:%d",		\
		    (void *)(head), (void *)(elm), __FILE__, __LINE__);
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)				\
	(elm)->field.tqe_next = (void *)1L;				\
	(elm)->field.tqe_prev = (void *)1L;
#else
#define	QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
#define	QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
#define	QUEUEDEBUG_TAILQ_OP(elm, field)
#define	QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
#define	QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
#endif

#define	TAILQ_INIT(head) do {						\
	(head)->tqh_first = NULL;					\
	(head)->tqh_last = &(head)->tqh_first;				\
	_NOTE(CONSTCOND)						\
} while (0)

#define	TAILQ_INSERT_HEAD(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field)		\
	if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)	\
		(head)->tqh_first->field.tqe_prev =			\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(head)->tqh_first = (elm);					\
	(elm)->field.tqe_prev = &(head)->tqh_first;			\
	_NOTE(CONSTCOND)						\
} while (0)

#define	TAILQ_INSERT_TAIL(head, elm, field) do {			\
	QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field)		\
	(elm)->field.tqe_next = NULL;					\
	(elm)->field.tqe_prev = (head)->tqh_last;			\
	*(head)->tqh_last = (elm);					\
	(head)->tqh_last = &(elm)->field.tqe_next;			\
	_NOTE(CONSTCOND)						\
} while (0)

#define	TAILQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
		(elm)->field.tqe_next->field.tqe_prev =			\
		    &(elm)->field.tqe_next;				\
	else								\
		(head)->tqh_last = &(elm)->field.tqe_next;		\
	(listelm)->field.tqe_next = (elm);				\
	(elm)->field.tqe_prev = &(listelm)->field.tqe_next;		\
	_NOTE(CONSTCOND)						\
} while (0)

#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	QUEUEDEBUG_TAILQ_OP((listelm), field)				\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
	_NOTE(CONSTCOND)						\
} while (0)

#define	TAILQ_REMOVE(head, elm, field) do {				\
	QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field)		\
	QUEUEDEBUG_TAILQ_OP((elm), field)				\
	if (((elm)->field.tqe_next) != NULL)				\
		(elm)->field.tqe_next->field.tqe_prev =			\
		    (elm)->field.tqe_prev;				\
	else								\
		(head)->tqh_last = (elm)->field.tqe_prev;		\
	*(elm)->field.tqe_prev = (elm)->field.tqe_next;			\
	QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field);			\
	_NOTE(CONSTCOND)						\
} while (0)

#define	TAILQ_FOREACH(var, head, field)					\
	for ((var) = ((head)->tqh_first);				\
	    (var);							\
	    (var) = ((var)->field.tqe_next))

#define	TAILQ_FOREACH_REVERSE(var, head, headname, field)		\
	for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
	    (var);							\
	    (var) =							\
	    (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))

/*
 * Tail queue access methods.
 */
#define	TAILQ_EMPTY(head)		((head)->tqh_first == NULL)
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)

#define	TAILQ_LAST(head, headname)					\
	(*(((struct headname *)((head)->tqh_last))->tqh_last))
#define	TAILQ_PREV(elm, headname, field)				\
	(*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
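/*
 * Example: a minimal TAILQ usage sketch.  The structure "entry", the
 * head tag "tailhead", the linkage field "entries", and the local
 * variables are hypothetical illustrations; only the TAILQ_* macros
 * come from this header.  Note that TAILQ_FOREACH_REVERSE, TAILQ_LAST,
 * and TAILQ_PREV take the head structure's tag name ("tailhead" here).
 *
 *	TAILQ_HEAD(tailhead, entry);
 *	struct entry {
 *		int value;
 *		TAILQ_ENTRY(entry) entries;
 *	};
 *
 *	void
 *	tailq_example(void)
 *	{
 *		struct tailhead head;
 *		struct entry a, b, c, *np;
 *
 *		TAILQ_INIT(&head);
 *		TAILQ_INSERT_HEAD(&head, &a, entries);
 *		TAILQ_INSERT_TAIL(&head, &b, entries);
 *		TAILQ_INSERT_AFTER(&head, &a, &c, entries);
 *		TAILQ_FOREACH(np, &head, entries)
 *			np->value = 0;
 *		TAILQ_FOREACH_REVERSE(np, &head, tailhead, entries)
 *			np->value++;
 *		TAILQ_REMOVE(&head, &c, entries);
 *	}
 */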
/*
 * Circular queue definitions.
 */
#define	CIRCLEQ_HEAD(name, type)					\
struct name {								\
	struct type *cqh_first;		/* first element */		\
	struct type *cqh_last;		/* last element */		\
}

#define	CIRCLEQ_HEAD_INITIALIZER(head)					\
	{ (void *)&head, (void *)&head }

#define	CIRCLEQ_ENTRY(type)						\
struct {								\
	struct type *cqe_next;		/* next element */		\
	struct type *cqe_prev;		/* previous element */		\
}

/*
 * Circular queue functions.
 */
#define	CIRCLEQ_INIT(head) do {						\
	(head)->cqh_first = (void *)(head);				\
	(head)->cqh_last = (void *)(head);				\
	_NOTE(CONSTCOND)						\
} while (0)

#define	CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {		\
	(elm)->field.cqe_next = (listelm)->field.cqe_next;		\
	(elm)->field.cqe_prev = (listelm);				\
	if ((listelm)->field.cqe_next == (void *)(head))		\
		(head)->cqh_last = (elm);				\
	else								\
		(listelm)->field.cqe_next->field.cqe_prev = (elm);	\
	(listelm)->field.cqe_next = (elm);				\
	_NOTE(CONSTCOND)						\
} while (0)

#define	CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {		\
	(elm)->field.cqe_next = (listelm);				\
	(elm)->field.cqe_prev = (listelm)->field.cqe_prev;		\
	if ((listelm)->field.cqe_prev == (void *)(head))		\
		(head)->cqh_first = (elm);				\
	else								\
		(listelm)->field.cqe_prev->field.cqe_next = (elm);	\
	(listelm)->field.cqe_prev = (elm);				\
	_NOTE(CONSTCOND)						\
} while (0)

#define	CIRCLEQ_INSERT_HEAD(head, elm, field) do {			\
	(elm)->field.cqe_next = (head)->cqh_first;			\
	(elm)->field.cqe_prev = (void *)(head);				\
	if ((head)->cqh_last == (void *)(head))				\
		(head)->cqh_last = (elm);				\
	else								\
		(head)->cqh_first->field.cqe_prev = (elm);		\
	(head)->cqh_first = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)

#define	CIRCLEQ_INSERT_TAIL(head, elm, field) do {			\
	(elm)->field.cqe_next = (void *)(head);				\
	(elm)->field.cqe_prev = (head)->cqh_last;			\
	if ((head)->cqh_first == (void *)(head))			\
		(head)->cqh_first = (elm);				\
	else								\
		(head)->cqh_last->field.cqe_next = (elm);		\
	(head)->cqh_last = (elm);					\
	_NOTE(CONSTCOND)						\
} while (0)

#define	CIRCLEQ_REMOVE(head, elm, field) do {				\
	if ((elm)->field.cqe_next == (void *)(head))			\
		(head)->cqh_last = (elm)->field.cqe_prev;		\
	else								\
		(elm)->field.cqe_next->field.cqe_prev =			\
		    (elm)->field.cqe_prev;				\
	if ((elm)->field.cqe_prev == (void *)(head))			\
		(head)->cqh_first = (elm)->field.cqe_next;		\
	else								\
		(elm)->field.cqe_prev->field.cqe_next =			\
		    (elm)->field.cqe_next;				\
	_NOTE(CONSTCOND)						\
} while (0)

#define	CIRCLEQ_FOREACH(var, head, field)				\
	for ((var) = ((head)->cqh_first);				\
	    (var) != (void *)(head);					\
	    (var) = ((var)->field.cqe_next))

#define	CIRCLEQ_FOREACH_REVERSE(var, head, field)			\
	for ((var) = ((head)->cqh_last);				\
	    (var) != (void *)(head);					\
	    (var) = ((var)->field.cqe_prev))

/*
 * Circular queue access methods.
 */
#define	CIRCLEQ_EMPTY(head)		((head)->cqh_first == (void *)(head))
#define	CIRCLEQ_FIRST(head)		((head)->cqh_first)
#define	CIRCLEQ_LAST(head)		((head)->cqh_last)
#define	CIRCLEQ_NEXT(elm, field)	((elm)->field.cqe_next)
#define	CIRCLEQ_PREV(elm, field)	((elm)->field.cqe_prev)

#define	CIRCLEQ_LOOP_NEXT(head, elm, field)				\
	(((elm)->field.cqe_next == (void *)(head))			\
	    ? ((head)->cqh_first)					\
	    : (elm->field.cqe_next))
#define	CIRCLEQ_LOOP_PREV(head, elm, field)				\
	(((elm)->field.cqe_prev == (void *)(head))			\
	    ? ((head)->cqh_last)					\
	    : (elm->field.cqe_prev))
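/*
 * Example: a minimal CIRCLEQ usage sketch.  The structure "entry", its
 * linkage field "entries", and the local variables are hypothetical
 * illustrations; only the CIRCLEQ_* macros come from this header.
 * CIRCLEQ_LOOP_NEXT wraps from the last element back to the first
 * (and CIRCLEQ_LOOP_PREV the reverse) instead of returning the head.
 *
 *	CIRCLEQ_HEAD(circlehead, entry);
 *	struct entry {
 *		int value;
 *		CIRCLEQ_ENTRY(entry) entries;
 *	};
 *
 *	void
 *	circleq_example(void)
 *	{
 *		struct circlehead head;
 *		struct entry a, b, *np, *last;
 *
 *		CIRCLEQ_INIT(&head);
 *		CIRCLEQ_INSERT_HEAD(&head, &a, entries);
 *		CIRCLEQ_INSERT_TAIL(&head, &b, entries);
 *		CIRCLEQ_FOREACH(np, &head, entries)
 *			np->value = 0;
 *		last = CIRCLEQ_LAST(&head);
 *		np = CIRCLEQ_LOOP_NEXT(&head, last, entries);
 *		CIRCLEQ_REMOVE(&head, &a, entries);
 *	}
 */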
#ifdef __cplusplus
}
#endif

#endif	/* !_SYS_QUEUE_H */