add glue for krb5_einval
[heimdal.git] / base / heimqueue.h
blob423a6847879254f9e845c7909d36937749647da3
1 /* $NetBSD: queue.h,v 1.38 2004/04/18 14:12:05 lukem Exp $ */
2 /* $Id$ */
4 /*
5 * Copyright (c) 1991, 1993
6 * The Regents of the University of California. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of the University nor the names of its contributors
17 * may be used to endorse or promote products derived from this software
18 * without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
32 * @(#)queue.h 8.5 (Berkeley) 8/20/94
35 #ifndef _HEIM_QUEUE_H_
36 #define _HEIM_QUEUE_H_
39 * Tail queue definitions.
41 #define HEIM_TAILQ_HEAD(name, type) \
42 struct name { \
43 struct type *tqh_first; /* first element */ \
44 struct type **tqh_last; /* addr of last next element */ \
47 #define HEIM_TAILQ_HEAD_INITIALIZER(head) \
48 { NULL, &(head).tqh_first }
49 #define HEIM_TAILQ_ENTRY(type) \
50 struct { \
51 struct type *tqe_next; /* next element */ \
52 struct type **tqe_prev; /* address of previous next element */ \
56 * Tail queue functions.
58 #if defined(_KERNEL) && defined(QUEUEDEBUG)
59 #define QUEUEDEBUG_HEIM_TAILQ_INSERT_HEAD(head, elm, field) \
60 if ((head)->tqh_first && \
61 (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
62 panic("HEIM_TAILQ_INSERT_HEAD %p %s:%d", (head), __FILE__, __LINE__);
63 #define QUEUEDEBUG_HEIM_TAILQ_INSERT_TAIL(head, elm, field) \
64 if (*(head)->tqh_last != NULL) \
65 panic("HEIM_TAILQ_INSERT_TAIL %p %s:%d", (head), __FILE__, __LINE__);
66 #define QUEUEDEBUG_HEIM_TAILQ_OP(elm, field) \
67 if ((elm)->field.tqe_next && \
68 (elm)->field.tqe_next->field.tqe_prev != \
69 &(elm)->field.tqe_next) \
70 panic("HEIM_TAILQ_* forw %p %s:%d", (elm), __FILE__, __LINE__);\
71 if (*(elm)->field.tqe_prev != (elm)) \
72 panic("HEIM_TAILQ_* back %p %s:%d", (elm), __FILE__, __LINE__);
73 #define QUEUEDEBUG_HEIM_TAILQ_PREREMOVE(head, elm, field) \
74 if ((elm)->field.tqe_next == NULL && \
75 (head)->tqh_last != &(elm)->field.tqe_next) \
76 panic("HEIM_TAILQ_PREREMOVE head %p elm %p %s:%d", \
77 (head), (elm), __FILE__, __LINE__);
78 #define QUEUEDEBUG_HEIM_TAILQ_POSTREMOVE(elm, field) \
79 (elm)->field.tqe_next = (void *)1L; \
80 (elm)->field.tqe_prev = (void *)1L;
81 #else
82 #define QUEUEDEBUG_HEIM_TAILQ_INSERT_HEAD(head, elm, field)
83 #define QUEUEDEBUG_HEIM_TAILQ_INSERT_TAIL(head, elm, field)
84 #define QUEUEDEBUG_HEIM_TAILQ_OP(elm, field)
85 #define QUEUEDEBUG_HEIM_TAILQ_PREREMOVE(head, elm, field)
86 #define QUEUEDEBUG_HEIM_TAILQ_POSTREMOVE(elm, field)
87 #endif
89 #define HEIM_TAILQ_INIT(head) do { \
90 (head)->tqh_first = NULL; \
91 (head)->tqh_last = &(head)->tqh_first; \
92 } while (/*CONSTCOND*/0)
94 #define HEIM_TAILQ_INSERT_HEAD(head, elm, field) do { \
95 QUEUEDEBUG_HEIM_TAILQ_INSERT_HEAD((head), (elm), field) \
96 if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
97 (head)->tqh_first->field.tqe_prev = \
98 &(elm)->field.tqe_next; \
99 else \
100 (head)->tqh_last = &(elm)->field.tqe_next; \
101 (head)->tqh_first = (elm); \
102 (elm)->field.tqe_prev = &(head)->tqh_first; \
103 } while (/*CONSTCOND*/0)
105 #define HEIM_TAILQ_INSERT_TAIL(head, elm, field) do { \
106 QUEUEDEBUG_HEIM_TAILQ_INSERT_TAIL((head), (elm), field) \
107 (elm)->field.tqe_next = NULL; \
108 (elm)->field.tqe_prev = (head)->tqh_last; \
109 *(head)->tqh_last = (elm); \
110 (head)->tqh_last = &(elm)->field.tqe_next; \
111 } while (/*CONSTCOND*/0)
113 #define HEIM_TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
114 QUEUEDEBUG_HEIM_TAILQ_OP((listelm), field) \
115 if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
116 (elm)->field.tqe_next->field.tqe_prev = \
117 &(elm)->field.tqe_next; \
118 else \
119 (head)->tqh_last = &(elm)->field.tqe_next; \
120 (listelm)->field.tqe_next = (elm); \
121 (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
122 } while (/*CONSTCOND*/0)
124 #define HEIM_TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
125 QUEUEDEBUG_HEIM_TAILQ_OP((listelm), field) \
126 (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
127 (elm)->field.tqe_next = (listelm); \
128 *(listelm)->field.tqe_prev = (elm); \
129 (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
130 } while (/*CONSTCOND*/0)
132 #define HEIM_TAILQ_REMOVE(head, elm, field) do { \
133 QUEUEDEBUG_HEIM_TAILQ_PREREMOVE((head), (elm), field) \
134 QUEUEDEBUG_HEIM_TAILQ_OP((elm), field) \
135 if (((elm)->field.tqe_next) != NULL) \
136 (elm)->field.tqe_next->field.tqe_prev = \
137 (elm)->field.tqe_prev; \
138 else \
139 (head)->tqh_last = (elm)->field.tqe_prev; \
140 *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
141 QUEUEDEBUG_HEIM_TAILQ_POSTREMOVE((elm), field); \
142 } while (/*CONSTCOND*/0)
144 #define HEIM_TAILQ_FOREACH(var, head, field) \
145 for ((var) = ((head)->tqh_first); \
146 (var); \
147 (var) = ((var)->field.tqe_next))
149 #define HEIM_TAILQ_FOREACH_REVERSE(var, head, headname, field) \
150 for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
151 (var); \
152 (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
155 * Tail queue access methods.
157 #define HEIM_TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
158 #define HEIM_TAILQ_FIRST(head) ((head)->tqh_first)
159 #define HEIM_TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
161 #define HEIM_TAILQ_LAST(head, headname) \
162 (*(((struct headname *)((head)->tqh_last))->tqh_last))
163 #define HEIM_TAILQ_PREV(elm, headname, field) \
164 (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
167 #endif /* !_HEIM_QUEUE_H_ */