/* $FreeBSD$ */
/* $NetBSD: pfil.c,v 1.20 2001/11/12 23:49:46 lukem Exp $ */

/*-
 * Copyright (c) 1996 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/pfil.h>

static struct mtx pfil_global_lock;

MTX_SYSINIT(pfil_heads_lock, &pfil_global_lock, "pfil_head_list lock",
    MTX_DEF);

static struct packet_filter_hook *pfil_chain_get(int, struct pfil_head *);
static int pfil_chain_add(pfil_chain_t *, struct packet_filter_hook *, int);
static int pfil_chain_remove(pfil_chain_t *, pfil_func_t, void *);

LIST_HEAD(pfilheadhead, pfil_head);
VNET_DEFINE(struct pfilheadhead, pfil_head_list);
#define V_pfil_head_list	VNET(pfil_head_list)
VNET_DEFINE(struct rmlock, pfil_lock);
#define V_pfil_lock		VNET(pfil_lock)

/*
 * pfil_run_hooks() runs the specified packet filter hook chain.
 */
int
pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
    int dir, struct inpcb *inp)
{
        struct rm_priotracker rmpt;
        struct packet_filter_hook *pfh;
        struct mbuf *m = *mp;
        int rv = 0;

        PFIL_RLOCK(ph, &rmpt);
        KASSERT(ph->ph_nhooks >= 0, ("Pfil hook count dropped < 0"));
        for (pfh = pfil_chain_get(dir, ph); pfh != NULL;
             pfh = TAILQ_NEXT(pfh, pfil_chain)) {
                if (pfh->pfil_func != NULL) {
                        rv = (*pfh->pfil_func)(pfh->pfil_arg, &m, ifp, dir,
                            inp);
                        if (rv != 0 || m == NULL)
                                break;
                }
        }
        PFIL_RUNLOCK(ph, &rmpt);
        *mp = m;
        return (rv);
}
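
/*
 * Illustrative sketch, not part of the original file: how a protocol
 * input path might drive pfil_run_hooks() above.  The function name
 * "example_input_filter" and the EACCES return value are assumptions for
 * illustration; ip_input() applies essentially this pattern to the
 * AF_INET head.  Kept under "#if 0" so it is not compiled.
 */
#if 0
static int
example_input_filter(struct pfil_head *ph, struct mbuf **mp,
    struct ifnet *ifp)
{

        /* Skip the chain walk entirely when no hooks are attached. */
        if (ph->ph_nhooks == 0)
                return (0);

        /*
         * A hook may modify, replace or consume the mbuf; a non-zero
         * return value or a NULL mbuf means the packet must not be
         * processed any further.
         */
        if (pfil_run_hooks(ph, mp, ifp, PFIL_IN, NULL) != 0 || *mp == NULL)
                return (EACCES);

        return (0);
}
#endif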

static struct packet_filter_hook *
pfil_chain_get(int dir, struct pfil_head *ph)
{

        if (dir == PFIL_IN)
                return (TAILQ_FIRST(&ph->ph_in));
        else if (dir == PFIL_OUT)
                return (TAILQ_FIRST(&ph->ph_out));
        else
                return (NULL);
}

/*
 * pfil_try_rlock() acquires rm reader lock for specified head
 * if this is immediately possible.
 */
int
pfil_try_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

        return (PFIL_TRY_RLOCK(ph, tracker));
}

/*
 * pfil_rlock() acquires rm reader lock for specified head.
 */
void
pfil_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

        PFIL_RLOCK(ph, tracker);
}

/*
 * pfil_runlock() releases reader lock for specified head.
 */
void
pfil_runlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

        PFIL_RUNLOCK(ph, tracker);
}

/*
 * pfil_wlock() acquires writer lock for specified head.
 */
void
pfil_wlock(struct pfil_head *ph)
{

        PFIL_WLOCK(ph);
}

/*
 * pfil_wunlock() releases writer lock for specified head.
 */
void
pfil_wunlock(struct pfil_head *ph)
{

        PFIL_WUNLOCK(ph);
}

/*
 * pfil_wowned() returns a non-zero value if the current thread owns
 * an exclusive lock.
 */
int
pfil_wowned(struct pfil_head *ph)
{

        return (PFIL_WOWNED(ph));
}
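
/*
 * Illustrative sketch, not part of the original file: a consumer that
 * needs a stable view of a head across several operations can hold the
 * reader lock itself through the wrappers above; pfil_run_hooks() does
 * the same internally for a single chain walk.  Kept under "#if 0" so it
 * is not compiled.
 */
#if 0
static void
example_inspect_head(struct pfil_head *ph)
{
        struct rm_priotracker tracker;

        pfil_rlock(ph, &tracker);
        /* The hook chains cannot change while the reader lock is held. */
        printf("pfil head %p has %d hook(s)\n", ph, ph->ph_nhooks);
        pfil_runlock(ph, &tracker);
}
#endif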

/*
 * pfil_head_register() registers a pfil_head with the packet filter hook
 * mechanism.
 */
int
pfil_head_register(struct pfil_head *ph)
{
        struct pfil_head *lph;

        PFIL_HEADLIST_LOCK();
        LIST_FOREACH(lph, &V_pfil_head_list, ph_list) {
                if (ph->ph_type == lph->ph_type &&
                    ph->ph_un.phu_val == lph->ph_un.phu_val) {
                        PFIL_HEADLIST_UNLOCK();
                        return (EEXIST);
                }
        }
        PFIL_LOCK_INIT(ph);
        ph->ph_nhooks = 0;
        TAILQ_INIT(&ph->ph_in);
        TAILQ_INIT(&ph->ph_out);
        LIST_INSERT_HEAD(&V_pfil_head_list, ph, ph_list);
        PFIL_HEADLIST_UNLOCK();
        return (0);
}
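
/*
 * Illustrative sketch, not part of the original file: how a network
 * protocol might declare and register its own head.  The names
 * "example_pfil_head" and "example_pfil_attach" are made up; ip_input.c
 * registers the AF_INET head in essentially this way.  Kept under
 * "#if 0" so it is not compiled.
 */
#if 0
static struct pfil_head example_pfil_head;

static void
example_pfil_attach(void)
{
        int error;

        example_pfil_head.ph_type = PFIL_TYPE_AF;
        example_pfil_head.ph_un.phu_val = AF_INET;
        error = pfil_head_register(&example_pfil_head);
        if (error != 0)
                printf("%s: unable to register pfil head, error %d\n",
                    __func__, error);
}
#endif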

/*
 * pfil_head_unregister() removes a pfil_head from the packet filter hook
 * mechanism.  The producer of the hook promises that all outstanding
 * invocations of the hook have completed before it unregisters the hook.
 */
int
pfil_head_unregister(struct pfil_head *ph)
{
        struct packet_filter_hook *pfh, *pfnext;

        PFIL_HEADLIST_LOCK();
        LIST_REMOVE(ph, ph_list);
        PFIL_HEADLIST_UNLOCK();
        TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_chain, pfnext)
                free(pfh, M_IFADDR);
        TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_chain, pfnext)
                free(pfh, M_IFADDR);
        PFIL_LOCK_DESTROY(ph);
        return (0);
}
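
/*
 * Illustrative counterpart to the registration sketch above, again an
 * assumption rather than original code: the protocol tears its head down
 * once it can guarantee that no hook invocations are still in flight.
 */
#if 0
static void
example_pfil_detach(void)
{

        (void)pfil_head_unregister(&example_pfil_head);
}
#endif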

/*
 * pfil_head_get() returns the pfil_head for a given key/dlt.
 */
struct pfil_head *
pfil_head_get(int type, u_long val)
{
        struct pfil_head *ph;

        PFIL_HEADLIST_LOCK();
        LIST_FOREACH(ph, &V_pfil_head_list, ph_list)
                if (ph->ph_type == type && ph->ph_un.phu_val == val)
                        break;
        PFIL_HEADLIST_UNLOCK();
        return (ph);
}

/*
 * pfil_add_hook() adds a function to the packet filter hook.  the
 * flags are:
 *	PFIL_IN		call me on incoming packets
 *	PFIL_OUT	call me on outgoing packets
 *	PFIL_ALL	call me on all of the above
 *	PFIL_WAITOK	OK to call malloc with M_WAITOK.
 */
int
pfil_add_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
        struct packet_filter_hook *pfh1 = NULL;
        struct packet_filter_hook *pfh2 = NULL;
        int err;

        if (flags & PFIL_IN) {
                pfh1 = (struct packet_filter_hook *)malloc(sizeof(*pfh1),
                    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
                if (pfh1 == NULL) {
                        err = ENOMEM;
                        goto error;
                }
        }
        if (flags & PFIL_OUT) {
                pfh2 = (struct packet_filter_hook *)malloc(sizeof(*pfh2),
                    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
                if (pfh2 == NULL) {
                        err = ENOMEM;
                        goto error;
                }
        }
        PFIL_WLOCK(ph);
        if (flags & PFIL_IN) {
                pfh1->pfil_func = func;
                pfh1->pfil_arg = arg;
                err = pfil_chain_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT);
                if (err)
                        goto locked_error;
                ph->ph_nhooks++;
        }
        if (flags & PFIL_OUT) {
                pfh2->pfil_func = func;
                pfh2->pfil_arg = arg;
                err = pfil_chain_add(&ph->ph_out, pfh2, flags & ~PFIL_IN);
                if (err) {
                        if (flags & PFIL_IN) {
                                /*
                                 * pfil_chain_remove() frees the entry it
                                 * removes, so forget pfh1 here to avoid a
                                 * double free in the error path below, and
                                 * undo the hook count bump.
                                 */
                                pfil_chain_remove(&ph->ph_in, func, arg);
                                ph->ph_nhooks--;
                                pfh1 = NULL;
                        }
                        goto locked_error;
                }
                ph->ph_nhooks++;
        }
        PFIL_WUNLOCK(ph);
        return (0);
locked_error:
        PFIL_WUNLOCK(ph);
error:
        if (pfh1 != NULL)
                free(pfh1, M_IFADDR);
        if (pfh2 != NULL)
                free(pfh2, M_IFADDR);
        return (err);
}
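
/*
 * Illustrative sketch, not part of the original file: a minimal packet
 * filter attaching itself to the IPv4 head.  "example_hook" and
 * "example_hook_attach" are made-up names; pf and ipfw hook themselves in
 * essentially this way.  A hook matches pfil_func_t: return 0 to let the
 * packet continue, non-zero to block it, and set *mp to NULL if the mbuf
 * was consumed.  Kept under "#if 0" so it is not compiled.
 */
#if 0
static int
example_hook(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir,
    struct inpcb *inp)
{

        /* Pass every packet through unmodified. */
        return (0);
}

static int
example_hook_attach(void)
{
        struct pfil_head *ph;

        ph = pfil_head_get(PFIL_TYPE_AF, AF_INET);
        if (ph == NULL)
                return (ENOENT);
        return (pfil_add_hook(example_hook, NULL,
            PFIL_IN | PFIL_OUT | PFIL_WAITOK, ph));
}
#endif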

/*
 * pfil_remove_hook removes a specific function from the packet filter hook
 * chain.
 */
int
pfil_remove_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
        int err = 0;

        PFIL_WLOCK(ph);
        if (flags & PFIL_IN) {
                err = pfil_chain_remove(&ph->ph_in, func, arg);
                if (err == 0)
                        ph->ph_nhooks--;
        }
        if ((err == 0) && (flags & PFIL_OUT)) {
                err = pfil_chain_remove(&ph->ph_out, func, arg);
                if (err == 0)
                        ph->ph_nhooks--;
        }
        PFIL_WUNLOCK(ph);
        return (err);
}
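
/*
 * Illustrative counterpart to the hook attachment sketch above
 * (assumption, not original code): detaching the example hook from both
 * chains.
 */
#if 0
static void
example_hook_detach(struct pfil_head *ph)
{

        (void)pfil_remove_hook(example_hook, NULL, PFIL_IN | PFIL_OUT, ph);
}
#endif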

/*
 * Internal: Add a new pfil hook into a hook chain.
 */
static int
pfil_chain_add(pfil_chain_t *chain, struct packet_filter_hook *pfh1, int flags)
{
        struct packet_filter_hook *pfh;

        /*
         * First make sure the hook is not already there.
         */
        TAILQ_FOREACH(pfh, chain, pfil_chain)
                if (pfh->pfil_func == pfh1->pfil_func &&
                    pfh->pfil_arg == pfh1->pfil_arg)
                        return (EEXIST);

        /*
         * Insert the input list in reverse order of the output list so that
         * the same path is followed in or out of the kernel.
         */
        if (flags & PFIL_IN)
                TAILQ_INSERT_HEAD(chain, pfh1, pfil_chain);
        else
                TAILQ_INSERT_TAIL(chain, pfh1, pfil_chain);
        return (0);
}

/*
 * Internal: Remove a pfil hook from a hook chain.
 */
static int
pfil_chain_remove(pfil_chain_t *chain, pfil_func_t func, void *arg)
{
        struct packet_filter_hook *pfh;

        TAILQ_FOREACH(pfh, chain, pfil_chain)
                if (pfh->pfil_func == func && pfh->pfil_arg == arg) {
                        TAILQ_REMOVE(chain, pfh, pfil_chain);
                        free(pfh, M_IFADDR);
                        return (0);
                }
        return (ENOENT);
}

/*
 * Stuff that must be initialized for every instance (including the first of
 * course).
 */
static int
vnet_pfil_init(const void *unused)
{

        LIST_INIT(&V_pfil_head_list);
        PFIL_LOCK_INIT_REAL(&V_pfil_lock, "shared");
        return (0);
}

/*
 * Called for the removal of each instance.
 */
static int
vnet_pfil_uninit(const void *unused)
{

        KASSERT(LIST_EMPTY(&V_pfil_head_list),
            ("%s: pfil_head_list %p not empty", __func__, &V_pfil_head_list));
        PFIL_LOCK_DESTROY_REAL(&V_pfil_lock);
        return (0);
}

/* Define startup order. */
#define PFIL_SYSINIT_ORDER	SI_SUB_PROTO_BEGIN
#define PFIL_MODEVENT_ORDER	(SI_ORDER_FIRST) /* On boot slot in here. */
#define PFIL_VNET_ORDER		(PFIL_MODEVENT_ORDER + 2) /* Later still. */

/*
 * Starting up.
 *
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 */
VNET_SYSINIT(vnet_pfil_init, PFIL_SYSINIT_ORDER, PFIL_VNET_ORDER,
    vnet_pfil_init, NULL);

/*
 * Closing up shop.  These are done in REVERSE ORDER.  Not called on reboot.
 *
 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pfil_uninit, PFIL_SYSINIT_ORDER, PFIL_VNET_ORDER,
    vnet_pfil_uninit, NULL);