Remove advertising clause from all that isn't contrib or userland bin.
[dragonfly.git] / sys / net / bpf.c
blob ce504f0ce7621faf9676c19fa4107b9c0078f608
1 /*
2 * Copyright (c) 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
36 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
39 #include "use_bpf.h"
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/conf.h>
44 #include <sys/device.h>
45 #include <sys/malloc.h>
46 #include <sys/mbuf.h>
47 #include <sys/time.h>
48 #include <sys/proc.h>
49 #include <sys/signalvar.h>
50 #include <sys/filio.h>
51 #include <sys/sockio.h>
52 #include <sys/ttycom.h>
53 #include <sys/filedesc.h>
55 #include <sys/event.h>
57 #include <sys/socket.h>
58 #include <sys/vnode.h>
60 #include <sys/thread2.h>
62 #include <net/if.h>
63 #include <net/bpf.h>
64 #include <net/bpfdesc.h>
65 #include <net/netmsg2.h>
66 #include <net/netisr2.h>
68 #include <netinet/in.h>
69 #include <netinet/if_ether.h>
70 #include <sys/kernel.h>
71 #include <sys/sysctl.h>
73 #include <sys/devfs.h>
75 struct netmsg_bpf_output {
76 struct netmsg_base base;
77 struct mbuf *nm_mbuf;
78 struct ifnet *nm_ifp;
79 struct sockaddr *nm_dst;
82 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
83 DEVFS_DECLARE_CLONE_BITMAP(bpf);
85 #if NBPF <= 1
86 #define BPF_PREALLOCATED_UNITS 4
87 #else
88 #define BPF_PREALLOCATED_UNITS NBPF
89 #endif
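/*
 * bpf_drvinit() below creates BPF_PREALLOCATED_UNITS device nodes at
 * boot and reserves their unit numbers in the devfs clone bitmap;
 * additional units are created on demand by bpfclone() through the
 * devfs autoclone handler.
 */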
91 #if NBPF > 0
94 * The default read buffer size is patchable.
96 static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
97 SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
98 &bpf_bufsize, 0, "Current size of bpf buffer");
99 int bpf_maxbufsize = BPF_MAXBUFSIZE;
100 SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
101 &bpf_maxbufsize, 0, "Maximum size of bpf buffer");
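/*
 * bpf_bufsize is the capture buffer size handed to newly opened
 * descriptors; bpf_maxbufsize is the upper bound enforced when a
 * descriptor resizes its buffer with BIOCSBLEN (requests are clamped
 * to the [BPF_MINBUFSIZE, bpf_maxbufsize] range).  Both are runtime
 * tunable through the debug.* sysctl tree.
 */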
104 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
106 static struct bpf_if *bpf_iflist;
108 static struct lwkt_token bpf_token = LWKT_TOKEN_INITIALIZER(bpf_token);
110 static int bpf_allocbufs(struct bpf_d *);
111 static void bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
112 static void bpf_detachd(struct bpf_d *d);
113 static void bpf_resetd(struct bpf_d *);
114 static void bpf_freed(struct bpf_d *);
115 static void bpf_mcopy(const void *, void *, size_t);
116 static int bpf_movein(struct uio *, int, struct mbuf **,
117 struct sockaddr *, int *, struct bpf_insn *);
118 static int bpf_setif(struct bpf_d *, struct ifreq *);
119 static void bpf_timed_out(void *);
120 static void bpf_wakeup(struct bpf_d *);
121 static void catchpacket(struct bpf_d *, u_char *, u_int, u_int,
122 void (*)(const void *, void *, size_t),
123 const struct timeval *);
124 static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
125 static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
126 static int bpf_setdlt(struct bpf_d *, u_int);
127 static void bpf_drvinit(void *unused);
128 static void bpf_filter_detach(struct knote *kn);
129 static int bpf_filter_read(struct knote *kn, long hint);
131 static d_open_t bpfopen;
132 static d_clone_t bpfclone;
133 static d_close_t bpfclose;
134 static d_read_t bpfread;
135 static d_write_t bpfwrite;
136 static d_ioctl_t bpfioctl;
137 static d_kqfilter_t bpfkqfilter;
139 #define CDEV_MAJOR 23
140 static struct dev_ops bpf_ops = {
141 { "bpf", 0, D_MPSAFE },
142 .d_open = bpfopen,
143 .d_close = bpfclose,
144 .d_read = bpfread,
145 .d_write = bpfwrite,
146 .d_ioctl = bpfioctl,
147 .d_kqfilter = bpfkqfilter
151 static int
152 bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
153 struct sockaddr *sockp, int *datlen, struct bpf_insn *wfilter)
155 struct mbuf *m;
156 int error;
157 int len;
158 int hlen;
159 int slen;
161 *datlen = 0;
162 *mp = NULL;
165 * Build a sockaddr based on the data link layer type.
166 * We do this at this level because the ethernet header
167 * is copied directly into the data field of the sockaddr.
168 * In the case of SLIP, there is no header and the packet
169 * is forwarded as is.
170 * Also, we are careful to leave room at the front of the mbuf
171 * for the link level header.
173 switch (linktype) {
174 case DLT_SLIP:
175 sockp->sa_family = AF_INET;
176 hlen = 0;
177 break;
179 case DLT_EN10MB:
180 sockp->sa_family = AF_UNSPEC;
181 /* XXX Would MAXLINKHDR be better? */
182 hlen = sizeof(struct ether_header);
183 break;
185 case DLT_RAW:
186 case DLT_NULL:
187 sockp->sa_family = AF_UNSPEC;
188 hlen = 0;
189 break;
191 case DLT_ATM_RFC1483:
193 * The en(4) ATM driver requires a 4-byte ATM pseudo header.
194 * Though it isn't standard, the vpi:vci needs to be
195 * specified anyway.
197 sockp->sa_family = AF_UNSPEC;
198 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
199 break;
201 case DLT_PPP:
202 sockp->sa_family = AF_UNSPEC;
203 hlen = 4; /* This should match PPP_HDRLEN */
204 break;
206 default:
207 return(EIO);
210 len = uio->uio_resid;
211 *datlen = len - hlen;
212 if ((unsigned)len > MCLBYTES)
213 return(EIO);
215 m = m_getl(len, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
216 if (m == NULL)
217 return(ENOBUFS);
218 m->m_pkthdr.len = m->m_len = len;
219 m->m_pkthdr.rcvif = NULL;
220 *mp = m;
222 if (m->m_len < hlen) {
223 error = EPERM;
224 goto bad;
227 error = uiomove(mtod(m, u_char *), len, uio);
228 if (error)
229 goto bad;
231 slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
232 if (slen == 0) {
233 error = EPERM;
234 goto bad;
238 * Make room for link header, and copy it to sockaddr.
240 if (hlen != 0) {
241 bcopy(m->m_data, sockp->sa_data, hlen);
242 m->m_pkthdr.len -= hlen;
243 m->m_len -= hlen;
244 m->m_data += hlen; /* XXX */
246 return (0);
247 bad:
248 m_freem(m);
249 return(error);
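/*
 * On success bpf_movein() hands back a single mbuf holding the packet
 * with the link header already stripped: the header bytes are copied
 * into sockp->sa_data, *datlen is set to the remaining payload length,
 * and the mbuf data pointer is advanced past the header.  A configured
 * write filter (BIOCSETWF) that rejects the packet results in EPERM.
 */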
253 * Attach file to the bpf interface, i.e. make d listen on bp.
254 * Serialized by bpf_token.
256 static void
257 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
260 * Point d at bp, and add d to the interface's list of listeners.
261 * Finally, point the driver's bpf cookie at the interface so
262 * it will divert packets to bpf.
264 lwkt_gettoken(&bpf_token);
265 d->bd_bif = bp;
266 SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
267 *bp->bif_driverp = bp;
269 EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
270 lwkt_reltoken(&bpf_token);
274 * Detach a file from its interface.
276 static void
277 bpf_detachd(struct bpf_d *d)
279 int error;
280 struct bpf_if *bp;
281 struct ifnet *ifp;
283 lwkt_gettoken(&bpf_token);
284 bp = d->bd_bif;
285 ifp = bp->bif_ifp;
287 /* Remove d from the interface's descriptor list. */
288 SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);
290 if (SLIST_EMPTY(&bp->bif_dlist)) {
292 * Let the driver know that there are no more listeners.
294 *bp->bif_driverp = NULL;
296 d->bd_bif = NULL;
298 EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
301 * Check if this descriptor had requested promiscuous mode.
302 * If so, turn it off.
304 if (d->bd_promisc) {
305 d->bd_promisc = 0;
306 error = ifpromisc(ifp, 0);
307 if (error != 0 && error != ENXIO) {
309 * ENXIO can happen if a pccard is unplugged.
310 * Otherwise something is really wrong if we were
311 * able to put the driver into promiscuous mode
312 * but can't take it out.
314 if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n",
315 error);
318 lwkt_reltoken(&bpf_token);
322 * Open the bpf device. Returns EPERM when called from within a jail,
323 * EBUSY if the requested minor is already open by another process.
325 /* ARGSUSED */
326 static int
327 bpfopen(struct dev_open_args *ap)
329 cdev_t dev = ap->a_head.a_dev;
330 struct bpf_d *d;
332 lwkt_gettoken(&bpf_token);
333 if (ap->a_cred->cr_prison) {
334 lwkt_reltoken(&bpf_token);
335 return(EPERM);
338 d = dev->si_drv1;
340 * Each minor can be opened by only one process. If the requested
341 * minor is in use, return EBUSY.
343 if (d != NULL) {
344 lwkt_reltoken(&bpf_token);
345 return(EBUSY);
348 d = kmalloc(sizeof *d, M_BPF, M_WAITOK | M_ZERO);
349 dev->si_drv1 = d;
350 d->bd_bufsize = bpf_bufsize;
351 d->bd_sig = SIGIO;
352 d->bd_seesent = 1;
353 callout_init(&d->bd_callout);
354 lwkt_reltoken(&bpf_token);
356 return(0);
359 static int
360 bpfclone(struct dev_clone_args *ap)
362 int unit;
364 unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(bpf), 0);
365 ap->a_dev = make_only_dev(&bpf_ops, unit, 0, 0, 0600, "bpf%d", unit);
367 return 0;
371 * Close the descriptor by detaching it from its interface,
372 * deallocating its buffers, and marking it free.
374 /* ARGSUSED */
375 static int
376 bpfclose(struct dev_close_args *ap)
378 cdev_t dev = ap->a_head.a_dev;
379 struct bpf_d *d = dev->si_drv1;
381 lwkt_gettoken(&bpf_token);
382 funsetown(&d->bd_sigio);
383 if (d->bd_state == BPF_WAITING)
384 callout_stop(&d->bd_callout);
385 d->bd_state = BPF_IDLE;
386 if (d->bd_bif != NULL)
387 bpf_detachd(d);
388 bpf_freed(d);
389 dev->si_drv1 = NULL;
390 if (dev->si_uminor >= BPF_PREALLOCATED_UNITS) {
391 devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(bpf), dev->si_uminor);
392 destroy_dev(dev);
394 kfree(d, M_BPF);
395 lwkt_reltoken(&bpf_token);
397 return(0);
401 * Rotate the packet buffers in descriptor d. Move the store buffer
402 * into the hold slot, and the free buffer into the store slot.
403 * Zero the length of the new store buffer.
405 #define ROTATE_BUFFERS(d) \
406 (d)->bd_hbuf = (d)->bd_sbuf; \
407 (d)->bd_hlen = (d)->bd_slen; \
408 (d)->bd_sbuf = (d)->bd_fbuf; \
409 (d)->bd_slen = 0; \
410 (d)->bd_fbuf = NULL;
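/*
 * Each descriptor cycles three equally sized buffers: the store buffer
 * (bd_sbuf) that catchpacket() appends to, the hold buffer (bd_hbuf)
 * that bpfread() drains, and a free buffer (bd_fbuf) kept in reserve.
 * ROTATE_BUFFERS() is only used while bpf_token is held, so no extra
 * locking is done here.
 */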
412 * bpfread - read next chunk of packets from buffers
414 static int
415 bpfread(struct dev_read_args *ap)
417 cdev_t dev = ap->a_head.a_dev;
418 struct bpf_d *d = dev->si_drv1;
419 int timed_out;
420 int error;
422 lwkt_gettoken(&bpf_token);
424 * Restrict application to use a buffer the same size as
425 * the kernel buffers.
427 if (ap->a_uio->uio_resid != d->bd_bufsize) {
428 lwkt_reltoken(&bpf_token);
429 return(EINVAL);
432 if (d->bd_state == BPF_WAITING)
433 callout_stop(&d->bd_callout);
434 timed_out = (d->bd_state == BPF_TIMED_OUT);
435 d->bd_state = BPF_IDLE;
437 * If the hold buffer is empty, then do a timed sleep, which
438 * ends when the timeout expires or when enough packets
439 * have arrived to fill the store buffer.
441 while (d->bd_hbuf == NULL) {
442 if ((d->bd_immediate || (ap->a_ioflag & IO_NDELAY) || timed_out)
443 && d->bd_slen != 0) {
445 * We're in immediate mode, we are reading in
446 * non-blocking mode, or the read has timed
447 * out, and a packet(s) either arrived since
448 * the previous read or arrived while we were
449 * asleep.
450 * Rotate the buffers and return what's here.
452 ROTATE_BUFFERS(d);
453 break;
457 * No data is available, check to see if the bpf device
458 * is still pointed at a real interface. If not, return
459 * ENXIO so that the userland process knows to rebind
460 * it before using it again.
462 if (d->bd_bif == NULL) {
463 lwkt_reltoken(&bpf_token);
464 return(ENXIO);
467 if (ap->a_ioflag & IO_NDELAY) {
468 lwkt_reltoken(&bpf_token);
469 return(EWOULDBLOCK);
471 error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
472 if (error == EINTR || error == ERESTART) {
473 lwkt_reltoken(&bpf_token);
474 return(error);
476 if (error == EWOULDBLOCK) {
478 * On a timeout, return what's in the buffer,
479 * which may be nothing. If there is something
480 * in the store buffer, we can rotate the buffers.
482 if (d->bd_hbuf)
484 * We filled up the buffer in between
485 * getting the timeout and arriving
486 * here, so we don't need to rotate.
488 break;
490 if (d->bd_slen == 0) {
491 lwkt_reltoken(&bpf_token);
492 return(0);
494 ROTATE_BUFFERS(d);
495 break;
499 * At this point, we know we have something in the hold slot.
503 * Move data from hold buffer into user space.
504 * We know the entire buffer is transferred since
505 * we checked above that the read buffer is bpf_bufsize bytes.
507 error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio);
509 d->bd_fbuf = d->bd_hbuf;
510 d->bd_hbuf = NULL;
511 d->bd_hlen = 0;
512 lwkt_reltoken(&bpf_token);
514 return(error);
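/*
 * Note that bpfread() always hands the caller a complete hold buffer;
 * this is why the read size is required to match bd_bufsize above.
 * After the copyout the hold buffer immediately becomes the new free
 * buffer.
 */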
519 * If there are processes sleeping on this descriptor, wake them up.
521 static void
522 bpf_wakeup(struct bpf_d *d)
524 if (d->bd_state == BPF_WAITING) {
525 callout_stop(&d->bd_callout);
526 d->bd_state = BPF_IDLE;
528 wakeup(d);
529 if (d->bd_async && d->bd_sig && d->bd_sigio)
530 pgsigio(d->bd_sigio, d->bd_sig, 0);
532 KNOTE(&d->bd_kq.ki_note, 0);
535 static void
536 bpf_timed_out(void *arg)
538 struct bpf_d *d = (struct bpf_d *)arg;
540 if (d->bd_state == BPF_WAITING) {
541 d->bd_state = BPF_TIMED_OUT;
542 if (d->bd_slen != 0)
543 bpf_wakeup(d);
547 static void
548 bpf_output_dispatch(netmsg_t msg)
550 struct netmsg_bpf_output *bmsg = (struct netmsg_bpf_output *)msg;
551 struct ifnet *ifp = bmsg->nm_ifp;
552 int error;
555 * The driver frees the mbuf.
557 error = ifp->if_output(ifp, bmsg->nm_mbuf, bmsg->nm_dst, NULL);
558 lwkt_replymsg(&msg->lmsg, error);
561 static int
562 bpfwrite(struct dev_write_args *ap)
564 cdev_t dev = ap->a_head.a_dev;
565 struct bpf_d *d = dev->si_drv1;
566 struct ifnet *ifp;
567 struct mbuf *m;
568 int error, ret;
569 struct sockaddr dst;
570 int datlen;
571 struct netmsg_bpf_output bmsg;
573 lwkt_gettoken(&bpf_token);
574 if (d->bd_bif == NULL) {
575 lwkt_reltoken(&bpf_token);
576 return(ENXIO);
579 ifp = d->bd_bif->bif_ifp;
581 if (ap->a_uio->uio_resid == 0) {
582 lwkt_reltoken(&bpf_token);
583 return(0);
586 error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m,
587 &dst, &datlen, d->bd_wfilter);
588 if (error) {
589 lwkt_reltoken(&bpf_token);
590 return(error);
593 if (datlen > ifp->if_mtu) {
594 m_freem(m);
595 lwkt_reltoken(&bpf_token);
596 return(EMSGSIZE);
599 if (d->bd_hdrcmplt)
600 dst.sa_family = pseudo_AF_HDRCMPLT;
602 netmsg_init(&bmsg.base, NULL, &curthread->td_msgport,
603 0, bpf_output_dispatch);
604 bmsg.nm_mbuf = m;
605 bmsg.nm_ifp = ifp;
606 bmsg.nm_dst = &dst;
608 ret = lwkt_domsg(netisr_cpuport(0), &bmsg.base.lmsg, 0);
609 lwkt_reltoken(&bpf_token);
611 return ret;
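/*
 * bpfwrite() forwards the constructed frame to the interface from the
 * netisr0 thread: the request is wrapped in a netmsg_bpf_output and
 * dispatched synchronously with lwkt_domsg(), and bpf_output_dispatch()
 * invokes ifp->if_output(), which consumes the mbuf.
 */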
615 * Reset a descriptor by flushing its packet buffer and clearing the
616 * receive and drop counts. The caller must hold bpf_token.
618 static void
619 bpf_resetd(struct bpf_d *d)
621 if (d->bd_hbuf) {
622 /* Free the hold buffer. */
623 d->bd_fbuf = d->bd_hbuf;
624 d->bd_hbuf = NULL;
626 d->bd_slen = 0;
627 d->bd_hlen = 0;
628 d->bd_rcount = 0;
629 d->bd_dcount = 0;
633 * FIONREAD Check for read packet available.
634 * SIOCGIFADDR Get interface address - convenient hook to driver.
635 * BIOCGBLEN Get buffer len [for read()].
636 * BIOCSETF Set ethernet read filter.
637 * BIOCSETWF Set ethernet write filter.
638 * BIOCFLUSH Flush read packet buffer.
639 * BIOCPROMISC Put interface into promiscuous mode.
640 * BIOCGDLT Get link layer type.
641 * BIOCGETIF Get interface name.
642 * BIOCSETIF Set interface.
643 * BIOCSRTIMEOUT Set read timeout.
644 * BIOCGRTIMEOUT Get read timeout.
645 * BIOCGSTATS Get packet stats.
646 * BIOCIMMEDIATE Set immediate mode.
647 * BIOCVERSION Get filter language version.
648 * BIOCGHDRCMPLT Get "header already complete" flag
649 * BIOCSHDRCMPLT Set "header already complete" flag
650 * BIOCGSEESENT Get "see packets sent" flag
651 * BIOCSSEESENT Set "see packets sent" flag
652 * BIOCLOCK Set "locked" flag
654 /* ARGSUSED */
655 static int
656 bpfioctl(struct dev_ioctl_args *ap)
658 cdev_t dev = ap->a_head.a_dev;
659 struct bpf_d *d = dev->si_drv1;
660 int error = 0;
662 lwkt_gettoken(&bpf_token);
663 if (d->bd_state == BPF_WAITING)
664 callout_stop(&d->bd_callout);
665 d->bd_state = BPF_IDLE;
667 if (d->bd_locked == 1) {
668 switch (ap->a_cmd) {
669 case BIOCGBLEN:
670 case BIOCFLUSH:
671 case BIOCGDLT:
672 case BIOCGDLTLIST:
673 case BIOCGETIF:
674 case BIOCGRTIMEOUT:
675 case BIOCGSTATS:
676 case BIOCVERSION:
677 case BIOCGRSIG:
678 case BIOCGHDRCMPLT:
679 case FIONREAD:
680 case BIOCLOCK:
681 case BIOCSRTIMEOUT:
682 case BIOCIMMEDIATE:
683 case TIOCGPGRP:
684 break;
685 default:
686 lwkt_reltoken(&bpf_token);
687 return (EPERM);
690 switch (ap->a_cmd) {
691 default:
692 error = EINVAL;
693 break;
696 * Check for read packet available.
698 case FIONREAD:
700 int n;
702 n = d->bd_slen;
703 if (d->bd_hbuf)
704 n += d->bd_hlen;
706 *(int *)ap->a_data = n;
707 break;
710 case SIOCGIFADDR:
712 struct ifnet *ifp;
714 if (d->bd_bif == NULL) {
715 error = EINVAL;
716 } else {
717 ifp = d->bd_bif->bif_ifp;
718 ifnet_serialize_all(ifp);
719 error = ifp->if_ioctl(ifp, ap->a_cmd,
720 ap->a_data, ap->a_cred);
721 ifnet_deserialize_all(ifp);
723 break;
727 * Get buffer len [for read()].
729 case BIOCGBLEN:
730 *(u_int *)ap->a_data = d->bd_bufsize;
731 break;
734 * Set buffer length.
736 case BIOCSBLEN:
737 if (d->bd_bif != NULL) {
738 error = EINVAL;
739 } else {
740 u_int size = *(u_int *)ap->a_data;
742 if (size > bpf_maxbufsize)
743 *(u_int *)ap->a_data = size = bpf_maxbufsize;
744 else if (size < BPF_MINBUFSIZE)
745 *(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
746 d->bd_bufsize = size;
748 break;
751 * Set link layer read filter.
753 case BIOCSETF:
754 case BIOCSETWF:
755 error = bpf_setf(d, (struct bpf_program *)ap->a_data,
756 ap->a_cmd);
757 break;
760 * Flush read packet buffer.
762 case BIOCFLUSH:
763 bpf_resetd(d);
764 break;
767 * Put interface into promiscuous mode.
769 case BIOCPROMISC:
770 if (d->bd_bif == NULL) {
772 * No interface attached yet.
774 error = EINVAL;
775 break;
777 if (d->bd_promisc == 0) {
778 error = ifpromisc(d->bd_bif->bif_ifp, 1);
779 if (error == 0)
780 d->bd_promisc = 1;
782 break;
785 * Get device parameters.
787 case BIOCGDLT:
788 if (d->bd_bif == NULL)
789 error = EINVAL;
790 else
791 *(u_int *)ap->a_data = d->bd_bif->bif_dlt;
792 break;
795 * Get a list of supported data link types.
797 case BIOCGDLTLIST:
798 if (d->bd_bif == NULL) {
799 error = EINVAL;
800 } else {
801 error = bpf_getdltlist(d,
802 (struct bpf_dltlist *)ap->a_data);
804 break;
807 * Set data link type.
809 case BIOCSDLT:
810 if (d->bd_bif == NULL)
811 error = EINVAL;
812 else
813 error = bpf_setdlt(d, *(u_int *)ap->a_data);
814 break;
817 * Get interface name.
819 case BIOCGETIF:
820 if (d->bd_bif == NULL) {
821 error = EINVAL;
822 } else {
823 struct ifnet *const ifp = d->bd_bif->bif_ifp;
824 struct ifreq *const ifr = (struct ifreq *)ap->a_data;
826 strlcpy(ifr->ifr_name, ifp->if_xname,
827 sizeof ifr->ifr_name);
829 break;
832 * Set interface.
834 case BIOCSETIF:
835 error = bpf_setif(d, (struct ifreq *)ap->a_data);
836 break;
839 * Set read timeout.
841 case BIOCSRTIMEOUT:
843 struct timeval *tv = (struct timeval *)ap->a_data;
846 * Subtract 1 tick from tvtohz() since this isn't
847 * a one-shot timer.
849 if ((error = itimerfix(tv)) == 0)
850 d->bd_rtout = tvtohz_low(tv);
851 break;
855 * Get read timeout.
857 case BIOCGRTIMEOUT:
859 struct timeval *tv = (struct timeval *)ap->a_data;
861 tv->tv_sec = d->bd_rtout / hz;
862 tv->tv_usec = (d->bd_rtout % hz) * ustick;
863 break;
867 * Get packet stats.
869 case BIOCGSTATS:
871 struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;
873 bs->bs_recv = d->bd_rcount;
874 bs->bs_drop = d->bd_dcount;
875 break;
879 * Set immediate mode.
881 case BIOCIMMEDIATE:
882 d->bd_immediate = *(u_int *)ap->a_data;
883 break;
885 case BIOCVERSION:
887 struct bpf_version *bv = (struct bpf_version *)ap->a_data;
889 bv->bv_major = BPF_MAJOR_VERSION;
890 bv->bv_minor = BPF_MINOR_VERSION;
891 break;
895 * Get "header already complete" flag
897 case BIOCGHDRCMPLT:
898 *(u_int *)ap->a_data = d->bd_hdrcmplt;
899 break;
902 * Set "header already complete" flag
904 case BIOCSHDRCMPLT:
905 d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
906 break;
909 * Get "see sent packets" flag
911 case BIOCGSEESENT:
912 *(u_int *)ap->a_data = d->bd_seesent;
913 break;
916 * Set "see sent packets" flag
918 case BIOCSSEESENT:
919 d->bd_seesent = *(u_int *)ap->a_data;
920 break;
922 case FIOASYNC: /* Send signal on receive packets */
923 d->bd_async = *(int *)ap->a_data;
924 break;
926 case FIOSETOWN:
927 error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
928 break;
930 case FIOGETOWN:
931 *(int *)ap->a_data = fgetown(&d->bd_sigio);
932 break;
934 /* This is deprecated, FIOSETOWN should be used instead. */
935 case TIOCSPGRP:
936 error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
937 break;
939 /* This is deprecated, FIOGETOWN should be used instead. */
940 case TIOCGPGRP:
941 *(int *)ap->a_data = -fgetown(&d->bd_sigio);
942 break;
944 case BIOCSRSIG: /* Set receive signal */
946 u_int sig;
948 sig = *(u_int *)ap->a_data;
950 if (sig >= NSIG)
951 error = EINVAL;
952 else
953 d->bd_sig = sig;
954 break;
956 case BIOCGRSIG:
957 *(u_int *)ap->a_data = d->bd_sig;
958 break;
959 case BIOCLOCK:
960 d->bd_locked = 1;
961 break;
963 lwkt_reltoken(&bpf_token);
965 return(error);
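/*
 * Illustrative sketch of typical userland usage (not part of this
 * file; the interface name "em0" is only an example and error
 * handling is omitted):
 *
 *	int fd = open("/dev/bpf", O_RDONLY);
 *	u_int one = 1, blen;
 *	struct ifreq ifr;
 *
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);
 *	ioctl(fd, BIOCIMMEDIATE, &one);
 *	ioctl(fd, BIOCGBLEN, &blen);
 *
 * then read(fd, buf, blen) and walk the returned bpf_hdr records,
 * advancing by BPF_WORDALIGN(bh_hdrlen + bh_caplen).
 */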
969 * Set d's packet filter program to fp. If this file already has a filter,
970 * free it and replace it. Returns EINVAL for bogus requests.
972 static int
973 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
975 struct bpf_insn *fcode, *old;
976 u_int wfilter, flen, size;
978 if (cmd == BIOCSETWF) {
979 old = d->bd_wfilter;
980 wfilter = 1;
981 } else {
982 wfilter = 0;
983 old = d->bd_rfilter;
985 if (fp->bf_insns == NULL) {
986 if (fp->bf_len != 0)
987 return(EINVAL);
988 if (wfilter)
989 d->bd_wfilter = NULL;
990 else
991 d->bd_rfilter = NULL;
992 bpf_resetd(d);
993 if (old != NULL)
994 kfree(old, M_BPF);
995 return(0);
997 flen = fp->bf_len;
998 if (flen > BPF_MAXINSNS)
999 return(EINVAL);
1001 size = flen * sizeof *fp->bf_insns;
1002 fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
1003 if (copyin(fp->bf_insns, fcode, size) == 0 &&
1004 bpf_validate(fcode, (int)flen)) {
1005 if (wfilter)
1006 d->bd_wfilter = fcode;
1007 else
1008 d->bd_rfilter = fcode;
1009 bpf_resetd(d);
1010 if (old != NULL)
1011 kfree(old, M_BPF);
1013 return(0);
1015 kfree(fcode, M_BPF);
1016 return(EINVAL);
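/*
 * bpf_setf() accepts both BIOCSETF (read filter) and BIOCSETWF (write
 * filter).  The program is copied in and checked with bpf_validate()
 * before it replaces the old one; a NULL instruction list with zero
 * length removes the filter.  Installing a filter also flushes any
 * buffered packets via bpf_resetd().
 */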
1020 * Detach a file from its current interface (if attached at all) and attach
1021 * to the interface indicated by the name stored in ifr.
1022 * Return an errno or 0.
1024 static int
1025 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1027 struct bpf_if *bp;
1028 int error;
1029 struct ifnet *theywant;
1031 theywant = ifunit(ifr->ifr_name);
1032 if (theywant == NULL)
1033 return(ENXIO);
1036 * Look through attached interfaces for the named one.
1038 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1039 struct ifnet *ifp = bp->bif_ifp;
1041 if (ifp == NULL || ifp != theywant)
1042 continue;
1043 /* skip additional entry */
1044 if (bp->bif_driverp != &ifp->if_bpf)
1045 continue;
1047 * We found the requested interface.
1048 * Allocate the packet buffers if we need to.
1049 * If we're already attached to requested interface,
1050 * just flush the buffer.
1052 if (d->bd_sbuf == NULL) {
1053 error = bpf_allocbufs(d);
1054 if (error != 0)
1055 return(error);
1057 if (bp != d->bd_bif) {
1058 if (d->bd_bif != NULL) {
1060 * Detach if attached to something else.
1062 bpf_detachd(d);
1065 bpf_attachd(d, bp);
1067 bpf_resetd(d);
1068 return(0);
1071 /* Not found. */
1072 return(ENXIO);
1075 static struct filterops bpf_read_filtops =
1076 { FILTEROP_ISFD, NULL, bpf_filter_detach, bpf_filter_read };
1078 static int
1079 bpfkqfilter(struct dev_kqfilter_args *ap)
1081 cdev_t dev = ap->a_head.a_dev;
1082 struct knote *kn = ap->a_kn;
1083 struct klist *klist;
1084 struct bpf_d *d;
1086 lwkt_gettoken(&bpf_token);
1087 d = dev->si_drv1;
1088 if (d->bd_bif == NULL) {
1089 ap->a_result = 1;
1090 lwkt_reltoken(&bpf_token);
1091 return (0);
1094 ap->a_result = 0;
1095 switch (kn->kn_filter) {
1096 case EVFILT_READ:
1097 kn->kn_fop = &bpf_read_filtops;
1098 kn->kn_hook = (caddr_t)d;
1099 break;
1100 default:
1101 ap->a_result = EOPNOTSUPP;
1102 lwkt_reltoken(&bpf_token);
1103 return (0);
1106 klist = &d->bd_kq.ki_note;
1107 knote_insert(klist, kn);
1108 lwkt_reltoken(&bpf_token);
1110 return (0);
1113 static void
1114 bpf_filter_detach(struct knote *kn)
1116 struct klist *klist;
1117 struct bpf_d *d;
1119 d = (struct bpf_d *)kn->kn_hook;
1120 klist = &d->bd_kq.ki_note;
1121 knote_remove(klist, kn);
1124 static int
1125 bpf_filter_read(struct knote *kn, long hint)
1127 struct bpf_d *d;
1128 int ready = 0;
1130 d = (struct bpf_d *)kn->kn_hook;
1131 if (d->bd_hlen != 0 ||
1132 ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1133 d->bd_slen != 0)) {
1134 ready = 1;
1135 } else {
1136 /* Start the read timeout if necessary. */
1137 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1138 callout_reset(&d->bd_callout, d->bd_rtout,
1139 bpf_timed_out, d);
1140 d->bd_state = BPF_WAITING;
1144 return (ready);
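/*
 * A descriptor is considered readable when the hold buffer is
 * non-empty, or when it is in immediate mode / has timed out and the
 * store buffer contains data.  Otherwise the read timeout callout is
 * armed so that a blocked kevent() eventually fires even without a
 * buffer rotation.
 */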
1149 * Process the packet pkt of length pktlen. The packet is parsed
1150 * by each listener's filter, and if accepted, stashed into the
1151 * corresponding buffer.
1153 void
1154 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
1156 struct bpf_d *d;
1157 struct timeval tv;
1158 int gottime = 0;
1159 u_int slen;
1161 lwkt_gettoken(&bpf_token);
1162 /* Re-check */
1163 if (bp == NULL) {
1164 lwkt_reltoken(&bpf_token);
1165 return;
1169 * Note that the ipl does not have to be raised at this point.
1170 * The only problem that could arise here is if two different
1171 * interfaces shared any data. This is not the case.
1173 SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1174 ++d->bd_rcount;
1175 slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
1176 if (slen != 0) {
1177 if (!gottime) {
1178 microtime(&tv);
1179 gottime = 1;
1181 catchpacket(d, pkt, pktlen, slen, ovbcopy, &tv);
1184 lwkt_reltoken(&bpf_token);
1188 * Copy data from an mbuf chain into a buffer. This code is derived
1189 * from m_copydata in sys/uipc_mbuf.c.
1191 static void
1192 bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
1194 const struct mbuf *m;
1195 u_int count;
1196 u_char *dst;
1198 m = src_arg;
1199 dst = dst_arg;
1200 while (len > 0) {
1201 if (m == NULL)
1202 panic("bpf_mcopy");
1203 count = min(m->m_len, len);
1204 bcopy(mtod(m, void *), dst, count);
1205 m = m->m_next;
1206 dst += count;
1207 len -= count;
1212 * Process the packet in the mbuf chain m. The packet is parsed by each
1213 * listener's filter, and if accepted, stashed into the corresponding
1214 * buffer.
1216 void
1217 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
1219 struct bpf_d *d;
1220 u_int pktlen, slen;
1221 struct timeval tv;
1222 int gottime = 0;
1224 lwkt_gettoken(&bpf_token);
1225 /* Re-check */
1226 if (bp == NULL) {
1227 lwkt_reltoken(&bpf_token);
1228 return;
1231 /* Don't compute pktlen, if no descriptor is attached. */
1232 if (SLIST_EMPTY(&bp->bif_dlist)) {
1233 lwkt_reltoken(&bpf_token);
1234 return;
1237 pktlen = m_lengthm(m, NULL);
1239 SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1240 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1241 continue;
1242 ++d->bd_rcount;
1243 slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
1244 if (slen != 0) {
1245 if (!gottime) {
1246 microtime(&tv);
1247 gottime = 1;
1249 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
1250 &tv);
1253 lwkt_reltoken(&bpf_token);
1257 * Incoming linkage from device drivers, where we have an mbuf chain
1258 * but need to prepend some arbitrary header from a linear buffer.
1260 * Con up a minimal dummy header to pacify bpf. Allocate (only) a
1261 * struct m_hdr on the stack. This is safe as bpf only reads from the
1262 * fields in this header that we initialize, and will not try to free
1263 * it or keep a pointer to it.
1265 void
1266 bpf_mtap_hdr(struct bpf_if *arg, caddr_t data, u_int dlen, struct mbuf *m, u_int direction)
1268 struct m_hdr mh;
1270 mh.mh_flags = 0;
1271 mh.mh_next = m;
1272 mh.mh_len = dlen;
1273 mh.mh_data = data;
1275 return bpf_mtap(arg, (struct mbuf *) &mh);
1278 void
1279 bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
1281 u_int family4;
1283 KKASSERT(family != AF_UNSPEC);
1285 family4 = (u_int)family;
1286 bpf_ptap(bp, m, &family4, sizeof(family4));
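/*
 * bpf_mtap_family() prepends the address family as a 4-byte value in
 * host byte order in front of the mbuf chain by way of bpf_ptap().
 */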
1290 * Process the packet in the mbuf chain m with the separate header data prepended.
1291 * The packet is parsed by each listener's filter, and if accepted,
1292 * stashed into the corresponding buffer.
1294 void
1295 bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
1297 struct mbuf mb;
1300 * Craft on-stack mbuf suitable for passing to bpf_mtap.
1301 * Note that we cut corners here; we only setup what's
1302 * absolutely needed--this mbuf should never go anywhere else.
1304 mb.m_next = m;
1305 mb.m_data = __DECONST(void *, data); /* LINTED */
1306 mb.m_len = dlen;
1307 mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;
1309 bpf_mtap(bp, &mb);
1313 * Move the packet data from interface memory (pkt) into the
1314 * store buffer, waking up any pending reader when the buffers are
1315 * rotated or immediate mode is set. "cpfn" is the routine called to
1316 * do the actual data transfer: bcopy/ovbcopy is passed in to copy
1317 * contiguous chunks, while bpf_mcopy is passed in to copy mbuf
1318 * chains. In the latter case, pkt is really an mbuf.
1320 static void
1321 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
1322 void (*cpfn)(const void *, void *, size_t),
1323 const struct timeval *tv)
1325 struct bpf_hdr *hp;
1326 int totlen, curlen;
1327 int hdrlen = d->bd_bif->bif_hdrlen;
1328 int wakeup = 0;
1330 * Figure out how many bytes to move. If the packet is
1331 * greater or equal to the snapshot length, transfer that
1332 * much. Otherwise, transfer the whole packet (unless
1333 * we hit the buffer size limit).
1335 totlen = hdrlen + min(snaplen, pktlen);
1336 if (totlen > d->bd_bufsize)
1337 totlen = d->bd_bufsize;
1340 * Round up the end of the previous packet to the next longword.
1342 curlen = BPF_WORDALIGN(d->bd_slen);
1343 if (curlen + totlen > d->bd_bufsize) {
1345 * This packet will overflow the storage buffer.
1346 * Rotate the buffers if we can, then wakeup any
1347 * pending reads.
1349 if (d->bd_fbuf == NULL) {
1351 * We haven't completed the previous read yet,
1352 * so drop the packet.
1354 ++d->bd_dcount;
1355 return;
1357 ROTATE_BUFFERS(d);
1358 wakeup = 1;
1359 curlen = 0;
1360 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
1362 * Immediate mode is set, or the read timeout has
1363 * already expired during a select call. A packet
1364 * arrived, so the reader should be woken up.
1366 wakeup = 1;
1370 * Append the bpf header.
1372 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
1373 hp->bh_tstamp = *tv;
1374 hp->bh_datalen = pktlen;
1375 hp->bh_hdrlen = hdrlen;
1377 * Copy the packet data into the store buffer and update its length.
1379 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1380 d->bd_slen = curlen + totlen;
1382 if (wakeup)
1383 bpf_wakeup(d);
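/*
 * catchpacket() stores records back to back in the store buffer, each
 * consisting of a struct bpf_hdr (timestamp, wire length bh_datalen,
 * captured length bh_caplen, header size bh_hdrlen) followed by the
 * captured bytes; records start on BPF_WORDALIGN()ed offsets, which is
 * the same alignment userland must use when walking the buffer.
 */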
1387 * Initialize all nonzero fields of a descriptor.
1389 static int
1390 bpf_allocbufs(struct bpf_d *d)
1392 d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
1393 d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
1394 d->bd_slen = 0;
1395 d->bd_hlen = 0;
1396 return(0);
1400 * Free buffers and packet filter program currently in use by a descriptor.
1401 * Called on close.
1403 static void
1404 bpf_freed(struct bpf_d *d)
1407 * We don't need to lock out interrupts since this descriptor has
1408 * been detached from its interface and it hasn't yet been marked
1409 * free.
1411 if (d->bd_sbuf != NULL) {
1412 kfree(d->bd_sbuf, M_BPF);
1413 if (d->bd_hbuf != NULL)
1414 kfree(d->bd_hbuf, M_BPF);
1415 if (d->bd_fbuf != NULL)
1416 kfree(d->bd_fbuf, M_BPF);
1418 if (d->bd_rfilter)
1419 kfree(d->bd_rfilter, M_BPF);
1420 if (d->bd_wfilter)
1421 kfree(d->bd_wfilter, M_BPF);
1425 * Attach an interface to bpf. ifp is a pointer to the structure
1426 * defining the interface to be attached, dlt is the link layer type,
1427 * and hdrlen is the fixed size of the link header (variable length
1428 * headers are not yet supported).
1430 void
1431 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
1433 bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf);
1436 void
1437 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
1439 struct bpf_if *bp;
1441 bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO);
1443 lwkt_gettoken(&bpf_token);
1445 SLIST_INIT(&bp->bif_dlist);
1446 bp->bif_ifp = ifp;
1447 bp->bif_dlt = dlt;
1448 bp->bif_driverp = driverp;
1449 *bp->bif_driverp = NULL;
1451 bp->bif_next = bpf_iflist;
1452 bpf_iflist = bp;
1455 * Compute the length of the bpf header. This is not necessarily
1456 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1457 * that the network layer header begins on a longword boundary (for
1458 * performance reasons and to alleviate alignment restrictions).
1460 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1462 lwkt_reltoken(&bpf_token);
1464 if (bootverbose)
1465 if_printf(ifp, "bpf attached\n");
1469 * Detach bpf from an interface. This involves detaching each descriptor
1470 * associated with the interface, and leaving bd_bif NULL. Notify each
1471 * descriptor as it's detached so that any sleepers wake up and get
1472 * ENXIO.
1474 void
1475 bpfdetach(struct ifnet *ifp)
1477 struct bpf_if *bp, *bp_prev;
1478 struct bpf_d *d;
1480 lwkt_gettoken(&bpf_token);
1482 /* Locate BPF interface information */
1483 bp_prev = NULL;
1484 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1485 if (ifp == bp->bif_ifp)
1486 break;
1487 bp_prev = bp;
1490 /* Interface wasn't attached; bp is NULL if the loop found no match */
1491 if (bp == NULL) {
1492 lwkt_reltoken(&bpf_token);
1493 kprintf("bpfdetach: %s was not attached\n", ifp->if_xname);
1494 return;
1497 while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) {
1498 bpf_detachd(d);
1499 bpf_wakeup(d);
1502 if (bp_prev != NULL)
1503 bp_prev->bif_next = bp->bif_next;
1504 else
1505 bpf_iflist = bp->bif_next;
1507 kfree(bp, M_BPF);
1509 lwkt_reltoken(&bpf_token);
1513 * Get a list of the available data link types for the interface.
1515 static int
1516 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
1518 int n, error;
1519 struct ifnet *ifp;
1520 struct bpf_if *bp;
1522 ifp = d->bd_bif->bif_ifp;
1523 n = 0;
1524 error = 0;
1525 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1526 if (bp->bif_ifp != ifp)
1527 continue;
1528 if (bfl->bfl_list != NULL) {
1529 if (n >= bfl->bfl_len) {
1530 return (ENOMEM);
1532 error = copyout(&bp->bif_dlt,
1533 bfl->bfl_list + n, sizeof(u_int));
1535 n++;
1537 bfl->bfl_len = n;
1538 return(error);
1542 * Set the data link type of a BPF instance.
1544 static int
1545 bpf_setdlt(struct bpf_d *d, u_int dlt)
1547 int error, opromisc;
1548 struct ifnet *ifp;
1549 struct bpf_if *bp;
1551 if (d->bd_bif->bif_dlt == dlt)
1552 return (0);
1553 ifp = d->bd_bif->bif_ifp;
1554 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1555 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
1556 break;
1558 if (bp != NULL) {
1559 opromisc = d->bd_promisc;
1560 bpf_detachd(d);
1561 bpf_attachd(d, bp);
1562 bpf_resetd(d);
1563 if (opromisc) {
1564 error = ifpromisc(bp->bif_ifp, 1);
1565 if (error) {
1566 if_printf(bp->bif_ifp,
1567 "bpf_setdlt: ifpromisc failed (%d)\n",
1568 error);
1569 } else {
1570 d->bd_promisc = 1;
1574 return(bp == NULL ? EINVAL : 0);
1577 void
1578 bpf_gettoken(void)
1580 lwkt_gettoken(&bpf_token);
1583 void
1584 bpf_reltoken(void)
1586 lwkt_reltoken(&bpf_token);
1589 static void
1590 bpf_drvinit(void *unused)
1592 int i;
1594 make_autoclone_dev(&bpf_ops, &DEVFS_CLONE_BITMAP(bpf),
1595 bpfclone, 0, 0, 0600, "bpf");
1596 for (i = 0; i < BPF_PREALLOCATED_UNITS; i++) {
1597 make_dev(&bpf_ops, i, 0, 0, 0600, "bpf%d", i);
1598 devfs_clone_bitmap_set(&DEVFS_CLONE_BITMAP(bpf), i);
1602 static void
1603 bpf_drvuninit(void *unused)
1605 devfs_clone_handler_del("bpf");
1606 dev_ops_remove_all(&bpf_ops);
1607 devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(bpf));
1610 SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
1611 SYSUNINIT(bpfdev, SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvuninit, NULL);
1613 #else /* !BPF */
1615 * NOP stubs to allow bpf-using drivers to load and function.
1617 * A 'better' implementation would allow the core bpf functionality
1618 * to be loaded at runtime.
1621 void
1622 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
1626 void
1627 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
1631 void
1632 bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
1636 void
1637 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
1641 void
1642 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
1646 void
1647 bpfdetach(struct ifnet *ifp)
1651 u_int
1652 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
1654 return -1; /* "no filter" behaviour */
1657 void
1658 bpf_gettoken(void)
1662 void
1663 bpf_reltoken(void)
1667 #endif /* !BPF */