Kernel implementation of the BSD Packet Filter (BPF) — DragonFly BSD source browser extract.
[dragonfly.git] / sys / net / bpf.c
blob8895d64cdba0a59ba59b803a906d175c55d18586
1 /*
2 * Copyright (c) 1990, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
38 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
40 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
41 * $DragonFly: src/sys/net/bpf.c,v 1.42 2007/10/13 09:53:51 sephe Exp $
44 #include "use_bpf.h"
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/conf.h>
49 #include <sys/device.h>
50 #include <sys/malloc.h>
51 #include <sys/mbuf.h>
52 #include <sys/time.h>
53 #include <sys/proc.h>
54 #include <sys/signalvar.h>
55 #include <sys/filio.h>
56 #include <sys/sockio.h>
57 #include <sys/ttycom.h>
58 #include <sys/filedesc.h>
60 #include <sys/poll.h>
62 #include <sys/socket.h>
63 #include <sys/vnode.h>
65 #include <sys/thread2.h>
67 #include <net/if.h>
68 #include <net/bpf.h>
69 #include <net/bpfdesc.h>
71 #include <netinet/in.h>
72 #include <netinet/if_ether.h>
73 #include <sys/kernel.h>
74 #include <sys/sysctl.h>
/* Dedicated malloc type for all BPF allocations (descriptors, buffers, filters). */
MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if NBPF > 0

/*
 * The default read buffer size is patchable.
 * Both the default and the upper bound are exported as debug sysctls.
 */
static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if *bpf_iflist;

/* Internal helpers; see the definitions below for details. */
static int      bpf_allocbufs(struct bpf_d *);
static void     bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void     bpf_detachd(struct bpf_d *d);
static void     bpf_resetd(struct bpf_d *);
static void     bpf_freed(struct bpf_d *);
static void     bpf_mcopy(const void *, void *, size_t);
static int      bpf_movein(struct uio *, int, struct mbuf **,
                           struct sockaddr *, int *);
static int      bpf_setif(struct bpf_d *, struct ifreq *);
static void     bpf_timed_out(void *);
static void     bpf_wakeup(struct bpf_d *);
static void     catchpacket(struct bpf_d *, u_char *, u_int, u_int,
                            void (*)(const void *, void *, size_t),
                            const struct timeval *);
static int      bpf_setf(struct bpf_d *, struct bpf_program *);
static int      bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int      bpf_setdlt(struct bpf_d *, u_int);
static void     bpf_drvinit(void *unused);

/* Character-device entry points registered in bpf_ops below. */
static d_open_t         bpfopen;
static d_close_t        bpfclose;
static d_read_t         bpfread;
static d_write_t        bpfwrite;
static d_ioctl_t        bpfioctl;
static d_poll_t         bpfpoll;

#define CDEV_MAJOR 23
static struct dev_ops bpf_ops = {
        { "bpf", CDEV_MAJOR, 0 },
        .d_open =       bpfopen,
        .d_close =      bpfclose,
        .d_read =       bpfread,
        .d_write =      bpfwrite,
        .d_ioctl =      bpfioctl,
        .d_poll =       bpfpoll,
};
133 static int
134 bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
135 struct sockaddr *sockp, int *datlen)
137 struct mbuf *m;
138 int error;
139 int len;
140 int hlen;
142 *datlen = 0;
143 *mp = NULL;
146 * Build a sockaddr based on the data link layer type.
147 * We do this at this level because the ethernet header
148 * is copied directly into the data field of the sockaddr.
149 * In the case of SLIP, there is no header and the packet
150 * is forwarded as is.
151 * Also, we are careful to leave room at the front of the mbuf
152 * for the link level header.
154 switch (linktype) {
155 case DLT_SLIP:
156 sockp->sa_family = AF_INET;
157 hlen = 0;
158 break;
160 case DLT_EN10MB:
161 sockp->sa_family = AF_UNSPEC;
162 /* XXX Would MAXLINKHDR be better? */
163 hlen = sizeof(struct ether_header);
164 break;
166 case DLT_RAW:
167 case DLT_NULL:
168 sockp->sa_family = AF_UNSPEC;
169 hlen = 0;
170 break;
172 case DLT_ATM_RFC1483:
174 * en atm driver requires 4-byte atm pseudo header.
175 * though it isn't standard, vpi:vci needs to be
176 * specified anyway.
178 sockp->sa_family = AF_UNSPEC;
179 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
180 break;
182 case DLT_PPP:
183 sockp->sa_family = AF_UNSPEC;
184 hlen = 4; /* This should match PPP_HDRLEN */
185 break;
187 default:
188 return(EIO);
191 len = uio->uio_resid;
192 *datlen = len - hlen;
193 if ((unsigned)len > MCLBYTES)
194 return(EIO);
196 m = m_getl(len, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
197 if (m == NULL)
198 return(ENOBUFS);
199 m->m_pkthdr.len = m->m_len = len;
200 m->m_pkthdr.rcvif = NULL;
201 *mp = m;
203 * Make room for link header.
205 if (hlen != 0) {
206 m->m_pkthdr.len -= hlen;
207 m->m_len -= hlen;
208 m->m_data += hlen; /* XXX */
209 error = uiomove(sockp->sa_data, hlen, uio);
210 if (error)
211 goto bad;
213 error = uiomove(mtod(m, caddr_t), len - hlen, uio);
214 if (!error)
215 return(0);
216 bad:
217 m_freem(m);
218 return(error);
/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
        /*
         * Point d at bp, and add d to the interface's list of listeners.
         * Finally, point the driver's bpf cookie at the interface so
         * it will divert packets to bpf.
         */
        d->bd_bif = bp;
        SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
        *bp->bif_driverp = bp;
}
/*
 * Detach a file from its interface.
 * Unlinks d from the interface's listener list, clears the driver's
 * bpf cookie when the last listener goes away, and drops promiscuous
 * mode if this descriptor had requested it.
 */
static void
bpf_detachd(struct bpf_d *d)
{
        int error;
        struct bpf_if *bp;
        struct ifnet *ifp;

        bp = d->bd_bif;
        ifp = bp->bif_ifp;

        /* Remove d from the interface's descriptor list. */
        SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);

        if (SLIST_EMPTY(&bp->bif_dlist)) {
                /*
                 * Let the driver know that there are no more listeners.
                 */
                *bp->bif_driverp = NULL;
        }
        d->bd_bif = NULL;
        /*
         * Check if this descriptor had requested promiscuous mode.
         * If so, turn it off.
         */
        if (d->bd_promisc) {
                d->bd_promisc = 0;
                error = ifpromisc(ifp, 0);
                if (error != 0 && error != ENXIO) {
                        /*
                         * ENXIO can happen if a pccard is unplugged,
                         * Something is really wrong if we were able to put
                         * the driver into promiscuous mode, but can't
                         * take it out.
                         */
                        if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n",
                                  error);
                }
        }
}
/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 * Allocates and zeroes a fresh descriptor and hangs it off si_drv1;
 * also clones the next minor via make_dev() so another open can follow.
 */
/* ARGSUSED */
static int
bpfopen(struct dev_open_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        struct bpf_d *d;

        /* BPF is not available inside a jail. */
        if (ap->a_cred->cr_prison)
                return(EPERM);

        d = dev->si_drv1;
        /*
         * Each minor can be opened by only one process.  If the requested
         * minor is in use, return EBUSY.
         */
        if (d != NULL)
                return(EBUSY);
        make_dev(&bpf_ops, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
        MALLOC(d, struct bpf_d *, sizeof *d, M_BPF, M_WAITOK | M_ZERO);
        dev->si_drv1 = d;
        d->bd_bufsize = bpf_bufsize;    /* default; adjustable via BIOCSBLEN */
        d->bd_sig = SIGIO;              /* default async-notify signal */
        d->bd_seesent = 1;              /* see locally sent packets by default */
        callout_init(&d->bd_callout);
        return(0);
}
/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(struct dev_close_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        struct bpf_d *d = dev->si_drv1;

        /* Drop SIGIO ownership before tearing the descriptor down. */
        funsetown(d->bd_sigio);
        crit_enter();
        /* Cancel a pending read timeout, if any. */
        if (d->bd_state == BPF_WAITING)
                callout_stop(&d->bd_callout);
        d->bd_state = BPF_IDLE;
        if (d->bd_bif != NULL)
                bpf_detachd(d);
        crit_exit();
        bpf_freed(d);
        dev->si_drv1 = NULL;    /* mark the minor free for reuse */
        kfree(d, M_BPF);

        return(0);
}
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * FIX: wrapped in do { } while (0) so the macro expands to exactly one
 * statement and is safe inside unbraced if/else bodies (CERT PRE10-C).
 */
#define ROTATE_BUFFERS(d) \
        do { \
                (d)->bd_hbuf = (d)->bd_sbuf; \
                (d)->bd_hlen = (d)->bd_slen; \
                (d)->bd_sbuf = (d)->bd_fbuf; \
                (d)->bd_slen = 0; \
                (d)->bd_fbuf = NULL; \
        } while (0)
/*
 * bpfread - read next chunk of packets from buffers
 * Blocks (unless IO_NDELAY) until the hold buffer has data or the
 * read timeout (bd_rtout) fires; always transfers a whole kernel
 * buffer, so the caller's buffer must be exactly bd_bufsize bytes.
 */
static int
bpfread(struct dev_read_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        struct bpf_d *d = dev->si_drv1;
        int timed_out;
        int error;

        /*
         * Restrict application to use a buffer the same size as
         * as kernel buffers.
         */
        if (ap->a_uio->uio_resid != d->bd_bufsize)
                return(EINVAL);

        crit_enter();
        if (d->bd_state == BPF_WAITING)
                callout_stop(&d->bd_callout);
        timed_out = (d->bd_state == BPF_TIMED_OUT);
        d->bd_state = BPF_IDLE;

        /*
         * If the hold buffer is empty, then do a timed sleep, which
         * ends when the timeout expires or when enough packets
         * have arrived to fill the store buffer.
         */
        while (d->bd_hbuf == NULL) {
                if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
                        /*
                         * A packet(s) either arrived since the previous
                         * read or arrived while we were asleep.
                         * Rotate the buffers and return what's here.
                         */
                        ROTATE_BUFFERS(d);
                        break;
                }

                /*
                 * No data is available, check to see if the bpf device
                 * is still pointed at a real interface.  If not, return
                 * ENXIO so that the userland process knows to rebind
                 * it before using it again.
                 */
                if (d->bd_bif == NULL) {
                        crit_exit();
                        return(ENXIO);
                }

                if (ap->a_ioflag & IO_NDELAY) {
                        crit_exit();
                        return(EWOULDBLOCK);
                }
                error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
                if (error == EINTR || error == ERESTART) {
                        crit_exit();
                        return(error);
                }
                if (error == EWOULDBLOCK) {
                        /*
                         * On a timeout, return what's in the buffer,
                         * which may be nothing.  If there is something
                         * in the store buffer, we can rotate the buffers.
                         */
                        if (d->bd_hbuf)
                                /*
                                 * We filled up the buffer in between
                                 * getting the timeout and arriving
                                 * here, so we don't need to rotate.
                                 */
                                break;

                        if (d->bd_slen == 0) {
                                crit_exit();
                                return(0);
                        }
                        ROTATE_BUFFERS(d);
                        break;
                }
        }
        /*
         * At this point, we know we have something in the hold slot.
         */
        crit_exit();

        /*
         * Move data from hold buffer into user space.
         * We know the entire buffer is transferred since
         * we checked above that the read buffer is bpf_bufsize bytes.
         */
        error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio);

        /* Recycle the (now drained) hold buffer as the new free buffer. */
        crit_enter();
        d->bd_fbuf = d->bd_hbuf;
        d->bd_hbuf = NULL;
        d->bd_hlen = 0;
        crit_exit();

        return(error);
}
/*
 * If there are processes sleeping on this descriptor, wake them up.
 * Also delivers SIGIO if async mode is enabled and notifies select/poll
 * waiters.
 */
static void
bpf_wakeup(struct bpf_d *d)
{
        /* A wakeup supersedes any pending timed read. */
        if (d->bd_state == BPF_WAITING) {
                callout_stop(&d->bd_callout);
                d->bd_state = BPF_IDLE;
        }
        wakeup(d);
        if (d->bd_async && d->bd_sig && d->bd_sigio)
                pgsigio(d->bd_sigio, d->bd_sig, 0);

        /* selwakeup() requires the MP lock in this kernel version. */
        get_mplock();
        selwakeup(&d->bd_sel);
        rel_mplock();
        /* XXX */
        d->bd_sel.si_pid = 0;
}
/*
 * Callout handler for the read timeout armed by bpfpoll().
 * Marks the descriptor as timed out and wakes the reader if any
 * data has accumulated in the store buffer.
 */
static void
bpf_timed_out(void *arg)
{
        struct bpf_d *d = (struct bpf_d *)arg;

        crit_enter();
        /* Only act if the timed read is still pending (not already woken). */
        if (d->bd_state == BPF_WAITING) {
                d->bd_state = BPF_TIMED_OUT;
                if (d->bd_slen != 0)
                        bpf_wakeup(d);
        }
        crit_exit();
}
487 static int
488 bpfwrite(struct dev_write_args *ap)
490 cdev_t dev = ap->a_head.a_dev;
491 struct bpf_d *d = dev->si_drv1;
492 struct ifnet *ifp;
493 struct mbuf *m;
494 int error;
495 static struct sockaddr dst;
496 int datlen;
498 if (d->bd_bif == NULL)
499 return(ENXIO);
501 ifp = d->bd_bif->bif_ifp;
503 if (ap->a_uio->uio_resid == 0)
504 return(0);
506 error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m,
507 &dst, &datlen);
508 if (error)
509 return(error);
511 if (datlen > ifp->if_mtu) {
512 m_freem(m);
513 return(EMSGSIZE);
516 if (d->bd_hdrcmplt)
517 dst.sa_family = pseudo_AF_HDRCMPLT;
519 crit_enter();
520 lwkt_serialize_enter(ifp->if_serializer);
521 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)NULL);
522 lwkt_serialize_exit(ifp->if_serializer);
523 crit_exit();
525 * The driver frees the mbuf.
527 return(error);
/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
bpf_resetd(struct bpf_d *d)
{
        if (d->bd_hbuf) {
                /* Free the hold buffer. */
                d->bd_fbuf = d->bd_hbuf;
                d->bd_hbuf = NULL;
        }
        d->bd_slen = 0;
        d->bd_hlen = 0;
        d->bd_rcount = 0;
        d->bd_dcount = 0;
}
/*
 * Device ioctl dispatch.  Supported commands:
 *
 *  FIONREAD            Check for read packet available.
 *  SIOCGIFADDR         Get interface address - convenient hook to driver.
 *  BIOCGBLEN           Get buffer len [for read()].
 *  BIOCSETF            Set ethernet read filter.
 *  BIOCFLUSH           Flush read packet buffer.
 *  BIOCPROMISC         Put interface into promiscuous mode.
 *  BIOCGDLT            Get link layer type.
 *  BIOCGETIF           Get interface name.
 *  BIOCSETIF           Set interface.
 *  BIOCSRTIMEOUT       Set read timeout.
 *  BIOCGRTIMEOUT       Get read timeout.
 *  BIOCGSTATS          Get packet stats.
 *  BIOCIMMEDIATE       Set immediate mode.
 *  BIOCVERSION         Get filter language version.
 *  BIOCGHDRCMPLT       Get "header already complete" flag
 *  BIOCSHDRCMPLT       Set "header already complete" flag
 *  BIOCGSEESENT        Get "see packets sent" flag
 *  BIOCSSEESENT        Set "see packets sent" flag
 */
/* ARGSUSED */
static int
bpfioctl(struct dev_ioctl_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        struct bpf_d *d = dev->si_drv1;
        int error = 0;

        /* Any ioctl cancels a pending timed read. */
        crit_enter();
        if (d->bd_state == BPF_WAITING)
                callout_stop(&d->bd_callout);
        d->bd_state = BPF_IDLE;
        crit_exit();

        switch (ap->a_cmd) {
        default:
                error = EINVAL;
                break;

        /*
         * Check for read packet available.
         */
        case FIONREAD:
                {
                        int n;

                        crit_enter();
                        n = d->bd_slen;
                        if (d->bd_hbuf)
                                n += d->bd_hlen;
                        crit_exit();

                        *(int *)ap->a_data = n;
                        break;
                }

        case SIOCGIFADDR:
                {
                        struct ifnet *ifp;

                        if (d->bd_bif == NULL) {
                                error = EINVAL;
                        } else {
                                /* Forward to the interface's own ioctl. */
                                ifp = d->bd_bif->bif_ifp;
                                lwkt_serialize_enter(ifp->if_serializer);
                                error = ifp->if_ioctl(ifp, ap->a_cmd,
                                                      ap->a_data, ap->a_cred);
                                lwkt_serialize_exit(ifp->if_serializer);
                        }
                        break;
                }

        /*
         * Get buffer len [for read()].
         */
        case BIOCGBLEN:
                *(u_int *)ap->a_data = d->bd_bufsize;
                break;

        /*
         * Set buffer length.  Only allowed before an interface is
         * attached (buffers are allocated at attach time); the size is
         * clamped to [BPF_MINBUFSIZE, bpf_maxbufsize] and the clamped
         * value is written back to userland.
         */
        case BIOCSBLEN:
                if (d->bd_bif != NULL) {
                        error = EINVAL;
                } else {
                        u_int size = *(u_int *)ap->a_data;

                        if (size > bpf_maxbufsize)
                                *(u_int *)ap->a_data = size = bpf_maxbufsize;
                        else if (size < BPF_MINBUFSIZE)
                                *(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
                        d->bd_bufsize = size;
                }
                break;

        /*
         * Set link layer read filter.
         */
        case BIOCSETF:
                error = bpf_setf(d, (struct bpf_program *)ap->a_data);
                break;

        /*
         * Flush read packet buffer.
         */
        case BIOCFLUSH:
                crit_enter();
                bpf_resetd(d);
                crit_exit();
                break;

        /*
         * Put interface into promiscuous mode.
         */
        case BIOCPROMISC:
                if (d->bd_bif == NULL) {
                        /*
                         * No interface attached yet.
                         */
                        error = EINVAL;
                        break;
                }
                crit_enter();
                if (d->bd_promisc == 0) {
                        error = ifpromisc(d->bd_bif->bif_ifp, 1);
                        if (error == 0)
                                d->bd_promisc = 1;
                }
                crit_exit();
                break;

        /*
         * Get device parameters.
         */
        case BIOCGDLT:
                if (d->bd_bif == NULL)
                        error = EINVAL;
                else
                        *(u_int *)ap->a_data = d->bd_bif->bif_dlt;
                break;

        /*
         * Get a list of supported data link types.
         */
        case BIOCGDLTLIST:
                if (d->bd_bif == NULL) {
                        error = EINVAL;
                } else {
                        error = bpf_getdltlist(d,
                                (struct bpf_dltlist *)ap->a_data);
                }
                break;

        /*
         * Set data link type.
         */
        case BIOCSDLT:
                if (d->bd_bif == NULL)
                        error = EINVAL;
                else
                        error = bpf_setdlt(d, *(u_int *)ap->a_data);
                break;

        /*
         * Get interface name.
         */
        case BIOCGETIF:
                if (d->bd_bif == NULL) {
                        error = EINVAL;
                } else {
                        struct ifnet *const ifp = d->bd_bif->bif_ifp;
                        struct ifreq *const ifr = (struct ifreq *)ap->a_data;

                        strlcpy(ifr->ifr_name, ifp->if_xname,
                                sizeof ifr->ifr_name);
                }
                break;

        /*
         * Set interface.
         */
        case BIOCSETIF:
                error = bpf_setif(d, (struct ifreq *)ap->a_data);
                break;

        /*
         * Set read timeout.
         */
        case BIOCSRTIMEOUT:
                {
                        struct timeval *tv = (struct timeval *)ap->a_data;

                        /*
                         * Subtract 1 tick from tvtohz() since this isn't
                         * a one-shot timer.
                         */
                        if ((error = itimerfix(tv)) == 0)
                                d->bd_rtout = tvtohz_low(tv);
                        break;
                }

        /*
         * Get read timeout.
         */
        case BIOCGRTIMEOUT:
                {
                        struct timeval *tv = (struct timeval *)ap->a_data;

                        tv->tv_sec = d->bd_rtout / hz;
                        tv->tv_usec = (d->bd_rtout % hz) * tick;
                        break;
                }

        /*
         * Get packet stats.
         */
        case BIOCGSTATS:
                {
                        struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;

                        bs->bs_recv = d->bd_rcount;
                        bs->bs_drop = d->bd_dcount;
                        break;
                }

        /*
         * Set immediate mode.
         */
        case BIOCIMMEDIATE:
                d->bd_immediate = *(u_int *)ap->a_data;
                break;

        case BIOCVERSION:
                {
                        struct bpf_version *bv = (struct bpf_version *)ap->a_data;

                        bv->bv_major = BPF_MAJOR_VERSION;
                        bv->bv_minor = BPF_MINOR_VERSION;
                        break;
                }

        /*
         * Get "header already complete" flag
         */
        case BIOCGHDRCMPLT:
                *(u_int *)ap->a_data = d->bd_hdrcmplt;
                break;

        /*
         * Set "header already complete" flag
         */
        case BIOCSHDRCMPLT:
                d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
                break;

        /*
         * Get "see sent packets" flag
         */
        case BIOCGSEESENT:
                *(u_int *)ap->a_data = d->bd_seesent;
                break;

        /*
         * Set "see sent packets" flag
         */
        case BIOCSSEESENT:
                d->bd_seesent = *(u_int *)ap->a_data;
                break;

        case FIOASYNC:          /* Send signal on receive packets */
                d->bd_async = *(int *)ap->a_data;
                break;

        case FIOSETOWN:
                error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
                break;

        case FIOGETOWN:
                *(int *)ap->a_data = fgetown(d->bd_sigio);
                break;

        /* This is deprecated, FIOSETOWN should be used instead. */
        case TIOCSPGRP:
                error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
                break;

        /* This is deprecated, FIOGETOWN should be used instead. */
        case TIOCGPGRP:
                *(int *)ap->a_data = -fgetown(d->bd_sigio);
                break;

        case BIOCSRSIG:         /* Set receive signal */
                {
                        u_int sig;

                        sig = *(u_int *)ap->a_data;

                        if (sig >= NSIG)
                                error = EINVAL;
                        else
                                d->bd_sig = sig;
                        break;
                }
        case BIOCGRSIG:
                *(u_int *)ap->a_data = d->bd_sig;
                break;
        }
        return(error);
}
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 * A NULL instruction list with zero length clears the filter entirely.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp)
{
        struct bpf_insn *fcode, *old;
        u_int flen, size;

        old = d->bd_filter;
        if (fp->bf_insns == NULL) {
                if (fp->bf_len != 0)
                        return(EINVAL);
                /* Clear the filter and flush buffered packets atomically. */
                crit_enter();
                d->bd_filter = NULL;
                bpf_resetd(d);
                crit_exit();
                if (old != NULL)
                        kfree(old, M_BPF);
                return(0);
        }
        flen = fp->bf_len;
        if (flen > BPF_MAXINSNS)
                return(EINVAL);

        size = flen * sizeof *fp->bf_insns;
        fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
        /* Only install the program if it copies in and validates cleanly. */
        if (copyin(fp->bf_insns, fcode, size) == 0 &&
            bpf_validate(fcode, (int)flen)) {
                crit_enter();
                d->bd_filter = fcode;
                bpf_resetd(d);
                crit_exit();
                if (old != NULL)
                        kfree(old, M_BPF);

                return(0);
        }
        kfree(fcode, M_BPF);
        return(EINVAL);
}
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
        struct bpf_if *bp;
        int error;
        struct ifnet *theywant;

        theywant = ifunit(ifr->ifr_name);
        if (theywant == NULL)
                return(ENXIO);

        /*
         * Look through attached interfaces for the named one.
         */
        for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
                struct ifnet *ifp = bp->bif_ifp;

                if (ifp == NULL || ifp != theywant)
                        continue;
                /* skip additional entry */
                if (bp->bif_driverp != &ifp->if_bpf)
                        continue;
                /*
                 * We found the requested interface.
                 * If it's not up, return an error.
                 * Allocate the packet buffers if we need to.
                 * If we're already attached to requested interface,
                 * just flush the buffer.
                 */
                if (!(ifp->if_flags & IFF_UP))
                        return(ENETDOWN);

                if (d->bd_sbuf == NULL) {
                        error = bpf_allocbufs(d);
                        if (error != 0)
                                return(error);
                }
                crit_enter();
                if (bp != d->bd_bif) {
                        if (d->bd_bif != NULL) {
                                /*
                                 * Detach if attached to something else.
                                 */
                                bpf_detachd(d);
                        }

                        bpf_attachd(d, bp);
                }
                bpf_resetd(d);
                crit_exit();
                return(0);
        }

        /* Not found. */
        return(ENXIO);
}
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(struct dev_poll_args *ap)
{
        cdev_t dev = ap->a_head.a_dev;
        struct bpf_d *d;
        int revents;

        d = dev->si_drv1;
        if (d->bd_bif == NULL)
                return(ENXIO);

        /* Writes never block on a BPF descriptor. */
        revents = ap->a_events & (POLLOUT | POLLWRNORM);
        crit_enter();
        if (ap->a_events & (POLLIN | POLLRDNORM)) {
                /*
                 * An imitation of the FIONREAD ioctl code.
                 * XXX not quite.  An exact imitation:
                 *      if (d->b_slen != 0 ||
                 *          (d->bd_hbuf != NULL && d->bd_hlen != 0)
                 */
                if (d->bd_hlen != 0 ||
                    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
                    d->bd_slen != 0)) {
                        revents |= ap->a_events & (POLLIN | POLLRDNORM);
                } else {
                        selrecord(curthread, &d->bd_sel);
                        /* Start the read timeout if necessary. */
                        if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
                                callout_reset(&d->bd_callout, d->bd_rtout,
                                              bpf_timed_out, d);
                                d->bd_state = BPF_WAITING;
                        }
                }
        }
        crit_exit();
        ap->a_events = revents;
        return(0);
}
/*
 * Process the packet pkt of length pktlen.  The packet is parsed
 * by each listener's filter, and if accepted, stashed into the
 * corresponding buffer.  pkt is a contiguous buffer, so the plain
 * ovbcopy routine is used for the data transfer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
        struct bpf_d *d;
        struct timeval tv;
        int gottime = 0;        /* lazily timestamp only if someone matches */
        u_int slen;

        /*
         * Note that the ipl does not have to be raised at this point.
         * The only problem that could arise here is that if two different
         * interfaces shared any data.  This is not the case.
         */
        SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
                ++d->bd_rcount;
                slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
                if (slen != 0) {
                        if (!gottime) {
                                microtime(&tv);
                                gottime = 1;
                        }
                        catchpacket(d, pkt, pktlen, slen, ovbcopy, &tv);
                }
        }
}
1041 * Copy data from an mbuf chain into a buffer. This code is derived
1042 * from m_copydata in sys/uipc_mbuf.c.
1044 static void
1045 bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
1047 const struct mbuf *m;
1048 u_int count;
1049 u_char *dst;
1051 m = src_arg;
1052 dst = dst_arg;
1053 while (len > 0) {
1054 if (m == NULL)
1055 panic("bpf_mcopy");
1056 count = min(m->m_len, len);
1057 bcopy(mtod(m, void *), dst, count);
1058 m = m->m_next;
1059 dst += count;
1060 len -= count;
/*
 * Process the packet in the mbuf chain m.  The packet is parsed by each
 * listener's filter, and if accepted, stashed into the corresponding
 * buffer.  Because the data lives in an mbuf chain, bpf_mcopy is used
 * as the copy routine and buflen 0 tells bpf_filter it has an mbuf.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
        struct bpf_d *d;
        u_int pktlen, slen;
        struct timeval tv;
        int gottime = 0;        /* lazily timestamp only if someone matches */

        /* Don't compute pktlen, if no descriptor is attached. */
        if (SLIST_EMPTY(&bp->bif_dlist))
                return;

        pktlen = m_lengthm(m, NULL);

        SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
                /* rcvif == NULL marks a locally sent packet; honor bd_seesent. */
                if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
                        continue;
                ++d->bd_rcount;
                slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
                if (slen != 0) {
                        if (!gottime) {
                                microtime(&tv);
                                gottime = 1;
                        }
                        catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
                                    &tv);
                }
        }
}
/*
 * Tap a packet with its address family prepended as a 4-byte header
 * (the DLT_NULL convention).  family must be a real address family,
 * not AF_UNSPEC.
 */
void
bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
{
        u_int family4;

        KKASSERT(family != AF_UNSPEC);

        /* Widen to a u_int so the prepended header is exactly 4 bytes. */
        family4 = (u_int)family;
        bpf_ptap(bp, m, &family4, sizeof(family4));
}
/*
 * Process the packet in the mbuf chain m with the header in m prepended.
 * The packet is parsed by each listener's filter, and if accepted,
 * stashed into the corresponding buffer.
 */
void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
        struct mbuf mb;

        /*
         * Craft on-stack mbuf suitable for passing to bpf_mtap.
         * Note that we cut corners here; we only setup what's
         * absolutely needed--this mbuf should never go anywhere else.
         */
        mb.m_next = m;
        mb.m_data = __DECONST(void *, data);    /* LINTED */
        mb.m_len = dlen;
        mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;

        bpf_mtap(bp, &mb);
}
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
            void (*cpfn)(const void *, void *, size_t),
            const struct timeval *tv)
{
        struct bpf_hdr *hp;
        int totlen, curlen;
        int hdrlen = d->bd_bif->bif_hdrlen;

        /*
         * Figure out how many bytes to move.  If the packet is
         * greater or equal to the snapshot length, transfer that
         * much.  Otherwise, transfer the whole packet (unless
         * we hit the buffer size limit).
         */
        totlen = hdrlen + min(snaplen, pktlen);
        if (totlen > d->bd_bufsize)
                totlen = d->bd_bufsize;

        /*
         * Round up the end of the previous packet to the next longword.
         */
        curlen = BPF_WORDALIGN(d->bd_slen);
        if (curlen + totlen > d->bd_bufsize) {
                /*
                 * This packet will overflow the storage buffer.
                 * Rotate the buffers if we can, then wakeup any
                 * pending reads.
                 */
                if (d->bd_fbuf == NULL) {
                        /*
                         * We haven't completed the previous read yet,
                         * so drop the packet.
                         */
                        ++d->bd_dcount;
                        return;
                }
                ROTATE_BUFFERS(d);
                bpf_wakeup(d);
                curlen = 0;
        } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
                /*
                 * Immediate mode is set, or the read timeout has
                 * already expired during a select call.  A packet
                 * arrived, so the reader should be woken up.
                 */
                bpf_wakeup(d);
        }

        /*
         * Append the bpf header.
         */
        hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
        hp->bh_tstamp = *tv;
        hp->bh_datalen = pktlen;
        hp->bh_hdrlen = hdrlen;
        /*
         * Copy the packet data into the store buffer and update its length.
         */
        (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
        d->bd_slen = curlen + totlen;
}
1204 * Initialize all nonzero fields of a descriptor.
1206 static int
1207 bpf_allocbufs(struct bpf_d *d)
1209 d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
1210 d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
1211 d->bd_slen = 0;
1212 d->bd_hlen = 0;
1213 return(0);
/*
 * Free buffers and packet filter program currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
        /*
         * We don't need to lock out interrupts since this descriptor has
         * been detached from its interface and it yet hasn't been marked
         * free.
         */
        if (d->bd_sbuf != NULL) {
                /* bd_sbuf is only NULL before buffers were ever allocated. */
                kfree(d->bd_sbuf, M_BPF);
                if (d->bd_hbuf != NULL)
                        kfree(d->bd_hbuf, M_BPF);
                if (d->bd_fbuf != NULL)
                        kfree(d->bd_fbuf, M_BPF);
        }
        if (d->bd_filter != NULL)
                kfree(d->bd_filter, M_BPF);
}
/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 * Convenience wrapper that stores the cookie in ifp->if_bpf.
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
        bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf);
}
/*
 * Attach an interface to bpf with an explicit driver cookie pointer
 * (driverp).  The cookie is cleared until a listener attaches, at
 * which point bpf_attachd() points it back at the bpf_if.
 */
void
bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
        struct bpf_if *bp;

        bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO);

        SLIST_INIT(&bp->bif_dlist);
        bp->bif_ifp = ifp;
        bp->bif_dlt = dlt;
        bp->bif_driverp = driverp;
        *bp->bif_driverp = NULL;        /* no listeners yet */

        /* Link onto the global interface list. */
        bp->bif_next = bpf_iflist;
        bpf_iflist = bp;

        /*
         * Compute the length of the bpf header.  This is not necessarily
         * equal to SIZEOF_BPF_HDR because we want to insert spacing such
         * that the network layer header begins on a longword boundary (for
         * performance reasons and to alleviate alignment restrictions).
         */
        bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

        if (bootverbose)
                if_printf(ifp, "bpf attached\n");
}
1280 * Detach bpf from an interface. This involves detaching each descriptor
1281 * associated with the interface, and leaving bd_bif NULL. Notify each
1282 * descriptor as it's detached so that any sleepers wake up and get
1283 * ENXIO.
1285 void
1286 bpfdetach(struct ifnet *ifp)
1288 struct bpf_if *bp, *bp_prev;
1289 struct bpf_d *d;
1291 crit_enter();
1293 /* Locate BPF interface information */
1294 bp_prev = NULL;
1295 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1296 if (ifp == bp->bif_ifp)
1297 break;
1298 bp_prev = bp;
1301 /* Interface wasn't attached */
1302 if (bp->bif_ifp == NULL) {
1303 crit_exit();
1304 kprintf("bpfdetach: %s was not attached\n", ifp->if_xname);
1305 return;
1308 while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) {
1309 bpf_detachd(d);
1310 bpf_wakeup(d);
1313 if (bp_prev != NULL)
1314 bp_prev->bif_next = bp->bif_next;
1315 else
1316 bpf_iflist = bp->bif_next;
1318 kfree(bp, M_BPF);
1320 crit_exit();
1324 * Get a list of available data link type of the interface.
1326 static int
1327 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
1329 int n, error;
1330 struct ifnet *ifp;
1331 struct bpf_if *bp;
1333 ifp = d->bd_bif->bif_ifp;
1334 n = 0;
1335 error = 0;
1336 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1337 if (bp->bif_ifp != ifp)
1338 continue;
1339 if (bfl->bfl_list != NULL) {
1340 if (n >= bfl->bfl_len) {
1341 return (ENOMEM);
1343 error = copyout(&bp->bif_dlt,
1344 bfl->bfl_list + n, sizeof(u_int));
1346 n++;
1348 bfl->bfl_len = n;
1349 return(error);
/*
 * Set the data link type of a BPF instance.
 * The target DLT must be offered by another bpf_if entry for the same
 * ifnet; the descriptor is re-attached to it, its buffers are flushed,
 * and promiscuous mode is re-established if it was on.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
        int error, opromisc;
        struct ifnet *ifp;
        struct bpf_if *bp;

        /* Nothing to do if already on the requested DLT. */
        if (d->bd_bif->bif_dlt == dlt)
                return (0);
        ifp = d->bd_bif->bif_ifp;
        for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
                if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
                        break;
        }
        if (bp != NULL) {
                opromisc = d->bd_promisc;
                crit_enter();
                /* bpf_detachd() clears bd_promisc; restore it below. */
                bpf_detachd(d);
                bpf_attachd(d, bp);
                bpf_resetd(d);
                if (opromisc) {
                        error = ifpromisc(bp->bif_ifp, 1);
                        if (error) {
                                if_printf(bp->bif_ifp,
                                        "bpf_setdlt: ifpromisc failed (%d)\n",
                                        error);
                        } else {
                                d->bd_promisc = 1;
                        }
                }
                crit_exit();
        }
        return(bp == NULL ? EINVAL : 0);
}
/*
 * Register the BPF character-device entry points at boot.
 */
static void
bpf_drvinit(void *unused)
{
        dev_ops_add(&bpf_ops, 0, 0);
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
#else /* !BPF */

/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */

/* Stub: discard the tapped packet. */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

/* Stub: discard the tapped mbuf chain. */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

/* Stub: discard the tapped mbuf chain and prepended header. */
void
bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
{
}

/* Stub: interface attach is a no-op without BPF. */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
}

/* Stub: interface attach (explicit cookie) is a no-op without BPF. */
void
bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
}

/* Stub: interface detach is a no-op without BPF. */
void
bpfdetach(struct ifnet *ifp)
{
}

/* Stub filter: accept everything. */
u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
        return -1;      /* "no filter" behaviour */
}

#endif /* !BPF */