[NET]: Make packet reception network namespace safe
net/packet/af_packet.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Version:	$Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>

#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
/*
   Assumptions:
   - if device has no dev->hard_header routine, it adds and removes ll header
     inside itself. In this case ll header is invisible outside of device,
     but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are silly
     (PPP).
   - packet socket receives packets with pulled ll header,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> data

Outgoing, dev->hard_header!=NULL
   mac_header -> ll header
   data       -> ll header

Incoming, dev->hard_header==NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between rx and tx paths.
   data       -> data

Outgoing, dev->hard_header==NULL
   mac_header -> data. ll header is still not built!
   data       -> data

Summary:
   If dev->hard_header==NULL we are unlikely to restore a sensible ll header.


On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header
   data       -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)
   mac_header -> data
   data       -> data

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
/* List of all packet sockets. */
static HLIST_HEAD(packet_sklist);
static DEFINE_RWLOCK(packet_sklist_lock);

static atomic_t packet_socks_nr;
/* Private packet socket structures. */

struct packet_mclist
{
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};
/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max
{
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
#ifdef CONFIG_PACKET_MMAP
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing);
#endif

static void packet_flush_mclist(struct sock *sk);
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct tpacket_stats	stats;
#ifdef CONFIG_PACKET_MMAP
	char			**pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;
	int			copy_thresh;
#endif
	struct packet_type	prot_hook;
	spinlock_t		bind_lock;
	unsigned int		running:1,	/* prot_hook is attached*/
				auxdata:1,
				origdev:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
#ifdef CONFIG_PACKET_MMAP
	atomic_t		mapped;
	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;
#endif
};

struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
#ifdef CONFIG_PACKET_MMAP

static inline struct tpacket_hdr *packet_lookup_frame(struct packet_sock *po, unsigned int position)
{
	unsigned int pg_vec_pos, frame_offset;

	pg_vec_pos = position / po->frames_per_block;
	frame_offset = position % po->frames_per_block;

	return (struct tpacket_hdr *)(po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size));
}
#endif
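/*
 * Example (added for illustration, not part of the original file): the
 * div/mod arithmetic above maps a linear frame index onto a (block,
 * byte-offset) pair.  A minimal standalone sketch with made-up geometry --
 * 4 frames of 2048 bytes per 8 KiB block -- assuming nothing beyond
 * standard C:
 */
#if 0
#include <stdio.h>

int main(void)
{
	const unsigned int frames_per_block = 4;	/* 8192 / 2048 */
	const unsigned int frame_size = 2048;
	unsigned int position;

	for (position = 0; position < 8; position++) {
		unsigned int pg_vec_pos = position / frames_per_block;
		unsigned int frame_offset = position % frames_per_block;

		/* e.g. frame 5 -> pg_vec[1] + 2048 */
		printf("frame %u -> pg_vec[%u] + %u\n", position,
		       pg_vec_pos, frame_offset * frame_size);
	}
	return 0;
}
#endif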
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
static void packet_sock_destruct(struct sock *sk)
{
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	atomic_dec(&packet_socks_nr);
#ifdef PACKET_REFCNT_DEBUG
	printk(KERN_DEBUG "PACKET socket %p is free, %d are alive\n", sk, atomic_read(&packet_socks_nr));
#endif
}
static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	if (dev->nd_net != &init_net)
		goto out;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto oom;

	/* drop any routing info */
	dst_release(skb->dst);
	skb->dst = NULL;

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
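/*
 * Example (added for illustration, not part of the original file): the
 * obsolete SOCK_PACKET interface serviced by packet_rcv_spkt() above is
 * driven from userspace roughly like this hedged sketch (needs CAP_NET_RAW;
 * modern code should use AF_PACKET with SOCK_RAW or SOCK_DGRAM instead):
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* htons() */
#include <linux/if_ether.h>	/* ETH_P_ALL */
#include <linux/if_packet.h>	/* struct sockaddr_pkt */

int main(void)
{
	unsigned char frame[2048];
	struct sockaddr_pkt spkt;
	socklen_t alen = sizeof(spkt);
	ssize_t n;
	/* SOCK_PACKET is still accepted for backwards compatibility */
	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));

	if (fd < 0)
		return 1;
	n = recvfrom(fd, frame, sizeof(frame), 0,
		     (struct sockaddr *)&spkt, &alen);
	if (n >= 0)
		/* spkt_device was filled in by packet_rcv_spkt() */
		printf("%zd bytes from %s\n", n, spkt.spkt_device);
	return 0;
}
#endif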
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr)
	{
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return(-EINVAL);
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	}
	else
		return(-ENOTCONN);	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name(saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 *	You may not queue a frame bigger than the mtu. This is the lowest level
	 *	raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len)
		goto out_unlock;

	err = -ENOBUFS;
	skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);

	/*
	 *	If the write buffer is full, then tough. At this level the user gets to
	 *	deal with the problem - do your own algorithmic backoffs. That's far
	 *	more flexible.
	 */

	if (skb == NULL)
		goto out_unlock;

	/*
	 *	Fill it in
	 */

	/* FIXME: Save some space for broken drivers that write a
	 * hard header at transmission time by themselves. PPP is the
	 * notable one here. This should really be fixed at the driver level.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	/* Try to align data part correctly */
	if (dev->hard_header) {
		skb->data -= dev->hard_header_len;
		skb->tail -= dev->hard_header_len;
		if (len < dev->hard_header_len)
			skb_reset_network_header(skb);
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	if (err)
		goto out_free;

	/*
	 *	Now send it
	 */

	dev_queue_xmit(skb);
	dev_put(dev);
	return(len);

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
	return err;
}
static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = sk_run_filter(skb, filter->insns, filter->len);
	rcu_read_unlock_bh();

	return res;
}
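/*
 * Example (added for illustration, not part of the original file): the
 * sk_filter consulted by run_filter() above is installed from userspace
 * with SO_ATTACH_FILTER.  A hedged sketch that accepts only ARP frames,
 * using the classic BPF program emitted by `tcpdump -dd arp`, on an
 * already-created AF_PACKET socket fd:
 */
#if 0
#include <sys/socket.h>
#include <linux/filter.h>	/* struct sock_filter, struct sock_fprog */
#include <linux/if_ether.h>	/* ETH_P_ARP */

static int attach_arp_filter(int fd)
{
	struct sock_filter code[] = {
		{ 0x28, 0, 0, 0x0000000c },	/* ldh [12] (EtherType)   */
		{ 0x15, 0, 1, ETH_P_ARP },	/* jeq #0x806, L1, L2     */
		{ 0x06, 0, 0, 0x00040000 },	/* L1: ret #262144 (keep) */
		{ 0x06, 0, 0, 0x00000000 },	/* L2: ret #0 (drop)      */
	};
	struct sock_fprog prog = {
		.len = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}
#endif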
/*
   This function makes lazy skb cloning in hope that most of the packets
   are discarded by BPF.

   Note tricky part: we DO mangle shared skb! skb->data, skb->len
   and skb->cb are mangled. It works because (and until) packets
   falling here are owned by the current CPU. Output packets are cloned
   by dev_queue_xmit_nit(), input packets are processed by net_bh
   sequentially, so that if we return skb to the original state on exit,
   we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (dev->nd_net != &init_net)
		goto drop;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	skb->dev = dev;

	if (dev->hard_header) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST)
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;
	sll->sll_halen = 0;

	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	dst_release(skb->dst);
	skb->dst = NULL;

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;
}
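/*
 * Example (added for illustration, not part of the original file): frames
 * queued by packet_rcv() above are read from userspace through a plain
 * AF_PACKET/SOCK_RAW socket; the sockaddr_ll filled in from
 * PACKET_SKB_CB(skb)->sa.ll comes back through recvfrom().  A hedged
 * sketch (needs CAP_NET_RAW):
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* htons() */
#include <linux/if_ether.h>	/* ETH_P_ALL */
#include <linux/if_packet.h>	/* struct sockaddr_ll */

int main(void)
{
	unsigned char frame[2048];
	struct sockaddr_ll sll;
	socklen_t alen = sizeof(sll);
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0)
		return 1;
	for (;;) {
		ssize_t n = recvfrom(fd, frame, sizeof(frame), 0,
				     (struct sockaddr *)&sll, &alen);
		if (n < 0)
			return 1;
		printf("%zd bytes, ifindex %d, hatype %u, pkttype %u\n",
		       n, sll.sll_ifindex, sll.sll_hatype, sll.sll_pkttype);
	}
}
#endif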
#ifdef CONFIG_PACKET_MMAP
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;

	if (dev->nd_net != &init_net)
		goto drop;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (dev->hard_header) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
	} else {
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h = packet_lookup_frame(po, po->head);

	if (h->tp_status)
		goto ring_is_full;
	po->head = po->head != po->frame_max ? po->head+1 : 0;
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, (u8*)h + macoff, snaplen);

	h->tp_len = skb->len;
	h->tp_snaplen = snaplen;
	h->tp_mac = macoff;
	h->tp_net = netoff;
	if (skb->tstamp.tv64)
		tv = ktime_to_timeval(skb->tstamp);
	else
		do_gettimeofday(&tv);
	h->tp_sec = tv.tv_sec;
	h->tp_usec = tv.tv_usec;

	sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST)
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	h->tp_status = status;
	smp_mb();

	{
		struct page *p_start, *p_end;
		u8 *h_end = (u8 *)h + macoff + snaplen - 1;

		p_start = virt_to_page(h);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	if (copy_skb)
		kfree_skb(copy_skb);
	goto drop_n_restore;
}

#endif
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int ifindex, err, reserve = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		struct packet_sock *po = pkt_sk(sk);

		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}


	dev = dev_get_by_index(ifindex);
	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	err = -EMSGSIZE;
	if (len > dev->mtu + reserve)
		goto out_unlock;

	skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	if (dev->hard_header) {
		int res;
		err = -EINVAL;
		res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
		if (sock->type != SOCK_DGRAM) {
			skb_reset_tail_pointer(skb);
			skb->len = 0;
		} else if (res < 0)
			goto out_free;
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;

	/*
	 *	Now send it
	 */

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);

	return(len);

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}
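/*
 * Example (added for illustration, not part of the original file): sending
 * through packet_sendmsg() from userspace with an explicit sockaddr_ll.
 * A hedged sketch that broadcasts a payload on an assumed interface
 * index 2, using SOCK_DGRAM so that dev->hard_header() above builds the
 * link-layer header; 0x88b5 is the IEEE local-experimental EtherType:
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* htons() */
#include <linux/if_ether.h>	/* ETH_ALEN */
#include <linux/if_packet.h>	/* struct sockaddr_ll */

int main(void)
{
	unsigned char payload[64] = "hello";
	struct sockaddr_ll sll;
	int fd = socket(AF_PACKET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&sll, 0, sizeof(sll));
	sll.sll_family	 = AF_PACKET;
	sll.sll_ifindex	 = 2;			/* assumed interface index */
	sll.sll_protocol = htons(0x88b5);	/* experimental EtherType */
	sll.sll_halen	 = ETH_ALEN;
	memset(sll.sll_addr, 0xff, ETH_ALEN);	/* broadcast destination */

	return sendto(fd, payload, sizeof(payload), 0,
		      (struct sockaddr *)&sll, sizeof(sll)) < 0;
}
#endif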
/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;

	if (!sk)
		return 0;

	po = pkt_sk(sk);

	write_lock_bh(&packet_sklist_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&packet_sklist_lock);

	/*
	 *	Unhook packet receive handler.
	 */

	if (po->running) {
		/*
		 *	Remove the protocol hook
		 */
		dev_remove_pack(&po->prot_hook);
		po->running = 0;
		po->num = 0;
		__sock_put(sk);
	}

	packet_flush_mclist(sk);

#ifdef CONFIG_PACKET_MMAP
	if (po->pg_vec) {
		struct tpacket_req req;
		memset(&req, 0, sizeof(req));
		packet_set_ring(sk, &req, 1);
	}
#endif

	/*
	 *	Now the socket is dead. No more input will appear.
	 */

	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);

	sock_put(sk);
	return 0;
}
/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);
	/*
	 *	Detach an existing hook if present.
	 */

	lock_sock(sk);

	spin_lock(&po->bind_lock);
	if (po->running) {
		__sock_put(sk);
		po->running = 0;
		po->num = 0;
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	}

	po->num = protocol;
	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (dev) {
		if (dev->flags & IFF_UP) {
			dev_add_pack(&po->prot_hook);
			sock_hold(sk);
			po->running = 1;
		} else {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	} else {
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}
/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(name);
	if (dev) {
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
		dev_put(dev);
	}
	return err;
}
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
	if (dev)
		dev_put(dev);

out:
	return err;
}
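/*
 * Example (added for illustration, not part of the original file): binding
 * a packet socket to a single interface via packet_bind() above.  A hedged
 * sketch; the interface name passed by the caller is an assumption:
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>		/* htons() */
#include <net/if.h>		/* if_nametoindex() */
#include <linux/if_ether.h>	/* ETH_P_ALL */
#include <linux/if_packet.h>	/* struct sockaddr_ll */

static int bind_to_iface(int fd, const char *name)
{
	struct sockaddr_ll sll;

	memset(&sll, 0, sizeof(sll));
	sll.sll_family	 = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex	 = if_nametoindex(name);
	if (!sll.sll_ifindex)
		return -1;
	/* packet_do_bind() re-hooks the socket onto this device only */
	return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
}
#endif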
static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};
/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, 1);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	atomic_inc(&packet_socks_nr);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	write_lock_bh(&packet_sklist_lock);
	sk_add_node(sk, &packet_sklist);
	write_unlock_bh(&packet_sklist_lock);
	return(0);
out:
	return err;
}
/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	It will return ENETDOWN if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.
	 */

	if (skb == NULL)
		goto out;

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */

	copied = skb->len;
	if (copied > len)
	{
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);

		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = (flags & MSG_TRUNC) ? skb->len : copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	dev = dev_get_by_index(pkt_sk(sk)->ifindex);
	if (dev) {
		strlcpy(uaddr->sa_data, dev->name, 15);
		dev_put(dev);
	} else
		memset(uaddr->sa_data, 0, 14);
	*uaddr_len = sizeof(*uaddr);

	return 0;
}
static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr;

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	dev = dev_get_by_index(po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
		dev_put(dev);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}
static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (what > 0)
			dev_mc_add(dev, i->addr, i->alen, 0);
		else
			dev_mc_delete(dev, i->addr, i->alen, 0);
		break;
	case PACKET_MR_PROMISC:
		dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		dev_set_allmulti(dev, what);
		break;
	default:;
	}
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	packet_dev_mc(dev, i, +1);

done:
	rtnl_unlock();
	return err;
}
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = dev_get_by_index(ml->ifindex);
				if (dev) {
					packet_dev_mc(dev, ml, -1);
					dev_put(dev);
				}
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}
static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		if ((dev = dev_get_by_index(ml->ifindex)) != NULL) {
			packet_dev_mc(dev, ml, -1);
			dev_put(dev);
		}
		kfree(ml);
	}
	rtnl_unlock();
}
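/*
 * Example (added for illustration, not part of the original file): the
 * membership list managed above is driven from userspace with
 * PACKET_ADD_MEMBERSHIP.  A hedged sketch that puts an interface into
 * promiscuous mode for the lifetime of the socket (the ifindex argument
 * is assumed to come from if_nametoindex()):
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>	/* struct packet_mreq, PACKET_MR_PROMISC */

static int enable_promisc(int fd, int ifindex)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = ifindex;
	mreq.mr_type	= PACKET_MR_PROMISC;

	/* Undone automatically on close() via packet_flush_mclist() */
	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
#endif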
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

#ifdef CONFIG_PACKET_MMAP
	case PACKET_RX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
#endif
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
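/*
 * Example (added for illustration, not part of the original file): once
 * PACKET_AUXDATA is enabled via the setsockopt case above,
 * packet_recvmsg() attaches a tpacket_auxdata control message to every
 * datagram.  A hedged sketch of the userspace side:
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>	/* PACKET_AUXDATA, struct tpacket_auxdata */

static void recv_with_auxdata(int fd)
{
	unsigned char frame[2048];
	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
	struct msghdr msg;
	struct cmsghdr *cmsg;
	int one = 1;

	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(fd, &msg, 0) < 0)
		return;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
			/* tp_len is the original length before any snapping */
			printf("wire len %u, captured %u\n",
			       aux->tp_len, aux->tp_snaplen);
		}
	}
}
#endif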
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data;
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

		data = &st;
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;

		data = &val;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;

		data = &val;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
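/*
 * Example (added for illustration, not part of the original file): reading
 * (and thereby resetting) the counters via the PACKET_STATISTICS case
 * above.  A hedged sketch:
 */
#if 0
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>	/* struct tpacket_stats, PACKET_STATISTICS */

static void print_stats(int fd)
{
	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		/* tp_packets already includes tp_drops (see above) */
		printf("seen %u, dropped %u\n", st.tp_packets, st.tp_drops);
}
#endif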
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = data;

	read_lock(&packet_sklist_lock);
	sk_for_each(sk, node, &packet_sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__dev_remove_pack(&po->prot_hook);
					__sock_put(sk);
					po->running = 0;
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			spin_lock(&po->bind_lock);
			if (dev->ifindex == po->ifindex && po->num &&
			    !po->running) {
				dev_add_pack(&po->prot_hook);
				sock_hold(sk);
				po->running = 1;
			}
			spin_unlock(&po->bind_lock);
			break;
		}
	}
	read_unlock(&packet_sklist_lock);
	return NOTIFY_DONE;
}
static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCDARP:
	case SIOCGARP:
	case SIOCSARP:
	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCSIFFLAGS:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
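/*
 * Example (added for illustration, not part of the original file): the
 * SIOCGSTAMP branch above is how userspace fetches the receive timestamp
 * of the most recently read packet.  A hedged sketch:
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <linux/sockios.h>	/* SIOCGSTAMP */

static void print_last_rx_time(int fd)
{
	struct timeval tv;

	/* Valid only after at least one packet has been received */
	if (ioctl(fd, SIOCGSTAMP, &tv) == 0)
		printf("last packet at %ld.%06ld\n",
		       (long)tv.tv_sec, (long)tv.tv_usec);
}
#endif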
#ifndef CONFIG_PACKET_MMAP
#define packet_mmap sock_no_mmap
#define packet_poll datagram_poll
#else

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->pg_vec) {
		unsigned last = po->head ? po->head - 1 : po->frame_max;
		struct tpacket_hdr *h;

		h = packet_lookup_frame(po, last);

		if (h->tp_status)
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return mask;
}
/* Dirty? Well, I still have not learned a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};
static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
{
	return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
}

static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i]))
			free_pages((unsigned long) pg_vec[i], order);
	}
	kfree(pg_vec);
}

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
					 order);
}

static char **alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	char **pg_vec;
	int i;

	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i]))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
{
	char **pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	__be16 num;
	int err = 0;

	if (req->tp_block_nr) {
		int i, l;

		/* Sanity tests and some calculations */

		if (unlikely(po->pg_vec))
			return -EBUSY;

		if (unlikely((int)req->tp_block_size <= 0))
			return -EINVAL;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			return -EINVAL;
		if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
			return -EINVAL;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			return -EINVAL;

		po->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(po->frames_per_block <= 0))
			return -EINVAL;
		if (unlikely((po->frames_per_block * req->tp_block_nr) !=
			     req->tp_frame_nr))
			return -EINVAL;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;

		l = 0;
		for (i = 0; i < req->tp_block_nr; i++) {
			char *ptr = pg_vec[i];
			struct tpacket_hdr *header;
			int k;

			for (k = 0; k < po->frames_per_block; k++) {
				header = (struct tpacket_hdr *) ptr;
				header->tp_status = TP_STATUS_KERNEL;
				ptr += req->tp_frame_size;
			}
		}
		/* Done */
	} else {
		if (unlikely(req->tp_frame_nr))
			return -EINVAL;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		__dev_remove_pack(&po->prot_hook);
		po->num = 0;
		po->running = 0;
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })

		spin_lock_bh(&sk->sk_receive_queue.lock);
		pg_vec = XC(po->pg_vec, pg_vec);
		po->frame_max = (req->tp_frame_nr - 1);
		po->head = 0;
		po->frame_size = req->tp_frame_size;
		spin_unlock_bh(&sk->sk_receive_queue.lock);

		order = XC(po->pg_vec_order, order);
		req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);

		po->pg_vec_pages = req->tp_block_size / PAGE_SIZE;
		po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv;
		skb_queue_purge(&sk->sk_receive_queue);
#undef XC
		if (atomic_read(&po->mapped))
			printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
	}

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		sock_hold(sk);
		po->running = 1;
		po->num = num;
		dev_add_pack(&po->prot_hook);
	}
	spin_unlock(&po->bind_lock);

	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	size = vma->vm_end - vma->vm_start;

	lock_sock(sk);
	if (po->pg_vec == NULL)
		goto out;
	if (size != po->pg_vec_len * po->pg_vec_pages * PAGE_SIZE)
		goto out;

	start = vma->vm_start;
	for (i = 0; i < po->pg_vec_len; i++) {
		struct page *page = virt_to_page(po->pg_vec[i]);
		int pg_num;

		for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
			err = vm_insert_page(vma, start, page);
			if (unlikely(err))
				goto out;
			start += PAGE_SIZE;
		}
	}
	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	release_sock(sk);
	return err;
}
#endif
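/*
 * Example (added for illustration, not part of the original file): the
 * CONFIG_PACKET_MMAP path above (packet_set_ring(), packet_mmap() and
 * tpacket_rcv()) is consumed from userspace roughly like this hedged
 * sketch.  The geometry is made up but satisfies the sanity checks in
 * packet_set_ring(): page-multiple blocks, aligned frames, and
 * tp_frame_nr == frames_per_block * tp_block_nr:
 */
#if 0
#include <stdio.h>
#include <poll.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <netinet/in.h>		/* htons() */
#include <linux/if_ether.h>	/* ETH_P_ALL */
#include <linux/if_packet.h>	/* struct tpacket_req, struct tpacket_hdr */

int main(void)
{
	struct tpacket_req req = {
		.tp_block_size	= 4096,
		.tp_block_nr	= 4,
		.tp_frame_size	= 2048,
		.tp_frame_nr	= 8,	/* (4096 / 2048) * 4 */
	};
	unsigned int i = 0;
	unsigned char *ring;
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0)
		return 1;
	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)))
		return 1;
	ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED)
		return 1;

	for (;;) {
		struct tpacket_hdr *h =
			(struct tpacket_hdr *)(ring + i * req.tp_frame_size);
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		while (!(h->tp_status & TP_STATUS_USER))
			poll(&pfd, 1, -1);	/* wait for tpacket_rcv() */

		printf("frame %u: len %u, snaplen %u\n",
		       i, h->tp_len, h->tp_snaplen);

		h->tp_status = TP_STATUS_KERNEL;	/* hand frame back */
		i = (i + 1) % req.tp_frame_nr;
	}
}
#endif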
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};
#ifdef CONFIG_PROC_FS
static inline struct sock *packet_seq_idx(loff_t off)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &packet_sklist) {
		if (!off--)
			return s;
	}
	return NULL;
}

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&packet_sklist_lock);
	return *pos ? packet_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return (v == SEQ_START_TOKEN)
		? sk_head(&packet_sklist)
		: sk_next((struct sock*)v);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&packet_sklist_lock);
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = v;
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%p %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s) );
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &packet_seq_ops);
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#endif
static void __exit packet_exit(void)
{
	proc_net_remove(&init_net, "packet");
	unregister_netdevice_notifier(&packet_netdev_notifier);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
	proc_net_fops_create(&init_net, "packet", 0, &packet_seq_fops);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);