[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / net / packet / af_packet.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * PACKET - implements raw packet sockets.
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
12 * Fixes:
13 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
35 * Ulises Alonso : Frame number limit removal and
36 * packet_set_ring memory leak.
37 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll
41 * and packet_mreq.
42 * Johann Baudy : Added TX RING.
44 * This program is free software; you can redistribute it and/or
45 * modify it under the terms of the GNU General Public License
46 * as published by the Free Software Foundation; either version
47 * 2 of the License, or (at your option) any later version.
51 #include <linux/types.h>
52 #include <linux/mm.h>
53 #include <linux/capability.h>
54 #include <linux/fcntl.h>
55 #include <linux/socket.h>
56 #include <linux/in.h>
57 #include <linux/inet.h>
58 #include <linux/netdevice.h>
59 #include <linux/if_packet.h>
60 #include <linux/wireless.h>
61 #include <linux/kernel.h>
62 #include <linux/kmod.h>
63 #include <linux/slab.h>
64 #include <net/net_namespace.h>
65 #include <net/ip.h>
66 #include <net/protocol.h>
67 #include <linux/skbuff.h>
68 #include <net/sock.h>
69 #include <linux/errno.h>
70 #include <linux/timer.h>
71 #include <asm/system.h>
72 #include <asm/uaccess.h>
73 #include <asm/ioctls.h>
74 #include <asm/page.h>
75 #include <asm/cacheflush.h>
76 #include <asm/io.h>
77 #include <linux/proc_fs.h>
78 #include <linux/seq_file.h>
79 #include <linux/poll.h>
80 #include <linux/module.h>
81 #include <linux/init.h>
82 #include <linux/mutex.h>
83 #include <linux/if_vlan.h>
84 #include <linux/virtio_net.h>
85 #include <linux/errqueue.h>
86 #include <linux/net_tstamp.h>
88 #ifdef CONFIG_INET
89 #include <net/inet_common.h>
90 #endif
93 Assumptions:
94 - if device has no dev->hard_header routine, it adds and removes ll header
95 inside itself. In this case ll header is invisible outside of device,
96 but higher levels still should reserve dev->hard_header_len.
97      Some devices are clever enough to reallocate the skb when the header
98      will not fit into the reserved space (tunnel); other ones are silly
99      (PPP).
100 - packet socket receives packets with pulled ll header,
101 so that SOCK_RAW should push it back.
103 On receive:
104 -----------
106 Incoming, dev->hard_header!=NULL
107 mac_header -> ll header
108 data -> data
110 Outgoing, dev->hard_header!=NULL
111 mac_header -> ll header
112 data -> ll header
114 Incoming, dev->hard_header==NULL
115    mac_header -> UNKNOWN position. It is very likely that it points to the ll
116                  header. PPP does this, which is wrong, because it introduces
117                  asymmetry between the rx and tx paths.
118 data -> data
120 Outgoing, dev->hard_header==NULL
121 mac_header -> data. ll header is still not built!
122 data -> data
124 Summary
125 If dev->hard_header==NULL we are unlikely to restore sensible ll header.
128 On transmit:
129 ------------
131 dev->hard_header != NULL
132 mac_header -> ll header
133 data -> ll header
135 dev->hard_header == NULL (ll header is added by device, we cannot control it)
136 mac_header -> data
137 data -> data
139    We should set nh.raw on output to the correct position;
140    the packet classifier depends on it.
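/* Illustrative user-space sketch of the two receive flavours described above.
 * AF_PACKET, SOCK_RAW, SOCK_DGRAM and ETH_P_ALL are the standard packet(7)
 * API; error handling is omitted:
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	int raw    = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL)); // data starts at the ll header
 *	int cooked = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL)); // ll header already pulled
 */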
143 /* Private packet socket structures. */
145 struct packet_mclist {
146 struct packet_mclist *next;
147 int ifindex;
148 int count;
149 unsigned short type;
150 unsigned short alen;
151 unsigned char addr[MAX_ADDR_LEN];
153 /* identical to struct packet_mreq except it has
154 * a longer address field.
156 struct packet_mreq_max {
157 int mr_ifindex;
158 unsigned short mr_type;
159 unsigned short mr_alen;
160 unsigned char mr_address[MAX_ADDR_LEN];
163 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
164 int closing, int tx_ring);
166 struct packet_ring_buffer {
167 char **pg_vec;
168 unsigned int head;
169 unsigned int frames_per_block;
170 unsigned int frame_size;
171 unsigned int frame_max;
173 unsigned int pg_vec_order;
174 unsigned int pg_vec_pages;
175 unsigned int pg_vec_len;
177 atomic_t pending;
180 struct packet_sock;
181 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
183 static void packet_flush_mclist(struct sock *sk);
185 struct packet_sock {
186 /* struct sock has to be the first member of packet_sock */
187 struct sock sk;
188 struct tpacket_stats stats;
189 struct packet_ring_buffer rx_ring;
190 struct packet_ring_buffer tx_ring;
191 int copy_thresh;
192 spinlock_t bind_lock;
193 struct mutex pg_vec_lock;
194 unsigned int running:1, /* prot_hook is attached*/
195 auxdata:1,
196 origdev:1,
197 has_vnet_hdr:1;
198 int ifindex; /* bound device */
199 __be16 num;
200 struct packet_mclist *mclist;
201 atomic_t mapped;
202 enum tpacket_versions tp_version;
203 unsigned int tp_hdrlen;
204 unsigned int tp_reserve;
205 unsigned int tp_loss:1;
206 unsigned int tp_tstamp;
207 struct packet_type prot_hook ____cacheline_aligned_in_smp;
210 struct packet_skb_cb {
211 unsigned int origlen;
212 union {
213 struct sockaddr_pkt pkt;
214 struct sockaddr_ll ll;
215 } sa;
218 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
220 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
222 union {
223 struct tpacket_hdr *h1;
224 struct tpacket2_hdr *h2;
225 void *raw;
226 } h;
228 h.raw = frame;
229 switch (po->tp_version) {
230 case TPACKET_V1:
231 h.h1->tp_status = status;
232 flush_dcache_page(virt_to_page(&h.h1->tp_status));
233 break;
234 case TPACKET_V2:
235 h.h2->tp_status = status;
236 flush_dcache_page(virt_to_page(&h.h2->tp_status));
237 break;
238 default:
239 pr_err("TPACKET version not supported\n");
240 BUG();
243 smp_wmb();
246 static int __packet_get_status(struct packet_sock *po, void *frame)
248 union {
249 struct tpacket_hdr *h1;
250 struct tpacket2_hdr *h2;
251 void *raw;
252 } h;
254 smp_rmb();
256 h.raw = frame;
257 switch (po->tp_version) {
258 case TPACKET_V1:
259 flush_dcache_page(virt_to_page(&h.h1->tp_status));
260 return h.h1->tp_status;
261 case TPACKET_V2:
262 flush_dcache_page(virt_to_page(&h.h2->tp_status));
263 return h.h2->tp_status;
264 default:
265 pr_err("TPACKET version not supported\n");
266 BUG();
267 return 0;
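/* The two helpers above implement the ownership handshake on mmap()ed ring
 * frames: a frame whose tp_status is TP_STATUS_KERNEL belongs to the kernel,
 * the RX path hands it to user space by setting TP_STATUS_USER, and user
 * space returns it by writing TP_STATUS_KERNEL back.  The TX ring goes
 * through TP_STATUS_SEND_REQUEST -> TP_STATUS_SENDING -> TP_STATUS_AVAILABLE
 * in the same way.  The flush_dcache_page() calls and the smp_wmb()/smp_rmb()
 * barriers keep the status word coherent across the shared mapping.
 */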
271 static void *packet_lookup_frame(struct packet_sock *po,
272 struct packet_ring_buffer *rb,
273 unsigned int position,
274 int status)
276 unsigned int pg_vec_pos, frame_offset;
277 union {
278 struct tpacket_hdr *h1;
279 struct tpacket2_hdr *h2;
280 void *raw;
281 } h;
283 pg_vec_pos = position / rb->frames_per_block;
284 frame_offset = position % rb->frames_per_block;
286 h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);
288 if (status != __packet_get_status(po, h.raw))
289 return NULL;
291 return h.raw;
294 static inline void *packet_current_frame(struct packet_sock *po,
295 struct packet_ring_buffer *rb,
296 int status)
298 return packet_lookup_frame(po, rb, rb->head, status);
301 static inline void *packet_previous_frame(struct packet_sock *po,
302 struct packet_ring_buffer *rb,
303 int status)
305 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
306 return packet_lookup_frame(po, rb, previous, status);
309 static inline void packet_increment_head(struct packet_ring_buffer *buff)
311 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
314 static inline struct packet_sock *pkt_sk(struct sock *sk)
316 return (struct packet_sock *)sk;
319 static void packet_sock_destruct(struct sock *sk)
321 skb_queue_purge(&sk->sk_error_queue);
323 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
324 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
326 if (!sock_flag(sk, SOCK_DEAD)) {
327 pr_err("Attempt to release alive packet socket: %p\n", sk);
328 return;
331 sk_refcnt_debug_dec(sk);
335 static const struct proto_ops packet_ops;
337 static const struct proto_ops packet_ops_spkt;
339 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
340 struct packet_type *pt, struct net_device *orig_dev)
342 struct sock *sk;
343 struct sockaddr_pkt *spkt;
346 * When we registered the protocol we saved the socket in the data
347 * field for just this event.
350 sk = pt->af_packet_priv;
353 * Yank back the headers [hope the device set this
354 * right or kerboom...]
356 * Incoming packets have ll header pulled,
357 * push it back.
359 * For outgoing ones skb->data == skb_mac_header(skb)
360 * so that this procedure is noop.
363 if (skb->pkt_type == PACKET_LOOPBACK)
364 goto out;
366 if (!net_eq(dev_net(dev), sock_net(sk)))
367 goto out;
369 skb = skb_share_check(skb, GFP_ATOMIC);
370 if (skb == NULL)
371 goto oom;
373 /* drop any routing info */
374 skb_dst_drop(skb);
376 /* drop conntrack reference */
377 nf_reset(skb);
379 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
381 skb_push(skb, skb->data - skb_mac_header(skb));
384 * The SOCK_PACKET socket receives _all_ frames.
387 spkt->spkt_family = dev->type;
388 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
389 spkt->spkt_protocol = skb->protocol;
392 * Charge the memory to the socket. This is done specifically
393 * to prevent sockets using all the memory up.
396 if (sock_queue_rcv_skb(sk, skb) == 0)
397 return 0;
399 out:
400 kfree_skb(skb);
401 oom:
402 return 0;
407 * Output a raw packet to a device layer. This bypasses all the other
408 * protocol layers and you must therefore supply it with a complete frame
411 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
412 struct msghdr *msg, size_t len)
414 struct sock *sk = sock->sk;
415 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
416 struct sk_buff *skb = NULL;
417 struct net_device *dev;
418 __be16 proto = 0;
419 int err;
422 * Get and verify the address.
425 if (saddr) {
426 if (msg->msg_namelen < sizeof(struct sockaddr))
427 return -EINVAL;
428 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
429 proto = saddr->spkt_protocol;
430 } else
431 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
434 * Find the device first to size check it
437 saddr->spkt_device[13] = 0;
438 retry:
439 rcu_read_lock();
440 dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
441 err = -ENODEV;
442 if (dev == NULL)
443 goto out_unlock;
445 err = -ENETDOWN;
446 if (!(dev->flags & IFF_UP))
447 goto out_unlock;
450 * You may not queue a frame bigger than the mtu. This is the lowest level
451 * raw protocol and you must do your own fragmentation at this level.
454 err = -EMSGSIZE;
455 if (len > dev->mtu + dev->hard_header_len)
456 goto out_unlock;
458 if (!skb) {
459 size_t reserved = LL_RESERVED_SPACE(dev);
460 unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
462 rcu_read_unlock();
463 skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
464 if (skb == NULL)
465 return -ENOBUFS;
466 skb_reserve(skb, reserved);
467 skb_reset_network_header(skb);
469 /* Try to align data part correctly */
470 if (hhlen) {
471 skb->data -= hhlen;
472 skb->tail -= hhlen;
473 if (len < hhlen)
474 skb_reset_network_header(skb);
476 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
477 if (err)
478 goto out_free;
479 goto retry;
483 skb->protocol = proto;
484 skb->dev = dev;
485 skb->priority = sk->sk_priority;
486 skb->mark = sk->sk_mark;
487 err = sock_tx_timestamp(msg, sk, skb_tx(skb));
488 if (err < 0)
489 goto out_unlock;
491 dev_queue_xmit(skb);
492 rcu_read_unlock();
493 return len;
495 out_unlock:
496 rcu_read_unlock();
497 out_free:
498 kfree_skb(skb);
499 return err;
502 static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
503 unsigned int res)
505 struct sk_filter *filter;
507 rcu_read_lock_bh();
508 filter = rcu_dereference_bh(sk->sk_filter);
509 if (filter != NULL)
510 res = sk_run_filter(skb, filter->insns, filter->len);
511 rcu_read_unlock_bh();
513 return res;
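/* Illustrative user-space sketch: the filter consulted by run_filter() is a
 * classic BPF program attached with SO_ATTACH_FILTER; the value it returns
 * caps the snap length in the callers below.  "fd" is a placeholder for a
 * packet socket descriptor, error handling omitted:
 *
 *	#include <linux/filter.h>
 *	#include <sys/socket.h>
 *
 *	struct sock_filter insns[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },	// accept every packet, snap to 96 bytes
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = insns };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */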
517    This function does lazy skb cloning, in the hope that most of the packets
518    are discarded by BPF.
520 Note tricky part: we DO mangle shared skb! skb->data, skb->len
521 and skb->cb are mangled. It works because (and until) packets
522 falling here are owned by current CPU. Output packets are cloned
523 by dev_queue_xmit_nit(), input packets are processed by net_bh
524    sequentially, so that if we return skb to original state on exit,
525 we will not harm anyone.
528 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
529 struct packet_type *pt, struct net_device *orig_dev)
531 struct sock *sk;
532 struct sockaddr_ll *sll;
533 struct packet_sock *po;
534 u8 *skb_head = skb->data;
535 int skb_len = skb->len;
536 unsigned int snaplen, res;
538 if (skb->pkt_type == PACKET_LOOPBACK)
539 goto drop;
541 sk = pt->af_packet_priv;
542 po = pkt_sk(sk);
544 if (!net_eq(dev_net(dev), sock_net(sk)))
545 goto drop;
547 skb->dev = dev;
549 if (dev->header_ops) {
550 /* The device has an explicit notion of ll header,
551 exported to higher levels.
553               Otherwise, the device hides details of its frame
554               structure, so that the corresponding packet header is
555               never delivered to the user.
557 if (sk->sk_type != SOCK_DGRAM)
558 skb_push(skb, skb->data - skb_mac_header(skb));
559 else if (skb->pkt_type == PACKET_OUTGOING) {
560 /* Special case: outgoing packets have ll header at head */
561 skb_pull(skb, skb_network_offset(skb));
565 snaplen = skb->len;
567 res = run_filter(skb, sk, snaplen);
568 if (!res)
569 goto drop_n_restore;
570 if (snaplen > res)
571 snaplen = res;
573 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
574 (unsigned)sk->sk_rcvbuf)
575 goto drop_n_acct;
577 if (skb_shared(skb)) {
578 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
579 if (nskb == NULL)
580 goto drop_n_acct;
582 if (skb_head != skb->data) {
583 skb->data = skb_head;
584 skb->len = skb_len;
586 kfree_skb(skb);
587 skb = nskb;
590 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
591 sizeof(skb->cb));
593 sll = &PACKET_SKB_CB(skb)->sa.ll;
594 sll->sll_family = AF_PACKET;
595 sll->sll_hatype = dev->type;
596 sll->sll_protocol = skb->protocol;
597 sll->sll_pkttype = skb->pkt_type;
598 if (unlikely(po->origdev))
599 sll->sll_ifindex = orig_dev->ifindex;
600 else
601 sll->sll_ifindex = dev->ifindex;
603 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
605 PACKET_SKB_CB(skb)->origlen = skb->len;
607 if (pskb_trim(skb, snaplen))
608 goto drop_n_acct;
610 skb_set_owner_r(skb, sk);
611 skb->dev = NULL;
612 skb_dst_drop(skb);
614 /* drop conntrack reference */
615 nf_reset(skb);
617 spin_lock(&sk->sk_receive_queue.lock);
618 po->stats.tp_packets++;
619 skb->dropcount = atomic_read(&sk->sk_drops);
620 __skb_queue_tail(&sk->sk_receive_queue, skb);
621 spin_unlock(&sk->sk_receive_queue.lock);
622 sk->sk_data_ready(sk, skb->len);
623 return 0;
625 drop_n_acct:
626 po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
628 drop_n_restore:
629 if (skb_head != skb->data && skb_shared(skb)) {
630 skb->data = skb_head;
631 skb->len = skb_len;
633 drop:
634 consume_skb(skb);
635 return 0;
638 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
639 struct packet_type *pt, struct net_device *orig_dev)
641 struct sock *sk;
642 struct packet_sock *po;
643 struct sockaddr_ll *sll;
644 union {
645 struct tpacket_hdr *h1;
646 struct tpacket2_hdr *h2;
647 void *raw;
648 } h;
649 u8 *skb_head = skb->data;
650 int skb_len = skb->len;
651 unsigned int snaplen, res;
652 unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
653 unsigned short macoff, netoff, hdrlen;
654 struct sk_buff *copy_skb = NULL;
655 struct timeval tv;
656 struct timespec ts;
657 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
659 if (skb->pkt_type == PACKET_LOOPBACK)
660 goto drop;
662 sk = pt->af_packet_priv;
663 po = pkt_sk(sk);
665 if (!net_eq(dev_net(dev), sock_net(sk)))
666 goto drop;
668 if (dev->header_ops) {
669 if (sk->sk_type != SOCK_DGRAM)
670 skb_push(skb, skb->data - skb_mac_header(skb));
671 else if (skb->pkt_type == PACKET_OUTGOING) {
672 /* Special case: outgoing packets have ll header at head */
673 skb_pull(skb, skb_network_offset(skb));
677 if (skb->ip_summed == CHECKSUM_PARTIAL)
678 status |= TP_STATUS_CSUMNOTREADY;
680 snaplen = skb->len;
682 res = run_filter(skb, sk, snaplen);
683 if (!res)
684 goto drop_n_restore;
685 if (snaplen > res)
686 snaplen = res;
688 if (sk->sk_type == SOCK_DGRAM) {
689 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
690 po->tp_reserve;
691 } else {
692 unsigned maclen = skb_network_offset(skb);
693 netoff = TPACKET_ALIGN(po->tp_hdrlen +
694 (maclen < 16 ? 16 : maclen)) +
695 po->tp_reserve;
696 macoff = netoff - maclen;
699 if (macoff + snaplen > po->rx_ring.frame_size) {
700 if (po->copy_thresh &&
701 atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
702 (unsigned)sk->sk_rcvbuf) {
703 if (skb_shared(skb)) {
704 copy_skb = skb_clone(skb, GFP_ATOMIC);
705 } else {
706 copy_skb = skb_get(skb);
707 skb_head = skb->data;
709 if (copy_skb)
710 skb_set_owner_r(copy_skb, sk);
712 snaplen = po->rx_ring.frame_size - macoff;
713 if ((int)snaplen < 0)
714 snaplen = 0;
717 spin_lock(&sk->sk_receive_queue.lock);
718 h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
719 if (!h.raw)
720 goto ring_is_full;
721 packet_increment_head(&po->rx_ring);
722 po->stats.tp_packets++;
723 if (copy_skb) {
724 status |= TP_STATUS_COPY;
725 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
727 if (!po->stats.tp_drops)
728 status &= ~TP_STATUS_LOSING;
729 spin_unlock(&sk->sk_receive_queue.lock);
731 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
733 switch (po->tp_version) {
734 case TPACKET_V1:
735 h.h1->tp_len = skb->len;
736 h.h1->tp_snaplen = snaplen;
737 h.h1->tp_mac = macoff;
738 h.h1->tp_net = netoff;
739 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
740 && shhwtstamps->syststamp.tv64)
741 tv = ktime_to_timeval(shhwtstamps->syststamp);
742 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
743 && shhwtstamps->hwtstamp.tv64)
744 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
745 else if (skb->tstamp.tv64)
746 tv = ktime_to_timeval(skb->tstamp);
747 else
748 do_gettimeofday(&tv);
749 h.h1->tp_sec = tv.tv_sec;
750 h.h1->tp_usec = tv.tv_usec;
751 hdrlen = sizeof(*h.h1);
752 break;
753 case TPACKET_V2:
754 h.h2->tp_len = skb->len;
755 h.h2->tp_snaplen = snaplen;
756 h.h2->tp_mac = macoff;
757 h.h2->tp_net = netoff;
758 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
759 && shhwtstamps->syststamp.tv64)
760 ts = ktime_to_timespec(shhwtstamps->syststamp);
761 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
762 && shhwtstamps->hwtstamp.tv64)
763 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
764 else if (skb->tstamp.tv64)
765 ts = ktime_to_timespec(skb->tstamp);
766 else
767 getnstimeofday(&ts);
768 h.h2->tp_sec = ts.tv_sec;
769 h.h2->tp_nsec = ts.tv_nsec;
770 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
771 hdrlen = sizeof(*h.h2);
772 break;
773 default:
774 BUG();
777 sll = h.raw + TPACKET_ALIGN(hdrlen);
778 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
779 sll->sll_family = AF_PACKET;
780 sll->sll_hatype = dev->type;
781 sll->sll_protocol = skb->protocol;
782 sll->sll_pkttype = skb->pkt_type;
783 if (unlikely(po->origdev))
784 sll->sll_ifindex = orig_dev->ifindex;
785 else
786 sll->sll_ifindex = dev->ifindex;
788 __packet_set_status(po, h.raw, status);
789 smp_mb();
791 struct page *p_start, *p_end;
792 u8 *h_end = h.raw + macoff + snaplen - 1;
794 p_start = virt_to_page(h.raw);
795 p_end = virt_to_page(h_end);
796 while (p_start <= p_end) {
797 flush_dcache_page(p_start);
798 p_start++;
802 sk->sk_data_ready(sk, 0);
804 drop_n_restore:
805 if (skb_head != skb->data && skb_shared(skb)) {
806 skb->data = skb_head;
807 skb->len = skb_len;
809 drop:
810 kfree_skb(skb);
811 return 0;
813 ring_is_full:
814 po->stats.tp_drops++;
815 spin_unlock(&sk->sk_receive_queue.lock);
817 sk->sk_data_ready(sk, 0);
818 kfree_skb(copy_skb);
819 goto drop_n_restore;
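/* Illustrative user-space sketch of the other side of tpacket_rcv(): configure
 * a PACKET_RX_RING, mmap() it and poll the frame status words.  The ring
 * geometry below is an example only and error handling is omitted:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,	// block_nr * frames per block
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	char *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	struct tpacket_hdr *hdr = (struct tpacket_hdr *)ring;	// frame 0, TPACKET_V1
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		unsigned char *data = (unsigned char *)hdr + hdr->tp_mac;
 *		// consume hdr->tp_snaplen bytes, then hand the frame back:
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 */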
822 static void tpacket_destruct_skb(struct sk_buff *skb)
824 struct packet_sock *po = pkt_sk(skb->sk);
825 void *ph;
827 BUG_ON(skb == NULL);
829 if (likely(po->tx_ring.pg_vec)) {
830 ph = skb_shinfo(skb)->destructor_arg;
831 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
832 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
833 atomic_dec(&po->tx_ring.pending);
834 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
837 sock_wfree(skb);
840 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
841 void *frame, struct net_device *dev, int size_max,
842 __be16 proto, unsigned char *addr)
844 union {
845 struct tpacket_hdr *h1;
846 struct tpacket2_hdr *h2;
847 void *raw;
848 } ph;
849 int to_write, offset, len, tp_len, nr_frags, len_max;
850 struct socket *sock = po->sk.sk_socket;
851 struct page *page;
852 void *data;
853 int err;
855 ph.raw = frame;
857 skb->protocol = proto;
858 skb->dev = dev;
859 skb->priority = po->sk.sk_priority;
860 skb->mark = po->sk.sk_mark;
861 skb_shinfo(skb)->destructor_arg = ph.raw;
863 switch (po->tp_version) {
864 case TPACKET_V2:
865 tp_len = ph.h2->tp_len;
866 break;
867 default:
868 tp_len = ph.h1->tp_len;
869 break;
871 if (unlikely(tp_len > size_max)) {
872 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
873 return -EMSGSIZE;
876 skb_reserve(skb, LL_RESERVED_SPACE(dev));
877 skb_reset_network_header(skb);
879 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
880 to_write = tp_len;
882 if (sock->type == SOCK_DGRAM) {
883 err = dev_hard_header(skb, dev, ntohs(proto), addr,
884 NULL, tp_len);
885 if (unlikely(err < 0))
886 return -EINVAL;
887 } else if (dev->hard_header_len) {
888 /* net device doesn't like empty head */
889 if (unlikely(tp_len <= dev->hard_header_len)) {
890 pr_err("packet size is too short (%d < %d)\n",
891 tp_len, dev->hard_header_len);
892 return -EINVAL;
895 skb_push(skb, dev->hard_header_len);
896 err = skb_store_bits(skb, 0, data,
897 dev->hard_header_len);
898 if (unlikely(err))
899 return err;
901 data += dev->hard_header_len;
902 to_write -= dev->hard_header_len;
905 err = -EFAULT;
906 page = virt_to_page(data);
907 offset = offset_in_page(data);
908 len_max = PAGE_SIZE - offset;
909 len = ((to_write > len_max) ? len_max : to_write);
911 skb->data_len = to_write;
912 skb->len += to_write;
913 skb->truesize += to_write;
914 atomic_add(to_write, &po->sk.sk_wmem_alloc);
916 while (likely(to_write)) {
917 nr_frags = skb_shinfo(skb)->nr_frags;
919 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
920 pr_err("Packet exceed the number of skb frags(%lu)\n",
921 MAX_SKB_FRAGS);
922 return -EFAULT;
925 flush_dcache_page(page);
926 get_page(page);
927 skb_fill_page_desc(skb,
928 nr_frags,
929 page++, offset, len);
930 to_write -= len;
931 offset = 0;
932 len_max = PAGE_SIZE;
933 len = ((to_write > len_max) ? len_max : to_write);
936 return tp_len;
939 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
941 struct socket *sock;
942 struct sk_buff *skb;
943 struct net_device *dev;
944 __be16 proto;
945 int ifindex, err, reserve = 0;
946 void *ph;
947 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
948 int tp_len, size_max;
949 unsigned char *addr;
950 int len_sum = 0;
951 int status = 0;
953 sock = po->sk.sk_socket;
955 mutex_lock(&po->pg_vec_lock);
957 err = -EBUSY;
958 if (saddr == NULL) {
959 ifindex = po->ifindex;
960 proto = po->num;
961 addr = NULL;
962 } else {
963 err = -EINVAL;
964 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
965 goto out;
966 if (msg->msg_namelen < (saddr->sll_halen
967 + offsetof(struct sockaddr_ll,
968 sll_addr)))
969 goto out;
970 ifindex = saddr->sll_ifindex;
971 proto = saddr->sll_protocol;
972 addr = saddr->sll_addr;
975 dev = dev_get_by_index(sock_net(&po->sk), ifindex);
976 err = -ENXIO;
977 if (unlikely(dev == NULL))
978 goto out;
980 reserve = dev->hard_header_len;
982 err = -ENETDOWN;
983 if (unlikely(!(dev->flags & IFF_UP)))
984 goto out_put;
986 size_max = po->tx_ring.frame_size
987 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
989 if (size_max > dev->mtu + reserve)
990 size_max = dev->mtu + reserve;
992 do {
993 ph = packet_current_frame(po, &po->tx_ring,
994 TP_STATUS_SEND_REQUEST);
996 if (unlikely(ph == NULL)) {
997 schedule();
998 continue;
1001 status = TP_STATUS_SEND_REQUEST;
1002 skb = sock_alloc_send_skb(&po->sk,
1003 LL_ALLOCATED_SPACE(dev)
1004 + sizeof(struct sockaddr_ll),
1005 0, &err);
1007 if (unlikely(skb == NULL))
1008 goto out_status;
1010 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
1011 addr);
1013 if (unlikely(tp_len < 0)) {
1014 if (po->tp_loss) {
1015 __packet_set_status(po, ph,
1016 TP_STATUS_AVAILABLE);
1017 packet_increment_head(&po->tx_ring);
1018 kfree_skb(skb);
1019 continue;
1020 } else {
1021 status = TP_STATUS_WRONG_FORMAT;
1022 err = tp_len;
1023 goto out_status;
1027 skb->destructor = tpacket_destruct_skb;
1028 __packet_set_status(po, ph, TP_STATUS_SENDING);
1029 atomic_inc(&po->tx_ring.pending);
1031 status = TP_STATUS_SEND_REQUEST;
1032 err = dev_queue_xmit(skb);
1033 if (unlikely(err > 0)) {
1034 err = net_xmit_errno(err);
1035 if (err && __packet_get_status(po, ph) ==
1036 TP_STATUS_AVAILABLE) {
1037 /* skb was destructed already */
1038 skb = NULL;
1039 goto out_status;
1042 * skb was dropped but not destructed yet;
1043 * let's treat it like congestion or err < 0
1045 err = 0;
1047 packet_increment_head(&po->tx_ring);
1048 len_sum += tp_len;
1049 } while (likely((ph != NULL) ||
1050 ((!(msg->msg_flags & MSG_DONTWAIT)) &&
1051 (atomic_read(&po->tx_ring.pending))))
1054 err = len_sum;
1055 goto out_put;
1057 out_status:
1058 __packet_set_status(po, ph, status);
1059 kfree_skb(skb);
1060 out_put:
1061 dev_put(dev);
1062 out:
1063 mutex_unlock(&po->pg_vec_lock);
1064 return err;
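/* Illustrative user-space sketch of the other side of tpacket_snd(): frames
 * are filled in a PACKET_TX_RING, marked TP_STATUS_SEND_REQUEST and kicked
 * with an ordinary send().  "frame", "pkt" and "pkt_len" are placeholders and
 * the data offset assumes TPACKET_V1:
 *
 *	struct tpacket_hdr *hdr = frame;	// next TX frame in the mmap()ed ring
 *	if (hdr->tp_status == TP_STATUS_AVAILABLE) {
 *		char *data = (char *)frame + TPACKET_HDRLEN - sizeof(struct sockaddr_ll);
 *		memcpy(data, pkt, pkt_len);	// complete frame, ll header included
 *		hdr->tp_len = pkt_len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);		// wakes up tpacket_snd()
 *	}
 */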
1067 static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
1068 size_t reserve, size_t len,
1069 size_t linear, int noblock,
1070 int *err)
1072 struct sk_buff *skb;
1074 /* Under a page? Don't bother with paged skb. */
1075 if (prepad + len < PAGE_SIZE || !linear)
1076 linear = len;
1078 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1079 err);
1080 if (!skb)
1081 return NULL;
1083 skb_reserve(skb, reserve);
1084 skb_put(skb, linear);
1085 skb->data_len = len - linear;
1086 skb->len += len - linear;
1088 return skb;
1091 static int packet_snd(struct socket *sock,
1092 struct msghdr *msg, size_t len)
1094 struct sock *sk = sock->sk;
1095 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
1096 struct sk_buff *skb;
1097 struct net_device *dev;
1098 __be16 proto;
1099 unsigned char *addr;
1100 int ifindex, err, reserve = 0;
1101 struct virtio_net_hdr vnet_hdr = { 0 };
1102 int offset = 0;
1103 int vnet_hdr_len;
1104 struct packet_sock *po = pkt_sk(sk);
1105 unsigned short gso_type = 0;
1108 * Get and verify the address.
1111 if (saddr == NULL) {
1112 ifindex = po->ifindex;
1113 proto = po->num;
1114 addr = NULL;
1115 } else {
1116 err = -EINVAL;
1117 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1118 goto out;
1119 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
1120 goto out;
1121 ifindex = saddr->sll_ifindex;
1122 proto = saddr->sll_protocol;
1123 addr = saddr->sll_addr;
1127 dev = dev_get_by_index(sock_net(sk), ifindex);
1128 err = -ENXIO;
1129 if (dev == NULL)
1130 goto out_unlock;
1131 if (sock->type == SOCK_RAW)
1132 reserve = dev->hard_header_len;
1134 err = -ENETDOWN;
1135 if (!(dev->flags & IFF_UP))
1136 goto out_unlock;
1138 if (po->has_vnet_hdr) {
1139 vnet_hdr_len = sizeof(vnet_hdr);
1141 err = -EINVAL;
1142 if (len < vnet_hdr_len)
1143 goto out_unlock;
1145 len -= vnet_hdr_len;
1147 err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
1148 vnet_hdr_len);
1149 if (err < 0)
1150 goto out_unlock;
1152 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1153 (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
1154 vnet_hdr.hdr_len))
1155 vnet_hdr.hdr_len = vnet_hdr.csum_start +
1156 vnet_hdr.csum_offset + 2;
1158 err = -EINVAL;
1159 if (vnet_hdr.hdr_len > len)
1160 goto out_unlock;
1162 if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1163 switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1164 case VIRTIO_NET_HDR_GSO_TCPV4:
1165 gso_type = SKB_GSO_TCPV4;
1166 break;
1167 case VIRTIO_NET_HDR_GSO_TCPV6:
1168 gso_type = SKB_GSO_TCPV6;
1169 break;
1170 case VIRTIO_NET_HDR_GSO_UDP:
1171 gso_type = SKB_GSO_UDP;
1172 break;
1173 default:
1174 goto out_unlock;
1177 if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1178 gso_type |= SKB_GSO_TCP_ECN;
1180 if (vnet_hdr.gso_size == 0)
1181 goto out_unlock;
1186 err = -EMSGSIZE;
1187 if (!gso_type && (len > dev->mtu+reserve))
1188 goto out_unlock;
1190 err = -ENOBUFS;
1191 skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
1192 LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
1193 msg->msg_flags & MSG_DONTWAIT, &err);
1194 if (skb == NULL)
1195 goto out_unlock;
1197 skb_set_network_header(skb, reserve);
1199 err = -EINVAL;
1200 if (sock->type == SOCK_DGRAM &&
1201 (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
1202 goto out_free;
1204 /* Returns -EFAULT on error */
1205 err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
1206 if (err)
1207 goto out_free;
1208 err = sock_tx_timestamp(msg, sk, skb_tx(skb));
1209 if (err < 0)
1210 goto out_free;
1212 skb->protocol = proto;
1213 skb->dev = dev;
1214 skb->priority = sk->sk_priority;
1215 skb->mark = sk->sk_mark;
1217 if (po->has_vnet_hdr) {
1218 if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1219 if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
1220 vnet_hdr.csum_offset)) {
1221 err = -EINVAL;
1222 goto out_free;
1226 skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
1227 skb_shinfo(skb)->gso_type = gso_type;
1229 /* Header must be checked, and gso_segs computed. */
1230 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1231 skb_shinfo(skb)->gso_segs = 0;
1233 len += vnet_hdr_len;
1237 * Now send it
1240 err = dev_queue_xmit(skb);
1241 if (err > 0 && (err = net_xmit_errno(err)) != 0)
1242 goto out_unlock;
1244 dev_put(dev);
1246 return len;
1248 out_free:
1249 kfree_skb(skb);
1250 out_unlock:
1251 if (dev)
1252 dev_put(dev);
1253 out:
1254 return err;
1257 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
1258 struct msghdr *msg, size_t len)
1260 struct sock *sk = sock->sk;
1261 struct packet_sock *po = pkt_sk(sk);
1262 if (po->tx_ring.pg_vec)
1263 return tpacket_snd(po, msg);
1264 else
1265 return packet_snd(sock, msg, len);
1269 * Close a PACKET socket. This is fairly simple. We immediately go
1270 * to 'closed' state and remove our protocol entry in the device list.
1273 static int packet_release(struct socket *sock)
1275 struct sock *sk = sock->sk;
1276 struct packet_sock *po;
1277 struct net *net;
1278 struct tpacket_req req;
1280 if (!sk)
1281 return 0;
1283 net = sock_net(sk);
1284 po = pkt_sk(sk);
1286 spin_lock_bh(&net->packet.sklist_lock);
1287 sk_del_node_init_rcu(sk);
1288 sock_prot_inuse_add(net, sk->sk_prot, -1);
1289 spin_unlock_bh(&net->packet.sklist_lock);
1291 spin_lock(&po->bind_lock);
1292 if (po->running) {
1294 * Remove from protocol table
1296 po->running = 0;
1297 po->num = 0;
1298 __dev_remove_pack(&po->prot_hook);
1299 __sock_put(sk);
1301 spin_unlock(&po->bind_lock);
1303 packet_flush_mclist(sk);
1305 memset(&req, 0, sizeof(req));
1307 if (po->rx_ring.pg_vec)
1308 packet_set_ring(sk, &req, 1, 0);
1310 if (po->tx_ring.pg_vec)
1311 packet_set_ring(sk, &req, 1, 1);
1313 synchronize_net();
1315 * Now the socket is dead. No more input will appear.
1317 sock_orphan(sk);
1318 sock->sk = NULL;
1320 /* Purge queues */
1322 skb_queue_purge(&sk->sk_receive_queue);
1323 sk_refcnt_debug_release(sk);
1325 sock_put(sk);
1326 return 0;
1330 * Attach a packet hook.
1333 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
1335 struct packet_sock *po = pkt_sk(sk);
1337 * Detach an existing hook if present.
1340 lock_sock(sk);
1342 spin_lock(&po->bind_lock);
1343 if (po->running) {
1344 __sock_put(sk);
1345 po->running = 0;
1346 po->num = 0;
1347 spin_unlock(&po->bind_lock);
1348 dev_remove_pack(&po->prot_hook);
1349 spin_lock(&po->bind_lock);
1352 po->num = protocol;
1353 po->prot_hook.type = protocol;
1354 po->prot_hook.dev = dev;
1356 po->ifindex = dev ? dev->ifindex : 0;
1358 if (protocol == 0)
1359 goto out_unlock;
1361 if (!dev || (dev->flags & IFF_UP)) {
1362 dev_add_pack(&po->prot_hook);
1363 sock_hold(sk);
1364 po->running = 1;
1365 } else {
1366 sk->sk_err = ENETDOWN;
1367 if (!sock_flag(sk, SOCK_DEAD))
1368 sk->sk_error_report(sk);
1371 out_unlock:
1372 spin_unlock(&po->bind_lock);
1373 release_sock(sk);
1374 return 0;
1378 * Bind a packet socket to a device
1381 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
1382 int addr_len)
1384 struct sock *sk = sock->sk;
1385 char name[15];
1386 struct net_device *dev;
1387 int err = -ENODEV;
1390 * Check legality
1393 if (addr_len != sizeof(struct sockaddr))
1394 return -EINVAL;
1395 strlcpy(name, uaddr->sa_data, sizeof(name));
1397 dev = dev_get_by_name(sock_net(sk), name);
1398 if (dev) {
1399 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
1400 dev_put(dev);
1402 return err;
1405 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1407 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
1408 struct sock *sk = sock->sk;
1409 struct net_device *dev = NULL;
1410 int err;
1414 * Check legality
1417 if (addr_len < sizeof(struct sockaddr_ll))
1418 return -EINVAL;
1419 if (sll->sll_family != AF_PACKET)
1420 return -EINVAL;
1422 if (sll->sll_ifindex) {
1423 err = -ENODEV;
1424 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
1425 if (dev == NULL)
1426 goto out;
1428 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
1429 if (dev)
1430 dev_put(dev);
1432 out:
1433 return err;
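/* Illustrative user-space sketch: packet_bind() above is reached through an
 * ordinary bind() with a struct sockaddr_ll; "ifindex" is a placeholder for
 * the interface index:
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = ifindex,
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */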
1436 static struct proto packet_proto = {
1437 .name = "PACKET",
1438 .owner = THIS_MODULE,
1439 .obj_size = sizeof(struct packet_sock),
1443 * Create a packet of type SOCK_PACKET.
1446 static int packet_create(struct net *net, struct socket *sock, int protocol,
1447 int kern)
1449 struct sock *sk;
1450 struct packet_sock *po;
1451 __be16 proto = (__force __be16)protocol; /* weird, but documented */
1452 int err;
1454 if (!capable(CAP_NET_RAW))
1455 return -EPERM;
1456 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
1457 sock->type != SOCK_PACKET)
1458 return -ESOCKTNOSUPPORT;
1460 sock->state = SS_UNCONNECTED;
1462 err = -ENOBUFS;
1463 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
1464 if (sk == NULL)
1465 goto out;
1467 sock->ops = &packet_ops;
1468 if (sock->type == SOCK_PACKET)
1469 sock->ops = &packet_ops_spkt;
1471 sock_init_data(sock, sk);
1473 po = pkt_sk(sk);
1474 sk->sk_family = PF_PACKET;
1475 po->num = proto;
1477 sk->sk_destruct = packet_sock_destruct;
1478 sk_refcnt_debug_inc(sk);
1481 * Attach a protocol block
1484 spin_lock_init(&po->bind_lock);
1485 mutex_init(&po->pg_vec_lock);
1486 po->prot_hook.func = packet_rcv;
1488 if (sock->type == SOCK_PACKET)
1489 po->prot_hook.func = packet_rcv_spkt;
1491 po->prot_hook.af_packet_priv = sk;
1493 if (proto) {
1494 po->prot_hook.type = proto;
1495 dev_add_pack(&po->prot_hook);
1496 sock_hold(sk);
1497 po->running = 1;
1500 spin_lock_bh(&net->packet.sklist_lock);
1501 sk_add_node_rcu(sk, &net->packet.sklist);
1502 sock_prot_inuse_add(net, &packet_proto, 1);
1503 spin_unlock_bh(&net->packet.sklist_lock);
1505 return 0;
1506 out:
1507 return err;
1510 static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
1512 struct sock_exterr_skb *serr;
1513 struct sk_buff *skb, *skb2;
1514 int copied, err;
1516 err = -EAGAIN;
1517 skb = skb_dequeue(&sk->sk_error_queue);
1518 if (skb == NULL)
1519 goto out;
1521 copied = skb->len;
1522 if (copied > len) {
1523 msg->msg_flags |= MSG_TRUNC;
1524 copied = len;
1526 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1527 if (err)
1528 goto out_free_skb;
1530 sock_recv_timestamp(msg, sk, skb);
1532 serr = SKB_EXT_ERR(skb);
1533 put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
1534 sizeof(serr->ee), &serr->ee);
1536 msg->msg_flags |= MSG_ERRQUEUE;
1537 err = copied;
1539 /* Reset and regenerate socket error */
1540 spin_lock_bh(&sk->sk_error_queue.lock);
1541 sk->sk_err = 0;
1542 if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
1543 sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
1544 spin_unlock_bh(&sk->sk_error_queue.lock);
1545 sk->sk_error_report(sk);
1546 } else
1547 spin_unlock_bh(&sk->sk_error_queue.lock);
1549 out_free_skb:
1550 kfree_skb(skb);
1551 out:
1552 return err;
1556 * Pull a packet from our receive queue and hand it to the user.
1557 * If necessary we block.
1560 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1561 struct msghdr *msg, size_t len, int flags)
1563 struct sock *sk = sock->sk;
1564 struct sk_buff *skb;
1565 int copied, err;
1566 struct sockaddr_ll *sll;
1567 int vnet_hdr_len = 0;
1569 err = -EINVAL;
1570 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
1571 goto out;
1574 if (flags & MSG_ERRQUEUE) {
1575 err = packet_recv_error(sk, msg, len);
1576 goto out;
1580 * Call the generic datagram receiver. This handles all sorts
1581 * of horrible races and re-entrancy so we can forget about it
1582 * in the protocol layers.
1584  *     Now it will return ENETDOWN if the device has just gone down,
1585 * but then it will block.
1588 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
1591  *     An error occurred, so return it. Because skb_recv_datagram()
1592  *     handles the blocking, we don't need to see or worry about
1593  *     blocking retries.
1596 if (skb == NULL)
1597 goto out;
1599 if (pkt_sk(sk)->has_vnet_hdr) {
1600 struct virtio_net_hdr vnet_hdr = { 0 };
1602 err = -EINVAL;
1603 vnet_hdr_len = sizeof(vnet_hdr);
1604 if (len < vnet_hdr_len)
1605 goto out_free;
1607 len -= vnet_hdr_len;
1609 if (skb_is_gso(skb)) {
1610 struct skb_shared_info *sinfo = skb_shinfo(skb);
1612 /* This is a hint as to how much should be linear. */
1613 vnet_hdr.hdr_len = skb_headlen(skb);
1614 vnet_hdr.gso_size = sinfo->gso_size;
1615 if (sinfo->gso_type & SKB_GSO_TCPV4)
1616 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1617 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1618 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1619 else if (sinfo->gso_type & SKB_GSO_UDP)
1620 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1621 else if (sinfo->gso_type & SKB_GSO_FCOE)
1622 goto out_free;
1623 else
1624 BUG();
1625 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1626 vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1627 } else
1628 vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1630 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1631 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1632 vnet_hdr.csum_start = skb->csum_start -
1633 skb_headroom(skb);
1634 vnet_hdr.csum_offset = skb->csum_offset;
1635 } /* else everything is zero */
1637 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
1638 vnet_hdr_len);
1639 if (err < 0)
1640 goto out_free;
1644 * If the address length field is there to be filled in, we fill
1645 * it in now.
1648 sll = &PACKET_SKB_CB(skb)->sa.ll;
1649 if (sock->type == SOCK_PACKET)
1650 msg->msg_namelen = sizeof(struct sockaddr_pkt);
1651 else
1652 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
1655 * You lose any data beyond the buffer you gave. If it worries a
1656 * user program they can ask the device for its MTU anyway.
1659 copied = skb->len;
1660 if (copied > len) {
1661 copied = len;
1662 msg->msg_flags |= MSG_TRUNC;
1665 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1666 if (err)
1667 goto out_free;
1669 sock_recv_ts_and_drops(msg, sk, skb);
1671 if (msg->msg_name)
1672 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
1673 msg->msg_namelen);
1675 if (pkt_sk(sk)->auxdata) {
1676 struct tpacket_auxdata aux;
1678 aux.tp_status = TP_STATUS_USER;
1679 if (skb->ip_summed == CHECKSUM_PARTIAL)
1680 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
1681 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
1682 aux.tp_snaplen = skb->len;
1683 aux.tp_mac = 0;
1684 aux.tp_net = skb_network_offset(skb);
1685 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
1687 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1691 * Free or return the buffer as appropriate. Again this
1692 * hides all the races and re-entrancy issues from us.
1694 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
1696 out_free:
1697 skb_free_datagram(sk, skb);
1698 out:
1699 return err;
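/* Illustrative user-space sketch: a plain, non-mmap read that ends up in
 * packet_recvmsg() above; the sender's link-layer address comes back in a
 * struct sockaddr_ll.  Error handling omitted:
 *
 *	unsigned char buf[2048];
 *	struct sockaddr_ll from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 */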
1702 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1703 int *uaddr_len, int peer)
1705 struct net_device *dev;
1706 struct sock *sk = sock->sk;
1708 if (peer)
1709 return -EOPNOTSUPP;
1711 uaddr->sa_family = AF_PACKET;
1712 rcu_read_lock();
1713 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
1714 if (dev)
1715 strncpy(uaddr->sa_data, dev->name, 14);
1716 else
1717 memset(uaddr->sa_data, 0, 14);
1718 rcu_read_unlock();
1719 *uaddr_len = sizeof(*uaddr);
1721 return 0;
1724 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1725 int *uaddr_len, int peer)
1727 struct net_device *dev;
1728 struct sock *sk = sock->sk;
1729 struct packet_sock *po = pkt_sk(sk);
1730 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
1732 if (peer)
1733 return -EOPNOTSUPP;
1735 sll->sll_family = AF_PACKET;
1736 sll->sll_ifindex = po->ifindex;
1737 sll->sll_protocol = po->num;
1738 sll->sll_pkttype = 0;
1739 rcu_read_lock();
1740 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1741 if (dev) {
1742 sll->sll_hatype = dev->type;
1743 sll->sll_halen = dev->addr_len;
1744 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
1745 } else {
1746 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
1747 sll->sll_halen = 0;
1749 rcu_read_unlock();
1750 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
1752 return 0;
1755 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1756 int what)
1758 switch (i->type) {
1759 case PACKET_MR_MULTICAST:
1760 if (i->alen != dev->addr_len)
1761 return -EINVAL;
1762 if (what > 0)
1763 return dev_mc_add(dev, i->addr);
1764 else
1765 return dev_mc_del(dev, i->addr);
1766 break;
1767 case PACKET_MR_PROMISC:
1768 return dev_set_promiscuity(dev, what);
1769 break;
1770 case PACKET_MR_ALLMULTI:
1771 return dev_set_allmulti(dev, what);
1772 break;
1773 case PACKET_MR_UNICAST:
1774 if (i->alen != dev->addr_len)
1775 return -EINVAL;
1776 if (what > 0)
1777 return dev_uc_add(dev, i->addr);
1778 else
1779 return dev_uc_del(dev, i->addr);
1780 break;
1781 default:
1782 break;
1784 return 0;
1787 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
1789 for ( ; i; i = i->next) {
1790 if (i->ifindex == dev->ifindex)
1791 packet_dev_mc(dev, i, what);
1795 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1797 struct packet_sock *po = pkt_sk(sk);
1798 struct packet_mclist *ml, *i;
1799 struct net_device *dev;
1800 int err;
1802 rtnl_lock();
1804 err = -ENODEV;
1805 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
1806 if (!dev)
1807 goto done;
1809 err = -EINVAL;
1810 if (mreq->mr_alen > dev->addr_len)
1811 goto done;
1813 err = -ENOBUFS;
1814 i = kmalloc(sizeof(*i), GFP_KERNEL);
1815 if (i == NULL)
1816 goto done;
1818 err = 0;
1819 for (ml = po->mclist; ml; ml = ml->next) {
1820 if (ml->ifindex == mreq->mr_ifindex &&
1821 ml->type == mreq->mr_type &&
1822 ml->alen == mreq->mr_alen &&
1823 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1824 ml->count++;
1825 /* Free the new element ... */
1826 kfree(i);
1827 goto done;
1831 i->type = mreq->mr_type;
1832 i->ifindex = mreq->mr_ifindex;
1833 i->alen = mreq->mr_alen;
1834 memcpy(i->addr, mreq->mr_address, i->alen);
1835 i->count = 1;
1836 i->next = po->mclist;
1837 po->mclist = i;
1838 err = packet_dev_mc(dev, i, 1);
1839 if (err) {
1840 po->mclist = i->next;
1841 kfree(i);
1844 done:
1845 rtnl_unlock();
1846 return err;
1849 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1851 struct packet_mclist *ml, **mlp;
1853 rtnl_lock();
1855 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
1856 if (ml->ifindex == mreq->mr_ifindex &&
1857 ml->type == mreq->mr_type &&
1858 ml->alen == mreq->mr_alen &&
1859 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1860 if (--ml->count == 0) {
1861 struct net_device *dev;
1862 *mlp = ml->next;
1863 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
1864 if (dev)
1865 packet_dev_mc(dev, ml, -1);
1866 kfree(ml);
1868 rtnl_unlock();
1869 return 0;
1872 rtnl_unlock();
1873 return -EADDRNOTAVAIL;
1876 static void packet_flush_mclist(struct sock *sk)
1878 struct packet_sock *po = pkt_sk(sk);
1879 struct packet_mclist *ml;
1881 if (!po->mclist)
1882 return;
1884 rtnl_lock();
1885 while ((ml = po->mclist) != NULL) {
1886 struct net_device *dev;
1888 po->mclist = ml->next;
1889 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
1890 if (dev != NULL)
1891 packet_dev_mc(dev, ml, -1);
1892 kfree(ml);
1894 rtnl_unlock();
1897 static int
1898 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1900 struct sock *sk = sock->sk;
1901 struct packet_sock *po = pkt_sk(sk);
1902 int ret;
1904 if (level != SOL_PACKET)
1905 return -ENOPROTOOPT;
1907 switch (optname) {
1908 case PACKET_ADD_MEMBERSHIP:
1909 case PACKET_DROP_MEMBERSHIP:
1911 struct packet_mreq_max mreq;
1912 int len = optlen;
1913 memset(&mreq, 0, sizeof(mreq));
1914 if (len < sizeof(struct packet_mreq))
1915 return -EINVAL;
1916 if (len > sizeof(mreq))
1917 len = sizeof(mreq);
1918 if (copy_from_user(&mreq, optval, len))
1919 return -EFAULT;
1920 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
1921 return -EINVAL;
1922 if (optname == PACKET_ADD_MEMBERSHIP)
1923 ret = packet_mc_add(sk, &mreq);
1924 else
1925 ret = packet_mc_drop(sk, &mreq);
1926 return ret;
1929 case PACKET_RX_RING:
1930 case PACKET_TX_RING:
1932 struct tpacket_req req;
1934 if (optlen < sizeof(req))
1935 return -EINVAL;
1936 if (pkt_sk(sk)->has_vnet_hdr)
1937 return -EINVAL;
1938 if (copy_from_user(&req, optval, sizeof(req)))
1939 return -EFAULT;
1940 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
1942 case PACKET_COPY_THRESH:
1944 int val;
1946 if (optlen != sizeof(val))
1947 return -EINVAL;
1948 if (copy_from_user(&val, optval, sizeof(val)))
1949 return -EFAULT;
1951 pkt_sk(sk)->copy_thresh = val;
1952 return 0;
1954 case PACKET_VERSION:
1956 int val;
1958 if (optlen != sizeof(val))
1959 return -EINVAL;
1960 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1961 return -EBUSY;
1962 if (copy_from_user(&val, optval, sizeof(val)))
1963 return -EFAULT;
1964 switch (val) {
1965 case TPACKET_V1:
1966 case TPACKET_V2:
1967 po->tp_version = val;
1968 return 0;
1969 default:
1970 return -EINVAL;
1973 case PACKET_RESERVE:
1975 unsigned int val;
1977 if (optlen != sizeof(val))
1978 return -EINVAL;
1979 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1980 return -EBUSY;
1981 if (copy_from_user(&val, optval, sizeof(val)))
1982 return -EFAULT;
1983 po->tp_reserve = val;
1984 return 0;
1986 case PACKET_LOSS:
1988 unsigned int val;
1990 if (optlen != sizeof(val))
1991 return -EINVAL;
1992 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1993 return -EBUSY;
1994 if (copy_from_user(&val, optval, sizeof(val)))
1995 return -EFAULT;
1996 po->tp_loss = !!val;
1997 return 0;
1999 case PACKET_AUXDATA:
2001 int val;
2003 if (optlen < sizeof(val))
2004 return -EINVAL;
2005 if (copy_from_user(&val, optval, sizeof(val)))
2006 return -EFAULT;
2008 po->auxdata = !!val;
2009 return 0;
2011 case PACKET_ORIGDEV:
2013 int val;
2015 if (optlen < sizeof(val))
2016 return -EINVAL;
2017 if (copy_from_user(&val, optval, sizeof(val)))
2018 return -EFAULT;
2020 po->origdev = !!val;
2021 return 0;
2023 case PACKET_VNET_HDR:
2025 int val;
2027 if (sock->type != SOCK_RAW)
2028 return -EINVAL;
2029 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
2030 return -EBUSY;
2031 if (optlen < sizeof(val))
2032 return -EINVAL;
2033 if (copy_from_user(&val, optval, sizeof(val)))
2034 return -EFAULT;
2036 po->has_vnet_hdr = !!val;
2037 return 0;
2039 case PACKET_TIMESTAMP:
2041 int val;
2043 if (optlen != sizeof(val))
2044 return -EINVAL;
2045 if (copy_from_user(&val, optval, sizeof(val)))
2046 return -EFAULT;
2048 po->tp_tstamp = val;
2049 return 0;
2051 default:
2052 return -ENOPROTOOPT;
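/* Illustrative user-space sketch: the membership options handled above are
 * driven with a struct packet_mreq, e.g. to put the bound interface into
 * promiscuous mode ("ifindex" is a placeholder):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */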
2056 static int packet_getsockopt(struct socket *sock, int level, int optname,
2057 char __user *optval, int __user *optlen)
2059 int len;
2060 int val;
2061 struct sock *sk = sock->sk;
2062 struct packet_sock *po = pkt_sk(sk);
2063 void *data;
2064 struct tpacket_stats st;
2066 if (level != SOL_PACKET)
2067 return -ENOPROTOOPT;
2069 if (get_user(len, optlen))
2070 return -EFAULT;
2072 if (len < 0)
2073 return -EINVAL;
2075 switch (optname) {
2076 case PACKET_STATISTICS:
2077 if (len > sizeof(struct tpacket_stats))
2078 len = sizeof(struct tpacket_stats);
2079 spin_lock_bh(&sk->sk_receive_queue.lock);
2080 st = po->stats;
2081 memset(&po->stats, 0, sizeof(st));
2082 spin_unlock_bh(&sk->sk_receive_queue.lock);
2083 st.tp_packets += st.tp_drops;
2085 data = &st;
2086 break;
2087 case PACKET_AUXDATA:
2088 if (len > sizeof(int))
2089 len = sizeof(int);
2090 val = po->auxdata;
2092 data = &val;
2093 break;
2094 case PACKET_ORIGDEV:
2095 if (len > sizeof(int))
2096 len = sizeof(int);
2097 val = po->origdev;
2099 data = &val;
2100 break;
2101 case PACKET_VNET_HDR:
2102 if (len > sizeof(int))
2103 len = sizeof(int);
2104 val = po->has_vnet_hdr;
2106 data = &val;
2107 break;
2108 case PACKET_VERSION:
2109 if (len > sizeof(int))
2110 len = sizeof(int);
2111 val = po->tp_version;
2112 data = &val;
2113 break;
2114 case PACKET_HDRLEN:
2115 if (len > sizeof(int))
2116 len = sizeof(int);
2117 if (copy_from_user(&val, optval, len))
2118 return -EFAULT;
2119 switch (val) {
2120 case TPACKET_V1:
2121 val = sizeof(struct tpacket_hdr);
2122 break;
2123 case TPACKET_V2:
2124 val = sizeof(struct tpacket2_hdr);
2125 break;
2126 default:
2127 return -EINVAL;
2129 data = &val;
2130 break;
2131 case PACKET_RESERVE:
2132 if (len > sizeof(unsigned int))
2133 len = sizeof(unsigned int);
2134 val = po->tp_reserve;
2135 data = &val;
2136 break;
2137 case PACKET_LOSS:
2138 if (len > sizeof(unsigned int))
2139 len = sizeof(unsigned int);
2140 val = po->tp_loss;
2141 data = &val;
2142 break;
2143 case PACKET_TIMESTAMP:
2144 if (len > sizeof(int))
2145 len = sizeof(int);
2146 val = po->tp_tstamp;
2147 data = &val;
2148 break;
2149 default:
2150 return -ENOPROTOOPT;
2153 if (put_user(len, optlen))
2154 return -EFAULT;
2155 if (copy_to_user(optval, data, len))
2156 return -EFAULT;
2157 return 0;
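/* Illustrative user-space sketch: PACKET_STATISTICS as handled above returns
 * and resets the counters kept by packet_rcv() and tpacket_rcv(); note that
 * tp_packets already includes tp_drops:
 *
 *	struct tpacket_stats st;
 *	socklen_t optlen = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &optlen);
 */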
2161 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
2163 struct sock *sk;
2164 struct hlist_node *node;
2165 struct net_device *dev = data;
2166 struct net *net = dev_net(dev);
2168 rcu_read_lock();
2169 sk_for_each_rcu(sk, node, &net->packet.sklist) {
2170 struct packet_sock *po = pkt_sk(sk);
2172 switch (msg) {
2173 case NETDEV_UNREGISTER:
2174 if (po->mclist)
2175 packet_dev_mclist(dev, po->mclist, -1);
2176 /* fallthrough */
2178 case NETDEV_DOWN:
2179 if (dev->ifindex == po->ifindex) {
2180 spin_lock(&po->bind_lock);
2181 if (po->running) {
2182 __dev_remove_pack(&po->prot_hook);
2183 __sock_put(sk);
2184 po->running = 0;
2185 sk->sk_err = ENETDOWN;
2186 if (!sock_flag(sk, SOCK_DEAD))
2187 sk->sk_error_report(sk);
2189 if (msg == NETDEV_UNREGISTER) {
2190 po->ifindex = -1;
2191 po->prot_hook.dev = NULL;
2193 spin_unlock(&po->bind_lock);
2195 break;
2196 case NETDEV_UP:
2197 if (dev->ifindex == po->ifindex) {
2198 spin_lock(&po->bind_lock);
2199 if (po->num && !po->running) {
2200 dev_add_pack(&po->prot_hook);
2201 sock_hold(sk);
2202 po->running = 1;
2204 spin_unlock(&po->bind_lock);
2206 break;
2209 rcu_read_unlock();
2210 return NOTIFY_DONE;
2214 static int packet_ioctl(struct socket *sock, unsigned int cmd,
2215 unsigned long arg)
2217 struct sock *sk = sock->sk;
2219 switch (cmd) {
2220 case SIOCOUTQ:
2222 int amount = sk_wmem_alloc_get(sk);
2224 return put_user(amount, (int __user *)arg);
2226 case SIOCINQ:
2228 struct sk_buff *skb;
2229 int amount = 0;
2231 spin_lock_bh(&sk->sk_receive_queue.lock);
2232 skb = skb_peek(&sk->sk_receive_queue);
2233 if (skb)
2234 amount = skb->len;
2235 spin_unlock_bh(&sk->sk_receive_queue.lock);
2236 return put_user(amount, (int __user *)arg);
2238 case SIOCGSTAMP:
2239 return sock_get_timestamp(sk, (struct timeval __user *)arg);
2240 case SIOCGSTAMPNS:
2241 return sock_get_timestampns(sk, (struct timespec __user *)arg);
2243 #ifdef CONFIG_INET
2244 case SIOCADDRT:
2245 case SIOCDELRT:
2246 case SIOCDARP:
2247 case SIOCGARP:
2248 case SIOCSARP:
2249 case SIOCGIFADDR:
2250 case SIOCSIFADDR:
2251 case SIOCGIFBRDADDR:
2252 case SIOCSIFBRDADDR:
2253 case SIOCGIFNETMASK:
2254 case SIOCSIFNETMASK:
2255 case SIOCGIFDSTADDR:
2256 case SIOCSIFDSTADDR:
2257 case SIOCSIFFLAGS:
2258 return inet_dgram_ops.ioctl(sock, cmd, arg);
2259 #endif
2261 default:
2262 return -ENOIOCTLCMD;
2264 return 0;
2267 static unsigned int packet_poll(struct file *file, struct socket *sock,
2268 poll_table *wait)
2270 struct sock *sk = sock->sk;
2271 struct packet_sock *po = pkt_sk(sk);
2272 unsigned int mask = datagram_poll(file, sock, wait);
2274 spin_lock_bh(&sk->sk_receive_queue.lock);
2275 if (po->rx_ring.pg_vec) {
2276 if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
2277 mask |= POLLIN | POLLRDNORM;
2279 spin_unlock_bh(&sk->sk_receive_queue.lock);
2280 spin_lock_bh(&sk->sk_write_queue.lock);
2281 if (po->tx_ring.pg_vec) {
2282 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
2283 mask |= POLLOUT | POLLWRNORM;
2285 spin_unlock_bh(&sk->sk_write_queue.lock);
2286 return mask;
2290 /* Dirty? Well, I still have not learned a better way to account
2291  * for user mmaps.
2294 static void packet_mm_open(struct vm_area_struct *vma)
2296 struct file *file = vma->vm_file;
2297 struct socket *sock = file->private_data;
2298 struct sock *sk = sock->sk;
2300 if (sk)
2301 atomic_inc(&pkt_sk(sk)->mapped);
2304 static void packet_mm_close(struct vm_area_struct *vma)
2306 struct file *file = vma->vm_file;
2307 struct socket *sock = file->private_data;
2308 struct sock *sk = sock->sk;
2310 if (sk)
2311 atomic_dec(&pkt_sk(sk)->mapped);
2314 static const struct vm_operations_struct packet_mmap_ops = {
2315 .open = packet_mm_open,
2316 .close = packet_mm_close,
2319 static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
2321 int i;
2323 for (i = 0; i < len; i++) {
2324 if (likely(pg_vec[i]))
2325 free_pages((unsigned long) pg_vec[i], order);
2327 kfree(pg_vec);
2330 static inline char *alloc_one_pg_vec_page(unsigned long order)
2332 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;
2334 return (char *) __get_free_pages(gfp_flags, order);
static char **alloc_pg_vec(struct tpacket_req *req, int order)
{
        unsigned int block_nr = req->tp_block_nr;
        char **pg_vec;
        int i;

        pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
        if (unlikely(!pg_vec))
                goto out;

        for (i = 0; i < block_nr; i++) {
                pg_vec[i] = alloc_one_pg_vec_page(order);
                if (unlikely(!pg_vec[i]))
                        goto out_free_pgvec;
        }

out:
        return pg_vec;

out_free_pgvec:
        free_pg_vec(pg_vec, order, block_nr);
        pg_vec = NULL;
        goto out;
}

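/*
 * Configure (or, when tp_block_nr is 0, tear down) the RX or TX ring.
 * The request must satisfy: tp_block_size is a positive multiple of
 * PAGE_SIZE, tp_frame_size is at least tp_hdrlen + tp_reserve and a
 * multiple of TPACKET_ALIGNMENT, and tp_frame_nr equals
 * (tp_block_size / tp_frame_size) * tp_block_nr.  For example, with
 * tp_block_size = 4096 and tp_frame_size = 2048 there are 2 frames per
 * block, so tp_block_nr = 64 requires tp_frame_nr = 128.
 */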
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
                           int closing, int tx_ring)
{
        char **pg_vec = NULL;
        struct packet_sock *po = pkt_sk(sk);
        int was_running, order = 0;
        struct packet_ring_buffer *rb;
        struct sk_buff_head *rb_queue;
        __be16 num;
        int err;

        rb = tx_ring ? &po->tx_ring : &po->rx_ring;
        rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

        err = -EBUSY;
        if (!closing) {
                if (atomic_read(&po->mapped))
                        goto out;
                if (atomic_read(&rb->pending))
                        goto out;
        }

        if (req->tp_block_nr) {
                /* Sanity tests and some calculations */
                err = -EBUSY;
                if (unlikely(rb->pg_vec))
                        goto out;

                switch (po->tp_version) {
                case TPACKET_V1:
                        po->tp_hdrlen = TPACKET_HDRLEN;
                        break;
                case TPACKET_V2:
                        po->tp_hdrlen = TPACKET2_HDRLEN;
                        break;
                }

                err = -EINVAL;
                if (unlikely((int)req->tp_block_size <= 0))
                        goto out;
                if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
                        goto out;
                if (unlikely(req->tp_frame_size < po->tp_hdrlen +
                                                  po->tp_reserve))
                        goto out;
                if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
                        goto out;

                rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
                if (unlikely(rb->frames_per_block <= 0))
                        goto out;
                if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
                                        req->tp_frame_nr))
                        goto out;

                err = -ENOMEM;
                order = get_order(req->tp_block_size);
                pg_vec = alloc_pg_vec(req, order);
                if (unlikely(!pg_vec))
                        goto out;
        }
        /* Done */
        else {
                err = -EINVAL;
                if (unlikely(req->tp_frame_nr))
                        goto out;
        }

        lock_sock(sk);

        /* Detach socket from network */
        spin_lock(&po->bind_lock);
        was_running = po->running;
        num = po->num;
        if (was_running) {
                __dev_remove_pack(&po->prot_hook);
                po->num = 0;
                po->running = 0;
                __sock_put(sk);
        }
        spin_unlock(&po->bind_lock);

        synchronize_net();

        err = -EBUSY;
        mutex_lock(&po->pg_vec_lock);
        if (closing || atomic_read(&po->mapped) == 0) {
                err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
                spin_lock_bh(&rb_queue->lock);
                pg_vec = XC(rb->pg_vec, pg_vec);
                rb->frame_max = (req->tp_frame_nr - 1);
                rb->head = 0;
                rb->frame_size = req->tp_frame_size;
                spin_unlock_bh(&rb_queue->lock);

                order = XC(rb->pg_vec_order, order);
                req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);

                rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
                po->prot_hook.func = (po->rx_ring.pg_vec) ?
                                                tpacket_rcv : packet_rcv;
                skb_queue_purge(rb_queue);
#undef XC
                if (atomic_read(&po->mapped))
                        pr_err("packet_mmap: vma is busy: %d\n",
                               atomic_read(&po->mapped));
        }
        mutex_unlock(&po->pg_vec_lock);

        spin_lock(&po->bind_lock);
        if (was_running && !po->running) {
                sock_hold(sk);
                po->running = 1;
                po->num = num;
                dev_add_pack(&po->prot_hook);
        }
        spin_unlock(&po->bind_lock);

        release_sock(sk);

        if (pg_vec)
                free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
        return err;
}

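/*
 * A minimal userspace sketch of setting up and mapping an RX ring
 * (illustrative values; assumes "fd" is a PF_PACKET socket using
 * TPACKET_V1 headers):
 *
 *      struct tpacket_req req = {
 *              .tp_block_size = 4096,
 *              .tp_block_nr   = 64,
 *              .tp_frame_size = 2048,
 *              .tp_frame_nr   = 128,
 *      };
 *      setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *      ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *                  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The mmap() length must match the combined size of all configured
 * rings, as checked below.
 */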
static int packet_mmap(struct file *file, struct socket *sock,
                       struct vm_area_struct *vma)
{
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
        unsigned long size, expected_size;
        struct packet_ring_buffer *rb;
        unsigned long start;
        int err = -EINVAL;
        int i;

        if (vma->vm_pgoff)
                return -EINVAL;

        mutex_lock(&po->pg_vec_lock);

        expected_size = 0;
        for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
                if (rb->pg_vec) {
                        expected_size += rb->pg_vec_len
                                                * rb->pg_vec_pages
                                                * PAGE_SIZE;
                }
        }

        if (expected_size == 0)
                goto out;

        size = vma->vm_end - vma->vm_start;
        if (size != expected_size)
                goto out;

        start = vma->vm_start;
        for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
                if (rb->pg_vec == NULL)
                        continue;

                for (i = 0; i < rb->pg_vec_len; i++) {
                        struct page *page = virt_to_page(rb->pg_vec[i]);
                        int pg_num;

                        for (pg_num = 0; pg_num < rb->pg_vec_pages;
                                        pg_num++, page++) {
                                err = vm_insert_page(vma, start, page);
                                if (unlikely(err))
                                        goto out;
                                start += PAGE_SIZE;
                        }
                }
        }

        atomic_inc(&po->mapped);
        vma->vm_ops = &packet_mmap_ops;
        err = 0;

out:
        mutex_unlock(&po->pg_vec_lock);
        return err;
}

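/*
 * Two ops tables follow: packet_ops_spkt serves the obsolete
 * SOCK_PACKET sockets (no ring, no mmap), while packet_ops serves
 * SOCK_RAW/SOCK_DGRAM sockets with ring-aware poll(), mmap() and the
 * PACKET_* socket options.
 */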
static const struct proto_ops packet_ops_spkt = {
        .family = PF_PACKET,
        .owner = THIS_MODULE,
        .release = packet_release,
        .bind = packet_bind_spkt,
        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = packet_getname_spkt,
        .poll = datagram_poll,
        .ioctl = packet_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = sock_no_setsockopt,
        .getsockopt = sock_no_getsockopt,
        .sendmsg = packet_sendmsg_spkt,
        .recvmsg = packet_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
        .family = PF_PACKET,
        .owner = THIS_MODULE,
        .release = packet_release,
        .bind = packet_bind,
        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = packet_getname,
        .poll = packet_poll,
        .ioctl = packet_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = packet_setsockopt,
        .getsockopt = packet_getsockopt,
        .sendmsg = packet_sendmsg,
        .recvmsg = packet_recvmsg,
        .mmap = packet_mmap,
        .sendpage = sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
        .family = PF_PACKET,
        .create = packet_create,
        .owner = THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
        .notifier_call = packet_notifier,
};

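/*
 * /proc/net/packet: walk the per-namespace socket list under RCU and
 * print one line per packet socket (socket pointer, refcount, type,
 * protocol, bound ifindex, running flag, receive-queue memory, uid,
 * inode).
 */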
#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        struct net *net = seq_file_net(seq);

        rcu_read_lock();
        return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct net *net = seq_file_net(seq);
        return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
        else {
                struct sock *s = sk_entry(v);
                const struct packet_sock *po = pkt_sk(s);

                seq_printf(seq,
                           "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
                           s,
                           atomic_read(&s->sk_refcnt),
                           s->sk_type,
                           ntohs(po->num),
                           po->ifindex,
                           po->running,
                           atomic_read(&s->sk_rmem_alloc),
                           sock_i_uid(s),
                           sock_i_ino(s));
        }

        return 0;
}

static const struct seq_operations packet_seq_ops = {
        .start = packet_seq_start,
        .next = packet_seq_next,
        .stop = packet_seq_stop,
        .show = packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &packet_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
        .owner = THIS_MODULE,
        .open = packet_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_net,
};

#endif

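/*
 * Per-network-namespace setup: initialize the namespace's packet
 * socket list and create its /proc/net/packet entry; teardown removes
 * the proc entry again.
 */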
static int __net_init packet_net_init(struct net *net)
{
        spin_lock_init(&net->packet.sklist_lock);
        INIT_HLIST_HEAD(&net->packet.sklist);

        if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
                return -ENOMEM;

        return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
        proc_net_remove(net, "packet");
}

static struct pernet_operations packet_net_ops = {
        .init = packet_net_init,
        .exit = packet_net_exit,
};

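/*
 * Module init registers the protocol, the PF_PACKET socket family, the
 * pernet operations and the netdevice notifier; module exit unwinds
 * them in reverse order.
 */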
static void __exit packet_exit(void)
{
        unregister_netdevice_notifier(&packet_netdev_notifier);
        unregister_pernet_subsys(&packet_net_ops);
        sock_unregister(PF_PACKET);
        proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
        int rc = proto_register(&packet_proto, 0);

        if (rc != 0)
                goto out;

        sock_register(&packet_family_ops);
        register_pernet_subsys(&packet_net_ops);
        register_netdevice_notifier(&packet_netdev_notifier);
out:
        return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);