net/packet/af_packet.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * PACKET - implements raw packet sockets.
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
12 * Fixes:
13 * Alan Cox : verify_area() now used correctly
14 * Alan Cox : new skbuff lists, look ma no backlogs!
15 * Alan Cox : tidied skbuff lists.
16 * Alan Cox : Now uses generic datagram routines I
17 * added. Also fixed the peek/read crash
18 * from all old Linux datagram code.
19 * Alan Cox : Uses the improved datagram code.
20 * Alan Cox : Added NULL's for socket options.
21 * Alan Cox : Re-commented the code.
22 * Alan Cox : Use new kernel side addressing
23 * Rob Janssen : Correct MTU usage.
24 * Dave Platt : Counter leaks caused by incorrect
25 * interrupt locking and some slightly
26 * dubious gcc output. Can you read
27 * compiler: it said _VOLATILE_
28 * Richard Kooijman : Timestamp fixes.
29 * Alan Cox : New buffers. Use sk->mac.raw.
30 * Alan Cox : sendmsg/recvmsg support.
31 * Alan Cox : Protocol setting support
32 * Alexey Kuznetsov : Untied from IPv4 stack.
33 * Cyrus Durgin : Fixed kerneld for kmod.
34 * Michal Ostrowski : Module initialization cleanup.
35 * Ulises Alonso : Frame number limit removal and
36 * packet_set_ring memory leak.
37 * Eric Biederman : Allow for > 8 byte hardware addresses.
38 * The convention is that longer addresses
39 * will simply extend the hardware address
40 * byte arrays at the end of sockaddr_ll
41 * and packet_mreq.
42 * Johann Baudy : Added TX RING.
44 * This program is free software; you can redistribute it and/or
45 * modify it under the terms of the GNU General Public License
46 * as published by the Free Software Foundation; either version
47 * 2 of the License, or (at your option) any later version.
51 #include <linux/types.h>
52 #include <linux/mm.h>
53 #include <linux/capability.h>
54 #include <linux/fcntl.h>
55 #include <linux/socket.h>
56 #include <linux/in.h>
57 #include <linux/inet.h>
58 #include <linux/netdevice.h>
59 #include <linux/if_packet.h>
60 #include <linux/wireless.h>
61 #include <linux/kernel.h>
62 #include <linux/kmod.h>
63 #include <net/net_namespace.h>
64 #include <net/ip.h>
65 #include <net/protocol.h>
66 #include <linux/skbuff.h>
67 #include <net/sock.h>
68 #include <linux/errno.h>
69 #include <linux/timer.h>
70 #include <asm/system.h>
71 #include <asm/uaccess.h>
72 #include <asm/ioctls.h>
73 #include <asm/page.h>
74 #include <asm/cacheflush.h>
75 #include <asm/io.h>
76 #include <linux/proc_fs.h>
77 #include <linux/seq_file.h>
78 #include <linux/poll.h>
79 #include <linux/module.h>
80 #include <linux/init.h>
81 #include <linux/mutex.h>
83 #ifdef CONFIG_INET
84 #include <net/inet_common.h>
85 #endif
88 Assumptions:
89 - if the device has no dev->hard_header routine, it adds and removes the ll
90 header itself. In this case the ll header is invisible outside the device,
91 but higher levels should still reserve dev->hard_header_len.
92 Some devices are clever enough to reallocate the skb when the header
93 does not fit in the reserved space (tunnels); others are not so
94 clever (PPP).
95 - the packet socket receives packets with the ll header already pulled,
96 so SOCK_RAW has to push it back.
98 On receive:
99 -----------
101 Incoming, dev->hard_header!=NULL
102 mac_header -> ll header
103 data -> data
105 Outgoing, dev->hard_header!=NULL
106 mac_header -> ll header
107 data -> ll header
109 Incoming, dev->hard_header==NULL
110 mac_header -> UNKNOWN position. It is very likely that it points to the ll
111 header. PPP does this, which is wrong, because it introduces
112 asymmetry between the rx and tx paths.
113 data -> data
115 Outgoing, dev->hard_header==NULL
116 mac_header -> data. ll header is still not built!
117 data -> data
119 Summary
120 If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
123 On transmit:
124 ------------
126 dev->hard_header != NULL
127 mac_header -> ll header
128 data -> ll header
130 dev->hard_header == NULL (ll header is added by device, we cannot control it)
131 mac_header -> data
132 data -> data
134 We should set nh.raw on output to the correct position, since the
135 packet classifier depends on it.
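/*
 * Editor's note: a minimal user-space sketch of the two receive flavours
 * described above (not part of the original file; names are illustrative
 * only).  A SOCK_RAW packet socket hands frames to the user with the
 * link-layer header in place, while SOCK_DGRAM strips it and reports it
 * through sockaddr_ll instead:
 *
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *
 *	int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *
 * Both calls require CAP_NET_RAW, as enforced by packet_create() below.
 */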
138 /* Private packet socket structures. */
140 struct packet_mclist {
141 struct packet_mclist *next;
142 int ifindex;
143 int count;
144 unsigned short type;
145 unsigned short alen;
146 unsigned char addr[MAX_ADDR_LEN];
148 /* identical to struct packet_mreq except it has
149 * a longer address field.
151 struct packet_mreq_max {
152 int mr_ifindex;
153 unsigned short mr_type;
154 unsigned short mr_alen;
155 unsigned char mr_address[MAX_ADDR_LEN];
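/*
 * Editor's note: a hedged sketch of how the membership structure above is
 * driven from user space (illustrative only; fd is assumed to be an
 * already created AF_PACKET socket).  Putting the bound interface into
 * promiscuous mode needs no address, only the interface index:
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	struct packet_mreq mreq;
 *
 *	memset(&mreq, 0, sizeof(mreq));
 *	mreq.mr_ifindex = ifindex;
 *	mreq.mr_type    = PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * PACKET_DROP_MEMBERSHIP with the same structure undoes it; see
 * packet_mc_add()/packet_mc_drop() further down.
 */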
158 #ifdef CONFIG_PACKET_MMAP
159 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
160 int closing, int tx_ring);
162 struct packet_ring_buffer {
163 char **pg_vec;
164 unsigned int head;
165 unsigned int frames_per_block;
166 unsigned int frame_size;
167 unsigned int frame_max;
169 unsigned int pg_vec_order;
170 unsigned int pg_vec_pages;
171 unsigned int pg_vec_len;
173 atomic_t pending;
176 struct packet_sock;
177 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
178 #endif
180 static void packet_flush_mclist(struct sock *sk);
182 struct packet_sock {
183 /* struct sock has to be the first member of packet_sock */
184 struct sock sk;
185 struct tpacket_stats stats;
186 #ifdef CONFIG_PACKET_MMAP
187 struct packet_ring_buffer rx_ring;
188 struct packet_ring_buffer tx_ring;
189 int copy_thresh;
190 #endif
191 struct packet_type prot_hook;
192 spinlock_t bind_lock;
193 struct mutex pg_vec_lock;
194 unsigned int running:1, /* prot_hook is attached*/
195 auxdata:1,
196 origdev:1;
197 int ifindex; /* bound device */
198 __be16 num;
199 struct packet_mclist *mclist;
200 #ifdef CONFIG_PACKET_MMAP
201 atomic_t mapped;
202 enum tpacket_versions tp_version;
203 unsigned int tp_hdrlen;
204 unsigned int tp_reserve;
205 unsigned int tp_loss:1;
206 #endif
209 struct packet_skb_cb {
210 unsigned int origlen;
211 union {
212 struct sockaddr_pkt pkt;
213 struct sockaddr_ll ll;
214 } sa;
217 #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
219 #ifdef CONFIG_PACKET_MMAP
221 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
223 union {
224 struct tpacket_hdr *h1;
225 struct tpacket2_hdr *h2;
226 void *raw;
227 } h;
229 h.raw = frame;
230 switch (po->tp_version) {
231 case TPACKET_V1:
232 h.h1->tp_status = status;
233 flush_dcache_page(virt_to_page(&h.h1->tp_status));
234 break;
235 case TPACKET_V2:
236 h.h2->tp_status = status;
237 flush_dcache_page(virt_to_page(&h.h2->tp_status));
238 break;
239 default:
240 pr_err("TPACKET version not supported\n");
241 BUG();
244 smp_wmb();
247 static int __packet_get_status(struct packet_sock *po, void *frame)
249 union {
250 struct tpacket_hdr *h1;
251 struct tpacket2_hdr *h2;
252 void *raw;
253 } h;
255 smp_rmb();
257 h.raw = frame;
258 switch (po->tp_version) {
259 case TPACKET_V1:
260 flush_dcache_page(virt_to_page(&h.h1->tp_status));
261 return h.h1->tp_status;
262 case TPACKET_V2:
263 flush_dcache_page(virt_to_page(&h.h2->tp_status));
264 return h.h2->tp_status;
265 default:
266 pr_err("TPACKET version not supported\n");
267 BUG();
268 return 0;
272 static void *packet_lookup_frame(struct packet_sock *po,
273 struct packet_ring_buffer *rb,
274 unsigned int position,
275 int status)
277 unsigned int pg_vec_pos, frame_offset;
278 union {
279 struct tpacket_hdr *h1;
280 struct tpacket2_hdr *h2;
281 void *raw;
282 } h;
284 pg_vec_pos = position / rb->frames_per_block;
285 frame_offset = position % rb->frames_per_block;
287 h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);
289 if (status != __packet_get_status(po, h.raw))
290 return NULL;
292 return h.raw;
295 static inline void *packet_current_frame(struct packet_sock *po,
296 struct packet_ring_buffer *rb,
297 int status)
299 return packet_lookup_frame(po, rb, rb->head, status);
302 static inline void *packet_previous_frame(struct packet_sock *po,
303 struct packet_ring_buffer *rb,
304 int status)
306 unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
307 return packet_lookup_frame(po, rb, previous, status);
310 static inline void packet_increment_head(struct packet_ring_buffer *buff)
312 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
315 #endif
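/*
 * Editor's note: the helpers above implement the kernel half of the
 * tp_status handshake: frames owned by the kernel carry TP_STATUS_KERNEL,
 * frames handed to the user carry TP_STATUS_USER.  A hedged sketch of the
 * matching user-space consumer, assuming TPACKET_V1, a ring already
 * mapped at "ring", and a pollfd "pfd" watching the socket (all names
 * illustrative, not part of the original file):
 *
 *	struct tpacket_hdr *hdr;
 *
 *	hdr = (struct tpacket_hdr *)(ring + i * frame_size);
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *	consume(ring + i * frame_size + hdr->tp_mac, hdr->tp_snaplen);
 *	__sync_synchronize();
 *	hdr->tp_status = TP_STATUS_KERNEL;
 *	i = (i + 1) % frame_nr;
 */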
317 static inline struct packet_sock *pkt_sk(struct sock *sk)
319 return (struct packet_sock *)sk;
322 static void packet_sock_destruct(struct sock *sk)
324 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
325 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
327 if (!sock_flag(sk, SOCK_DEAD)) {
328 pr_err("Attempt to release alive packet socket: %p\n", sk);
329 return;
332 sk_refcnt_debug_dec(sk);
336 static const struct proto_ops packet_ops;
338 static const struct proto_ops packet_ops_spkt;
340 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
341 struct packet_type *pt, struct net_device *orig_dev)
343 struct sock *sk;
344 struct sockaddr_pkt *spkt;
347 * When we registered the protocol we saved the socket in the data
348 * field for just this event.
351 sk = pt->af_packet_priv;
354 * Yank back the headers [hope the device set this
355 * right or kerboom...]
357 * Incoming packets have ll header pulled,
358 * push it back.
360 * For outgoing ones skb->data == skb_mac_header(skb)
361 * so that this procedure is noop.
364 if (skb->pkt_type == PACKET_LOOPBACK)
365 goto out;
367 if (dev_net(dev) != sock_net(sk))
368 goto out;
370 skb = skb_share_check(skb, GFP_ATOMIC);
371 if (skb == NULL)
372 goto oom;
374 /* drop any routing info */
375 skb_dst_drop(skb);
377 /* drop conntrack reference */
378 nf_reset(skb);
380 spkt = &PACKET_SKB_CB(skb)->sa.pkt;
382 skb_push(skb, skb->data - skb_mac_header(skb));
385 * The SOCK_PACKET socket receives _all_ frames.
388 spkt->spkt_family = dev->type;
389 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
390 spkt->spkt_protocol = skb->protocol;
393 * Charge the memory to the socket. This is done specifically
394 * to prevent a socket from using up all the memory.
397 if (sock_queue_rcv_skb(sk, skb) == 0)
398 return 0;
400 out:
401 kfree_skb(skb);
402 oom:
403 return 0;
408 * Output a raw packet to a device layer. This bypasses all the other
409 * protocol layers and you must therefore supply it with a complete frame
412 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
413 struct msghdr *msg, size_t len)
415 struct sock *sk = sock->sk;
416 struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
417 struct sk_buff *skb;
418 struct net_device *dev;
419 __be16 proto = 0;
420 int err;
423 * Get and verify the address.
426 if (saddr) {
427 if (msg->msg_namelen < sizeof(struct sockaddr))
428 return -EINVAL;
429 if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
430 proto = saddr->spkt_protocol;
431 } else
432 return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
435 * Find the device first to size check it
438 saddr->spkt_device[13] = 0;
439 dev = dev_get_by_name(sock_net(sk), saddr->spkt_device);
440 err = -ENODEV;
441 if (dev == NULL)
442 goto out_unlock;
444 err = -ENETDOWN;
445 if (!(dev->flags & IFF_UP))
446 goto out_unlock;
449 * You may not queue a frame bigger than the mtu. This is the lowest level
450 * raw protocol and you must do your own fragmentation at this level.
453 err = -EMSGSIZE;
454 if (len > dev->mtu + dev->hard_header_len)
455 goto out_unlock;
457 err = -ENOBUFS;
458 skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);
461 * If the write buffer is full, then tough. At this level the user
462 * gets to deal with the problem - do your own algorithmic backoffs.
463 * That's far more flexible.
466 if (skb == NULL)
467 goto out_unlock;
470 * Fill it in
473 /* FIXME: Save some space for broken drivers that write a
474 * hard header at transmission time by themselves. PPP is the
475 * notable one here. This should really be fixed at the driver level.
477 skb_reserve(skb, LL_RESERVED_SPACE(dev));
478 skb_reset_network_header(skb);
480 /* Try to align data part correctly */
481 if (dev->header_ops) {
482 skb->data -= dev->hard_header_len;
483 skb->tail -= dev->hard_header_len;
484 if (len < dev->hard_header_len)
485 skb_reset_network_header(skb);
488 /* Returns -EFAULT on error */
489 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
490 skb->protocol = proto;
491 skb->dev = dev;
492 skb->priority = sk->sk_priority;
493 if (err)
494 goto out_free;
497 * Now send it
500 dev_queue_xmit(skb);
501 dev_put(dev);
502 return len;
504 out_free:
505 kfree_skb(skb);
506 out_unlock:
507 if (dev)
508 dev_put(dev);
509 return err;
512 static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
513 unsigned int res)
515 struct sk_filter *filter;
517 rcu_read_lock_bh();
518 filter = rcu_dereference(sk->sk_filter);
519 if (filter != NULL)
520 res = sk_run_filter(skb, filter->insns, filter->len);
521 rcu_read_unlock_bh();
523 return res;
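/*
 * Editor's note: run_filter() above applies a classic BPF program attached
 * with SO_ATTACH_FILTER.  A hedged user-space sketch (illustrative only;
 * fd is an assumed AF_PACKET socket) that accepts every packet and caps
 * the captured length at 96 bytes:
 *
 *	#include <sys/socket.h>
 *	#include <linux/filter.h>
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 96),
 *	};
 *	struct sock_fprog prog = {
 *		.len    = sizeof(code) / sizeof(code[0]),
 *		.filter = code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * A return value of 0 from the filter drops the packet; a non-zero value
 * caps snaplen, exactly as packet_rcv()/tpacket_rcv() use it below.
 */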
527 This function does lazy skb cloning in the hope that most packets
528 are discarded by BPF.
530 Note the tricky part: we DO mangle a shared skb! skb->data, skb->len
531 and skb->cb are mangled. It works because (and until) packets
532 falling here are owned by the current CPU. Output packets are cloned
533 by dev_queue_xmit_nit(), input packets are processed by net_bh
534 sequentially, so if we return the skb to its original state on exit,
535 we will not harm anyone.
538 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
539 struct packet_type *pt, struct net_device *orig_dev)
541 struct sock *sk;
542 struct sockaddr_ll *sll;
543 struct packet_sock *po;
544 u8 *skb_head = skb->data;
545 int skb_len = skb->len;
546 unsigned int snaplen, res;
548 if (skb->pkt_type == PACKET_LOOPBACK)
549 goto drop;
551 sk = pt->af_packet_priv;
552 po = pkt_sk(sk);
554 if (dev_net(dev) != sock_net(sk))
555 goto drop;
557 skb->dev = dev;
559 if (dev->header_ops) {
560 /* The device has an explicit notion of ll header,
561 exported to higher levels.
563 Otherwise, the device hides the details of its frame
564 structure, so the corresponding packet header is
565 never delivered to the user.
567 if (sk->sk_type != SOCK_DGRAM)
568 skb_push(skb, skb->data - skb_mac_header(skb));
569 else if (skb->pkt_type == PACKET_OUTGOING) {
570 /* Special case: outgoing packets have ll header at head */
571 skb_pull(skb, skb_network_offset(skb));
575 snaplen = skb->len;
577 res = run_filter(skb, sk, snaplen);
578 if (!res)
579 goto drop_n_restore;
580 if (snaplen > res)
581 snaplen = res;
583 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
584 (unsigned)sk->sk_rcvbuf)
585 goto drop_n_acct;
587 if (skb_shared(skb)) {
588 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
589 if (nskb == NULL)
590 goto drop_n_acct;
592 if (skb_head != skb->data) {
593 skb->data = skb_head;
594 skb->len = skb_len;
596 kfree_skb(skb);
597 skb = nskb;
600 BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
601 sizeof(skb->cb));
603 sll = &PACKET_SKB_CB(skb)->sa.ll;
604 sll->sll_family = AF_PACKET;
605 sll->sll_hatype = dev->type;
606 sll->sll_protocol = skb->protocol;
607 sll->sll_pkttype = skb->pkt_type;
608 if (unlikely(po->origdev))
609 sll->sll_ifindex = orig_dev->ifindex;
610 else
611 sll->sll_ifindex = dev->ifindex;
613 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
615 PACKET_SKB_CB(skb)->origlen = skb->len;
617 if (pskb_trim(skb, snaplen))
618 goto drop_n_acct;
620 skb_set_owner_r(skb, sk);
621 skb->dev = NULL;
622 skb_dst_drop(skb);
624 /* drop conntrack reference */
625 nf_reset(skb);
627 spin_lock(&sk->sk_receive_queue.lock);
628 po->stats.tp_packets++;
629 __skb_queue_tail(&sk->sk_receive_queue, skb);
630 spin_unlock(&sk->sk_receive_queue.lock);
631 sk->sk_data_ready(sk, skb->len);
632 return 0;
634 drop_n_acct:
635 spin_lock(&sk->sk_receive_queue.lock);
636 po->stats.tp_drops++;
637 spin_unlock(&sk->sk_receive_queue.lock);
639 drop_n_restore:
640 if (skb_head != skb->data && skb_shared(skb)) {
641 skb->data = skb_head;
642 skb->len = skb_len;
644 drop:
645 consume_skb(skb);
646 return 0;
649 #ifdef CONFIG_PACKET_MMAP
650 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
651 struct packet_type *pt, struct net_device *orig_dev)
653 struct sock *sk;
654 struct packet_sock *po;
655 struct sockaddr_ll *sll;
656 union {
657 struct tpacket_hdr *h1;
658 struct tpacket2_hdr *h2;
659 void *raw;
660 } h;
661 u8 *skb_head = skb->data;
662 int skb_len = skb->len;
663 unsigned int snaplen, res;
664 unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
665 unsigned short macoff, netoff, hdrlen;
666 struct sk_buff *copy_skb = NULL;
667 struct timeval tv;
668 struct timespec ts;
670 if (skb->pkt_type == PACKET_LOOPBACK)
671 goto drop;
673 sk = pt->af_packet_priv;
674 po = pkt_sk(sk);
676 if (dev_net(dev) != sock_net(sk))
677 goto drop;
679 if (dev->header_ops) {
680 if (sk->sk_type != SOCK_DGRAM)
681 skb_push(skb, skb->data - skb_mac_header(skb));
682 else if (skb->pkt_type == PACKET_OUTGOING) {
683 /* Special case: outgoing packets have ll header at head */
684 skb_pull(skb, skb_network_offset(skb));
688 if (skb->ip_summed == CHECKSUM_PARTIAL)
689 status |= TP_STATUS_CSUMNOTREADY;
691 snaplen = skb->len;
693 res = run_filter(skb, sk, snaplen);
694 if (!res)
695 goto drop_n_restore;
696 if (snaplen > res)
697 snaplen = res;
699 if (sk->sk_type == SOCK_DGRAM) {
700 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
701 po->tp_reserve;
702 } else {
703 unsigned maclen = skb_network_offset(skb);
704 netoff = TPACKET_ALIGN(po->tp_hdrlen +
705 (maclen < 16 ? 16 : maclen)) +
706 po->tp_reserve;
707 macoff = netoff - maclen;
710 if (macoff + snaplen > po->rx_ring.frame_size) {
711 if (po->copy_thresh &&
712 atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
713 (unsigned)sk->sk_rcvbuf) {
714 if (skb_shared(skb)) {
715 copy_skb = skb_clone(skb, GFP_ATOMIC);
716 } else {
717 copy_skb = skb_get(skb);
718 skb_head = skb->data;
720 if (copy_skb)
721 skb_set_owner_r(copy_skb, sk);
723 snaplen = po->rx_ring.frame_size - macoff;
724 if ((int)snaplen < 0)
725 snaplen = 0;
728 spin_lock(&sk->sk_receive_queue.lock);
729 h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);
730 if (!h.raw)
731 goto ring_is_full;
732 packet_increment_head(&po->rx_ring);
733 po->stats.tp_packets++;
734 if (copy_skb) {
735 status |= TP_STATUS_COPY;
736 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
738 if (!po->stats.tp_drops)
739 status &= ~TP_STATUS_LOSING;
740 spin_unlock(&sk->sk_receive_queue.lock);
742 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
744 switch (po->tp_version) {
745 case TPACKET_V1:
746 h.h1->tp_len = skb->len;
747 h.h1->tp_snaplen = snaplen;
748 h.h1->tp_mac = macoff;
749 h.h1->tp_net = netoff;
750 if (skb->tstamp.tv64)
751 tv = ktime_to_timeval(skb->tstamp);
752 else
753 do_gettimeofday(&tv);
754 h.h1->tp_sec = tv.tv_sec;
755 h.h1->tp_usec = tv.tv_usec;
756 hdrlen = sizeof(*h.h1);
757 break;
758 case TPACKET_V2:
759 h.h2->tp_len = skb->len;
760 h.h2->tp_snaplen = snaplen;
761 h.h2->tp_mac = macoff;
762 h.h2->tp_net = netoff;
763 if (skb->tstamp.tv64)
764 ts = ktime_to_timespec(skb->tstamp);
765 else
766 getnstimeofday(&ts);
767 h.h2->tp_sec = ts.tv_sec;
768 h.h2->tp_nsec = ts.tv_nsec;
769 h.h2->tp_vlan_tci = skb->vlan_tci;
770 h.h2->tp_padding = 0;
771 hdrlen = sizeof(*h.h2);
772 break;
773 default:
774 BUG();
777 sll = h.raw + TPACKET_ALIGN(hdrlen);
778 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
779 sll->sll_family = AF_PACKET;
780 sll->sll_hatype = dev->type;
781 sll->sll_protocol = skb->protocol;
782 sll->sll_pkttype = skb->pkt_type;
783 if (unlikely(po->origdev))
784 sll->sll_ifindex = orig_dev->ifindex;
785 else
786 sll->sll_ifindex = dev->ifindex;
788 __packet_set_status(po, h.raw, status);
789 smp_mb();
791 struct page *p_start, *p_end;
792 u8 *h_end = h.raw + macoff + snaplen - 1;
794 p_start = virt_to_page(h.raw);
795 p_end = virt_to_page(h_end);
796 while (p_start <= p_end) {
797 flush_dcache_page(p_start);
798 p_start++;
802 sk->sk_data_ready(sk, 0);
804 drop_n_restore:
805 if (skb_head != skb->data && skb_shared(skb)) {
806 skb->data = skb_head;
807 skb->len = skb_len;
809 drop:
810 kfree_skb(skb);
811 return 0;
813 ring_is_full:
814 po->stats.tp_drops++;
815 spin_unlock(&sk->sk_receive_queue.lock);
817 sk->sk_data_ready(sk, 0);
818 kfree_skb(copy_skb);
819 goto drop_n_restore;
822 static void tpacket_destruct_skb(struct sk_buff *skb)
824 struct packet_sock *po = pkt_sk(skb->sk);
825 void *ph;
827 BUG_ON(skb == NULL);
829 if (likely(po->tx_ring.pg_vec)) {
830 ph = skb_shinfo(skb)->destructor_arg;
831 BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
832 BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
833 atomic_dec(&po->tx_ring.pending);
834 __packet_set_status(po, ph, TP_STATUS_AVAILABLE);
837 sock_wfree(skb);
840 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
841 void *frame, struct net_device *dev, int size_max,
842 __be16 proto, unsigned char *addr)
844 union {
845 struct tpacket_hdr *h1;
846 struct tpacket2_hdr *h2;
847 void *raw;
848 } ph;
849 int to_write, offset, len, tp_len, nr_frags, len_max;
850 struct socket *sock = po->sk.sk_socket;
851 struct page *page;
852 void *data;
853 int err;
855 ph.raw = frame;
857 skb->protocol = proto;
858 skb->dev = dev;
859 skb->priority = po->sk.sk_priority;
860 skb_shinfo(skb)->destructor_arg = ph.raw;
862 switch (po->tp_version) {
863 case TPACKET_V2:
864 tp_len = ph.h2->tp_len;
865 break;
866 default:
867 tp_len = ph.h1->tp_len;
868 break;
870 if (unlikely(tp_len > size_max)) {
871 pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
872 return -EMSGSIZE;
875 skb_reserve(skb, LL_RESERVED_SPACE(dev));
876 skb_reset_network_header(skb);
878 data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
879 to_write = tp_len;
881 if (sock->type == SOCK_DGRAM) {
882 err = dev_hard_header(skb, dev, ntohs(proto), addr,
883 NULL, tp_len);
884 if (unlikely(err < 0))
885 return -EINVAL;
886 } else if (dev->hard_header_len) {
887 /* net device doesn't like empty head */
888 if (unlikely(tp_len <= dev->hard_header_len)) {
889 pr_err("packet size is too short (%d < %d)\n",
890 tp_len, dev->hard_header_len);
891 return -EINVAL;
894 skb_push(skb, dev->hard_header_len);
895 err = skb_store_bits(skb, 0, data,
896 dev->hard_header_len);
897 if (unlikely(err))
898 return err;
900 data += dev->hard_header_len;
901 to_write -= dev->hard_header_len;
904 err = -EFAULT;
905 page = virt_to_page(data);
906 offset = offset_in_page(data);
907 len_max = PAGE_SIZE - offset;
908 len = ((to_write > len_max) ? len_max : to_write);
910 skb->data_len = to_write;
911 skb->len += to_write;
912 skb->truesize += to_write;
913 atomic_add(to_write, &po->sk.sk_wmem_alloc);
915 while (likely(to_write)) {
916 nr_frags = skb_shinfo(skb)->nr_frags;
918 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
919 pr_err("Packet exceed the number of skb frags(%lu)\n",
920 MAX_SKB_FRAGS);
921 return -EFAULT;
924 flush_dcache_page(page);
925 get_page(page);
926 skb_fill_page_desc(skb,
927 nr_frags,
928 page++, offset, len);
929 to_write -= len;
930 offset = 0;
931 len_max = PAGE_SIZE;
932 len = ((to_write > len_max) ? len_max : to_write);
935 return tp_len;
938 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
940 struct socket *sock;
941 struct sk_buff *skb;
942 struct net_device *dev;
943 __be16 proto;
944 int ifindex, err, reserve = 0;
945 void *ph;
946 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
947 int tp_len, size_max;
948 unsigned char *addr;
949 int len_sum = 0;
950 int status = 0;
952 sock = po->sk.sk_socket;
954 mutex_lock(&po->pg_vec_lock);
956 err = -EBUSY;
957 if (saddr == NULL) {
958 ifindex = po->ifindex;
959 proto = po->num;
960 addr = NULL;
961 } else {
962 err = -EINVAL;
963 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
964 goto out;
965 if (msg->msg_namelen < (saddr->sll_halen
966 + offsetof(struct sockaddr_ll,
967 sll_addr)))
968 goto out;
969 ifindex = saddr->sll_ifindex;
970 proto = saddr->sll_protocol;
971 addr = saddr->sll_addr;
974 dev = dev_get_by_index(sock_net(&po->sk), ifindex);
975 err = -ENXIO;
976 if (unlikely(dev == NULL))
977 goto out;
979 reserve = dev->hard_header_len;
981 err = -ENETDOWN;
982 if (unlikely(!(dev->flags & IFF_UP)))
983 goto out_put;
985 size_max = po->tx_ring.frame_size
986 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
988 if (size_max > dev->mtu + reserve)
989 size_max = dev->mtu + reserve;
991 do {
992 ph = packet_current_frame(po, &po->tx_ring,
993 TP_STATUS_SEND_REQUEST);
995 if (unlikely(ph == NULL)) {
996 schedule();
997 continue;
1000 status = TP_STATUS_SEND_REQUEST;
1001 skb = sock_alloc_send_skb(&po->sk,
1002 LL_ALLOCATED_SPACE(dev)
1003 + sizeof(struct sockaddr_ll),
1004 0, &err);
1006 if (unlikely(skb == NULL))
1007 goto out_status;
1009 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
1010 addr);
1012 if (unlikely(tp_len < 0)) {
1013 if (po->tp_loss) {
1014 __packet_set_status(po, ph,
1015 TP_STATUS_AVAILABLE);
1016 packet_increment_head(&po->tx_ring);
1017 kfree_skb(skb);
1018 continue;
1019 } else {
1020 status = TP_STATUS_WRONG_FORMAT;
1021 err = tp_len;
1022 goto out_status;
1026 skb->destructor = tpacket_destruct_skb;
1027 __packet_set_status(po, ph, TP_STATUS_SENDING);
1028 atomic_inc(&po->tx_ring.pending);
1030 status = TP_STATUS_SEND_REQUEST;
1031 err = dev_queue_xmit(skb);
1032 if (unlikely(err > 0)) {
1033 err = net_xmit_errno(err);
1034 if (err && __packet_get_status(po, ph) ==
1035 TP_STATUS_AVAILABLE) {
1036 /* skb was destructed already */
1037 skb = NULL;
1038 goto out_status;
1041 * skb was dropped but not destructed yet;
1042 * let's treat it like congestion or err < 0
1044 err = 0;
1046 packet_increment_head(&po->tx_ring);
1047 len_sum += tp_len;
1048 } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
1049 && (atomic_read(&po->tx_ring.pending))))
1052 err = len_sum;
1053 goto out_put;
1055 out_status:
1056 __packet_set_status(po, ph, status);
1057 kfree_skb(skb);
1058 out_put:
1059 dev_put(dev);
1060 out:
1061 mutex_unlock(&po->pg_vec_lock);
1062 return err;
1064 #endif
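/*
 * Editor's note: a hedged user-space sketch of driving the TX ring that
 * tpacket_snd() above consumes, assuming TPACKET_V1, a SOCK_RAW socket,
 * and a mapped ring at "ring" (names and sizes illustrative only; "frame"
 * is a buffer already holding the full ethernet frame).  A zero-length
 * send() kicks the kernel into walking the ring:
 *
 *	struct tpacket_hdr *hdr = (struct tpacket_hdr *)(ring + i * frame_size);
 *	void *data = (char *)hdr + TPACKET_HDRLEN - sizeof(struct sockaddr_ll);
 *
 *	memcpy(data, frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	__sync_synchronize();
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);
 *
 * tpacket_snd() moves each TP_STATUS_SEND_REQUEST frame to
 * TP_STATUS_SENDING, and tpacket_destruct_skb() marks it
 * TP_STATUS_AVAILABLE once the driver has consumed it.
 */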
1066 static int packet_snd(struct socket *sock,
1067 struct msghdr *msg, size_t len)
1069 struct sock *sk = sock->sk;
1070 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
1071 struct sk_buff *skb;
1072 struct net_device *dev;
1073 __be16 proto;
1074 unsigned char *addr;
1075 int ifindex, err, reserve = 0;
1078 * Get and verify the address.
1081 if (saddr == NULL) {
1082 struct packet_sock *po = pkt_sk(sk);
1084 ifindex = po->ifindex;
1085 proto = po->num;
1086 addr = NULL;
1087 } else {
1088 err = -EINVAL;
1089 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1090 goto out;
1091 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
1092 goto out;
1093 ifindex = saddr->sll_ifindex;
1094 proto = saddr->sll_protocol;
1095 addr = saddr->sll_addr;
1099 dev = dev_get_by_index(sock_net(sk), ifindex);
1100 err = -ENXIO;
1101 if (dev == NULL)
1102 goto out_unlock;
1103 if (sock->type == SOCK_RAW)
1104 reserve = dev->hard_header_len;
1106 err = -ENETDOWN;
1107 if (!(dev->flags & IFF_UP))
1108 goto out_unlock;
1110 err = -EMSGSIZE;
1111 if (len > dev->mtu+reserve)
1112 goto out_unlock;
1114 skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
1115 msg->msg_flags & MSG_DONTWAIT, &err);
1116 if (skb == NULL)
1117 goto out_unlock;
1119 skb_reserve(skb, LL_RESERVED_SPACE(dev));
1120 skb_reset_network_header(skb);
1122 err = -EINVAL;
1123 if (sock->type == SOCK_DGRAM &&
1124 dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len) < 0)
1125 goto out_free;
1127 /* Returns -EFAULT on error */
1128 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
1129 if (err)
1130 goto out_free;
1132 skb->protocol = proto;
1133 skb->dev = dev;
1134 skb->priority = sk->sk_priority;
1137 * Now send it
1140 err = dev_queue_xmit(skb);
1141 if (err > 0 && (err = net_xmit_errno(err)) != 0)
1142 goto out_unlock;
1144 dev_put(dev);
1146 return len;
1148 out_free:
1149 kfree_skb(skb);
1150 out_unlock:
1151 if (dev)
1152 dev_put(dev);
1153 out:
1154 return err;
1157 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
1158 struct msghdr *msg, size_t len)
1160 #ifdef CONFIG_PACKET_MMAP
1161 struct sock *sk = sock->sk;
1162 struct packet_sock *po = pkt_sk(sk);
1163 if (po->tx_ring.pg_vec)
1164 return tpacket_snd(po, msg);
1165 else
1166 #endif
1167 return packet_snd(sock, msg, len);
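/*
 * Editor's note: when no TX ring is configured, the sendmsg path above
 * falls back to packet_snd().  A hedged user-space sketch of the
 * equivalent sendto() on a SOCK_RAW packet socket (illustrative only;
 * the buffer must already contain the complete ethernet frame, and for
 * SOCK_RAW the sockaddr_ll only selects the device and protocol):
 *
 *	struct sockaddr_ll sll;
 *
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_ifindex  = ifindex;
 *	sll.sll_protocol = htons(ETH_P_IP);
 *	sll.sll_halen    = ETH_ALEN;
 *	memcpy(sll.sll_addr, dest_mac, ETH_ALEN);
 *
 *	sendto(fd, frame, frame_len, 0, (struct sockaddr *)&sll, sizeof(sll));
 */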
1171 * Close a PACKET socket. This is fairly simple. We immediately go
1172 * to 'closed' state and remove our protocol entry in the device list.
1175 static int packet_release(struct socket *sock)
1177 struct sock *sk = sock->sk;
1178 struct packet_sock *po;
1179 struct net *net;
1180 #ifdef CONFIG_PACKET_MMAP
1181 struct tpacket_req req;
1182 #endif
1184 if (!sk)
1185 return 0;
1187 net = sock_net(sk);
1188 po = pkt_sk(sk);
1190 write_lock_bh(&net->packet.sklist_lock);
1191 sk_del_node_init(sk);
1192 sock_prot_inuse_add(net, sk->sk_prot, -1);
1193 write_unlock_bh(&net->packet.sklist_lock);
1196 * Unhook packet receive handler.
1199 if (po->running) {
1201 * Remove the protocol hook
1203 dev_remove_pack(&po->prot_hook);
1204 po->running = 0;
1205 po->num = 0;
1206 __sock_put(sk);
1209 packet_flush_mclist(sk);
1211 #ifdef CONFIG_PACKET_MMAP
1212 memset(&req, 0, sizeof(req));
1214 if (po->rx_ring.pg_vec)
1215 packet_set_ring(sk, &req, 1, 0);
1217 if (po->tx_ring.pg_vec)
1218 packet_set_ring(sk, &req, 1, 1);
1219 #endif
1222 * Now the socket is dead. No more input will appear.
1225 sock_orphan(sk);
1226 sock->sk = NULL;
1228 /* Purge queues */
1230 skb_queue_purge(&sk->sk_receive_queue);
1231 sk_refcnt_debug_release(sk);
1233 sock_put(sk);
1234 return 0;
1238 * Attach a packet hook.
1241 static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
1243 struct packet_sock *po = pkt_sk(sk);
1245 * Detach an existing hook if present.
1248 lock_sock(sk);
1250 spin_lock(&po->bind_lock);
1251 if (po->running) {
1252 __sock_put(sk);
1253 po->running = 0;
1254 po->num = 0;
1255 spin_unlock(&po->bind_lock);
1256 dev_remove_pack(&po->prot_hook);
1257 spin_lock(&po->bind_lock);
1260 po->num = protocol;
1261 po->prot_hook.type = protocol;
1262 po->prot_hook.dev = dev;
1264 po->ifindex = dev ? dev->ifindex : 0;
1266 if (protocol == 0)
1267 goto out_unlock;
1269 if (!dev || (dev->flags & IFF_UP)) {
1270 dev_add_pack(&po->prot_hook);
1271 sock_hold(sk);
1272 po->running = 1;
1273 } else {
1274 sk->sk_err = ENETDOWN;
1275 if (!sock_flag(sk, SOCK_DEAD))
1276 sk->sk_error_report(sk);
1279 out_unlock:
1280 spin_unlock(&po->bind_lock);
1281 release_sock(sk);
1282 return 0;
1286 * Bind a packet socket to a device
1289 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
1290 int addr_len)
1292 struct sock *sk = sock->sk;
1293 char name[15];
1294 struct net_device *dev;
1295 int err = -ENODEV;
1298 * Check legality
1301 if (addr_len != sizeof(struct sockaddr))
1302 return -EINVAL;
1303 strlcpy(name, uaddr->sa_data, sizeof(name));
1305 dev = dev_get_by_name(sock_net(sk), name);
1306 if (dev) {
1307 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
1308 dev_put(dev);
1310 return err;
1313 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1315 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
1316 struct sock *sk = sock->sk;
1317 struct net_device *dev = NULL;
1318 int err;
1322 * Check legality
1325 if (addr_len < sizeof(struct sockaddr_ll))
1326 return -EINVAL;
1327 if (sll->sll_family != AF_PACKET)
1328 return -EINVAL;
1330 if (sll->sll_ifindex) {
1331 err = -ENODEV;
1332 dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
1333 if (dev == NULL)
1334 goto out;
1336 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
1337 if (dev)
1338 dev_put(dev);
1340 out:
1341 return err;
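/*
 * Editor's note: a hedged sketch of the matching user-space bind() call
 * (illustrative only).  Only sll_family, sll_protocol and sll_ifindex are
 * consulted by packet_bind() above; everything else may be left zeroed:
 *
 *	#include <net/if.h>
 *
 *	struct sockaddr_ll sll;
 *
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family   = AF_PACKET;
 *	sll.sll_protocol = htons(ETH_P_ALL);
 *	sll.sll_ifindex  = if_nametoindex("eth0");
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */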
1344 static struct proto packet_proto = {
1345 .name = "PACKET",
1346 .owner = THIS_MODULE,
1347 .obj_size = sizeof(struct packet_sock),
1351 * Create a packet of type SOCK_PACKET.
1354 static int packet_create(struct net *net, struct socket *sock, int protocol)
1356 struct sock *sk;
1357 struct packet_sock *po;
1358 __be16 proto = (__force __be16)protocol; /* weird, but documented */
1359 int err;
1361 if (!capable(CAP_NET_RAW))
1362 return -EPERM;
1363 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
1364 sock->type != SOCK_PACKET)
1365 return -ESOCKTNOSUPPORT;
1367 sock->state = SS_UNCONNECTED;
1369 err = -ENOBUFS;
1370 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
1371 if (sk == NULL)
1372 goto out;
1374 sock->ops = &packet_ops;
1375 if (sock->type == SOCK_PACKET)
1376 sock->ops = &packet_ops_spkt;
1378 sock_init_data(sock, sk);
1380 po = pkt_sk(sk);
1381 sk->sk_family = PF_PACKET;
1382 po->num = proto;
1384 sk->sk_destruct = packet_sock_destruct;
1385 sk_refcnt_debug_inc(sk);
1388 * Attach a protocol block
1391 spin_lock_init(&po->bind_lock);
1392 mutex_init(&po->pg_vec_lock);
1393 po->prot_hook.func = packet_rcv;
1395 if (sock->type == SOCK_PACKET)
1396 po->prot_hook.func = packet_rcv_spkt;
1398 po->prot_hook.af_packet_priv = sk;
1400 if (proto) {
1401 po->prot_hook.type = proto;
1402 dev_add_pack(&po->prot_hook);
1403 sock_hold(sk);
1404 po->running = 1;
1407 write_lock_bh(&net->packet.sklist_lock);
1408 sk_add_node(sk, &net->packet.sklist);
1409 sock_prot_inuse_add(net, &packet_proto, 1);
1410 write_unlock_bh(&net->packet.sklist_lock);
1411 return 0;
1412 out:
1413 return err;
1417 * Pull a packet from our receive queue and hand it to the user.
1418 * If necessary we block.
1421 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1422 struct msghdr *msg, size_t len, int flags)
1424 struct sock *sk = sock->sk;
1425 struct sk_buff *skb;
1426 int copied, err;
1427 struct sockaddr_ll *sll;
1429 err = -EINVAL;
1430 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
1431 goto out;
1433 #if 0
1434 /* What error should we return now? EUNATTACH? */
1435 if (pkt_sk(sk)->ifindex < 0)
1436 return -ENODEV;
1437 #endif
1440 * Call the generic datagram receiver. This handles all sorts
1441 * of horrible races and re-entrancy so we can forget about it
1442 * in the protocol layers.
1444 * Now it will return ENETDOWN if the device has just gone down,
1445 * but then it will block.
1448 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
1451 * An error occurred so return it. Because skb_recv_datagram()
1452 * handles the blocking, we don't need to see or worry about
1453 * blocking retries.
1456 if (skb == NULL)
1457 goto out;
1460 * If the address length field is there to be filled in, we fill
1461 * it in now.
1464 sll = &PACKET_SKB_CB(skb)->sa.ll;
1465 if (sock->type == SOCK_PACKET)
1466 msg->msg_namelen = sizeof(struct sockaddr_pkt);
1467 else
1468 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
1471 * You lose any data beyond the buffer you gave. If it worries a
1472 * user program they can ask the device for its MTU anyway.
1475 copied = skb->len;
1476 if (copied > len) {
1477 copied = len;
1478 msg->msg_flags |= MSG_TRUNC;
1481 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1482 if (err)
1483 goto out_free;
1485 sock_recv_timestamp(msg, sk, skb);
1487 if (msg->msg_name)
1488 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
1489 msg->msg_namelen);
1491 if (pkt_sk(sk)->auxdata) {
1492 struct tpacket_auxdata aux;
1494 aux.tp_status = TP_STATUS_USER;
1495 if (skb->ip_summed == CHECKSUM_PARTIAL)
1496 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
1497 aux.tp_len = PACKET_SKB_CB(skb)->origlen;
1498 aux.tp_snaplen = skb->len;
1499 aux.tp_mac = 0;
1500 aux.tp_net = skb_network_offset(skb);
1501 aux.tp_vlan_tci = skb->vlan_tci;
1503 aux.tp_padding = 0;
1504 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1508 * Free or return the buffer as appropriate. Again this
1509 * hides all the races and re-entrancy issues from us.
1511 err = (flags&MSG_TRUNC) ? skb->len : copied;
1513 out_free:
1514 skb_free_datagram(sk, skb);
1515 out:
1516 return err;
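/*
 * Editor's note: a hedged sketch of reading the PACKET_AUXDATA control
 * message that packet_recvmsg() above attaches once the option is enabled
 * (illustrative only; "buf" is an assumed receive buffer and "use()" a
 * placeholder for the caller's processing):
 *
 *	int one = 1;
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov        = &iov,
 *		.msg_iovlen     = 1,
 *		.msg_control    = cbuf,
 *		.msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *cmsg;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *			use(aux->tp_snaplen, aux->tp_vlan_tci);
 *		}
 */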
1519 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1520 int *uaddr_len, int peer)
1522 struct net_device *dev;
1523 struct sock *sk = sock->sk;
1525 if (peer)
1526 return -EOPNOTSUPP;
1528 uaddr->sa_family = AF_PACKET;
1529 dev = dev_get_by_index(sock_net(sk), pkt_sk(sk)->ifindex);
1530 if (dev) {
1531 strncpy(uaddr->sa_data, dev->name, 14);
1532 dev_put(dev);
1533 } else
1534 memset(uaddr->sa_data, 0, 14);
1535 *uaddr_len = sizeof(*uaddr);
1537 return 0;
1540 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1541 int *uaddr_len, int peer)
1543 struct net_device *dev;
1544 struct sock *sk = sock->sk;
1545 struct packet_sock *po = pkt_sk(sk);
1546 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
1548 if (peer)
1549 return -EOPNOTSUPP;
1551 sll->sll_family = AF_PACKET;
1552 sll->sll_ifindex = po->ifindex;
1553 sll->sll_protocol = po->num;
1554 sll->sll_pkttype = 0;
1555 dev = dev_get_by_index(sock_net(sk), po->ifindex);
1556 if (dev) {
1557 sll->sll_hatype = dev->type;
1558 sll->sll_halen = dev->addr_len;
1559 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
1560 dev_put(dev);
1561 } else {
1562 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
1563 sll->sll_halen = 0;
1565 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
1567 return 0;
1570 static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
1571 int what)
1573 switch (i->type) {
1574 case PACKET_MR_MULTICAST:
1575 if (what > 0)
1576 return dev_mc_add(dev, i->addr, i->alen, 0);
1577 else
1578 return dev_mc_delete(dev, i->addr, i->alen, 0);
1579 break;
1580 case PACKET_MR_PROMISC:
1581 return dev_set_promiscuity(dev, what);
1582 break;
1583 case PACKET_MR_ALLMULTI:
1584 return dev_set_allmulti(dev, what);
1585 break;
1586 case PACKET_MR_UNICAST:
1587 if (what > 0)
1588 return dev_unicast_add(dev, i->addr);
1589 else
1590 return dev_unicast_delete(dev, i->addr);
1591 break;
1592 default:
1593 break;
1595 return 0;
1598 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
1600 for ( ; i; i = i->next) {
1601 if (i->ifindex == dev->ifindex)
1602 packet_dev_mc(dev, i, what);
1606 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1608 struct packet_sock *po = pkt_sk(sk);
1609 struct packet_mclist *ml, *i;
1610 struct net_device *dev;
1611 int err;
1613 rtnl_lock();
1615 err = -ENODEV;
1616 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
1617 if (!dev)
1618 goto done;
1620 err = -EINVAL;
1621 if (mreq->mr_alen > dev->addr_len)
1622 goto done;
1624 err = -ENOBUFS;
1625 i = kmalloc(sizeof(*i), GFP_KERNEL);
1626 if (i == NULL)
1627 goto done;
1629 err = 0;
1630 for (ml = po->mclist; ml; ml = ml->next) {
1631 if (ml->ifindex == mreq->mr_ifindex &&
1632 ml->type == mreq->mr_type &&
1633 ml->alen == mreq->mr_alen &&
1634 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1635 ml->count++;
1636 /* Free the new element ... */
1637 kfree(i);
1638 goto done;
1642 i->type = mreq->mr_type;
1643 i->ifindex = mreq->mr_ifindex;
1644 i->alen = mreq->mr_alen;
1645 memcpy(i->addr, mreq->mr_address, i->alen);
1646 i->count = 1;
1647 i->next = po->mclist;
1648 po->mclist = i;
1649 err = packet_dev_mc(dev, i, 1);
1650 if (err) {
1651 po->mclist = i->next;
1652 kfree(i);
1655 done:
1656 rtnl_unlock();
1657 return err;
1660 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1662 struct packet_mclist *ml, **mlp;
1664 rtnl_lock();
1666 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
1667 if (ml->ifindex == mreq->mr_ifindex &&
1668 ml->type == mreq->mr_type &&
1669 ml->alen == mreq->mr_alen &&
1670 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1671 if (--ml->count == 0) {
1672 struct net_device *dev;
1673 *mlp = ml->next;
1674 dev = dev_get_by_index(sock_net(sk), ml->ifindex);
1675 if (dev) {
1676 packet_dev_mc(dev, ml, -1);
1677 dev_put(dev);
1679 kfree(ml);
1681 rtnl_unlock();
1682 return 0;
1685 rtnl_unlock();
1686 return -EADDRNOTAVAIL;
1689 static void packet_flush_mclist(struct sock *sk)
1691 struct packet_sock *po = pkt_sk(sk);
1692 struct packet_mclist *ml;
1694 if (!po->mclist)
1695 return;
1697 rtnl_lock();
1698 while ((ml = po->mclist) != NULL) {
1699 struct net_device *dev;
1701 po->mclist = ml->next;
1702 dev = dev_get_by_index(sock_net(sk), ml->ifindex);
1703 if (dev != NULL) {
1704 packet_dev_mc(dev, ml, -1);
1705 dev_put(dev);
1707 kfree(ml);
1709 rtnl_unlock();
1712 static int
1713 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1715 struct sock *sk = sock->sk;
1716 struct packet_sock *po = pkt_sk(sk);
1717 int ret;
1719 if (level != SOL_PACKET)
1720 return -ENOPROTOOPT;
1722 switch (optname) {
1723 case PACKET_ADD_MEMBERSHIP:
1724 case PACKET_DROP_MEMBERSHIP:
1726 struct packet_mreq_max mreq;
1727 int len = optlen;
1728 memset(&mreq, 0, sizeof(mreq));
1729 if (len < sizeof(struct packet_mreq))
1730 return -EINVAL;
1731 if (len > sizeof(mreq))
1732 len = sizeof(mreq);
1733 if (copy_from_user(&mreq, optval, len))
1734 return -EFAULT;
1735 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
1736 return -EINVAL;
1737 if (optname == PACKET_ADD_MEMBERSHIP)
1738 ret = packet_mc_add(sk, &mreq);
1739 else
1740 ret = packet_mc_drop(sk, &mreq);
1741 return ret;
1744 #ifdef CONFIG_PACKET_MMAP
1745 case PACKET_RX_RING:
1746 case PACKET_TX_RING:
1748 struct tpacket_req req;
1750 if (optlen < sizeof(req))
1751 return -EINVAL;
1752 if (copy_from_user(&req, optval, sizeof(req)))
1753 return -EFAULT;
1754 return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
1756 case PACKET_COPY_THRESH:
1758 int val;
1760 if (optlen != sizeof(val))
1761 return -EINVAL;
1762 if (copy_from_user(&val, optval, sizeof(val)))
1763 return -EFAULT;
1765 pkt_sk(sk)->copy_thresh = val;
1766 return 0;
1768 case PACKET_VERSION:
1770 int val;
1772 if (optlen != sizeof(val))
1773 return -EINVAL;
1774 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1775 return -EBUSY;
1776 if (copy_from_user(&val, optval, sizeof(val)))
1777 return -EFAULT;
1778 switch (val) {
1779 case TPACKET_V1:
1780 case TPACKET_V2:
1781 po->tp_version = val;
1782 return 0;
1783 default:
1784 return -EINVAL;
1787 case PACKET_RESERVE:
1789 unsigned int val;
1791 if (optlen != sizeof(val))
1792 return -EINVAL;
1793 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1794 return -EBUSY;
1795 if (copy_from_user(&val, optval, sizeof(val)))
1796 return -EFAULT;
1797 po->tp_reserve = val;
1798 return 0;
1800 case PACKET_LOSS:
1802 unsigned int val;
1804 if (optlen != sizeof(val))
1805 return -EINVAL;
1806 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
1807 return -EBUSY;
1808 if (copy_from_user(&val, optval, sizeof(val)))
1809 return -EFAULT;
1810 po->tp_loss = !!val;
1811 return 0;
1813 #endif
1814 case PACKET_AUXDATA:
1816 int val;
1818 if (optlen < sizeof(val))
1819 return -EINVAL;
1820 if (copy_from_user(&val, optval, sizeof(val)))
1821 return -EFAULT;
1823 po->auxdata = !!val;
1824 return 0;
1826 case PACKET_ORIGDEV:
1828 int val;
1830 if (optlen < sizeof(val))
1831 return -EINVAL;
1832 if (copy_from_user(&val, optval, sizeof(val)))
1833 return -EFAULT;
1835 po->origdev = !!val;
1836 return 0;
1838 default:
1839 return -ENOPROTOOPT;
1843 static int packet_getsockopt(struct socket *sock, int level, int optname,
1844 char __user *optval, int __user *optlen)
1846 int len;
1847 int val;
1848 struct sock *sk = sock->sk;
1849 struct packet_sock *po = pkt_sk(sk);
1850 void *data;
1851 struct tpacket_stats st;
1853 if (level != SOL_PACKET)
1854 return -ENOPROTOOPT;
1856 if (get_user(len, optlen))
1857 return -EFAULT;
1859 if (len < 0)
1860 return -EINVAL;
1862 switch (optname) {
1863 case PACKET_STATISTICS:
1864 if (len > sizeof(struct tpacket_stats))
1865 len = sizeof(struct tpacket_stats);
1866 spin_lock_bh(&sk->sk_receive_queue.lock);
1867 st = po->stats;
1868 memset(&po->stats, 0, sizeof(st));
1869 spin_unlock_bh(&sk->sk_receive_queue.lock);
1870 st.tp_packets += st.tp_drops;
1872 data = &st;
1873 break;
1874 case PACKET_AUXDATA:
1875 if (len > sizeof(int))
1876 len = sizeof(int);
1877 val = po->auxdata;
1879 data = &val;
1880 break;
1881 case PACKET_ORIGDEV:
1882 if (len > sizeof(int))
1883 len = sizeof(int);
1884 val = po->origdev;
1886 data = &val;
1887 break;
1888 #ifdef CONFIG_PACKET_MMAP
1889 case PACKET_VERSION:
1890 if (len > sizeof(int))
1891 len = sizeof(int);
1892 val = po->tp_version;
1893 data = &val;
1894 break;
1895 case PACKET_HDRLEN:
1896 if (len > sizeof(int))
1897 len = sizeof(int);
1898 if (copy_from_user(&val, optval, len))
1899 return -EFAULT;
1900 switch (val) {
1901 case TPACKET_V1:
1902 val = sizeof(struct tpacket_hdr);
1903 break;
1904 case TPACKET_V2:
1905 val = sizeof(struct tpacket2_hdr);
1906 break;
1907 default:
1908 return -EINVAL;
1910 data = &val;
1911 break;
1912 case PACKET_RESERVE:
1913 if (len > sizeof(unsigned int))
1914 len = sizeof(unsigned int);
1915 val = po->tp_reserve;
1916 data = &val;
1917 break;
1918 case PACKET_LOSS:
1919 if (len > sizeof(unsigned int))
1920 len = sizeof(unsigned int);
1921 val = po->tp_loss;
1922 data = &val;
1923 break;
1924 #endif
1925 default:
1926 return -ENOPROTOOPT;
1929 if (put_user(len, optlen))
1930 return -EFAULT;
1931 if (copy_to_user(optval, data, len))
1932 return -EFAULT;
1933 return 0;
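/*
 * Editor's note: a hedged sketch of the PACKET_STATISTICS read handled
 * above (illustrative only).  The counters are cleared on read, and
 * tp_packets includes tp_drops, as done in packet_getsockopt():
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *
 * Afterwards st.tp_packets and st.tp_drops hold the counts accumulated
 * since the previous read.
 */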
1937 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
1939 struct sock *sk;
1940 struct hlist_node *node;
1941 struct net_device *dev = data;
1942 struct net *net = dev_net(dev);
1944 read_lock(&net->packet.sklist_lock);
1945 sk_for_each(sk, node, &net->packet.sklist) {
1946 struct packet_sock *po = pkt_sk(sk);
1948 switch (msg) {
1949 case NETDEV_UNREGISTER:
1950 if (po->mclist)
1951 packet_dev_mclist(dev, po->mclist, -1);
1952 /* fallthrough */
1954 case NETDEV_DOWN:
1955 if (dev->ifindex == po->ifindex) {
1956 spin_lock(&po->bind_lock);
1957 if (po->running) {
1958 __dev_remove_pack(&po->prot_hook);
1959 __sock_put(sk);
1960 po->running = 0;
1961 sk->sk_err = ENETDOWN;
1962 if (!sock_flag(sk, SOCK_DEAD))
1963 sk->sk_error_report(sk);
1965 if (msg == NETDEV_UNREGISTER) {
1966 po->ifindex = -1;
1967 po->prot_hook.dev = NULL;
1969 spin_unlock(&po->bind_lock);
1971 break;
1972 case NETDEV_UP:
1973 spin_lock(&po->bind_lock);
1974 if (dev->ifindex == po->ifindex && po->num &&
1975 !po->running) {
1976 dev_add_pack(&po->prot_hook);
1977 sock_hold(sk);
1978 po->running = 1;
1980 spin_unlock(&po->bind_lock);
1981 break;
1984 read_unlock(&net->packet.sklist_lock);
1985 return NOTIFY_DONE;
1989 static int packet_ioctl(struct socket *sock, unsigned int cmd,
1990 unsigned long arg)
1992 struct sock *sk = sock->sk;
1994 switch (cmd) {
1995 case SIOCOUTQ:
1997 int amount = sk_wmem_alloc_get(sk);
1999 return put_user(amount, (int __user *)arg);
2001 case SIOCINQ:
2003 struct sk_buff *skb;
2004 int amount = 0;
2006 spin_lock_bh(&sk->sk_receive_queue.lock);
2007 skb = skb_peek(&sk->sk_receive_queue);
2008 if (skb)
2009 amount = skb->len;
2010 spin_unlock_bh(&sk->sk_receive_queue.lock);
2011 return put_user(amount, (int __user *)arg);
2013 case SIOCGSTAMP:
2014 return sock_get_timestamp(sk, (struct timeval __user *)arg);
2015 case SIOCGSTAMPNS:
2016 return sock_get_timestampns(sk, (struct timespec __user *)arg);
2018 #ifdef CONFIG_INET
2019 case SIOCADDRT:
2020 case SIOCDELRT:
2021 case SIOCDARP:
2022 case SIOCGARP:
2023 case SIOCSARP:
2024 case SIOCGIFADDR:
2025 case SIOCSIFADDR:
2026 case SIOCGIFBRDADDR:
2027 case SIOCSIFBRDADDR:
2028 case SIOCGIFNETMASK:
2029 case SIOCSIFNETMASK:
2030 case SIOCGIFDSTADDR:
2031 case SIOCSIFDSTADDR:
2032 case SIOCSIFFLAGS:
2033 if (!net_eq(sock_net(sk), &init_net))
2034 return -ENOIOCTLCMD;
2035 return inet_dgram_ops.ioctl(sock, cmd, arg);
2036 #endif
2038 default:
2039 return -ENOIOCTLCMD;
2041 return 0;
2044 #ifndef CONFIG_PACKET_MMAP
2045 #define packet_mmap sock_no_mmap
2046 #define packet_poll datagram_poll
2047 #else
2049 static unsigned int packet_poll(struct file *file, struct socket *sock,
2050 poll_table *wait)
2052 struct sock *sk = sock->sk;
2053 struct packet_sock *po = pkt_sk(sk);
2054 unsigned int mask = datagram_poll(file, sock, wait);
2056 spin_lock_bh(&sk->sk_receive_queue.lock);
2057 if (po->rx_ring.pg_vec) {
2058 if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
2059 mask |= POLLIN | POLLRDNORM;
2061 spin_unlock_bh(&sk->sk_receive_queue.lock);
2062 spin_lock_bh(&sk->sk_write_queue.lock);
2063 if (po->tx_ring.pg_vec) {
2064 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
2065 mask |= POLLOUT | POLLWRNORM;
2067 spin_unlock_bh(&sk->sk_write_queue.lock);
2068 return mask;
2072 /* Dirty? Well, I still have not found a better way to account
2073 * for user mmaps.
2076 static void packet_mm_open(struct vm_area_struct *vma)
2078 struct file *file = vma->vm_file;
2079 struct socket *sock = file->private_data;
2080 struct sock *sk = sock->sk;
2082 if (sk)
2083 atomic_inc(&pkt_sk(sk)->mapped);
2086 static void packet_mm_close(struct vm_area_struct *vma)
2088 struct file *file = vma->vm_file;
2089 struct socket *sock = file->private_data;
2090 struct sock *sk = sock->sk;
2092 if (sk)
2093 atomic_dec(&pkt_sk(sk)->mapped);
2096 static const struct vm_operations_struct packet_mmap_ops = {
2097 .open = packet_mm_open,
2098 .close = packet_mm_close,
2101 static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
2103 int i;
2105 for (i = 0; i < len; i++) {
2106 if (likely(pg_vec[i]))
2107 free_pages((unsigned long) pg_vec[i], order);
2109 kfree(pg_vec);
2112 static inline char *alloc_one_pg_vec_page(unsigned long order)
2114 gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;
2116 return (char *) __get_free_pages(gfp_flags, order);
2119 static char **alloc_pg_vec(struct tpacket_req *req, int order)
2121 unsigned int block_nr = req->tp_block_nr;
2122 char **pg_vec;
2123 int i;
2125 pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
2126 if (unlikely(!pg_vec))
2127 goto out;
2129 for (i = 0; i < block_nr; i++) {
2130 pg_vec[i] = alloc_one_pg_vec_page(order);
2131 if (unlikely(!pg_vec[i]))
2132 goto out_free_pgvec;
2135 out:
2136 return pg_vec;
2138 out_free_pgvec:
2139 free_pg_vec(pg_vec, order, block_nr);
2140 pg_vec = NULL;
2141 goto out;
2144 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2145 int closing, int tx_ring)
2147 char **pg_vec = NULL;
2148 struct packet_sock *po = pkt_sk(sk);
2149 int was_running, order = 0;
2150 struct packet_ring_buffer *rb;
2151 struct sk_buff_head *rb_queue;
2152 __be16 num;
2153 int err;
2155 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
2156 rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
2158 err = -EBUSY;
2159 if (!closing) {
2160 if (atomic_read(&po->mapped))
2161 goto out;
2162 if (atomic_read(&rb->pending))
2163 goto out;
2166 if (req->tp_block_nr) {
2167 /* Sanity tests and some calculations */
2168 err = -EBUSY;
2169 if (unlikely(rb->pg_vec))
2170 goto out;
2172 switch (po->tp_version) {
2173 case TPACKET_V1:
2174 po->tp_hdrlen = TPACKET_HDRLEN;
2175 break;
2176 case TPACKET_V2:
2177 po->tp_hdrlen = TPACKET2_HDRLEN;
2178 break;
2181 err = -EINVAL;
2182 if (unlikely((int)req->tp_block_size <= 0))
2183 goto out;
2184 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
2185 goto out;
2186 if (unlikely(req->tp_frame_size < po->tp_hdrlen +
2187 po->tp_reserve))
2188 goto out;
2189 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
2190 goto out;
2192 rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
2193 if (unlikely(rb->frames_per_block <= 0))
2194 goto out;
2195 if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
2196 req->tp_frame_nr))
2197 goto out;
2199 err = -ENOMEM;
2200 order = get_order(req->tp_block_size);
2201 pg_vec = alloc_pg_vec(req, order);
2202 if (unlikely(!pg_vec))
2203 goto out;
2205 /* Done */
2206 else {
2207 err = -EINVAL;
2208 if (unlikely(req->tp_frame_nr))
2209 goto out;
2212 lock_sock(sk);
2214 /* Detach socket from network */
2215 spin_lock(&po->bind_lock);
2216 was_running = po->running;
2217 num = po->num;
2218 if (was_running) {
2219 __dev_remove_pack(&po->prot_hook);
2220 po->num = 0;
2221 po->running = 0;
2222 __sock_put(sk);
2224 spin_unlock(&po->bind_lock);
2226 synchronize_net();
2228 err = -EBUSY;
2229 mutex_lock(&po->pg_vec_lock);
2230 if (closing || atomic_read(&po->mapped) == 0) {
2231 err = 0;
2232 #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
2233 spin_lock_bh(&rb_queue->lock);
2234 pg_vec = XC(rb->pg_vec, pg_vec);
2235 rb->frame_max = (req->tp_frame_nr - 1);
2236 rb->head = 0;
2237 rb->frame_size = req->tp_frame_size;
2238 spin_unlock_bh(&rb_queue->lock);
2240 order = XC(rb->pg_vec_order, order);
2241 req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);
2243 rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
2244 po->prot_hook.func = (po->rx_ring.pg_vec) ?
2245 tpacket_rcv : packet_rcv;
2246 skb_queue_purge(rb_queue);
2247 #undef XC
2248 if (atomic_read(&po->mapped))
2249 pr_err("packet_mmap: vma is busy: %d\n",
2250 atomic_read(&po->mapped));
2252 mutex_unlock(&po->pg_vec_lock);
2254 spin_lock(&po->bind_lock);
2255 if (was_running && !po->running) {
2256 sock_hold(sk);
2257 po->running = 1;
2258 po->num = num;
2259 dev_add_pack(&po->prot_hook);
2261 spin_unlock(&po->bind_lock);
2263 release_sock(sk);
2265 if (pg_vec)
2266 free_pg_vec(pg_vec, order, req->tp_block_nr);
2267 out:
2268 return err;
2271 static int packet_mmap(struct file *file, struct socket *sock,
2272 struct vm_area_struct *vma)
2274 struct sock *sk = sock->sk;
2275 struct packet_sock *po = pkt_sk(sk);
2276 unsigned long size, expected_size;
2277 struct packet_ring_buffer *rb;
2278 unsigned long start;
2279 int err = -EINVAL;
2280 int i;
2282 if (vma->vm_pgoff)
2283 return -EINVAL;
2285 mutex_lock(&po->pg_vec_lock);
2287 expected_size = 0;
2288 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
2289 if (rb->pg_vec) {
2290 expected_size += rb->pg_vec_len
2291 * rb->pg_vec_pages
2292 * PAGE_SIZE;
2296 if (expected_size == 0)
2297 goto out;
2299 size = vma->vm_end - vma->vm_start;
2300 if (size != expected_size)
2301 goto out;
2303 start = vma->vm_start;
2304 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
2305 if (rb->pg_vec == NULL)
2306 continue;
2308 for (i = 0; i < rb->pg_vec_len; i++) {
2309 struct page *page = virt_to_page(rb->pg_vec[i]);
2310 int pg_num;
2312 for (pg_num = 0; pg_num < rb->pg_vec_pages;
2313 pg_num++, page++) {
2314 err = vm_insert_page(vma, start, page);
2315 if (unlikely(err))
2316 goto out;
2317 start += PAGE_SIZE;
2322 atomic_inc(&po->mapped);
2323 vma->vm_ops = &packet_mmap_ops;
2324 err = 0;
2326 out:
2327 mutex_unlock(&po->pg_vec_lock);
2328 return err;
2330 #endif
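/*
 * Editor's note: a hedged user-space sketch tying the mmap pieces above
 * together for an RX ring (illustrative only; fd is an assumed AF_PACKET
 * socket).  The sizes are arbitrary but satisfy the checks in
 * packet_set_ring(): tp_block_size is a multiple of a 4 KiB page size and
 * frames_per_block * tp_block_nr == tp_frame_nr:
 *
 *	#include <sys/mman.h>
 *	#include <linux/if_packet.h>
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_frame_size = 2048,
 *		.tp_block_nr   = 64,
 *		.tp_frame_nr   = 128,
 *	};
 *	char *ring;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The frames can then be consumed with the tp_status handshake sketched
 * near packet_lookup_frame() above.
 */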
2333 static const struct proto_ops packet_ops_spkt = {
2334 .family = PF_PACKET,
2335 .owner = THIS_MODULE,
2336 .release = packet_release,
2337 .bind = packet_bind_spkt,
2338 .connect = sock_no_connect,
2339 .socketpair = sock_no_socketpair,
2340 .accept = sock_no_accept,
2341 .getname = packet_getname_spkt,
2342 .poll = datagram_poll,
2343 .ioctl = packet_ioctl,
2344 .listen = sock_no_listen,
2345 .shutdown = sock_no_shutdown,
2346 .setsockopt = sock_no_setsockopt,
2347 .getsockopt = sock_no_getsockopt,
2348 .sendmsg = packet_sendmsg_spkt,
2349 .recvmsg = packet_recvmsg,
2350 .mmap = sock_no_mmap,
2351 .sendpage = sock_no_sendpage,
2354 static const struct proto_ops packet_ops = {
2355 .family = PF_PACKET,
2356 .owner = THIS_MODULE,
2357 .release = packet_release,
2358 .bind = packet_bind,
2359 .connect = sock_no_connect,
2360 .socketpair = sock_no_socketpair,
2361 .accept = sock_no_accept,
2362 .getname = packet_getname,
2363 .poll = packet_poll,
2364 .ioctl = packet_ioctl,
2365 .listen = sock_no_listen,
2366 .shutdown = sock_no_shutdown,
2367 .setsockopt = packet_setsockopt,
2368 .getsockopt = packet_getsockopt,
2369 .sendmsg = packet_sendmsg,
2370 .recvmsg = packet_recvmsg,
2371 .mmap = packet_mmap,
2372 .sendpage = sock_no_sendpage,
2375 static struct net_proto_family packet_family_ops = {
2376 .family = PF_PACKET,
2377 .create = packet_create,
2378 .owner = THIS_MODULE,
2381 static struct notifier_block packet_netdev_notifier = {
2382 .notifier_call = packet_notifier,
2385 #ifdef CONFIG_PROC_FS
2386 static inline struct sock *packet_seq_idx(struct net *net, loff_t off)
2388 struct sock *s;
2389 struct hlist_node *node;
2391 sk_for_each(s, node, &net->packet.sklist) {
2392 if (!off--)
2393 return s;
2395 return NULL;
2398 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
2399 __acquires(seq_file_net(seq)->packet.sklist_lock)
2401 struct net *net = seq_file_net(seq);
2402 read_lock(&net->packet.sklist_lock);
2403 return *pos ? packet_seq_idx(net, *pos - 1) : SEQ_START_TOKEN;
2406 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2408 struct net *net = seq_file_net(seq);
2409 ++*pos;
2410 return (v == SEQ_START_TOKEN)
2411 ? sk_head(&net->packet.sklist)
2412 : sk_next((struct sock *)v) ;
2415 static void packet_seq_stop(struct seq_file *seq, void *v)
2416 __releases(seq_file_net(seq)->packet.sklist_lock)
2418 struct net *net = seq_file_net(seq);
2419 read_unlock(&net->packet.sklist_lock);
2422 static int packet_seq_show(struct seq_file *seq, void *v)
2424 if (v == SEQ_START_TOKEN)
2425 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
2426 else {
2427 struct sock *s = v;
2428 const struct packet_sock *po = pkt_sk(s);
2430 seq_printf(seq,
2431 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
2433 atomic_read(&s->sk_refcnt),
2434 s->sk_type,
2435 ntohs(po->num),
2436 po->ifindex,
2437 po->running,
2438 atomic_read(&s->sk_rmem_alloc),
2439 sock_i_uid(s),
2440 sock_i_ino(s));
2443 return 0;
2446 static const struct seq_operations packet_seq_ops = {
2447 .start = packet_seq_start,
2448 .next = packet_seq_next,
2449 .stop = packet_seq_stop,
2450 .show = packet_seq_show,
2453 static int packet_seq_open(struct inode *inode, struct file *file)
2455 return seq_open_net(inode, file, &packet_seq_ops,
2456 sizeof(struct seq_net_private));
2459 static const struct file_operations packet_seq_fops = {
2460 .owner = THIS_MODULE,
2461 .open = packet_seq_open,
2462 .read = seq_read,
2463 .llseek = seq_lseek,
2464 .release = seq_release_net,
2467 #endif
2469 static int packet_net_init(struct net *net)
2471 rwlock_init(&net->packet.sklist_lock);
2472 INIT_HLIST_HEAD(&net->packet.sklist);
2474 if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
2475 return -ENOMEM;
2477 return 0;
2480 static void packet_net_exit(struct net *net)
2482 proc_net_remove(net, "packet");
2485 static struct pernet_operations packet_net_ops = {
2486 .init = packet_net_init,
2487 .exit = packet_net_exit,
2491 static void __exit packet_exit(void)
2493 unregister_netdevice_notifier(&packet_netdev_notifier);
2494 unregister_pernet_subsys(&packet_net_ops);
2495 sock_unregister(PF_PACKET);
2496 proto_unregister(&packet_proto);
2499 static int __init packet_init(void)
2501 int rc = proto_register(&packet_proto, 0);
2503 if (rc != 0)
2504 goto out;
2506 sock_register(&packet_family_ops);
2507 register_pernet_subsys(&packet_net_ops);
2508 register_netdevice_notifier(&packet_netdev_notifier);
2509 out:
2510 return rc;
2513 module_init(packet_init);
2514 module_exit(packet_exit);
2515 MODULE_LICENSE("GPL");
2516 MODULE_ALIAS_NETPROTO(PF_PACKET);