[NET]: Replace CHECKSUM_HW by CHECKSUM_PARTIAL/CHECKSUM_COMPLETE
net/packet/af_packet.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * PACKET - implements raw packet sockets.
8 * Version: $Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org>
14 * Fixes:
15 * Alan Cox : verify_area() now used correctly
16 * Alan Cox : new skbuff lists, look ma no backlogs!
17 * Alan Cox : tidied skbuff lists.
18 * Alan Cox : Now uses generic datagram routines I
19 * added. Also fixed the peek/read crash
20 * from all old Linux datagram code.
21 * Alan Cox : Uses the improved datagram code.
22 * Alan Cox : Added NULL's for socket options.
23 * Alan Cox : Re-commented the code.
24 * Alan Cox : Use new kernel side addressing
25 * Rob Janssen : Correct MTU usage.
26 * Dave Platt : Counter leaks caused by incorrect
27 * interrupt locking and some slightly
28 * dubious gcc output. Can you read
29 * compiler: it said _VOLATILE_
30 * Richard Kooijman : Timestamp fixes.
31 * Alan Cox : New buffers. Use sk->mac.raw.
32 * Alan Cox : sendmsg/recvmsg support.
33 * Alan Cox : Protocol setting support
34 * Alexey Kuznetsov : Untied from IPv4 stack.
35 * Cyrus Durgin : Fixed kerneld for kmod.
36 * Michal Ostrowski : Module initialization cleanup.
37 * Ulises Alonso : Frame number limit removal and
38 * packet_set_ring memory leak.
39 * Eric Biederman : Allow for > 8 byte hardware addresses.
40 * The convention is that longer addresses
41 * will simply extend the hardware address
42 * byte arrays at the end of sockaddr_ll
43 * and packet_mreq.
45 * This program is free software; you can redistribute it and/or
46 * modify it under the terms of the GNU General Public License
47 * as published by the Free Software Foundation; either version
48 * 2 of the License, or (at your option) any later version.
52 #include <linux/types.h>
53 #include <linux/sched.h>
54 #include <linux/mm.h>
55 #include <linux/capability.h>
56 #include <linux/fcntl.h>
57 #include <linux/socket.h>
58 #include <linux/in.h>
59 #include <linux/inet.h>
60 #include <linux/netdevice.h>
61 #include <linux/if_packet.h>
62 #include <linux/wireless.h>
63 #include <linux/kmod.h>
64 #include <net/ip.h>
65 #include <net/protocol.h>
66 #include <linux/skbuff.h>
67 #include <net/sock.h>
68 #include <linux/errno.h>
69 #include <linux/timer.h>
70 #include <asm/system.h>
71 #include <asm/uaccess.h>
72 #include <asm/ioctls.h>
73 #include <asm/page.h>
74 #include <asm/io.h>
75 #include <linux/proc_fs.h>
76 #include <linux/seq_file.h>
77 #include <linux/poll.h>
78 #include <linux/module.h>
79 #include <linux/init.h>
81 #ifdef CONFIG_INET
82 #include <net/inet_common.h>
83 #endif
85 #define CONFIG_SOCK_PACKET 1
87 /*
88    Proposed replacement for SIOC{ADD,DEL}MULTI and
89    IFF_PROMISC, IFF_ALLMULTI flags.
91    It is more expensive, but, I believe,
92    it is a really correct solution: reentrant, safe and fault tolerant.
94    IFF_PROMISC/IFF_ALLMULTI/SIOC{ADD/DEL}MULTI are faked by keeping a
95    reference count and a global flag, so that the real status is
96    (gflag|(count != 0)), so that we can use the obsolete faulty interface
97    without harming clever users.
98  */
99 #define CONFIG_PACKET_MULTICAST 1
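/*
 * Illustrative sketch (not from this file) of the "(gflag|(count != 0))"
 * rule above, assuming the net_device fields of this kernel era
 * (dev->gflags holds the user-visible flag, dev->promiscuity the
 * reference count maintained by dev_set_promiscuity()):
 *
 *	int promisc_on = (dev->gflags & IFF_PROMISC) ||
 *			 (dev->promiscuity != 0);
 *
 * The legacy SIOCSIFFLAGS path only toggles the global flag, so neither
 * interface can clobber the other's state.
 */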
101 /*
102    Assumptions:
103    - if a device has no dev->hard_header routine, it adds and removes the ll
104      header inside itself. In this case the ll header is invisible outside of
105      the device, but higher levels still should reserve dev->hard_header_len.
106      Some devices are clever enough to reallocate the skb when the header
107      will not fit into the reserved space (tunnel); others are silly
108      (PPP).
109    - the packet socket receives packets with the ll header pulled,
110      so SOCK_RAW should push it back.
112    On receive:
113    -----------
115    Incoming, dev->hard_header!=NULL
116      mac.raw -> ll header
117      data    -> data
119    Outgoing, dev->hard_header!=NULL
120      mac.raw -> ll header
121      data    -> ll header
123    Incoming, dev->hard_header==NULL
124      mac.raw -> UNKNOWN position. It very likely points to the ll header.
125      PPP does this, which is wrong, because it introduces asymmetry
126      between the rx and tx paths.
127      data    -> data
129    Outgoing, dev->hard_header==NULL
130      mac.raw -> data. The ll header is still not built!
131      data    -> data
133    Summary:
134    If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
137    On transmit:
138    ------------
140    dev->hard_header != NULL
141      mac.raw -> ll header
142      data    -> ll header
144    dev->hard_header == NULL (ll header is added by the device, we cannot control it)
145      mac.raw -> data
146      data    -> data
148    We should set nh.raw on output to the correct position;
149    the packet classifier depends on it.
150  */
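/*
 * The receive-side rules above in code form (a sketch; this exact
 * pattern appears in packet_rcv() and tpacket_rcv() below). SOCK_RAW
 * pushes the ll header back so user space sees it; for outgoing
 * packets seen by a SOCK_DGRAM socket the ll header is stripped:
 *
 *	if (sk->sk_type != SOCK_DGRAM)
 *		skb_push(skb, skb->data - skb->mac.raw);
 *	else if (skb->pkt_type == PACKET_OUTGOING)
 *		skb_pull(skb, skb->nh.raw - skb->data);
 */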
152 /* List of all packet sockets. */
153 static HLIST_HEAD(packet_sklist);
154 static DEFINE_RWLOCK(packet_sklist_lock);
156 static atomic_t packet_socks_nr;
159 /* Private packet socket structures. */
161 #ifdef CONFIG_PACKET_MULTICAST
162 struct packet_mclist
164 struct packet_mclist *next;
165 int ifindex;
166 int count;
167 unsigned short type;
168 unsigned short alen;
169 unsigned char addr[MAX_ADDR_LEN];
171 /* identical to struct packet_mreq except it has
172  * a longer address field.
173  */
174 struct packet_mreq_max
176 int mr_ifindex;
177 unsigned short mr_type;
178 unsigned short mr_alen;
179 unsigned char mr_address[MAX_ADDR_LEN];
181 #endif
182 #ifdef CONFIG_PACKET_MMAP
183 static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing);
184 #endif
186 static void packet_flush_mclist(struct sock *sk);
188 struct packet_sock {
189 /* struct sock has to be the first member of packet_sock */
190 struct sock sk;
191 struct tpacket_stats stats;
192 #ifdef CONFIG_PACKET_MMAP
193 char * *pg_vec;
194 unsigned int head;
195 unsigned int frames_per_block;
196 unsigned int frame_size;
197 unsigned int frame_max;
198 int copy_thresh;
199 #endif
200 struct packet_type prot_hook;
201 spinlock_t bind_lock;
202 char running; /* prot_hook is attached*/
203 int ifindex; /* bound device */
204 unsigned short num;
205 #ifdef CONFIG_PACKET_MULTICAST
206 struct packet_mclist *mclist;
207 #endif
208 #ifdef CONFIG_PACKET_MMAP
209 atomic_t mapped;
210 unsigned int pg_vec_order;
211 unsigned int pg_vec_pages;
212 unsigned int pg_vec_len;
213 #endif
216 #ifdef CONFIG_PACKET_MMAP
218 static inline char *packet_lookup_frame(struct packet_sock *po, unsigned int position)
220 unsigned int pg_vec_pos, frame_offset;
221 char *frame;
223 pg_vec_pos = position / po->frames_per_block;
224 frame_offset = position % po->frames_per_block;
226 frame = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);
228 return frame;
230 #endif
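/*
 * Worked example for packet_lookup_frame() with illustrative ring sizes:
 * tp_block_size = 4096 and tp_frame_size = 2048 give frames_per_block = 2,
 * so position 5 maps to pg_vec[5 / 2] = pg_vec[2] at byte offset
 * (5 % 2) * 2048 = 2048, i.e. the second frame of the third block.
 */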
232 static inline struct packet_sock *pkt_sk(struct sock *sk)
234 return (struct packet_sock *)sk;
237 static void packet_sock_destruct(struct sock *sk)
239 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
240 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
242 if (!sock_flag(sk, SOCK_DEAD)) {
243 printk("Attempt to release alive packet socket: %p\n", sk);
244 return;
247 atomic_dec(&packet_socks_nr);
248 #ifdef PACKET_REFCNT_DEBUG
249 printk(KERN_DEBUG "PACKET socket %p is free, %d are alive\n", sk, atomic_read(&packet_socks_nr));
250 #endif
254 static const struct proto_ops packet_ops;
256 #ifdef CONFIG_SOCK_PACKET
257 static const struct proto_ops packet_ops_spkt;
259 static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
261 struct sock *sk;
262 struct sockaddr_pkt *spkt;
265 * When we registered the protocol we saved the socket in the data
266 * field for just this event.
269 sk = pt->af_packet_priv;
271 /*
272  * Yank back the headers [hope the device set this
273  * right or kerboom...]
275  * Incoming packets have ll header pulled,
276  * push it back.
278  * For outgoing ones skb->data == skb->mac.raw
279  * so that this procedure is a noop.
280  */
282 if (skb->pkt_type == PACKET_LOOPBACK)
283 goto out;
285 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
286 goto oom;
288 /* drop any routing info */
289 dst_release(skb->dst);
290 skb->dst = NULL;
292 /* drop conntrack reference */
293 nf_reset(skb);
295 spkt = (struct sockaddr_pkt*)skb->cb;
297 skb_push(skb, skb->data-skb->mac.raw);
300 * The SOCK_PACKET socket receives _all_ frames.
303 spkt->spkt_family = dev->type;
304 strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
305 spkt->spkt_protocol = skb->protocol;
308 * Charge the memory to the socket. This is done specifically
309 * to prevent sockets using all the memory up.
312 if (sock_queue_rcv_skb(sk,skb) == 0)
313 return 0;
315 out:
316 kfree_skb(skb);
317 oom:
318 return 0;
323 * Output a raw packet to a device layer. This bypasses all the other
324 * protocol layers and you must therefore supply it with a complete frame
327 static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
328 struct msghdr *msg, size_t len)
330 struct sock *sk = sock->sk;
331 struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name;
332 struct sk_buff *skb;
333 struct net_device *dev;
334 unsigned short proto=0;
335 int err;
338 * Get and verify the address.
341 if (saddr)
343 if (msg->msg_namelen < sizeof(struct sockaddr))
344 return(-EINVAL);
345 if (msg->msg_namelen==sizeof(struct sockaddr_pkt))
346 proto=saddr->spkt_protocol;
348 else
349 return(-ENOTCONN); /* SOCK_PACKET must be sent giving an address */
352 * Find the device first to size check it
355 saddr->spkt_device[13] = 0;
356 dev = dev_get_by_name(saddr->spkt_device);
357 err = -ENODEV;
358 if (dev == NULL)
359 goto out_unlock;
362 * You may not queue a frame bigger than the mtu. This is the lowest level
363 * raw protocol and you must do your own fragmentation at this level.
366 err = -EMSGSIZE;
367 if (len > dev->mtu + dev->hard_header_len)
368 goto out_unlock;
370 err = -ENOBUFS;
371 skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);
374 * If the write buffer is full, then tough. At this level the user gets to
375 * deal with the problem - do your own algorithmic backoffs. That's far
376 * more flexible.
379 if (skb == NULL)
380 goto out_unlock;
383 * Fill it in
386 /* FIXME: Save some space for broken drivers that write a
387  * hard header at transmission time by themselves. PPP is the
388  * notable one here. This should really be fixed at the driver level.
389  */
390 skb_reserve(skb, LL_RESERVED_SPACE(dev));
391 skb->nh.raw = skb->data;
393 /* Try to align data part correctly */
394 if (dev->hard_header) {
395 skb->data -= dev->hard_header_len;
396 skb->tail -= dev->hard_header_len;
397 if (len < dev->hard_header_len)
398 skb->nh.raw = skb->data;
401 /* Returns -EFAULT on error */
402 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
403 skb->protocol = proto;
404 skb->dev = dev;
405 skb->priority = sk->sk_priority;
406 if (err)
407 goto out_free;
409 err = -ENETDOWN;
410 if (!(dev->flags & IFF_UP))
411 goto out_free;
414 * Now send it
417 dev_queue_xmit(skb);
418 dev_put(dev);
419 return(len);
421 out_free:
422 kfree_skb(skb);
423 out_unlock:
424 if (dev)
425 dev_put(dev);
426 return err;
428 #endif
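/*
 * Minimal user-space sketch of the SOCK_PACKET send path above (names and
 * device are illustrative; SOCK_PACKET is the obsolete interface, new code
 * should prefer AF_PACKET with sockaddr_ll):
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	strncpy(spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 *
 * "frame" must be a complete link-layer frame; as the comment above says,
 * nothing below this layer will fragment or fill in headers for you.
 */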
430 static inline unsigned run_filter(struct sk_buff *skb, struct sock *sk, unsigned res)
432 struct sk_filter *filter;
434 bh_lock_sock(sk);
435 filter = sk->sk_filter;
437 * Our caller already checked that filter != NULL but we need to
438 * verify that under bh_lock_sock() to be safe
440 if (likely(filter != NULL))
441 res = sk_run_filter(skb, filter->insns, filter->len);
442 bh_unlock_sock(sk);
444 return res;
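/*
 * Illustrative user-space counterpart of run_filter() (a sketch): the
 * classic BPF program is installed with SO_ATTACH_FILTER. A return of 0
 * drops the packet; a nonzero return caps the bytes delivered, which is
 * exactly how "res" is used by packet_rcv()/tpacket_rcv():
 *
 *	struct sock_filter insns[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = insns };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * This one-instruction program accepts every packet with a 0xffff snaplen.
 */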
447 /*
448    This function performs lazy skb cloning in the hope that most packets
449    are discarded by BPF.
451    Note the tricky part: we DO mangle a shared skb! skb->data, skb->len
452    and skb->cb are mangled. It works because (and until) packets
453    falling here are owned by the current CPU. Output packets are cloned
454    by dev_queue_xmit_nit(), input packets are processed by net_bh
455    sequentially, so if we return the skb to its original state on exit,
456    we will not harm anyone.
457  */
459 static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
461 struct sock *sk;
462 struct sockaddr_ll *sll;
463 struct packet_sock *po;
464 u8 * skb_head = skb->data;
465 int skb_len = skb->len;
466 unsigned snaplen;
468 if (skb->pkt_type == PACKET_LOOPBACK)
469 goto drop;
471 sk = pt->af_packet_priv;
472 po = pkt_sk(sk);
474 skb->dev = dev;
476 if (dev->hard_header) {
477 /* The device has an explicit notion of ll header,
478    exported to higher levels.
480    Otherwise, the device hides the details of its frame
481    structure, so that the corresponding packet head is
482    never delivered to the user.
483  */
484 if (sk->sk_type != SOCK_DGRAM)
485 skb_push(skb, skb->data - skb->mac.raw);
486 else if (skb->pkt_type == PACKET_OUTGOING) {
487 /* Special case: outgoing packets have ll header at head */
488 skb_pull(skb, skb->nh.raw - skb->data);
492 snaplen = skb->len;
494 if (sk->sk_filter) {
495 unsigned res = run_filter(skb, sk, snaplen);
496 if (res == 0)
497 goto drop_n_restore;
498 if (snaplen > res)
499 snaplen = res;
502 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
503 (unsigned)sk->sk_rcvbuf)
504 goto drop_n_acct;
506 if (skb_shared(skb)) {
507 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
508 if (nskb == NULL)
509 goto drop_n_acct;
511 if (skb_head != skb->data) {
512 skb->data = skb_head;
513 skb->len = skb_len;
515 kfree_skb(skb);
516 skb = nskb;
519 sll = (struct sockaddr_ll*)skb->cb;
520 sll->sll_family = AF_PACKET;
521 sll->sll_hatype = dev->type;
522 sll->sll_protocol = skb->protocol;
523 sll->sll_pkttype = skb->pkt_type;
524 sll->sll_ifindex = dev->ifindex;
525 sll->sll_halen = 0;
527 if (dev->hard_header_parse)
528 sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
530 if (pskb_trim(skb, snaplen))
531 goto drop_n_acct;
533 skb_set_owner_r(skb, sk);
534 skb->dev = NULL;
535 dst_release(skb->dst);
536 skb->dst = NULL;
538 /* drop conntrack reference */
539 nf_reset(skb);
541 spin_lock(&sk->sk_receive_queue.lock);
542 po->stats.tp_packets++;
543 __skb_queue_tail(&sk->sk_receive_queue, skb);
544 spin_unlock(&sk->sk_receive_queue.lock);
545 sk->sk_data_ready(sk, skb->len);
546 return 0;
548 drop_n_acct:
549 spin_lock(&sk->sk_receive_queue.lock);
550 po->stats.tp_drops++;
551 spin_unlock(&sk->sk_receive_queue.lock);
553 drop_n_restore:
554 if (skb_head != skb->data && skb_shared(skb)) {
555 skb->data = skb_head;
556 skb->len = skb_len;
558 drop:
559 kfree_skb(skb);
560 return 0;
563 #ifdef CONFIG_PACKET_MMAP
564 static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
566 struct sock *sk;
567 struct packet_sock *po;
568 struct sockaddr_ll *sll;
569 struct tpacket_hdr *h;
570 u8 * skb_head = skb->data;
571 int skb_len = skb->len;
572 unsigned snaplen;
573 unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
574 unsigned short macoff, netoff;
575 struct sk_buff *copy_skb = NULL;
577 if (skb->pkt_type == PACKET_LOOPBACK)
578 goto drop;
580 sk = pt->af_packet_priv;
581 po = pkt_sk(sk);
583 if (dev->hard_header) {
584 if (sk->sk_type != SOCK_DGRAM)
585 skb_push(skb, skb->data - skb->mac.raw);
586 else if (skb->pkt_type == PACKET_OUTGOING) {
587 /* Special case: outgoing packets have ll header at head */
588 skb_pull(skb, skb->nh.raw - skb->data);
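/* CHECKSUM_PARTIAL (the tx-side successor of the old CHECKSUM_HW, per the
 * commit shown here) means the checksum has not been computed yet, so warn
 * user space that the captured copy is not final. */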
589 if (skb->ip_summed == CHECKSUM_PARTIAL)
590 status |= TP_STATUS_CSUMNOTREADY;
594 snaplen = skb->len;
596 if (sk->sk_filter) {
597 unsigned res = run_filter(skb, sk, snaplen);
598 if (res == 0)
599 goto drop_n_restore;
600 if (snaplen > res)
601 snaplen = res;
604 if (sk->sk_type == SOCK_DGRAM) {
605 macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
606 } else {
607 unsigned maclen = skb->nh.raw - skb->data;
608 netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
609 macoff = netoff - maclen;
612 if (macoff + snaplen > po->frame_size) {
613 if (po->copy_thresh &&
614 atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
615 (unsigned)sk->sk_rcvbuf) {
616 if (skb_shared(skb)) {
617 copy_skb = skb_clone(skb, GFP_ATOMIC);
618 } else {
619 copy_skb = skb_get(skb);
620 skb_head = skb->data;
622 if (copy_skb)
623 skb_set_owner_r(copy_skb, sk);
625 snaplen = po->frame_size - macoff;
626 if ((int)snaplen < 0)
627 snaplen = 0;
630 spin_lock(&sk->sk_receive_queue.lock);
631 h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);
633 if (h->tp_status)
634 goto ring_is_full;
635 po->head = po->head != po->frame_max ? po->head+1 : 0;
636 po->stats.tp_packets++;
637 if (copy_skb) {
638 status |= TP_STATUS_COPY;
639 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
641 if (!po->stats.tp_drops)
642 status &= ~TP_STATUS_LOSING;
643 spin_unlock(&sk->sk_receive_queue.lock);
645 skb_copy_bits(skb, 0, (u8*)h + macoff, snaplen);
647 h->tp_len = skb->len;
648 h->tp_snaplen = snaplen;
649 h->tp_mac = macoff;
650 h->tp_net = netoff;
651 if (skb->tstamp.off_sec == 0) {
652 __net_timestamp(skb);
653 sock_enable_timestamp(sk);
655 h->tp_sec = skb->tstamp.off_sec;
656 h->tp_usec = skb->tstamp.off_usec;
658 sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
659 sll->sll_halen = 0;
660 if (dev->hard_header_parse)
661 sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
662 sll->sll_family = AF_PACKET;
663 sll->sll_hatype = dev->type;
664 sll->sll_protocol = skb->protocol;
665 sll->sll_pkttype = skb->pkt_type;
666 sll->sll_ifindex = dev->ifindex;
668 h->tp_status = status;
669 mb();
672 struct page *p_start, *p_end;
673 u8 *h_end = (u8 *)h + macoff + snaplen - 1;
675 p_start = virt_to_page(h);
676 p_end = virt_to_page(h_end);
677 while (p_start <= p_end) {
678 flush_dcache_page(p_start);
679 p_start++;
683 sk->sk_data_ready(sk, 0);
685 drop_n_restore:
686 if (skb_head != skb->data && skb_shared(skb)) {
687 skb->data = skb_head;
688 skb->len = skb_len;
690 drop:
691 kfree_skb(skb);
692 return 0;
694 ring_is_full:
695 po->stats.tp_drops++;
696 spin_unlock(&sk->sk_receive_queue.lock);
698 sk->sk_data_ready(sk, 0);
699 if (copy_skb)
700 kfree_skb(copy_skb);
701 goto drop_n_restore;
704 #endif
707 static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
708 struct msghdr *msg, size_t len)
710 struct sock *sk = sock->sk;
711 struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name;
712 struct sk_buff *skb;
713 struct net_device *dev;
714 unsigned short proto;
715 unsigned char *addr;
716 int ifindex, err, reserve = 0;
719 * Get and verify the address.
722 if (saddr == NULL) {
723 struct packet_sock *po = pkt_sk(sk);
725 ifindex = po->ifindex;
726 proto = po->num;
727 addr = NULL;
728 } else {
729 err = -EINVAL;
730 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
731 goto out;
732 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
733 goto out;
734 ifindex = saddr->sll_ifindex;
735 proto = saddr->sll_protocol;
736 addr = saddr->sll_addr;
740 dev = dev_get_by_index(ifindex);
741 err = -ENXIO;
742 if (dev == NULL)
743 goto out_unlock;
744 if (sock->type == SOCK_RAW)
745 reserve = dev->hard_header_len;
747 err = -EMSGSIZE;
748 if (len > dev->mtu+reserve)
749 goto out_unlock;
751 skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev),
752 msg->msg_flags & MSG_DONTWAIT, &err);
753 if (skb==NULL)
754 goto out_unlock;
756 skb_reserve(skb, LL_RESERVED_SPACE(dev));
757 skb->nh.raw = skb->data;
759 if (dev->hard_header) {
760 int res;
761 err = -EINVAL;
762 res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
763 if (sock->type != SOCK_DGRAM) {
764 skb->tail = skb->data;
765 skb->len = 0;
766 } else if (res < 0)
767 goto out_free;
770 /* Returns -EFAULT on error */
771 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
772 if (err)
773 goto out_free;
775 skb->protocol = proto;
776 skb->dev = dev;
777 skb->priority = sk->sk_priority;
779 err = -ENETDOWN;
780 if (!(dev->flags & IFF_UP))
781 goto out_free;
784 * Now send it
787 err = dev_queue_xmit(skb);
788 if (err > 0 && (err = net_xmit_errno(err)) != 0)
789 goto out_unlock;
791 dev_put(dev);
793 return(len);
795 out_free:
796 kfree_skb(skb);
797 out_unlock:
798 if (dev)
799 dev_put(dev);
800 out:
801 return err;
805 * Close a PACKET socket. This is fairly simple. We immediately go
806 * to 'closed' state and remove our protocol entry in the device list.
809 static int packet_release(struct socket *sock)
811 struct sock *sk = sock->sk;
812 struct packet_sock *po;
814 if (!sk)
815 return 0;
817 po = pkt_sk(sk);
819 write_lock_bh(&packet_sklist_lock);
820 sk_del_node_init(sk);
821 write_unlock_bh(&packet_sklist_lock);
824 * Unhook packet receive handler.
827 if (po->running) {
829 * Remove the protocol hook
831 dev_remove_pack(&po->prot_hook);
832 po->running = 0;
833 po->num = 0;
834 __sock_put(sk);
837 #ifdef CONFIG_PACKET_MULTICAST
838 packet_flush_mclist(sk);
839 #endif
841 #ifdef CONFIG_PACKET_MMAP
842 if (po->pg_vec) {
843 struct tpacket_req req;
844 memset(&req, 0, sizeof(req));
845 packet_set_ring(sk, &req, 1);
847 #endif
850 * Now the socket is dead. No more input will appear.
853 sock_orphan(sk);
854 sock->sk = NULL;
856 /* Purge queues */
858 skb_queue_purge(&sk->sk_receive_queue);
860 sock_put(sk);
861 return 0;
865 * Attach a packet hook.
868 static int packet_do_bind(struct sock *sk, struct net_device *dev, int protocol)
870 struct packet_sock *po = pkt_sk(sk);
872 * Detach an existing hook if present.
875 lock_sock(sk);
877 spin_lock(&po->bind_lock);
878 if (po->running) {
879 __sock_put(sk);
880 po->running = 0;
881 po->num = 0;
882 spin_unlock(&po->bind_lock);
883 dev_remove_pack(&po->prot_hook);
884 spin_lock(&po->bind_lock);
887 po->num = protocol;
888 po->prot_hook.type = protocol;
889 po->prot_hook.dev = dev;
891 po->ifindex = dev ? dev->ifindex : 0;
893 if (protocol == 0)
894 goto out_unlock;
896 if (dev) {
897 if (dev->flags&IFF_UP) {
898 dev_add_pack(&po->prot_hook);
899 sock_hold(sk);
900 po->running = 1;
901 } else {
902 sk->sk_err = ENETDOWN;
903 if (!sock_flag(sk, SOCK_DEAD))
904 sk->sk_error_report(sk);
906 } else {
907 dev_add_pack(&po->prot_hook);
908 sock_hold(sk);
909 po->running = 1;
912 out_unlock:
913 spin_unlock(&po->bind_lock);
914 release_sock(sk);
915 return 0;
919 * Bind a packet socket to a device
922 #ifdef CONFIG_SOCK_PACKET
924 static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
926 struct sock *sk=sock->sk;
927 char name[15];
928 struct net_device *dev;
929 int err = -ENODEV;
932 * Check legality
935 if (addr_len != sizeof(struct sockaddr))
936 return -EINVAL;
937 strlcpy(name,uaddr->sa_data,sizeof(name));
939 dev = dev_get_by_name(name);
940 if (dev) {
941 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
942 dev_put(dev);
944 return err;
946 #endif
948 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
950 struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr;
951 struct sock *sk=sock->sk;
952 struct net_device *dev = NULL;
953 int err;
957 * Check legality
960 if (addr_len < sizeof(struct sockaddr_ll))
961 return -EINVAL;
962 if (sll->sll_family != AF_PACKET)
963 return -EINVAL;
965 if (sll->sll_ifindex) {
966 err = -ENODEV;
967 dev = dev_get_by_index(sll->sll_ifindex);
968 if (dev == NULL)
969 goto out;
971 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
972 if (dev)
973 dev_put(dev);
975 out:
976 return err;
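/*
 * Illustrative user-space counterpart of packet_bind() (a sketch; obtaining
 * "ifindex", e.g. via SIOCGIFINDEX, is assumed):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = ifindex,
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 * A zero sll_ifindex binds to all devices, matching the dev == NULL branch
 * of packet_do_bind() above.
 */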
979 static struct proto packet_proto = {
980 .name = "PACKET",
981 .owner = THIS_MODULE,
982 .obj_size = sizeof(struct packet_sock),
986 * Create a packet of type SOCK_PACKET.
989 static int packet_create(struct socket *sock, int protocol)
991 struct sock *sk;
992 struct packet_sock *po;
993 int err;
995 if (!capable(CAP_NET_RAW))
996 return -EPERM;
997 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW
998 #ifdef CONFIG_SOCK_PACKET
999 && sock->type != SOCK_PACKET
1000 #endif
1002 return -ESOCKTNOSUPPORT;
1004 sock->state = SS_UNCONNECTED;
1006 err = -ENOBUFS;
1007 sk = sk_alloc(PF_PACKET, GFP_KERNEL, &packet_proto, 1);
1008 if (sk == NULL)
1009 goto out;
1011 sock->ops = &packet_ops;
1012 #ifdef CONFIG_SOCK_PACKET
1013 if (sock->type == SOCK_PACKET)
1014 sock->ops = &packet_ops_spkt;
1015 #endif
1016 sock_init_data(sock, sk);
1018 po = pkt_sk(sk);
1019 sk->sk_family = PF_PACKET;
1020 po->num = protocol;
1022 sk->sk_destruct = packet_sock_destruct;
1023 atomic_inc(&packet_socks_nr);
1026 * Attach a protocol block
1029 spin_lock_init(&po->bind_lock);
1030 po->prot_hook.func = packet_rcv;
1031 #ifdef CONFIG_SOCK_PACKET
1032 if (sock->type == SOCK_PACKET)
1033 po->prot_hook.func = packet_rcv_spkt;
1034 #endif
1035 po->prot_hook.af_packet_priv = sk;
1037 if (protocol) {
1038 po->prot_hook.type = protocol;
1039 dev_add_pack(&po->prot_hook);
1040 sock_hold(sk);
1041 po->running = 1;
1044 write_lock_bh(&packet_sklist_lock);
1045 sk_add_node(sk, &packet_sklist);
1046 write_unlock_bh(&packet_sklist_lock);
1047 return(0);
1048 out:
1049 return err;
1053 * Pull a packet from our receive queue and hand it to the user.
1054 * If necessary we block.
1057 static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1058 struct msghdr *msg, size_t len, int flags)
1060 struct sock *sk = sock->sk;
1061 struct sk_buff *skb;
1062 int copied, err;
1063 struct sockaddr_ll *sll;
1065 err = -EINVAL;
1066 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
1067 goto out;
1069 #if 0
1070 /* What error should we return now? EUNATTACH? */
1071 if (pkt_sk(sk)->ifindex < 0)
1072 return -ENODEV;
1073 #endif
1076 * Call the generic datagram receiver. This handles all sorts
1077 * of horrible races and re-entrancy so we can forget about it
1078 * in the protocol layers.
1080 * Now it will return ENETDOWN, if the device has just gone down,
1081 * but then it will block.
1084 skb=skb_recv_datagram(sk,flags,flags&MSG_DONTWAIT,&err);
1087 * An error occurred, so return it. Because skb_recv_datagram()
1088 * handles the blocking, we don't need to see or worry about
1089 * blocking retries.
1092 if (skb == NULL)
1093 goto out;
1096 * If the address length field is there to be filled in, we fill
1097 * it in now.
1100 sll = (struct sockaddr_ll*)skb->cb;
1101 if (sock->type == SOCK_PACKET)
1102 msg->msg_namelen = sizeof(struct sockaddr_pkt);
1103 else
1104 msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
1107 * You lose any data beyond the buffer you gave. If it worries a
1108 * user program they can ask the device for its MTU anyway.
1111 copied = skb->len;
1112 if (copied > len)
1114 copied=len;
1115 msg->msg_flags|=MSG_TRUNC;
1118 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1119 if (err)
1120 goto out_free;
1122 sock_recv_timestamp(msg, sk, skb);
1124 if (msg->msg_name)
1125 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1128 * Free or return the buffer as appropriate. Again this
1129 * hides all the races and re-entrancy issues from us.
1131 err = (flags&MSG_TRUNC) ? skb->len : copied;
1133 out_free:
1134 skb_free_datagram(sk, skb);
1135 out:
1136 return err;
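/*
 * Illustrative receive loop for the path above (user-space sketch):
 *
 *	struct sockaddr_ll from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 *
 * On return, from.sll_ifindex/sll_pkttype/sll_halen describe the frame as
 * filled in by packet_rcv(); passing MSG_TRUNC makes the call report the
 * full packet length even when "buf" was too small, per the code above.
 */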
1139 #ifdef CONFIG_SOCK_PACKET
1140 static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1141 int *uaddr_len, int peer)
1143 struct net_device *dev;
1144 struct sock *sk = sock->sk;
1146 if (peer)
1147 return -EOPNOTSUPP;
1149 uaddr->sa_family = AF_PACKET;
1150 dev = dev_get_by_index(pkt_sk(sk)->ifindex);
1151 if (dev) {
1152 strlcpy(uaddr->sa_data, dev->name, 15);
1153 dev_put(dev);
1154 } else
1155 memset(uaddr->sa_data, 0, 14);
1156 *uaddr_len = sizeof(*uaddr);
1158 return 0;
1160 #endif
1162 static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1163 int *uaddr_len, int peer)
1165 struct net_device *dev;
1166 struct sock *sk = sock->sk;
1167 struct packet_sock *po = pkt_sk(sk);
1168 struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr;
1170 if (peer)
1171 return -EOPNOTSUPP;
1173 sll->sll_family = AF_PACKET;
1174 sll->sll_ifindex = po->ifindex;
1175 sll->sll_protocol = po->num;
1176 dev = dev_get_by_index(po->ifindex);
1177 if (dev) {
1178 sll->sll_hatype = dev->type;
1179 sll->sll_halen = dev->addr_len;
1180 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
1181 dev_put(dev);
1182 } else {
1183 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
1184 sll->sll_halen = 0;
1186 *uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
1188 return 0;
1191 #ifdef CONFIG_PACKET_MULTICAST
1192 static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
1194 switch (i->type) {
1195 case PACKET_MR_MULTICAST:
1196 if (what > 0)
1197 dev_mc_add(dev, i->addr, i->alen, 0);
1198 else
1199 dev_mc_delete(dev, i->addr, i->alen, 0);
1200 break;
1201 case PACKET_MR_PROMISC:
1202 dev_set_promiscuity(dev, what);
1203 break;
1204 case PACKET_MR_ALLMULTI:
1205 dev_set_allmulti(dev, what);
1206 break;
1207 default:;
1211 static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
1213 for ( ; i; i=i->next) {
1214 if (i->ifindex == dev->ifindex)
1215 packet_dev_mc(dev, i, what);
1219 static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
1221 struct packet_sock *po = pkt_sk(sk);
1222 struct packet_mclist *ml, *i;
1223 struct net_device *dev;
1224 int err;
1226 rtnl_lock();
1228 err = -ENODEV;
1229 dev = __dev_get_by_index(mreq->mr_ifindex);
1230 if (!dev)
1231 goto done;
1233 err = -EINVAL;
1234 if (mreq->mr_alen > dev->addr_len)
1235 goto done;
1237 err = -ENOBUFS;
1238 i = kmalloc(sizeof(*i), GFP_KERNEL);
1239 if (i == NULL)
1240 goto done;
1242 err = 0;
1243 for (ml = po->mclist; ml; ml = ml->next) {
1244 if (ml->ifindex == mreq->mr_ifindex &&
1245 ml->type == mreq->mr_type &&
1246 ml->alen == mreq->mr_alen &&
1247 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1248 ml->count++;
1249 /* Free the new element ... */
1250 kfree(i);
1251 goto done;
1255 i->type = mreq->mr_type;
1256 i->ifindex = mreq->mr_ifindex;
1257 i->alen = mreq->mr_alen;
1258 memcpy(i->addr, mreq->mr_address, i->alen);
1259 i->count = 1;
1260 i->next = po->mclist;
1261 po->mclist = i;
1262 packet_dev_mc(dev, i, +1);
1264 done:
1265 rtnl_unlock();
1266 return err;
1269 static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
1271 struct packet_mclist *ml, **mlp;
1273 rtnl_lock();
1275 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
1276 if (ml->ifindex == mreq->mr_ifindex &&
1277 ml->type == mreq->mr_type &&
1278 ml->alen == mreq->mr_alen &&
1279 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
1280 if (--ml->count == 0) {
1281 struct net_device *dev;
1282 *mlp = ml->next;
1283 dev = dev_get_by_index(ml->ifindex);
1284 if (dev) {
1285 packet_dev_mc(dev, ml, -1);
1286 dev_put(dev);
1288 kfree(ml);
1290 rtnl_unlock();
1291 return 0;
1294 rtnl_unlock();
1295 return -EADDRNOTAVAIL;
1298 static void packet_flush_mclist(struct sock *sk)
1300 struct packet_sock *po = pkt_sk(sk);
1301 struct packet_mclist *ml;
1303 if (!po->mclist)
1304 return;
1306 rtnl_lock();
1307 while ((ml = po->mclist) != NULL) {
1308 struct net_device *dev;
1310 po->mclist = ml->next;
1311 if ((dev = dev_get_by_index(ml->ifindex)) != NULL) {
1312 packet_dev_mc(dev, ml, -1);
1313 dev_put(dev);
1315 kfree(ml);
1317 rtnl_unlock();
1319 #endif
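/*
 * User-space sketch for the membership machinery above (illustrative;
 * "ifindex" is assumed to have been looked up already):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * PACKET_MR_MULTICAST additionally needs mr_alen/mr_address; the refcounted
 * semantics are those implemented by packet_mc_add()/packet_mc_drop().
 */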
1321 static int
1322 packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1324 struct sock *sk = sock->sk;
1325 int ret;
1327 if (level != SOL_PACKET)
1328 return -ENOPROTOOPT;
1330 switch(optname) {
1331 #ifdef CONFIG_PACKET_MULTICAST
1332 case PACKET_ADD_MEMBERSHIP:
1333 case PACKET_DROP_MEMBERSHIP:
1335 struct packet_mreq_max mreq;
1336 int len = optlen;
1337 memset(&mreq, 0, sizeof(mreq));
1338 if (len < sizeof(struct packet_mreq))
1339 return -EINVAL;
1340 if (len > sizeof(mreq))
1341 len = sizeof(mreq);
1342 if (copy_from_user(&mreq,optval,len))
1343 return -EFAULT;
1344 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
1345 return -EINVAL;
1346 if (optname == PACKET_ADD_MEMBERSHIP)
1347 ret = packet_mc_add(sk, &mreq);
1348 else
1349 ret = packet_mc_drop(sk, &mreq);
1350 return ret;
1352 #endif
1353 #ifdef CONFIG_PACKET_MMAP
1354 case PACKET_RX_RING:
1356 struct tpacket_req req;
1358 if (optlen<sizeof(req))
1359 return -EINVAL;
1360 if (copy_from_user(&req,optval,sizeof(req)))
1361 return -EFAULT;
1362 return packet_set_ring(sk, &req, 0);
1364 case PACKET_COPY_THRESH:
1366 int val;
1368 if (optlen!=sizeof(val))
1369 return -EINVAL;
1370 if (copy_from_user(&val,optval,sizeof(val)))
1371 return -EFAULT;
1373 pkt_sk(sk)->copy_thresh = val;
1374 return 0;
1376 #endif
1377 default:
1378 return -ENOPROTOOPT;
1382 static int packet_getsockopt(struct socket *sock, int level, int optname,
1383 char __user *optval, int __user *optlen)
1385 int len;
1386 struct sock *sk = sock->sk;
1387 struct packet_sock *po = pkt_sk(sk);
1389 if (level != SOL_PACKET)
1390 return -ENOPROTOOPT;
1392 if (get_user(len, optlen))
1393 return -EFAULT;
1395 if (len < 0)
1396 return -EINVAL;
1398 switch(optname) {
1399 case PACKET_STATISTICS:
1401 struct tpacket_stats st;
1403 if (len > sizeof(struct tpacket_stats))
1404 len = sizeof(struct tpacket_stats);
1405 spin_lock_bh(&sk->sk_receive_queue.lock);
1406 st = po->stats;
1407 memset(&po->stats, 0, sizeof(st));
1408 spin_unlock_bh(&sk->sk_receive_queue.lock);
1409 st.tp_packets += st.tp_drops;
1411 if (copy_to_user(optval, &st, len))
1412 return -EFAULT;
1413 break;
1415 default:
1416 return -ENOPROTOOPT;
1419 if (put_user(len, optlen))
1420 return -EFAULT;
1421 return 0;
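/*
 * Reading the statistics above from user space (a sketch):
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *
 * Note that st.tp_packets includes st.tp_drops (added just before the
 * copy_to_user() above) and that both counters are reset on every read.
 */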
1425 static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
1427 struct sock *sk;
1428 struct hlist_node *node;
1429 struct net_device *dev = (struct net_device*)data;
1431 read_lock(&packet_sklist_lock);
1432 sk_for_each(sk, node, &packet_sklist) {
1433 struct packet_sock *po = pkt_sk(sk);
1435 switch (msg) {
1436 case NETDEV_UNREGISTER:
1437 #ifdef CONFIG_PACKET_MULTICAST
1438 if (po->mclist)
1439 packet_dev_mclist(dev, po->mclist, -1);
1440 // fallthrough
1441 #endif
1442 case NETDEV_DOWN:
1443 if (dev->ifindex == po->ifindex) {
1444 spin_lock(&po->bind_lock);
1445 if (po->running) {
1446 __dev_remove_pack(&po->prot_hook);
1447 __sock_put(sk);
1448 po->running = 0;
1449 sk->sk_err = ENETDOWN;
1450 if (!sock_flag(sk, SOCK_DEAD))
1451 sk->sk_error_report(sk);
1453 if (msg == NETDEV_UNREGISTER) {
1454 po->ifindex = -1;
1455 po->prot_hook.dev = NULL;
1457 spin_unlock(&po->bind_lock);
1459 break;
1460 case NETDEV_UP:
1461 spin_lock(&po->bind_lock);
1462 if (dev->ifindex == po->ifindex && po->num &&
1463 !po->running) {
1464 dev_add_pack(&po->prot_hook);
1465 sock_hold(sk);
1466 po->running = 1;
1468 spin_unlock(&po->bind_lock);
1469 break;
1472 read_unlock(&packet_sklist_lock);
1473 return NOTIFY_DONE;
1477 static int packet_ioctl(struct socket *sock, unsigned int cmd,
1478 unsigned long arg)
1480 struct sock *sk = sock->sk;
1482 switch(cmd) {
1483 case SIOCOUTQ:
1485 int amount = atomic_read(&sk->sk_wmem_alloc);
1486 return put_user(amount, (int __user *)arg);
1488 case SIOCINQ:
1490 struct sk_buff *skb;
1491 int amount = 0;
1493 spin_lock_bh(&sk->sk_receive_queue.lock);
1494 skb = skb_peek(&sk->sk_receive_queue);
1495 if (skb)
1496 amount = skb->len;
1497 spin_unlock_bh(&sk->sk_receive_queue.lock);
1498 return put_user(amount, (int __user *)arg);
1500 case SIOCGSTAMP:
1501 return sock_get_timestamp(sk, (struct timeval __user *)arg);
1503 #ifdef CONFIG_INET
1504 case SIOCADDRT:
1505 case SIOCDELRT:
1506 case SIOCDARP:
1507 case SIOCGARP:
1508 case SIOCSARP:
1509 case SIOCGIFADDR:
1510 case SIOCSIFADDR:
1511 case SIOCGIFBRDADDR:
1512 case SIOCSIFBRDADDR:
1513 case SIOCGIFNETMASK:
1514 case SIOCSIFNETMASK:
1515 case SIOCGIFDSTADDR:
1516 case SIOCSIFDSTADDR:
1517 case SIOCSIFFLAGS:
1518 return inet_dgram_ops.ioctl(sock, cmd, arg);
1519 #endif
1521 default:
1522 return -ENOIOCTLCMD;
1524 return 0;
1527 #ifndef CONFIG_PACKET_MMAP
1528 #define packet_mmap sock_no_mmap
1529 #define packet_poll datagram_poll
1530 #else
1532 static unsigned int packet_poll(struct file * file, struct socket *sock,
1533 poll_table *wait)
1535 struct sock *sk = sock->sk;
1536 struct packet_sock *po = pkt_sk(sk);
1537 unsigned int mask = datagram_poll(file, sock, wait);
1539 spin_lock_bh(&sk->sk_receive_queue.lock);
1540 if (po->pg_vec) {
1541 unsigned last = po->head ? po->head-1 : po->frame_max;
1542 struct tpacket_hdr *h;
1544 h = (struct tpacket_hdr *)packet_lookup_frame(po, last);
1546 if (h->tp_status)
1547 mask |= POLLIN | POLLRDNORM;
1549 spin_unlock_bh(&sk->sk_receive_queue.lock);
1550 return mask;
1554 /* Dirty? Well, I still did not learn a better way to account
1555  * for user mmaps.
1556  */
1558 static void packet_mm_open(struct vm_area_struct *vma)
1560 struct file *file = vma->vm_file;
1561 struct socket * sock = file->private_data;
1562 struct sock *sk = sock->sk;
1564 if (sk)
1565 atomic_inc(&pkt_sk(sk)->mapped);
1568 static void packet_mm_close(struct vm_area_struct *vma)
1570 struct file *file = vma->vm_file;
1571 struct socket * sock = file->private_data;
1572 struct sock *sk = sock->sk;
1574 if (sk)
1575 atomic_dec(&pkt_sk(sk)->mapped);
1578 static struct vm_operations_struct packet_mmap_ops = {
1579 .open = packet_mm_open,
1580 .close =packet_mm_close,
1583 static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
1585 return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
1588 static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
1590 int i;
1592 for (i = 0; i < len; i++) {
1593 if (likely(pg_vec[i]))
1594 free_pages((unsigned long) pg_vec[i], order);
1596 kfree(pg_vec);
1599 static inline char *alloc_one_pg_vec_page(unsigned long order)
1601 return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
1602 order);
1605 static char **alloc_pg_vec(struct tpacket_req *req, int order)
1607 unsigned int block_nr = req->tp_block_nr;
1608 char **pg_vec;
1609 int i;
1611 pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
1612 if (unlikely(!pg_vec))
1613 goto out;
1615 for (i = 0; i < block_nr; i++) {
1616 pg_vec[i] = alloc_one_pg_vec_page(order);
1617 if (unlikely(!pg_vec[i]))
1618 goto out_free_pgvec;
1621 out:
1622 return pg_vec;
1624 out_free_pgvec:
1625 free_pg_vec(pg_vec, order, block_nr);
1626 pg_vec = NULL;
1627 goto out;
1630 static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
1632 char **pg_vec = NULL;
1633 struct packet_sock *po = pkt_sk(sk);
1634 int was_running, num, order = 0;
1635 int err = 0;
1637 if (req->tp_block_nr) {
1638 int i, l;
1640 /* Sanity tests and some calculations */
1642 if (unlikely(po->pg_vec))
1643 return -EBUSY;
1645 if (unlikely((int)req->tp_block_size <= 0))
1646 return -EINVAL;
1647 if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
1648 return -EINVAL;
1649 if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
1650 return -EINVAL;
1651 if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
1652 return -EINVAL;
1654 po->frames_per_block = req->tp_block_size/req->tp_frame_size;
1655 if (unlikely(po->frames_per_block <= 0))
1656 return -EINVAL;
1657 if (unlikely((po->frames_per_block * req->tp_block_nr) !=
1658 req->tp_frame_nr))
1659 return -EINVAL;
1661 err = -ENOMEM;
1662 order = get_order(req->tp_block_size);
1663 pg_vec = alloc_pg_vec(req, order);
1664 if (unlikely(!pg_vec))
1665 goto out;
1667 l = 0;
1668 for (i = 0; i < req->tp_block_nr; i++) {
1669 char *ptr = pg_vec[i];
1670 struct tpacket_hdr *header;
1671 int k;
1673 for (k = 0; k < po->frames_per_block; k++) {
1674 header = (struct tpacket_hdr *) ptr;
1675 header->tp_status = TP_STATUS_KERNEL;
1676 ptr += req->tp_frame_size;
1679 /* Done */
1680 } else {
1681 if (unlikely(req->tp_frame_nr))
1682 return -EINVAL;
1685 lock_sock(sk);
1687 /* Detach socket from network */
1688 spin_lock(&po->bind_lock);
1689 was_running = po->running;
1690 num = po->num;
1691 if (was_running) {
1692 __dev_remove_pack(&po->prot_hook);
1693 po->num = 0;
1694 po->running = 0;
1695 __sock_put(sk);
1697 spin_unlock(&po->bind_lock);
1699 synchronize_net();
1701 err = -EBUSY;
1702 if (closing || atomic_read(&po->mapped) == 0) {
1703 err = 0;
1704 #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
1706 spin_lock_bh(&sk->sk_receive_queue.lock);
1707 pg_vec = XC(po->pg_vec, pg_vec);
1708 po->frame_max = (req->tp_frame_nr - 1);
1709 po->head = 0;
1710 po->frame_size = req->tp_frame_size;
1711 spin_unlock_bh(&sk->sk_receive_queue.lock);
1713 order = XC(po->pg_vec_order, order);
1714 req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);
1716 po->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
1717 po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv;
1718 skb_queue_purge(&sk->sk_receive_queue);
1719 #undef XC
1720 if (atomic_read(&po->mapped))
1721 printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
1724 spin_lock(&po->bind_lock);
1725 if (was_running && !po->running) {
1726 sock_hold(sk);
1727 po->running = 1;
1728 po->num = num;
1729 dev_add_pack(&po->prot_hook);
1731 spin_unlock(&po->bind_lock);
1733 release_sock(sk);
1735 if (pg_vec)
1736 free_pg_vec(pg_vec, order, req->tp_block_nr);
1737 out:
1738 return err;
1741 static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
1743 struct sock *sk = sock->sk;
1744 struct packet_sock *po = pkt_sk(sk);
1745 unsigned long size;
1746 unsigned long start;
1747 int err = -EINVAL;
1748 int i;
1750 if (vma->vm_pgoff)
1751 return -EINVAL;
1753 size = vma->vm_end - vma->vm_start;
1755 lock_sock(sk);
1756 if (po->pg_vec == NULL)
1757 goto out;
1758 if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
1759 goto out;
1761 start = vma->vm_start;
1762 for (i = 0; i < po->pg_vec_len; i++) {
1763 struct page *page = virt_to_page(po->pg_vec[i]);
1764 int pg_num;
1766 for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
1767 err = vm_insert_page(vma, start, page);
1768 if (unlikely(err))
1769 goto out;
1770 start += PAGE_SIZE;
1773 atomic_inc(&po->mapped);
1774 vma->vm_ops = &packet_mmap_ops;
1775 err = 0;
1777 out:
1778 release_sock(sk);
1779 return err;
1781 #endif
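/*
 * Putting the CONFIG_PACKET_MMAP pieces together from user space (a sketch;
 * the values are illustrative but satisfy the sanity checks in
 * packet_set_ring(): block size a multiple of PAGE_SIZE, frame size
 * TPACKET_ALIGNMENT-aligned, tp_frame_nr == blocks * frames per block):
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, req.tp_block_nr * req.tp_block_size,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * Each frame begins with a struct tpacket_hdr; user space waits (e.g. via
 * poll()) for tp_status != TP_STATUS_KERNEL, consumes the frame, then
 * stores TP_STATUS_KERNEL back to return it to tpacket_rcv().
 */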
1784 #ifdef CONFIG_SOCK_PACKET
1785 static const struct proto_ops packet_ops_spkt = {
1786 .family = PF_PACKET,
1787 .owner = THIS_MODULE,
1788 .release = packet_release,
1789 .bind = packet_bind_spkt,
1790 .connect = sock_no_connect,
1791 .socketpair = sock_no_socketpair,
1792 .accept = sock_no_accept,
1793 .getname = packet_getname_spkt,
1794 .poll = datagram_poll,
1795 .ioctl = packet_ioctl,
1796 .listen = sock_no_listen,
1797 .shutdown = sock_no_shutdown,
1798 .setsockopt = sock_no_setsockopt,
1799 .getsockopt = sock_no_getsockopt,
1800 .sendmsg = packet_sendmsg_spkt,
1801 .recvmsg = packet_recvmsg,
1802 .mmap = sock_no_mmap,
1803 .sendpage = sock_no_sendpage,
1805 #endif
1807 static const struct proto_ops packet_ops = {
1808 .family = PF_PACKET,
1809 .owner = THIS_MODULE,
1810 .release = packet_release,
1811 .bind = packet_bind,
1812 .connect = sock_no_connect,
1813 .socketpair = sock_no_socketpair,
1814 .accept = sock_no_accept,
1815 .getname = packet_getname,
1816 .poll = packet_poll,
1817 .ioctl = packet_ioctl,
1818 .listen = sock_no_listen,
1819 .shutdown = sock_no_shutdown,
1820 .setsockopt = packet_setsockopt,
1821 .getsockopt = packet_getsockopt,
1822 .sendmsg = packet_sendmsg,
1823 .recvmsg = packet_recvmsg,
1824 .mmap = packet_mmap,
1825 .sendpage = sock_no_sendpage,
1828 static struct net_proto_family packet_family_ops = {
1829 .family = PF_PACKET,
1830 .create = packet_create,
1831 .owner = THIS_MODULE,
1834 static struct notifier_block packet_netdev_notifier = {
1835 .notifier_call =packet_notifier,
1838 #ifdef CONFIG_PROC_FS
1839 static inline struct sock *packet_seq_idx(loff_t off)
1841 struct sock *s;
1842 struct hlist_node *node;
1844 sk_for_each(s, node, &packet_sklist) {
1845 if (!off--)
1846 return s;
1848 return NULL;
1851 static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
1853 read_lock(&packet_sklist_lock);
1854 return *pos ? packet_seq_idx(*pos - 1) : SEQ_START_TOKEN;
1857 static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1859 ++*pos;
1860 return (v == SEQ_START_TOKEN)
1861 ? sk_head(&packet_sklist)
1862 : sk_next((struct sock*)v) ;
1865 static void packet_seq_stop(struct seq_file *seq, void *v)
1867 read_unlock(&packet_sklist_lock);
1870 static int packet_seq_show(struct seq_file *seq, void *v)
1872 if (v == SEQ_START_TOKEN)
1873 seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
1874 else {
1875 struct sock *s = v;
1876 const struct packet_sock *po = pkt_sk(s);
1878 seq_printf(seq,
1879 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
1881 atomic_read(&s->sk_refcnt),
1882 s->sk_type,
1883 ntohs(po->num),
1884 po->ifindex,
1885 po->running,
1886 atomic_read(&s->sk_rmem_alloc),
1887 sock_i_uid(s),
1888 sock_i_ino(s) );
1891 return 0;
1894 static struct seq_operations packet_seq_ops = {
1895 .start = packet_seq_start,
1896 .next = packet_seq_next,
1897 .stop = packet_seq_stop,
1898 .show = packet_seq_show,
1901 static int packet_seq_open(struct inode *inode, struct file *file)
1903 return seq_open(file, &packet_seq_ops);
1906 static struct file_operations packet_seq_fops = {
1907 .owner = THIS_MODULE,
1908 .open = packet_seq_open,
1909 .read = seq_read,
1910 .llseek = seq_lseek,
1911 .release = seq_release,
1914 #endif
1916 static void __exit packet_exit(void)
1918 proc_net_remove("packet");
1919 unregister_netdevice_notifier(&packet_netdev_notifier);
1920 sock_unregister(PF_PACKET);
1921 proto_unregister(&packet_proto);
1924 static int __init packet_init(void)
1926 int rc = proto_register(&packet_proto, 0);
1928 if (rc != 0)
1929 goto out;
1931 sock_register(&packet_family_ops);
1932 register_netdevice_notifier(&packet_netdev_notifier);
1933 proc_net_fops_create("packet", 0, &packet_seq_fops);
1934 out:
1935 return rc;
1938 module_init(packet_init);
1939 module_exit(packet_exit);
1940 MODULE_LICENSE("GPL");
1941 MODULE_ALIAS_NETPROTO(PF_PACKET);