/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Version:	$Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/inet_common.h>
/*
   - If a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are not.
   - A packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

On receive:

Incoming, dev->hard_header != NULL
   mac_header -> ll header

Outgoing, dev->hard_header != NULL
   mac_header -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It very likely points to the ll
		 header. PPP does this, which is wrong, because it
		 introduces asymmetry between the rx and tx paths.

Outgoing, dev->hard_header == NULL
   mac_header -> data. The ll header is still not built!

If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

On transmit:

dev->hard_header != NULL
   mac_header -> ll header

dev->hard_header == NULL (the ll header is added by the device, we cannot control it)

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
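/*
 * For orientation, a minimal userspace sketch of the two addressing styles
 * this file serves (illustrative only, not part of the kernel build; it
 * assumes the usual userspace headers):
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	// SOCK_RAW: frames are passed to/from the caller with the ll
 *	// header in place, matching the "push it back" rule above.
 *	int raw_fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *	// SOCK_DGRAM: the ll header is removed on receive and built by
 *	// the device on transmit, so the caller never sees it.
 *	int dgram_fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 */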
/* List of all packet sockets. */
static HLIST_HEAD(packet_sklist);
static DEFINE_RWLOCK(packet_sklist_lock);

static atomic_t packet_socks_nr;
/* Private packet socket structures. */

struct packet_mclist
{
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max
{
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};
#ifdef CONFIG_PACKET_MMAP
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing);
#endif

static void packet_flush_mclist(struct sock *sk);
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct tpacket_stats	stats;
#ifdef CONFIG_PACKET_MMAP
	char			**pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;
	int			copy_thresh;
#endif
	struct packet_type	prot_hook;
	spinlock_t		bind_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
#ifdef CONFIG_PACKET_MMAP
	atomic_t		mapped;
	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;
#endif
};
struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
#ifdef CONFIG_PACKET_MMAP

static inline struct tpacket_hdr *packet_lookup_frame(struct packet_sock *po, unsigned int position)
{
	unsigned int pg_vec_pos, frame_offset;

	pg_vec_pos = position / po->frames_per_block;
	frame_offset = position % po->frames_per_block;

	return (struct tpacket_hdr *)(po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size));
}
#endif
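/*
 * The same index arithmetic, seen from userspace: frame i of a mapped RX
 * ring lives in block i / frames_per_block at offset
 * (i % frames_per_block) * tp_frame_size. A hedged sketch (ring and req
 * are assumed to come from the caller's PACKET_RX_RING/mmap setup):
 *
 *	struct tpacket_hdr *frame_at(void *ring, struct tpacket_req *req,
 *				     unsigned int i)
 *	{
 *		unsigned int fpb = req->tp_block_size / req->tp_frame_size;
 *
 *		return (struct tpacket_hdr *)((char *)ring +
 *			(i / fpb) * req->tp_block_size +
 *			(i % fpb) * req->tp_frame_size);
 *	}
 *
 * This only matches because mmap() lays the blocks out contiguously; in
 * the kernel the blocks are separate allocations held in po->pg_vec.
 */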
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
static void packet_sock_destruct(struct sock *sk)
{
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	atomic_dec(&packet_socks_nr);
#ifdef PACKET_REFCNT_DEBUG
	printk(KERN_DEBUG "PACKET socket %p is free, %d are alive\n", sk, atomic_read(&packet_socks_nr));
#endif
}
static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have the ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a no-op.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto oom;

	/* drop any routing info */
	dst_release(skb->dst);
	skb->dst = NULL;

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name(saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 *	You may not queue a frame bigger than the mtu. This is the lowest level
	 *	raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len)
		goto out_unlock;

	err = -ENOBUFS;
	skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);

	/*
	 *	If the write buffer is full, then tough. At this level the user gets to
	 *	deal with the problem - do your own algorithmic backoffs. That's far
	 *	more flexible.
	 */

	if (skb == NULL)
		goto out_unlock;

	/* FIXME: Save some space for broken drivers that write a
	 * hard header at transmission time by themselves. PPP is the
	 * notable one here. This should really be fixed at the driver level.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	/* Try to align data part correctly */
	if (dev->hard_header) {
		skb->data -= dev->hard_header_len;
		skb->tail -= dev->hard_header_len;
		if (len < dev->hard_header_len)
			skb_reset_network_header(skb);
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;
	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);
	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
	return err;
}
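/*
 * A hedged userspace sketch of driving the path above through the obsolete
 * SOCK_PACKET interface ("eth0" and the frame contents are placeholders;
 * the frame must be complete, ll header included):
 *
 *	struct sockaddr_pkt spkt;
 *	unsigned char frame[ETH_ZLEN];
 *
 *	memset(&spkt, 0, sizeof(spkt));
 *	spkt.spkt_family = AF_PACKET;
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_802_3);
 *	sendto(fd, frame, sizeof(frame), 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */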
static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock_bh();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = sk_run_filter(skb, filter->insns, filter->len);
	rcu_read_unlock_bh();

	return res;
}
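/*
 * The sk_filter consulted above is attached from userspace with
 * SO_ATTACH_FILTER. A minimal sketch: the single-instruction program
 * below accepts every packet, up to 65535 bytes each:
 *
 *	struct sock_filter ins[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 65535 },
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = ins };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */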
/*
   This function makes lazy skb cloning in hope that most of packets
   are discarded by BPF.

   Note tricky part: we DO mangle shared skb! skb->data, skb->len
   and skb->cb are mangled. It works because (and until) packets
   falling here are owned by the current CPU. Output packets are cloned
   by dev_queue_xmit_nit(), input packets are processed by net_bh
   sequentially, so that if we return skb to the original state on exit,
   we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	skb->dev = dev;

	if (dev->hard_header) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides the details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST)
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	dst_release(skb->dst);
	skb->dst = NULL;

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_PACKET_MMAP
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (dev->hard_header) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
	} else {
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h = packet_lookup_frame(po, po->head);

	if (h->tp_status)
		goto ring_is_full;
	po->head = po->head != po->frame_max ? po->head+1 : 0;
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, (u8 *)h + macoff, snaplen);

	h->tp_len = skb->len;
	h->tp_snaplen = snaplen;
	h->tp_mac = macoff;
	h->tp_net = netoff;
	if (skb->tstamp.tv64 == 0) {
		__net_timestamp(skb);
		sock_enable_timestamp(sk);
	}
	tv = ktime_to_timeval(skb->tstamp);
	h->tp_sec = tv.tv_sec;
	h->tp_usec = tv.tv_usec;

	sll = (struct sockaddr_ll *)((u8 *)h + TPACKET_ALIGN(sizeof(*h)));
	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST)
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	h->tp_status = status;
	smp_mb();

	{
		struct page *p_start, *p_end;
		u8 *h_end = (u8 *)h + macoff + snaplen - 1;

		p_start = virt_to_page(h);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	if (copy_skb)
		kfree_skb(copy_skb);
	goto drop_n_restore;
}
#endif
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	unsigned char *addr;
	int ifindex, err, reserve = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		struct packet_sock *po = pkt_sk(sk);

		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}

	dev = dev_get_by_index(ifindex);
	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	err = -EMSGSIZE;
	if (len > dev->mtu + reserve)
		goto out_unlock;

	skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	if (dev->hard_header) {
		int res;
		err = -EINVAL;
		res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
		if (sock->type != SOCK_DGRAM) {
			skb_reset_tail_pointer(skb);
			skb->len = 0;
		} else if (res < 0)
			goto out_free;
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;

	/*
	 *	Now send it
	 */

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);
	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}
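/*
 * A hedged userspace counterpart of the send path above for a SOCK_DGRAM
 * packet socket (ifindex, dest_mac and the payload are placeholders the
 * caller must supply):
 *
 *	struct sockaddr_ll dst;
 *	unsigned char payload[64];
 *
 *	memset(&dst, 0, sizeof(dst));
 *	dst.sll_family	 = AF_PACKET;
 *	dst.sll_ifindex	 = ifindex;		// e.g. from if_nametoindex()
 *	dst.sll_protocol = htons(ETH_P_IP);
 *	dst.sll_halen	 = ETH_ALEN;
 *	memcpy(dst.sll_addr, dest_mac, ETH_ALEN);
 *	sendto(fd, payload, sizeof(payload), 0,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 *
 * With SOCK_DGRAM the kernel builds the ll header via dev->hard_header();
 * with SOCK_RAW the payload must already start with it.
 */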
/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;

	if (!sk)
		return 0;

	po = pkt_sk(sk);

	write_lock_bh(&packet_sklist_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&packet_sklist_lock);

	/*
	 *	Unhook packet receive handler.
	 */

	if (po->running) {
		/*
		 *	Remove the protocol hook
		 */
		dev_remove_pack(&po->prot_hook);
		po->running = 0;
		po->num = 0;
		__sock_put(sk);
	}

	packet_flush_mclist(sk);

#ifdef CONFIG_PACKET_MMAP
	if (po->pg_vec) {
		struct tpacket_req req;
		memset(&req, 0, sizeof(req));
		packet_set_ring(sk, &req, 1);
	}
#endif

	/*
	 *	Now the socket is dead. No more input will appear.
	 */

	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);

	sock_put(sk);
	return 0;
}
/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);
	/*
	 *	Detach an existing hook if present.
	 */

	lock_sock(sk);

	spin_lock(&po->bind_lock);
	if (po->running) {
		__sock_put(sk);
		po->running = 0;
		po->num = 0;
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	}

	po->num = protocol;
	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (dev) {
		if (dev->flags & IFF_UP) {
			dev_add_pack(&po->prot_hook);
			sock_hold(sk);
			po->running = 1;
		} else {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	} else {
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}
/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(name);
	if (dev) {
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
		dev_put(dev);
	}
	return err;
}
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
	if (dev)
		dev_put(dev);

out:
	return err;
}
static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};
/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(PF_PACKET, GFP_KERNEL, &packet_proto, 1);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	atomic_inc(&packet_socks_nr);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	write_lock_bh(&packet_sklist_lock);
	sk_add_node(sk, &packet_sklist);
	write_unlock_bh(&packet_sklist_lock);
	return 0;
out:
	return err;
}
/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking, we don't need to see or worry about
	 *	blocking retries.
	 */

	if (skb == NULL)
		goto out;

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);

		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = (flags & MSG_TRUNC) ? skb->len : copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
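/*
 * Reading the PACKET_AUXDATA control message emitted above, roughly (a
 * userspace sketch; msg is the struct msghdr passed to recvmsg() with a
 * large enough msg_control buffer):
 *
 *	struct cmsghdr *cmsg;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *	     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		struct tpacket_auxdata *aux;
 *
 *		if (cmsg->cmsg_level != SOL_PACKET ||
 *		    cmsg->cmsg_type != PACKET_AUXDATA)
 *			continue;
 *		aux = (struct tpacket_auxdata *)CMSG_DATA(cmsg);
 *		// aux->tp_len is the original frame length,
 *		// aux->tp_snaplen how much was actually delivered.
 *	}
 */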
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	dev = dev_get_by_index(pkt_sk(sk)->ifindex);
	if (dev) {
		strlcpy(uaddr->sa_data, dev->name, 15);
		dev_put(dev);
	} else
		memset(uaddr->sa_data, 0, 14);
	*uaddr_len = sizeof(*uaddr);

	return 0;
}
static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	dev = dev_get_by_index(po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
		dev_put(dev);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}
static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (what > 0)
			dev_mc_add(dev, i->addr, i->alen, 0);
		else
			dev_mc_delete(dev, i->addr, i->alen, 0);
		break;
	case PACKET_MR_PROMISC:
		dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		dev_set_allmulti(dev, what);
		break;
	default:
		break;
	}
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	packet_dev_mc(dev, i, +1);

done:
	rtnl_unlock();
	return err;
}
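/*
 * The matching userspace call for the code above, e.g. to put the
 * interface into promiscuous mode via the refcounted API (a sketch;
 * ifindex is a placeholder):
 *
 *	struct packet_mreq mreq;
 *
 *	memset(&mreq, 0, sizeof(mreq));
 *	mreq.mr_ifindex = ifindex;
 *	mreq.mr_type	= PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mreq, sizeof(mreq));
 */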
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = dev_get_by_index(ml->ifindex);
				if (dev) {
					packet_dev_mc(dev, ml, -1);
					dev_put(dev);
				}
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}
static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		if ((dev = dev_get_by_index(ml->ifindex)) != NULL) {
			packet_dev_mc(dev, ml, -1);
			dev_put(dev);
		}
		kfree(ml);
	}
	rtnl_unlock();
}
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

#ifdef CONFIG_PACKET_MMAP
	case PACKET_RX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
#endif
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	default:
		return -ENOPROTOOPT;
	}
}
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data;
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

		data = &st;
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;

		data = &val;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;

		data = &val;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
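/*
 * PACKET_STATISTICS as seen from userspace; note the kernel zeroes the
 * counters on every read, so the values are deltas, and tp_packets
 * includes tp_drops (a sketch):
 *
 *	struct tpacket_stats st;
 *	socklen_t slen = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &slen) == 0)
 *		printf("%u packets, %u drops\n", st.tp_packets, st.tp_drops);
 */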
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = data;

	read_lock(&packet_sklist_lock);
	sk_for_each(sk, node, &packet_sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */

		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__dev_remove_pack(&po->prot_hook);
					__sock_put(sk);
					po->running = 0;
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			spin_lock(&po->bind_lock);
			if (dev->ifindex == po->ifindex && po->num &&
			    !po->running) {
				dev_add_pack(&po->prot_hook);
				sock_hold(sk);
				po->running = 1;
			}
			spin_unlock(&po->bind_lock);
			break;
		}
	}
	read_unlock(&packet_sklist_lock);
	return NOTIFY_DONE;
}
static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);
	case SIOCGSTAMPNS:
		return sock_get_timestampns(sk, (struct timespec __user *)arg);

#ifdef CONFIG_INET
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
		return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif

	default:
		return -ENOIOCTLCMD;
	}
	return 0;
}
#ifndef CONFIG_PACKET_MMAP
#define packet_mmap sock_no_mmap
#define packet_poll datagram_poll
#else

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->pg_vec) {
		unsigned last = po->head ? po->head - 1 : po->frame_max;
		struct tpacket_hdr *h;

		h = packet_lookup_frame(po, last);

		if (h->tp_status)
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return mask;
}
/* Dirty? Well, I still did not learn a better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};
static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
{
	return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
}

static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i]))
			free_pages((unsigned long) pg_vec[i], order);
	}
	kfree(pg_vec);
}

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	return (char *) __get_free_pages(GFP_KERNEL | __GFP_COMP | __GFP_ZERO,
					 order);
}

static char **alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	char **pg_vec;
	int i;

	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i]))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing)
{
	char **pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	__be16 num;
	int err = 0;

	if (req->tp_block_nr) {
		int i;

		/* Sanity tests and some calculations */

		if (unlikely(po->pg_vec))
			return -EBUSY;

		if (unlikely((int)req->tp_block_size <= 0))
			return -EINVAL;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			return -EINVAL;
		if (unlikely(req->tp_frame_size < TPACKET_HDRLEN))
			return -EINVAL;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			return -EINVAL;

		po->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(po->frames_per_block <= 0))
			return -EINVAL;
		if (unlikely((po->frames_per_block * req->tp_block_nr) !=
			     req->tp_frame_nr))
			return -EINVAL;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;

		for (i = 0; i < req->tp_block_nr; i++) {
			char *ptr = pg_vec[i];
			struct tpacket_hdr *header;
			int k;

			for (k = 0; k < po->frames_per_block; k++) {
				header = (struct tpacket_hdr *) ptr;
				header->tp_status = TP_STATUS_KERNEL;
				ptr += req->tp_frame_size;
			}
		}
	} else {
		if (unlikely(req->tp_frame_nr))
			return -EINVAL;
	}

	lock_sock(sk);

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		__dev_remove_pack(&po->prot_hook);
		po->num = 0;
		po->running = 0;
		__sock_put(sk);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })

		spin_lock_bh(&sk->sk_receive_queue.lock);
		pg_vec = XC(po->pg_vec, pg_vec);
		po->frame_max = (req->tp_frame_nr - 1);
		po->head = 0;
		po->frame_size = req->tp_frame_size;
		spin_unlock_bh(&sk->sk_receive_queue.lock);

		order = XC(po->pg_vec_order, order);
		req->tp_block_nr = XC(po->pg_vec_len, req->tp_block_nr);

		po->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = po->pg_vec ? tpacket_rcv : packet_rcv;
		skb_queue_purge(&sk->sk_receive_queue);
#undef XC
		if (atomic_read(&po->mapped))
			printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n", atomic_read(&po->mapped));
	}

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		sock_hold(sk);
		po->running = 1;
		po->num = num;
		dev_add_pack(&po->prot_hook);
	}
	spin_unlock(&po->bind_lock);

	release_sock(sk);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	size = vma->vm_end - vma->vm_start;

	lock_sock(sk);
	if (po->pg_vec == NULL)
		goto out;
	if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
		goto out;

	start = vma->vm_start;
	for (i = 0; i < po->pg_vec_len; i++) {
		struct page *page = virt_to_page(po->pg_vec[i]);
		int pg_num;

		for (pg_num = 0; pg_num < po->pg_vec_pages; pg_num++, page++) {
			err = vm_insert_page(vma, start, page);
			if (unlikely(err))
				goto out;
			start += PAGE_SIZE;
		}
	}
	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	release_sock(sk);
	return err;
}
#endif
static const struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};
static struct net_proto_family packet_family_ops = {
	.family =	PF_PACKET,
	.create =	packet_create,
	.owner	=	THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};
#ifdef CONFIG_PROC_FS
static inline struct sock *packet_seq_idx(loff_t off)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &packet_sklist) {
		if (!off--)
			return s;
	}
	return NULL;
}

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&packet_sklist_lock);
	return *pos ? packet_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return (v == SEQ_START_TOKEN)
		? sk_head(&packet_sklist)
		: sk_next((struct sock *)v);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&packet_sklist_lock);
}
static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = v;
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%p %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s));
	}

	return 0;
}
static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &packet_seq_ops);
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

#endif
static void __exit packet_exit(void)
{
	proc_net_remove("packet");
	unregister_netdevice_notifier(&packet_netdev_notifier);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
	proc_net_fops_create("packet", 0, &packet_seq_fops);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);