/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Version:	$Id: af_packet.c,v 1.61 2002/02/08 03:57:19 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kmod.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <net/inet_common.h>
#define CONFIG_SOCK_PACKET	1
/*
   Proposed replacement for SIOC{ADD,DEL}MULTI and
   IFF_PROMISC, IFF_ALLMULTI flags.

   It is more expensive, but I believe it is the really correct
   solution: reentrant, safe and fault tolerant.

   IFF_PROMISC/IFF_ALLMULTI/SIOC{ADD/DEL}MULTI are faked by keeping
   reference count and global flag, so that the real status is
   (gflag|(count != 0)), so that we can use the obsolete faulty
   interface without harming clever users.
 */
#define CONFIG_PACKET_MULTICAST	1
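/*
 * Illustrative userspace sketch (not part of this file): the refcounted
 * interface described above is driven via setsockopt() on a packet
 * socket, using the PACKET_* constants from <linux/if_packet.h>.
 * "fd" and "ifindex" are assumed to exist in the caller.
 *
 *	struct packet_mreq mreq;
 *	memset(&mreq, 0, sizeof(mreq));
 *	mreq.mr_ifindex = ifindex;
 *	mreq.mr_type = PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *	...
 *	setsockopt(fd, SOL_PACKET, PACKET_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
 */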
/*
   Assumptions:
   - if the device has no dev->hard_header routine, it adds and removes
     the ll header inside itself. In this case the ll header is invisible
     outside of the device, but higher levels still should reserve
     dev->hard_header_len. Some devices are clever enough to reallocate
     the skb when the header will not fit into the reserved space
     (tunnels), others are silly (PPP).
   - a packet socket receives packets with the ll header pulled,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac.raw -> ll header
   data    -> data

Outgoing, dev->hard_header != NULL
   mac.raw -> ll header
   data    -> ll header

Incoming, dev->hard_header == NULL
   mac.raw -> UNKNOWN position. It is very likely that it points to the
	      ll header. PPP makes it so, which is wrong, because it
	      introduces asymmetry between the rx and tx paths.
   data    -> data

Outgoing, dev->hard_header == NULL
   mac.raw -> data. The ll header is still not built!
   data    -> data

Resume:
   If dev->hard_header == NULL we are unlikely to restore a sensible
   ll header.

On transmit:
------------

dev->hard_header != NULL
   mac.raw -> ll header
   data    -> ll header

dev->hard_header == NULL (the ll header is added by the device,
			  we cannot control it)
   mac.raw -> data
   data    -> data

   We should set nh.raw on output to the correct position,
   the packet classifier depends on it.
 */
/* List of all packet sockets. */
static HLIST_HEAD(packet_sklist);
static DEFINE_RWLOCK(packet_sklist_lock);

static atomic_t packet_socks_nr;
/* Private packet socket structures. */

#ifdef CONFIG_PACKET_MULTICAST
struct packet_mclist
{
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[8];
};
#endif
#ifdef CONFIG_PACKET_MMAP
static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing);
#endif

static void packet_flush_mclist(struct sock *sk);
/* struct sock has to be the first member of packet_sock */
struct packet_sock {
	struct sock		sk;
	struct tpacket_stats	stats;
#ifdef CONFIG_PACKET_MMAP
	char *			*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;
	int			copy_thresh;
#endif
	struct packet_type	prot_hook;
	spinlock_t		bind_lock;
	char			running;	/* prot_hook is attached */
	int			ifindex;	/* bound device */
	unsigned short		num;
#ifdef CONFIG_PACKET_MULTICAST
	struct packet_mclist	*mclist;
#endif
#ifdef CONFIG_PACKET_MMAP
	atomic_t		mapped;
	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;
#endif
};
#ifdef CONFIG_PACKET_MMAP

static inline char *packet_lookup_frame(struct packet_sock *po, unsigned int position)
{
	unsigned int pg_vec_pos, frame_offset;
	char *frame;

	pg_vec_pos = position / po->frames_per_block;
	frame_offset = position % po->frames_per_block;

	frame = po->pg_vec[pg_vec_pos] + (frame_offset * po->frame_size);

	return frame;
}
#endif
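/*
 * Worked example of the lookup arithmetic above (illustrative values,
 * not defaults): with tp_block_size = 8192 and tp_frame_size = 2048,
 * frames_per_block is 4, so frame 9 lives in block 9 / 4 = 2 at byte
 * offset (9 % 4) * 2048 = 2048 within that block.
 */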
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
static void packet_sock_destruct(struct sock *sk)
{
	BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	atomic_dec(&packet_socks_nr);
#ifdef PACKET_REFCNT_DEBUG
	printk(KERN_DEBUG "PACKET socket %p is free, %d are alive\n",
	       sk, atomic_read(&packet_socks_nr));
#endif
}
static struct proto_ops packet_ops;

#ifdef CONFIG_SOCK_PACKET
static struct proto_ops packet_ops_spkt;
#endif
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb->mac.raw
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto oom;

	/* drop any routing info */
	dst_release(skb->dst);
	skb->dst = NULL;

	spkt = (struct sockaddr_pkt *)skb->cb;

	skb_push(skb, skb->data - skb->mac.raw);

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned short proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name(saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	/*
	 *	You may not queue a frame bigger than the mtu. This is the lowest level
	 *	raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len)
		goto out_unlock;

	err = -ENOBUFS;
	skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);

	/*
	 *	If the write buffer is full, then tough. At this level the user gets to
	 *	deal with the problem - do your own algorithmic backoffs. That's far
	 *	more flexible.
	 */

	if (skb == NULL)
		goto out_unlock;

	/* FIXME: Save some space for broken drivers that write a
	 * hard header at transmission time by themselves. PPP is the
	 * notable one here. This should really be fixed at the driver level.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb->nh.raw = skb->data;

	/* Try to align data part correctly */
	if (dev->hard_header) {
		skb->data -= dev->hard_header_len;
		skb->tail -= dev->hard_header_len;
		if (len < dev->hard_header_len)
			skb->nh.raw = skb->data;
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	if (err)
		goto out_free;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_free;

	/*
	 *	Now send it
	 */

	dev_queue_xmit(skb);
	dev_put(dev);
	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
	return err;
}
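/*
 * Illustrative userspace sketch (not part of this file): sending a raw
 * frame through the obsolete SOCK_PACKET interface handled above.
 * "fd", "frame" and "framelen" are assumed; the frame must be complete,
 * including the link-layer header.
 *
 *	struct sockaddr_pkt spkt;
 *	memset(&spkt, 0, sizeof(spkt));
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, framelen, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */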
static inline unsigned run_filter(struct sk_buff *skb, struct sock *sk, unsigned res)
{
	struct sk_filter *filter;

	bh_lock_sock(sk);
	filter = sk->sk_filter;
	/*
	 * Our caller already checked that filter != NULL but we need to
	 * verify that under bh_lock_sock() to be safe
	 */
	if (likely(filter != NULL))
		res = sk_run_filter(skb, filter->insns, filter->len);
	bh_unlock_sock(sk);

	return res;
}
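/*
 * Illustrative userspace sketch (not part of this file): the filter run
 * above is installed through the generic SO_ATTACH_FILTER socket option.
 * This one-instruction program accepts every packet but truncates the
 * delivered snapshot to 64 bytes; "fd" is assumed.
 *
 *	struct sock_filter insns[] = {
 *		{ 0x06, 0, 0, 64 },	(BPF_RET|BPF_K: return 64)
 *	};
 *	struct sock_fprog prog = { 1, insns };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */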
/*
   This function makes lazy skb cloning in hope that most of packets
   are discarded by BPF.

   Note tricky part: we DO mangle shared skb! skb->data, skb->len
   and skb->cb are mangled. It works because (and until) packets
   falling here are owned by the current CPU. Output packets are cloned
   by dev_queue_xmit_nit(), input packets are processed by net_bh
   sequentially, so that if we return the skb to its original state
   on exit, we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned snaplen;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	skb->dev = dev;

	if (dev->hard_header) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb->mac.raw);
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb->nh.raw - skb->data);
		}
	}

	snaplen = skb->len;

	if (sk->sk_filter) {
		unsigned res = run_filter(skb, sk, snaplen);
		if (res == 0)
			goto drop_n_restore;
		if (snaplen > res)
			snaplen = res;
	}

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	sll = (struct sockaddr_ll *)skb->cb;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	sll->sll_ifindex = dev->ifindex;
	sll->sll_halen = 0;

	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	dst_release(skb->dst);
	skb->dst = NULL;

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_PACKET_MMAP
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned snaplen;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff;
	struct sk_buff *copy_skb = NULL;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (dev->hard_header) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb->mac.raw);
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb->nh.raw - skb->data);
			if (skb->ip_summed == CHECKSUM_HW)
				status |= TP_STATUS_CSUMNOTREADY;
		}
	}

	snaplen = skb->len;

	if (sk->sk_filter) {
		unsigned res = run_filter(skb, sk, snaplen);
		if (res == 0)
			goto drop_n_restore;
		if (snaplen > res)
			snaplen = res;
	}

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
	} else {
		unsigned maclen = skb->nh.raw - skb->data;
		netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}
	if (snaplen > skb->len - skb->data_len)
		snaplen = skb->len - skb->data_len;

	spin_lock(&sk->sk_receive_queue.lock);
	h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);
	if (h->tp_status)
		goto ring_is_full;
	po->head = po->head != po->frame_max ? po->head + 1 : 0;
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	memcpy((u8 *)h + macoff, skb->data, snaplen);

	h->tp_len = skb->len;
	h->tp_snaplen = snaplen;
	h->tp_mac = macoff;
	h->tp_net = netoff;
	if (skb->stamp.tv_sec == 0) {
		do_gettimeofday(&skb->stamp);
		sock_enable_timestamp(sk);
	}
	h->tp_sec = skb->stamp.tv_sec;
	h->tp_usec = skb->stamp.tv_usec;

	sll = (struct sockaddr_ll *)((u8 *)h + TPACKET_ALIGN(sizeof(*h)));
	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	sll->sll_ifindex = dev->ifindex;

	h->tp_status = status;

	{
		struct page *p_start, *p_end;
		u8 *h_end = (u8 *)h + macoff + snaplen - 1;

		p_start = virt_to_page(h);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	if (copy_skb)
		kfree_skb(copy_skb);
	goto drop_n_restore;
}
#endif
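/*
 * Illustrative userspace sketch (not part of this file): consuming
 * frames written by tpacket_rcv() above. "ring" is assumed to be the
 * mmap()ed buffer, "req" the tpacket_req used to create it, "i" the
 * reader's frame counter, and fpb = req.tp_block_size / req.tp_frame_size.
 * Handshake: the kernel hands a frame over with TP_STATUS_USER set, the
 * reader returns it by storing TP_STATUS_KERNEL.
 *
 *	struct tpacket_hdr *h = (struct tpacket_hdr *)
 *		(ring + (i / fpb) * req.tp_block_size
 *		      + (i % fpb) * req.tp_frame_size);
 *	if (h->tp_status & TP_STATUS_USER) {
 *		process((char *)h + h->tp_mac, h->tp_snaplen);
 *		h->tp_status = TP_STATUS_KERNEL;
 *		i = (i + 1) % req.tp_frame_nr;
 *	}
 *
 * "process" is a hypothetical consumer supplied by the caller.
 */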
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned short proto;
	unsigned char *addr;
	int ifindex, err, reserve = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		struct packet_sock *po = pkt_sk(sk);

		ifindex	= po->ifindex;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		ifindex	= saddr->sll_ifindex;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
	}

	dev = dev_get_by_index(ifindex);
	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -EMSGSIZE;
	if (len > dev->mtu + reserve)
		goto out_unlock;

	skb = sock_alloc_send_skb(sk, len + LL_RESERVED_SPACE(dev),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb->nh.raw = skb->data;

	if (dev->hard_header) {
		int res;
		err = -EINVAL;
		res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
		if (sock->type != SOCK_DGRAM) {
			skb->tail = skb->data;
			skb->len = 0;
		} else if (res < 0)
			goto out_free;
	}

	/* Returns -EFAULT on error */
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err)
		goto out_free;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_free;

	/*
	 *	Now send it
	 */

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	dev_put(dev);
	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev)
		dev_put(dev);
out:
	return err;
}
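/*
 * Illustrative userspace sketch (not part of this file): transmitting
 * through the path above with an explicit address. "fd", "ifindex",
 * "frame" and "framelen" are assumed; on a SOCK_RAW socket the frame
 * must start with the link-layer header.
 *
 *	struct sockaddr_ll sll;
 *	memset(&sll, 0, sizeof(sll));
 *	sll.sll_family = AF_PACKET;
 *	sll.sll_ifindex = ifindex;
 *	sll.sll_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, framelen, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */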
/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;

	if (!sk)
		return 0;

	po = pkt_sk(sk);

	write_lock_bh(&packet_sklist_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&packet_sklist_lock);

	/*
	 *	Unhook packet receive handler.
	 */

	if (po->running) {
		/*
		 *	Remove the protocol hook
		 */
		dev_remove_pack(&po->prot_hook);
		po->running = 0;
		po->num = 0;
		__sock_put(sk);
	}

#ifdef CONFIG_PACKET_MULTICAST
	packet_flush_mclist(sk);
#endif

#ifdef CONFIG_PACKET_MMAP
	if (po->pg_vec) {
		struct tpacket_req req;
		memset(&req, 0, sizeof(req));
		packet_set_ring(sk, &req, 1);
	}
#endif

	/*
	 *	Now the socket is dead. No more input will appear.
	 */

	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);

	sock_put(sk);
	return 0;
}
/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, int protocol)
{
	struct packet_sock *po = pkt_sk(sk);
	/*
	 *	Detach an existing hook if present.
	 */

	lock_sock(sk);

	spin_lock(&po->bind_lock);
	if (po->running) {
		__sock_put(sk);
		po->running = 0;
		po->num = 0;
		spin_unlock(&po->bind_lock);
		dev_remove_pack(&po->prot_hook);
		spin_lock(&po->bind_lock);
	}

	po->num = protocol;
	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (dev) {
		if (dev->flags & IFF_UP) {
			dev_add_pack(&po->prot_hook);
			sock_hold(sk);
			po->running = 1;
		} else {
			sk->sk_err = ENETDOWN;
			if (!sock_flag(sk, SOCK_DEAD))
				sk->sk_error_report(sk);
		}
	} else {
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}
/*
 *	Bind a packet socket to a device
 */

#ifdef CONFIG_SOCK_PACKET
static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(name);
	if (dev) {
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
		dev_put(dev);
	}
	return err;
}
#endif
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
	if (dev)
		dev_put(dev);

out:
	return err;
}
static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};
/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct packet_sock *po;
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW
#ifdef CONFIG_SOCK_PACKET
	    && sock->type != SOCK_PACKET
#endif
	    )
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(PF_PACKET, GFP_KERNEL, &packet_proto, 1);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
#ifdef CONFIG_SOCK_PACKET
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;
#endif
	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = protocol;

	sk->sk_destruct = packet_sock_destruct;
	atomic_inc(&packet_socks_nr);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	po->prot_hook.func = packet_rcv;
#ifdef CONFIG_SOCK_PACKET
	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;
#endif
	po->prot_hook.af_packet_priv = sk;

	if (protocol) {
		po->prot_hook.type = protocol;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	write_lock_bh(&packet_sklist_lock);
	sk_add_node(sk, &packet_sklist);
	write_unlock_bh(&packet_sklist_lock);
	return 0;
out:
	return err;
}
/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
		goto out;

	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */

	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sizeof(struct sockaddr_ll);

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN, if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.
	 */

	if (skb == NULL)
		goto out;

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_timestamp(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = (flags & MSG_TRUNC) ? skb->len : copied;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
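/*
 * Illustrative userspace sketch (not part of this file): the address
 * copied out of skb->cb above arrives as a sockaddr_ll carrying the
 * originating interface and packet type. "fd" and "buf" are assumed.
 *
 *	struct sockaddr_ll from;
 *	socklen_t fromlen = sizeof(from);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&from, &fromlen);
 *	(n bytes received on interface from.sll_ifindex;
 *	 from.sll_pkttype is e.g. PACKET_HOST or PACKET_OUTGOING)
 */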
#ifdef CONFIG_SOCK_PACKET
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	dev = dev_get_by_index(pkt_sk(sk)->ifindex);
	if (dev) {
		strlcpy(uaddr->sa_data, dev->name, 15);
		dev_put(dev);
	} else
		memset(uaddr->sa_data, 0, 14);
	*uaddr_len = sizeof(*uaddr);

	return 0;
}
#endif
*sock
, struct sockaddr
*uaddr
,
1140 int *uaddr_len
, int peer
)
1142 struct net_device
*dev
;
1143 struct sock
*sk
= sock
->sk
;
1144 struct packet_sock
*po
= pkt_sk(sk
);
1145 struct sockaddr_ll
*sll
= (struct sockaddr_ll
*)uaddr
;
1150 sll
->sll_family
= AF_PACKET
;
1151 sll
->sll_ifindex
= po
->ifindex
;
1152 sll
->sll_protocol
= po
->num
;
1153 dev
= dev_get_by_index(po
->ifindex
);
1155 sll
->sll_hatype
= dev
->type
;
1156 sll
->sll_halen
= dev
->addr_len
;
1157 memcpy(sll
->sll_addr
, dev
->dev_addr
, dev
->addr_len
);
1160 sll
->sll_hatype
= 0; /* Bad: we have no ARPHRD_UNSPEC */
1163 *uaddr_len
= sizeof(*sll
);
#ifdef CONFIG_PACKET_MULTICAST
static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (what > 0)
			dev_mc_add(dev, i->addr, i->alen, 0);
		else
			dev_mc_delete(dev, i->addr, i->alen, 0);
		break;
	case PACKET_MR_PROMISC:
		dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		dev_set_allmulti(dev, what);
		break;
	default:
		break;
	}
}
static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = (struct packet_mclist *)kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	packet_dev_mc(dev, i, +1);

done:
	rtnl_unlock();
	return err;
}
static int packet_mc_drop(struct sock *sk, struct packet_mreq *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = dev_get_by_index(ml->ifindex);
				if (dev) {
					packet_dev_mc(dev, ml, -1);
					dev_put(dev);
				}
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}
static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		if ((dev = dev_get_by_index(ml->ifindex)) != NULL) {
			packet_dev_mc(dev, ml, -1);
			dev_put(dev);
		}
		kfree(ml);
	}
	rtnl_unlock();
}
#endif
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
#ifdef CONFIG_PACKET_MULTICAST
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq mreq;
		if (optlen < sizeof(mreq))
			return -EINVAL;
		if (copy_from_user(&mreq, optval, sizeof(mreq)))
			return -EFAULT;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}
#endif
#ifdef CONFIG_PACKET_MMAP
	case PACKET_RX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
#endif
	default:
		return -ENOPROTOOPT;
	}
}
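/*
 * Illustrative userspace sketch (not part of this file): a PACKET_RX_RING
 * request satisfying the sanity checks in packet_set_ring(): the block
 * size is a multiple of PAGE_SIZE, the frame size is TPACKET_ALIGNMENT
 * aligned, and frames_per_block * tp_block_nr == tp_frame_nr. "fd" is
 * assumed and the sizes are only examples (4096-byte pages).
 *
 *	struct tpacket_req req;
 *	req.tp_block_size = 4096;	(one page, 2 frames per block)
 *	req.tp_frame_size = 2048;
 *	req.tp_block_nr   = 32;
 *	req.tp_frame_nr   = 64;
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */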
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
	{
		struct tpacket_stats st;

		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

		if (copy_to_user(optval, &st, len))
			return -EFAULT;
		break;
	}
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
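/*
 * Illustrative userspace sketch (not part of this file): reading (and
 * implicitly resetting) the counters returned above. Note that
 * tp_packets already includes tp_drops, as added before copy_to_user().
 * "fd" is assumed.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *	(st.tp_packets = seen, st.tp_drops = dropped since last read)
 */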
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct sock *sk;
	struct hlist_node *node;
	struct net_device *dev = (struct net_device *)data;

	read_lock(&packet_sklist_lock);
	sk_for_each(sk, node, &packet_sklist) {
		struct packet_sock *po = pkt_sk(sk);

		switch (msg) {
		case NETDEV_UNREGISTER:
#ifdef CONFIG_PACKET_MULTICAST
			if (po->mclist)
				packet_dev_mclist(dev, po->mclist, -1);
			/* fallthrough */
#endif
		case NETDEV_DOWN:
			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->running) {
					__dev_remove_pack(&po->prot_hook);
					__sock_put(sk);
					po->running = 0;
					sk->sk_err = ENETDOWN;
					if (!sock_flag(sk, SOCK_DEAD))
						sk->sk_error_report(sk);
				}
				if (msg == NETDEV_UNREGISTER) {
					po->ifindex = -1;
					po->prot_hook.dev = NULL;
				}
				spin_unlock(&po->bind_lock);
			}
			break;
		case NETDEV_UP:
			spin_lock(&po->bind_lock);
			if (dev->ifindex == po->ifindex && po->num &&
			    !po->running) {
				dev_add_pack(&po->prot_hook);
				sock_hold(sk);
				po->running = 1;
			}
			spin_unlock(&po->bind_lock);
			break;
		}
	}
	read_unlock(&packet_sklist_lock);
	return NOTIFY_DONE;
}
static int packet_ioctl(struct socket *sock, unsigned int cmd,
			unsigned long arg)
{
	struct sock *sk = sock->sk;

	switch (cmd) {
	case SIOCOUTQ:
	{
		int amount = atomic_read(&sk->sk_wmem_alloc);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCINQ:
	{
		struct sk_buff *skb;
		int amount = 0;

		spin_lock_bh(&sk->sk_receive_queue.lock);
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		return put_user(amount, (int __user *)arg);
	}
	case SIOCGSTAMP:
		return sock_get_timestamp(sk, (struct timeval __user *)arg);

	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
		return inet_dgram_ops.ioctl(sock, cmd, arg);

	default:
		return dev_ioctl(cmd, (void __user *)arg);
	}
	return 0;
}
#ifndef CONFIG_PACKET_MMAP
#define packet_mmap sock_no_mmap
#define packet_poll datagram_poll
#else

static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->pg_vec) {
		unsigned last = po->head ? po->head - 1 : po->frame_max;
		struct tpacket_hdr *h;

		h = (struct tpacket_hdr *)packet_lookup_frame(po, last);

		if (h->tp_status)
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return mask;
}
/* Dirty? Well, I still did not learn better way to account
 * for user mmaps.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_dentry->d_inode;
	struct socket *sock = SOCKET_I(inode);
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_dentry->d_inode;
	struct socket *sock = SOCKET_I(inode);
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};
static inline struct page *pg_vec_endpage(char *one_pg_vec, unsigned int order)
{
	return virt_to_page(one_pg_vec + (PAGE_SIZE << order) - 1);
}

static void free_pg_vec(char **pg_vec, unsigned order, unsigned len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (pg_vec[i]) {
			struct page *page, *pend;

			pend = pg_vec_endpage(pg_vec[i], order);
			for (page = virt_to_page(pg_vec[i]); page <= pend; page++)
				ClearPageReserved(page);
			free_pages((unsigned long)pg_vec[i], order);
		}
	}
	kfree(pg_vec);
}
*sk
, struct tpacket_req
*req
, int closing
)
1581 char **pg_vec
= NULL
;
1582 struct packet_sock
*po
= pkt_sk(sk
);
1583 int was_running
, num
, order
= 0;
1586 if (req
->tp_block_nr
) {
1589 /* Sanity tests and some calculations */
1594 if ((int)req
->tp_block_size
<= 0)
1596 if (req
->tp_block_size
&(PAGE_SIZE
-1))
1598 if (req
->tp_frame_size
< TPACKET_HDRLEN
)
1600 if (req
->tp_frame_size
&(TPACKET_ALIGNMENT
-1))
1603 po
->frames_per_block
= req
->tp_block_size
/req
->tp_frame_size
;
1604 if (po
->frames_per_block
<= 0)
1606 if (po
->frames_per_block
*req
->tp_block_nr
!= req
->tp_frame_nr
)
1610 /* Allocate page vector */
1611 while ((PAGE_SIZE
<<order
) < req
->tp_block_size
)
1616 pg_vec
= kmalloc(req
->tp_block_nr
*sizeof(char *), GFP_KERNEL
);
1619 memset(pg_vec
, 0, req
->tp_block_nr
*sizeof(char **));
1621 for (i
=0; i
<req
->tp_block_nr
; i
++) {
1622 struct page
*page
, *pend
;
1623 pg_vec
[i
] = (char *)__get_free_pages(GFP_KERNEL
, order
);
1625 goto out_free_pgvec
;
1627 pend
= pg_vec_endpage(pg_vec
[i
], order
);
1628 for (page
= virt_to_page(pg_vec
[i
]); page
<= pend
; page
++)
1629 SetPageReserved(page
);
1631 /* Page vector is allocated */
1634 for (i
=0; i
<req
->tp_block_nr
; i
++) {
1635 char *ptr
= pg_vec
[i
];
1636 struct tpacket_hdr
*header
;
1639 for (k
=0; k
<po
->frames_per_block
; k
++) {
1641 header
= (struct tpacket_hdr
*)ptr
;
1642 header
->tp_status
= TP_STATUS_KERNEL
;
1643 ptr
+= req
->tp_frame_size
;
1648 if (req
->tp_frame_nr
)
1654 /* Detach socket from network */
1655 spin_lock(&po
->bind_lock
);
1656 was_running
= po
->running
;
1659 __dev_remove_pack(&po
->prot_hook
);
1664 spin_unlock(&po
->bind_lock
);
1669 if (closing
|| atomic_read(&po
->mapped
) == 0) {
1671 #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
1673 spin_lock_bh(&sk
->sk_receive_queue
.lock
);
1674 pg_vec
= XC(po
->pg_vec
, pg_vec
);
1675 po
->frame_max
= req
->tp_frame_nr
-1;
1677 po
->frame_size
= req
->tp_frame_size
;
1678 spin_unlock_bh(&sk
->sk_receive_queue
.lock
);
1680 order
= XC(po
->pg_vec_order
, order
);
1681 req
->tp_block_nr
= XC(po
->pg_vec_len
, req
->tp_block_nr
);
1683 po
->pg_vec_pages
= req
->tp_block_size
/PAGE_SIZE
;
1684 po
->prot_hook
.func
= po
->pg_vec
? tpacket_rcv
: packet_rcv
;
1685 skb_queue_purge(&sk
->sk_receive_queue
);
1687 if (atomic_read(&po
->mapped
))
1688 printk(KERN_DEBUG
"packet_mmap: vma is busy: %d\n", atomic_read(&po
->mapped
));
1691 spin_lock(&po
->bind_lock
);
1692 if (was_running
&& !po
->running
) {
1696 dev_add_pack(&po
->prot_hook
);
1698 spin_unlock(&po
->bind_lock
);
1704 free_pg_vec(pg_vec
, order
, req
->tp_block_nr
);
static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	size = vma->vm_end - vma->vm_start;

	lock_sock(sk);
	if (po->pg_vec == NULL)
		goto out;
	if (size != po->pg_vec_len * po->pg_vec_pages * PAGE_SIZE)
		goto out;

	atomic_inc(&po->mapped);
	start = vma->vm_start;
	err = -EAGAIN;
	for (i = 0; i < po->pg_vec_len; i++) {
		if (remap_pfn_range(vma, start,
				    __pa(po->pg_vec[i]) >> PAGE_SHIFT,
				    po->pg_vec_pages * PAGE_SIZE,
				    vma->vm_page_prot))
			goto out;
		start += po->pg_vec_pages * PAGE_SIZE;
	}
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	release_sock(sk);
	return err;
}
#endif
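/*
 * Illustrative userspace sketch (not part of this file): mapping the
 * ring created by PACKET_RX_RING. The length must equal
 * tp_block_nr * tp_block_size, matching the size check in packet_mmap()
 * above. "fd" and "req" are assumed.
 *
 *	size_t maplen = (size_t)req.tp_block_nr * req.tp_block_size;
 *	char *ring = mmap(NULL, maplen, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */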
#ifdef CONFIG_SOCK_PACKET
static struct proto_ops packet_ops_spkt = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind_spkt,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname_spkt,
	.poll =		datagram_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	packet_sendmsg_spkt,
	.recvmsg =	packet_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};
#endif
static struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};
= {
1795 .family
= PF_PACKET
,
1796 .create
= packet_create
,
1797 .owner
= THIS_MODULE
,
1800 static struct notifier_block packet_netdev_notifier
= {
1801 .notifier_call
=packet_notifier
,
#ifdef CONFIG_PROC_FS
static inline struct sock *packet_seq_idx(loff_t off)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &packet_sklist) {
		if (!off--)
			return s;
	}
	return NULL;
}
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&packet_sklist_lock);
	return *pos ? packet_seq_idx(*pos - 1) : SEQ_START_TOKEN;
}
static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return (v == SEQ_START_TOKEN)
		? sk_head(&packet_sklist)
		: sk_next((struct sock *)v);
}
static void packet_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&packet_sklist_lock);
}
static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
	else {
		struct sock *s = v;
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%p %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   atomic_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   sock_i_uid(s),
			   sock_i_ino(s));
	}

	return 0;
}
static struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};
*inode
, struct file
*file
)
1869 return seq_open(file
, &packet_seq_ops
);
1872 static struct file_operations packet_seq_fops
= {
1873 .owner
= THIS_MODULE
,
1874 .open
= packet_seq_open
,
1876 .llseek
= seq_lseek
,
1877 .release
= seq_release
,
static void __exit packet_exit(void)
{
	proc_net_remove("packet");
	unregister_netdevice_notifier(&packet_netdev_notifier);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}
static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
	proc_net_fops_create("packet", 0, &packet_seq_fops);

out:
	return rc;
}
module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);