/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 */
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <net/inet_common.h>
/*
   Assumptions:
   - If a device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); other ones are not.
   - A packet socket receives packets with the ll header pulled,
     so SOCK_RAW should push it back.

   On receive:

   Incoming, dev->hard_header != NULL
     mac_header -> ll header

   Outgoing, dev->hard_header != NULL
     mac_header -> ll header

   Incoming, dev->hard_header == NULL
     mac_header -> UNKNOWN position. It very likely points to the ll header.
		   PPP does this, which is wrong, because it introduces
		   asymmetry between the rx and tx paths.

   Outgoing, dev->hard_header == NULL
     mac_header -> data. The ll header is still not built!

   If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

   On transmit:

   dev->hard_header != NULL
     mac_header -> ll header

   dev->hard_header == NULL (ll header is added by the device, we cannot control it)

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
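/*
 * Illustrative user-space sketch (an assumption added for clarity, not part
 * of this file): the SOCK_RAW/SOCK_DGRAM distinction described above is what
 * a user sees when opening a packet socket.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *
 * A read() on "raw" returns the frame including the ll (Ethernet) header;
 * on "dgrm" the ll header has already been pulled and the link-level
 * information is reported via struct sockaddr_ll instead.
 */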
/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int			mr_ifindex;
	unsigned short		mr_type;
	unsigned short		mr_alen;
	unsigned char		mr_address[MAX_ADDR_LEN];
};
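/*
 * Illustrative user-space sketch (assumption, not part of this file): the
 * structure above mirrors struct packet_mreq from the uapi; setsockopt()
 * copies the user's packet_mreq into packet_mreq_max so that hardware
 * addresses longer than 8 bytes still fit.
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),	// hypothetical device
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */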
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring);

struct packet_ring_buffer {
	char			**pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	atomic_t		pending;
};

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);

static void packet_flush_mclist(struct sock *sk);
/* struct sock has to be the first member of packet_sock */
struct packet_sock {
	struct sock			sk;
	struct tpacket_stats		stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int				copy_thresh;
	spinlock_t			bind_lock;
	struct mutex			pg_vec_lock;
	unsigned int			running:1,	/* prot_hook is attached*/
					auxdata:1,
					origdev:1,
					has_vnet_hdr:1;
	int				ifindex;	/* bound device		*/
	__be16				num;
	struct packet_mclist		*mclist;
	atomic_t			mapped;
	enum tpacket_versions		tp_version;
	unsigned int			tp_hdrlen;
	unsigned int			tp_reserve;
	unsigned int			tp_loss:1;
	unsigned int			tp_tstamp;
	struct packet_type		prot_hook ____cacheline_aligned_in_smp;
};
struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(virt_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(virt_to_page(&h.h2->tp_status));
		break;
	default:
		pr_err("TPACKET version not supported\n");
	}
}

static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(virt_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(virt_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	default:
		pr_err("TPACKET version not supported\n");
		return 0;
	}
}
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static inline void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}

static inline void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static inline void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head + 1 : 0;
}

static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}
static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);

	sk_refcnt_debug_dec(sk);

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)

	if (!net_eq(dev_net(dev), sock_net(sk)))

	skb = skb_share_check(skb, GFP_ATOMIC);

	/* drop any routing info */

	/* drop conntrack reference */

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)

/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;

	/*
	 *	Get and verify the address.
	 */

	if (msg->msg_namelen < sizeof(struct sockaddr))
	if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
		proto = saddr->spkt_protocol;
	return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	if (!(dev->flags & IFF_UP))

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (len > dev->mtu + dev->hard_header_len)

	size_t reserved = LL_RESERVED_SPACE(dev);
	unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

	skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);

	/* FIXME: Save some space for broken drivers that write a hard
	 * header at transmission time by themselves. PPP is the notable
	 * one here. This should really be fixed at the driver level.
	 */
	skb_reserve(skb, reserved);
	skb_reset_network_header(skb);

	/* Try to align data part correctly */
	skb_reset_network_header(skb);

	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);

	skb->protocol = proto;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
	struct sk_filter *filter;

	filter = rcu_dereference_bh(sk->sk_filter);
	res = sk_run_filter(skb, filter->insns, filter->len);
	rcu_read_unlock_bh();
/*
   This function makes lazy skb cloning in the hope that most packets
   are discarded by BPF.

   Note the tricky part: we DO mangle a shared skb! skb->data, skb->len
   and skb->cb are mangled. It works because (and until) packets
   falling here are owned by the current CPU. Output packets are cloned
   by dev_queue_xmit_nit(), input packets are processed by net_bh
   sequentially, so that if we return the skb to its original state on exit,
   we will not harm anyone.
 */
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)

	sk = pt->af_packet_priv;

	if (!net_eq(dev_net(dev), sock_net(sk)))

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));

	res = run_filter(skb, sk, snaplen);

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

		if (skb_head != skb->data) {
			skb->data = skb_head;

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))

	skb_set_owner_r(skb, sk);

	/* drop conntrack reference */

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);

	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);

	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h1;
	struct tpacket2_hdr *h2;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (skb->pkt_type == PACKET_LOOPBACK)

	sk = pt->af_packet_priv;

	if (!net_eq(dev_net(dev), sock_net(sk)))

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	res = run_filter(skb, sk, snaplen);

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
	} else {
		unsigned maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->rx_ring.frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->rx_ring.frame_size - macoff;
		if ((int)snaplen < 0)
	}

	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_frame(po, &po->rx_ring, TP_STATUS_KERNEL);

	packet_increment_head(&po->rx_ring);
	po->stats.tp_packets++;

	status |= TP_STATUS_COPY;
	__skb_queue_tail(&sk->sk_receive_queue, copy_skb);

	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
		else
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);

		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
		hdrlen = sizeof(*h.h2);

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	__packet_set_status(po, h.raw, status);

	{
		struct page *p_start, *p_end;
		u8 *h_end = h.raw + macoff + snaplen - 1;

		p_start = virt_to_page(h.raw);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
	}

	sk->sk_data_ready(sk, 0);

	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;

	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
	}
}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr)
{
	struct tpacket_hdr *h1;
	struct tpacket2_hdr *h2;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;

	skb->protocol = proto;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
		tp_len = ph.h2->tp_len;
		tp_len = ph.h1->tp_len;

	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);

	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
		if (unlikely(err < 0))
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				     dev->hard_header_len);

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;

	page = virt_to_page(data);
	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceed the number of skb frags(%lu)\n",

		flush_dcache_page(page);

		skb_fill_page_desc(skb,
				   page++, offset, len);

		len = ((to_write > len_max) ? len_max : to_write);
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct net_device *dev;
	int ifindex, err, reserve = 0;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;

	sock = po->sk.sk_socket;

	mutex_lock(&po->pg_vec_lock);

	ifindex = po->ifindex;

	if (msg->msg_namelen < sizeof(struct sockaddr_ll))
	if (msg->msg_namelen < (saddr->sll_halen
				+ offsetof(struct sockaddr_ll,
	ifindex = saddr->sll_ifindex;
	proto = saddr->sll_protocol;
	addr = saddr->sll_addr;

	dev = dev_get_by_index(sock_net(&po->sk), ifindex);

	if (unlikely(dev == NULL))

	reserve = dev->hard_header_len;

	if (unlikely(!(dev->flags & IFF_UP)))

	size_max = po->tx_ring.frame_size
		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

	ph = packet_current_frame(po, &po->tx_ring,
				  TP_STATUS_SEND_REQUEST);

	if (unlikely(ph == NULL)) {

	status = TP_STATUS_SEND_REQUEST;
	skb = sock_alloc_send_skb(&po->sk,
				  LL_ALLOCATED_SPACE(dev)
				  + sizeof(struct sockaddr_ll),

	if (unlikely(skb == NULL))

	tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,

	if (unlikely(tp_len < 0)) {
		__packet_set_status(po, ph,
				    TP_STATUS_AVAILABLE);
		packet_increment_head(&po->tx_ring);

	status = TP_STATUS_WRONG_FORMAT;

	skb->destructor = tpacket_destruct_skb;
	__packet_set_status(po, ph, TP_STATUS_SENDING);
	atomic_inc(&po->tx_ring.pending);

	status = TP_STATUS_SEND_REQUEST;
	err = dev_queue_xmit(skb);
	if (unlikely(err > 0)) {
		err = net_xmit_errno(err);
		if (err && __packet_get_status(po, ph) ==
			   TP_STATUS_AVAILABLE) {
			/* skb was destructed already */
		/*
		 * skb was dropped but not destructed yet;
		 * let's treat it like congestion or err < 0
		 */
	packet_increment_head(&po->tx_ring);
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending)))))

	__packet_set_status(po, ph, status);

	mutex_unlock(&po->pg_vec_lock);

static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					       size_t reserve, size_t len,
					       size_t linear, int noblock,
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;
static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	unsigned char *addr;
	int ifindex, err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		ifindex = po->ifindex;
	if (msg->msg_namelen < sizeof(struct sockaddr_ll))
	if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
		ifindex = saddr->sll_ifindex;
		proto = saddr->sll_protocol;
		addr = saddr->sll_addr;

	dev = dev_get_by_index(sock_net(sk), ifindex);

	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	if (!(dev->flags & IFF_UP))

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		if (len < vnet_hdr_len)
		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
					   vnet_hdr.csum_offset + 2;

		if (vnet_hdr.hdr_len > len)

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)
		}
	}

	if (!gso_type && (len > dev->mtu+reserve))

	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);

	skb_set_network_header(skb, reserve);

	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);

	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);

	skb->protocol = proto;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;
	}

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}
/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct tpacket_req req;

	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);

	spin_lock(&po->bind_lock);

	/*
	 *	Remove from protocol table
	 */

	__dev_remove_pack(&po->prot_hook);

	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	memset(&req, 0, sizeof(req));

	if (po->rx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 0);

	if (po->tx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 1);

	/*
	 *	Now the socket is dead. No more input will appear.
	 */

	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);

/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);

	/*
	 *	Detach an existing hook if present.
	 */

	spin_lock(&po->bind_lock);
	spin_unlock(&po->bind_lock);
	dev_remove_pack(&po->prot_hook);
	spin_lock(&po->bind_lock);

	po->prot_hook.type = protocol;
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (!dev || (dev->flags & IFF_UP)) {
		dev_add_pack(&po->prot_hook);

		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);

	spin_unlock(&po->bind_lock);

/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
	struct sock *sk = sock->sk;
	struct net_device *dev;

	if (addr_len != sizeof(struct sockaddr))

	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);

	err = packet_do_bind(sk, dev, pkt_sk(sk)->num);

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;

	if (addr_len < sizeof(struct sockaddr_ll))
	if (sll->sll_family != AF_PACKET)

	if (sll->sll_ifindex) {
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);

	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
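/*
 * Illustrative user-space sketch (assumption, not part of this file):
 * packet_bind() above is reached via bind() with a struct sockaddr_ll;
 * sll_protocol may be 0 to keep the protocol given at socket() time.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),	// hypothetical device
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */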
static struct proto packet_proto = {
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */

	if (!capable(CAP_NET_RAW))
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	sk->sk_family = PF_PACKET;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	po->prot_hook.type = proto;
	dev_add_pack(&po->prot_hook);

	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);

static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;

	skb = skb_dequeue(&sk->sk_error_queue);

	msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
		 sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	spin_unlock_bh(&sk->sk_error_queue.lock);
/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;

	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))

	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)

	if (flags & MSG_ERRQUEUE) {
		err = packet_recv_error(sk, msg, len);

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN, if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 */

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
		len -= vnet_hdr_len;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb->csum_start -
			vnet_hdr.csum_offset = skb->csum_offset;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
	}

	/*
	 *	If the address length field is there to be filled in, we fill
	 */

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */

	msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	sock_recv_ts_and_drops(msg, sk, skb);

	memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_net = skb_network_offset(skb);
		aux.tp_vlan_tci = vlan_tx_tag_get(skb);

		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

	skb_free_datagram(sk, skb);
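/*
 * Illustrative user-space sketch (assumption, not part of this file): when
 * PACKET_AUXDATA is enabled, the tpacket_auxdata filled above arrives as a
 * control message on every recvmsg().
 *
 *	struct msghdr msg = { ... };	// iovec + control buffer already set up
 *	recvmsg(fd, &msg, 0);
 *	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
 *		if (c->cmsg_level == SOL_PACKET && c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(c);
 *			// aux->tp_len is the original frame length,
 *			// aux->tp_snaplen how much was actually delivered.
 *		}
 */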
static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;

	uaddr->sa_family = AF_PACKET;

	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	strncpy(uaddr->sa_data, dev->name, 14);
	memset(uaddr->sa_data, 0, 14);

	*uaddr_len = sizeof(*uaddr);

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;

	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	sll->sll_hatype = dev->type;
	sll->sll_halen = dev->addr_len;
	memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */

	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
		return dev_mc_add(dev, i->addr);
		return dev_mc_del(dev, i->addr);
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
		return dev_uc_add(dev, i->addr);
		return dev_uc_del(dev, i->addr);

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;

	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);

	if (mreq->mr_alen > dev->addr_len)

	i = kmalloc(sizeof(*i), GFP_KERNEL);

	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			/* Free the new element ... */

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);

	i->next = po->mclist;

	err = packet_dev_mc(dev, i, 1);
		po->mclist = i->next;

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;

				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				packet_dev_mc(dev, ml, -1);

	return -EADDRNOTAVAIL;

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		packet_dev_mc(dev, ml, -1);
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;

		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
		if (len > sizeof(mreq))
		if (copy_from_user(&mreq, optval, len))
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
			ret = packet_mc_drop(sk, &mreq);
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
		if (pkt_sk(sk)->has_vnet_hdr)
		if (copy_from_user(&req, optval, sizeof(req)))
		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
	}

	case PACKET_COPY_THRESH:
		if (optlen != sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))

		pkt_sk(sk)->copy_thresh = val;

	case PACKET_VERSION:
		if (optlen != sizeof(val))
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (copy_from_user(&val, optval, sizeof(val)))
			po->tp_version = val;

	case PACKET_RESERVE:
		if (optlen != sizeof(val))
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (copy_from_user(&val, optval, sizeof(val)))
		po->tp_reserve = val;

		if (optlen != sizeof(val))
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (copy_from_user(&val, optval, sizeof(val)))
		po->tp_loss = !!val;

	case PACKET_AUXDATA:
		if (optlen < sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))

		po->auxdata = !!val;

	case PACKET_ORIGDEV:
		if (optlen < sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))

		po->origdev = !!val;

	case PACKET_VNET_HDR:
		if (sock->type != SOCK_RAW)
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
		if (optlen < sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))

		po->has_vnet_hdr = !!val;

	case PACKET_TIMESTAMP:
		if (optlen != sizeof(val))
		if (copy_from_user(&val, optval, sizeof(val)))

		po->tp_tstamp = val;

		return -ENOPROTOOPT;

static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))

	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

	case PACKET_AUXDATA:
		if (len > sizeof(int))
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
	case PACKET_VNET_HDR:
		if (len > sizeof(int))
		val = po->has_vnet_hdr;

	case PACKET_VERSION:
		if (len > sizeof(int))
		val = po->tp_version;

		if (len > sizeof(int))
		if (copy_from_user(&val, optval, len))
			val = sizeof(struct tpacket_hdr);
			val = sizeof(struct tpacket2_hdr);

	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;

		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);

	case PACKET_TIMESTAMP:
		if (len > sizeof(int))
		val = po->tp_tstamp;

		return -ENOPROTOOPT;

	if (put_user(len, optlen))
	if (copy_to_user(optval, data, len))
static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
	struct hlist_node *node;
	struct net_device *dev = data;
	struct net *net = dev_net(dev);

	sk_for_each_rcu(sk, node, &net->packet.sklist) {
		struct packet_sock *po = pkt_sk(sk);

		case NETDEV_UNREGISTER:
			packet_dev_mclist(dev, po->mclist, -1);

			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				__dev_remove_pack(&po->prot_hook);
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				if (msg == NETDEV_UNREGISTER) {
					po->prot_hook.dev = NULL;

				spin_unlock(&po->bind_lock);

			if (dev->ifindex == po->ifindex) {
				spin_lock(&po->bind_lock);
				if (po->num && !po->running) {
					dev_add_pack(&po->prot_hook);

				spin_unlock(&po->bind_lock);

static int packet_ioctl(struct socket *sock, unsigned int cmd,
	struct sock *sk = sock->sk;

	int amount = sk_wmem_alloc_get(sk);

	return put_user(amount, (int __user *)arg);

	struct sk_buff *skb;

	spin_lock_bh(&sk->sk_receive_queue.lock);
	skb = skb_peek(&sk->sk_receive_queue);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	return put_user(amount, (int __user *)arg);

	return sock_get_timestamp(sk, (struct timeval __user *)arg);
	return sock_get_timestampns(sk, (struct timespec __user *)arg);

	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
		return inet_dgram_ops.ioctl(sock, cmd, arg);

	return -ENOIOCTLCMD;

static unsigned int packet_poll(struct file *file, struct socket *sock,
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	spin_unlock_bh(&sk->sk_write_queue.lock);
/* Dirty? Well, I still have not learned a better way to account

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	atomic_dec(&pkt_sk(sk)->mapped);
}
static const struct vm_operations_struct packet_mmap_ops = {
	.open	= packet_mm_open,
	.close	= packet_mm_close,
};

static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
{
	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i]))
			free_pages((unsigned long) pg_vec[i], order);

static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;

	return (char *) __get_free_pages(gfp_flags, order);
}

static char **alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;

	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
	if (unlikely(!pg_vec))

	for (i = 0; i < block_nr; i++) {
		pg_vec[i] = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i]))
			goto out_free_pgvec;

	free_pg_vec(pg_vec, order, block_nr);
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring)
{
	char **pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	if (atomic_read(&po->mapped))
	if (atomic_read(&rb->pending))

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		if (unlikely(rb->pg_vec))

		switch (po->tp_version) {
			po->tp_hdrlen = TPACKET_HDRLEN;
			po->tp_hdrlen = TPACKET2_HDRLEN;

		if (unlikely((int)req->tp_block_size <= 0))
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))

		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=

		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))

		if (unlikely(req->tp_frame_nr))

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	__dev_remove_pack(&po->prot_hook);
	spin_unlock(&po->bind_lock);

	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
		spin_lock_bh(&rb_queue->lock);
		pg_vec = XC(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		order = XC(rb->pg_vec_order, order);
		req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);

		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running && !po->running) {
		dev_add_pack(&po->prot_hook);
	spin_unlock(&po->bind_lock);

	free_pg_vec(pg_vec, order, req->tp_block_nr);

static int packet_mmap(struct file *file, struct socket *sock,
		       struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;

	mutex_lock(&po->pg_vec_lock);

	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		expected_size += rb->pg_vec_len

	if (expected_size == 0)

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page = virt_to_page(rb->pg_vec[i]);

			for (pg_num = 0; pg_num < rb->pg_vec_pages;
				err = vm_insert_page(vma, start, page);

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;

	mutex_unlock(&po->pg_vec_lock);
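/*
 * Illustrative user-space sketch (assumption, not part of this file): a
 * typical RX ring setup drives packet_set_ring() and packet_mmap() above.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,		// multiple of PAGE_SIZE
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,		// multiple of TPACKET_ALIGNMENT
 *		.tp_frame_nr   = 64 * 2,	// blocks * frames per block
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	// each frame starts with a struct tpacket_hdr whose tp_status
 *	// toggles between TP_STATUS_KERNEL and TP_STATUS_USER.
 */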
static const struct proto_ops packet_ops_spkt = {
	.family		= PF_PACKET,
	.owner		= THIS_MODULE,
	.release	= packet_release,
	.bind		= packet_bind_spkt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= packet_getname_spkt,
	.poll		= datagram_poll,
	.ioctl		= packet_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.sendmsg	= packet_sendmsg_spkt,
	.recvmsg	= packet_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family		= PF_PACKET,
	.owner		= THIS_MODULE,
	.release	= packet_release,
	.bind		= packet_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= packet_getname,
	.poll		= packet_poll,
	.ioctl		= packet_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= packet_setsockopt,
	.getsockopt	= packet_getsockopt,
	.sendmsg	= packet_sendmsg,
	.recvmsg	= packet_recvmsg,
	.mmap		= packet_mmap,
	.sendpage	= sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family	= PF_PACKET,
	.create	= packet_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call	= packet_notifier,
};

#ifdef CONFIG_PROC_FS
static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);

	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
	{
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

			   "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
			   atomic_read(&s->sk_refcnt),
			   atomic_read(&s->sk_rmem_alloc),

static const struct seq_operations packet_seq_ops = {
	.start	= packet_seq_start,
	.next	= packet_seq_next,
	.stop	= packet_seq_stop,
	.show	= packet_seq_show,
};

static int packet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &packet_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations packet_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= packet_seq_open,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static int __net_init packet_net_init(struct net *net)
{
	spin_lock_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))

static void __net_exit packet_net_exit(struct net *net)
{
	proc_net_remove(net, "packet");
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};

static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);