/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <net/inet_common.h>
/*
   Assumptions:
   - if the device has no dev->hard_header routine, it adds and removes the ll
     header inside itself. In this case the ll header is invisible outside of
     the device, but higher levels still should reserve dev->hard_header_len.
     Some devices are clever enough to reallocate the skb when the header
     will not fit into the reserved space (tunnels); others are silly (PPP).
   - a packet socket receives packets with the ll header pulled,
     so that SOCK_RAW should push it back.

On receive:
-----------

Incoming, dev->hard_header != NULL
   mac_header -> ll header

Outgoing, dev->hard_header != NULL
   mac_header -> ll header

Incoming, dev->hard_header == NULL
   mac_header -> UNKNOWN position. It is very likely that it points to the ll
		 header. PPP does this, which is wrong, because it introduces
		 asymmetry between the rx and tx paths.

Outgoing, dev->hard_header == NULL
   mac_header -> data. ll header is still not built!

If dev->hard_header == NULL we are unlikely to restore a sensible ll header.

On transmit:
------------

dev->hard_header != NULL
   mac_header -> ll header

dev->hard_header == NULL (ll header is added by device, we cannot control it)

   We should set nh.raw on output to the correct position;
   the packet classifier depends on it.
 */
/* Private packet socket structures. */

struct packet_mclist {
	struct packet_mclist	*next;
	int			ifindex;
	int			count;
	unsigned short		type;
	unsigned short		alen;
	unsigned char		addr[MAX_ADDR_LEN];
};

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring);
struct pgv {
	char *buffer;
};

struct packet_ring_buffer {
	struct pgv		*pg_vec;
	unsigned int		head;
	unsigned int		frames_per_block;
	unsigned int		frame_size;
	unsigned int		frame_max;

	unsigned int		pg_vec_order;
	unsigned int		pg_vec_pages;
	unsigned int		pg_vec_len;

	atomic_t		pending;
};

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
static void packet_flush_mclist(struct sock *sk);

struct packet_fanout;
struct packet_sock {
	/* struct sock has to be the first member of packet_sock */
	struct sock		sk;
	struct packet_fanout	*fanout;
	struct tpacket_stats	stats;
	struct packet_ring_buffer	rx_ring;
	struct packet_ring_buffer	tx_ring;
	int			copy_thresh;
	spinlock_t		bind_lock;
	struct mutex		pg_vec_lock;
	unsigned int		running:1,	/* prot_hook is attached */
				auxdata:1,
				origdev:1,
				has_vnet_hdr:1;
	int			ifindex;	/* bound device */
	__be16			num;
	struct packet_mclist	*mclist;
	atomic_t		mapped;
	enum tpacket_versions	tp_version;
	unsigned int		tp_hdrlen;
	unsigned int		tp_reserve;
	unsigned int		tp_loss:1;
	unsigned int		tp_tstamp;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
#define PACKET_FANOUT_MAX	256

struct packet_fanout {
#ifdef CONFIG_NET_NS
	struct net		*net;
#endif
	unsigned int		num_members;
	u16			id;
	u8			type;
	u8			defrag;
	atomic_t		rr_cur;
	struct list_head	list;
	struct sock		*arr[PACKET_FANOUT_MAX];
	spinlock_t		lock;
	atomic_t		sk_ref;
	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
};
struct packet_skb_cb {
	unsigned int origlen;
	union {
		struct sockaddr_pkt pkt;
		struct sockaddr_ll ll;
	} sa;
};

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
static inline struct packet_sock *pkt_sk(struct sock *sk)
{
	return (struct packet_sock *)sk;
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);
/* register_prot_hook must be invoked with the po->bind_lock held,
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}
}
/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 * held.  If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook.  If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	po->running = 0;
	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);
	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}
static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}
static inline __pure struct page *pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
	}

	smp_wmb();
}
static int __packet_get_status(struct packet_sock *po, void *frame)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	default:
		pr_err("TPACKET version not supported\n");
		BUG();
		return 0;
	}
}
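
/* The tp_status word is the producer/consumer handshake with userspace:
 * the kernel hands a frame over by setting TP_STATUS_USER (plus flag
 * bits) and userspace returns it by writing TP_STATUS_KERNEL back.
 * A minimal userspace sketch for one mapped frame (TPACKET_V2 assumed;
 * pfd and handle() are placeholders; the compiler barriers mirror the
 * smp_wmb()/smp_rmb() pairing above):
 *
 *	struct tpacket2_hdr *hdr = frame;
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait for the kernel
 *	__sync_synchronize();			// read barrier
 *	handle((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *	__sync_synchronize();			// write barrier
 *	hdr->tp_status = TP_STATUS_KERNEL;	// give the frame back
 */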
static void *packet_lookup_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		unsigned int position,
		int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}
static inline void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}
static inline void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}
static inline void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}
static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}
static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
{
	int x = atomic_read(&f->rr_cur) + 1;

	if (x >= num)
		x = 0;

	return x;
}
static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	u32 idx, hash = skb->rxhash;

	idx = ((u64)hash * num) >> 32;

	return f->arr[idx];
}
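
/* The multiply-shift above maps a 32-bit rxhash uniformly onto [0, num)
 * without a division: idx = (hash * num) / 2^32.  For example, with
 * num == 4 sockets, hashes below 0x40000000 pick arr[0], hashes in
 * 0x40000000..0x7fffffff pick arr[1], and so on; a flow stays on one
 * socket because its rxhash is stable.
 */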
static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return f->arr[cur];
}
static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int cpu = smp_processor_id();

	return f->arr[cpu % num];
}
static struct sk_buff *fanout_check_defrag(struct sk_buff *skb)
{
#ifdef CONFIG_INET
	const struct iphdr *iph;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return skb;

	iph = ip_hdr(skb);
	if (iph->ihl < 5 || iph->version != 4)
		return skb;
	if (!pskb_may_pull(skb, iph->ihl*4))
		return skb;
	iph = ip_hdr(skb);
	len = ntohs(iph->tot_len);
	if (skb->len < len || len < (iph->ihl * 4))
		return skb;

	if (ip_is_fragment(ip_hdr(skb))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (pskb_trim_rcsum(skb, len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, IP_DEFRAG_AF_PACKET))
				return NULL;
			skb->rxhash = 0;
		}
	}
#endif
	return skb;
}
static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	struct sock *sk;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (f->defrag) {
			skb = fanout_check_defrag(skb);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		sk = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		sk = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		sk = fanout_demux_cpu(f, skb, num);
		break;
	}

	po = pkt_sk(sk);

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
static DEFINE_MUTEX(fanout_mutex);
static LIST_HEAD(fanout_list);
static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}
static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
	int err;

	switch (type) {
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->defrag != defrag)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->defrag = defrag;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}
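
/* A minimal userspace sketch of joining a fanout group (the group id 42
 * is an arbitrary example value; error handling elided).  Every socket
 * that requests the same id on the same protocol/device in the same
 * namespace joins the same struct packet_fanout:
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int arg = 42 | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */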
static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	po->fanout = NULL;

	mutex_lock(&fanout_mutex);
	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}
static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
static inline unsigned int run_filter(const struct sk_buff *skb,
				      const struct sock *sk,
				      unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}
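
/* The filter consulted here is installed from userspace with
 * SO_ATTACH_FILTER.  A minimal sketch attaching a classic BPF program
 * that accepts every packet but truncates it to 96 bytes (the filter's
 * return value becomes "res", i.e. the snap length):
 *
 *	#include <linux/filter.h>
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },	// return 96
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */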
/*
 * This function makes lazy skb cloning in hope that most of packets
 * are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return skb to original state on exit,
 * we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that corresponding packet head is
		 * never delivered to user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}
*skb
, struct net_device
*dev
,
975 struct packet_type
*pt
, struct net_device
*orig_dev
)
978 struct packet_sock
*po
;
979 struct sockaddr_ll
*sll
;
981 struct tpacket_hdr
*h1
;
982 struct tpacket2_hdr
*h2
;
985 u8
*skb_head
= skb
->data
;
986 int skb_len
= skb
->len
;
987 unsigned int snaplen
, res
;
988 unsigned long status
= TP_STATUS_LOSING
|TP_STATUS_USER
;
989 unsigned short macoff
, netoff
, hdrlen
;
990 struct sk_buff
*copy_skb
= NULL
;
993 struct skb_shared_hwtstamps
*shhwtstamps
= skb_hwtstamps(skb
);
995 if (skb
->pkt_type
== PACKET_LOOPBACK
)
998 sk
= pt
->af_packet_priv
;
1001 if (!net_eq(dev_net(dev
), sock_net(sk
)))
1004 if (dev
->header_ops
) {
1005 if (sk
->sk_type
!= SOCK_DGRAM
)
1006 skb_push(skb
, skb
->data
- skb_mac_header(skb
));
1007 else if (skb
->pkt_type
== PACKET_OUTGOING
) {
1008 /* Special case: outgoing packets have ll header at head */
1009 skb_pull(skb
, skb_network_offset(skb
));
1013 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
1014 status
|= TP_STATUS_CSUMNOTREADY
;
1018 res
= run_filter(skb
, sk
, snaplen
);
1020 goto drop_n_restore
;
1024 if (sk
->sk_type
== SOCK_DGRAM
) {
1025 macoff
= netoff
= TPACKET_ALIGN(po
->tp_hdrlen
) + 16 +
1028 unsigned maclen
= skb_network_offset(skb
);
1029 netoff
= TPACKET_ALIGN(po
->tp_hdrlen
+
1030 (maclen
< 16 ? 16 : maclen
)) +
1032 macoff
= netoff
- maclen
;
1035 if (macoff
+ snaplen
> po
->rx_ring
.frame_size
) {
1036 if (po
->copy_thresh
&&
1037 atomic_read(&sk
->sk_rmem_alloc
) + skb
->truesize
<
1038 (unsigned)sk
->sk_rcvbuf
) {
1039 if (skb_shared(skb
)) {
1040 copy_skb
= skb_clone(skb
, GFP_ATOMIC
);
1042 copy_skb
= skb_get(skb
);
1043 skb_head
= skb
->data
;
1046 skb_set_owner_r(copy_skb
, sk
);
1048 snaplen
= po
->rx_ring
.frame_size
- macoff
;
1049 if ((int)snaplen
< 0)
1053 spin_lock(&sk
->sk_receive_queue
.lock
);
1054 h
.raw
= packet_current_frame(po
, &po
->rx_ring
, TP_STATUS_KERNEL
);
1057 packet_increment_head(&po
->rx_ring
);
1058 po
->stats
.tp_packets
++;
1060 status
|= TP_STATUS_COPY
;
1061 __skb_queue_tail(&sk
->sk_receive_queue
, copy_skb
);
1063 if (!po
->stats
.tp_drops
)
1064 status
&= ~TP_STATUS_LOSING
;
1065 spin_unlock(&sk
->sk_receive_queue
.lock
);
1067 skb_copy_bits(skb
, 0, h
.raw
+ macoff
, snaplen
);
1069 switch (po
->tp_version
) {
1071 h
.h1
->tp_len
= skb
->len
;
1072 h
.h1
->tp_snaplen
= snaplen
;
1073 h
.h1
->tp_mac
= macoff
;
1074 h
.h1
->tp_net
= netoff
;
1075 if ((po
->tp_tstamp
& SOF_TIMESTAMPING_SYS_HARDWARE
)
1076 && shhwtstamps
->syststamp
.tv64
)
1077 tv
= ktime_to_timeval(shhwtstamps
->syststamp
);
1078 else if ((po
->tp_tstamp
& SOF_TIMESTAMPING_RAW_HARDWARE
)
1079 && shhwtstamps
->hwtstamp
.tv64
)
1080 tv
= ktime_to_timeval(shhwtstamps
->hwtstamp
);
1081 else if (skb
->tstamp
.tv64
)
1082 tv
= ktime_to_timeval(skb
->tstamp
);
1084 do_gettimeofday(&tv
);
1085 h
.h1
->tp_sec
= tv
.tv_sec
;
1086 h
.h1
->tp_usec
= tv
.tv_usec
;
1087 hdrlen
= sizeof(*h
.h1
);
1090 h
.h2
->tp_len
= skb
->len
;
1091 h
.h2
->tp_snaplen
= snaplen
;
1092 h
.h2
->tp_mac
= macoff
;
1093 h
.h2
->tp_net
= netoff
;
1094 if ((po
->tp_tstamp
& SOF_TIMESTAMPING_SYS_HARDWARE
)
1095 && shhwtstamps
->syststamp
.tv64
)
1096 ts
= ktime_to_timespec(shhwtstamps
->syststamp
);
1097 else if ((po
->tp_tstamp
& SOF_TIMESTAMPING_RAW_HARDWARE
)
1098 && shhwtstamps
->hwtstamp
.tv64
)
1099 ts
= ktime_to_timespec(shhwtstamps
->hwtstamp
);
1100 else if (skb
->tstamp
.tv64
)
1101 ts
= ktime_to_timespec(skb
->tstamp
);
1103 getnstimeofday(&ts
);
1104 h
.h2
->tp_sec
= ts
.tv_sec
;
1105 h
.h2
->tp_nsec
= ts
.tv_nsec
;
1106 if (vlan_tx_tag_present(skb
)) {
1107 h
.h2
->tp_vlan_tci
= vlan_tx_tag_get(skb
);
1108 status
|= TP_STATUS_VLAN_VALID
;
1110 h
.h2
->tp_vlan_tci
= 0;
1112 h
.h2
->tp_padding
= 0;
1113 hdrlen
= sizeof(*h
.h2
);
1119 sll
= h
.raw
+ TPACKET_ALIGN(hdrlen
);
1120 sll
->sll_halen
= dev_parse_header(skb
, sll
->sll_addr
);
1121 sll
->sll_family
= AF_PACKET
;
1122 sll
->sll_hatype
= dev
->type
;
1123 sll
->sll_protocol
= skb
->protocol
;
1124 sll
->sll_pkttype
= skb
->pkt_type
;
1125 if (unlikely(po
->origdev
))
1126 sll
->sll_ifindex
= orig_dev
->ifindex
;
1128 sll
->sll_ifindex
= dev
->ifindex
;
1130 __packet_set_status(po
, h
.raw
, status
);
1132 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
1136 end
= (u8
*)PAGE_ALIGN((unsigned long)h
.raw
+ macoff
+ snaplen
);
1137 for (start
= h
.raw
; start
< end
; start
+= PAGE_SIZE
)
1138 flush_dcache_page(pgv_to_page(start
));
1142 sk
->sk_data_ready(sk
, 0);
1145 if (skb_head
!= skb
->data
&& skb_shared(skb
)) {
1146 skb
->data
= skb_head
;
1154 po
->stats
.tp_drops
++;
1155 spin_unlock(&sk
->sk_receive_queue
.lock
);
1157 sk
->sk_data_ready(sk
, 0);
1158 kfree_skb(copy_skb
);
1159 goto drop_n_restore
;
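
/* A userspace consumer of the RX ring filled above walks the frames in
 * the same order as packet_increment_head(), locating each one the way
 * packet_lookup_frame() does.  A minimal sketch (TPACKET_V2 assumed;
 * ring is the mmap()ed area, req is the tpacket_req used at setup,
 * process() is a placeholder):
 *
 *	unsigned int fpb = req.tp_block_size / req.tp_frame_size;
 *	unsigned int i = 0;
 *	for (;;) {
 *		struct tpacket2_hdr *hdr = (void *)(ring +
 *			(i / fpb) * req.tp_block_size +
 *			(i % fpb) * req.tp_frame_size);
 *		if (!(hdr->tp_status & TP_STATUS_USER)) {
 *			struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *			poll(&pfd, 1, -1);
 *			continue;
 *		}
 *		process((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *		i = (i + 1) % req.tp_frame_nr;
 *	}
 */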
static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);
	void *ph;

	BUG_ON(skb == NULL);

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING);
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
	}

	sock_wfree(skb);
}
*po
, struct sk_buff
*skb
,
1181 void *frame
, struct net_device
*dev
, int size_max
,
1182 __be16 proto
, unsigned char *addr
)
1185 struct tpacket_hdr
*h1
;
1186 struct tpacket2_hdr
*h2
;
1189 int to_write
, offset
, len
, tp_len
, nr_frags
, len_max
;
1190 struct socket
*sock
= po
->sk
.sk_socket
;
1197 skb
->protocol
= proto
;
1199 skb
->priority
= po
->sk
.sk_priority
;
1200 skb
->mark
= po
->sk
.sk_mark
;
1201 skb_shinfo(skb
)->destructor_arg
= ph
.raw
;
1203 switch (po
->tp_version
) {
1205 tp_len
= ph
.h2
->tp_len
;
1208 tp_len
= ph
.h1
->tp_len
;
1211 if (unlikely(tp_len
> size_max
)) {
1212 pr_err("packet size is too long (%d > %d)\n", tp_len
, size_max
);
1216 skb_reserve(skb
, LL_RESERVED_SPACE(dev
));
1217 skb_reset_network_header(skb
);
1219 data
= ph
.raw
+ po
->tp_hdrlen
- sizeof(struct sockaddr_ll
);
1222 if (sock
->type
== SOCK_DGRAM
) {
1223 err
= dev_hard_header(skb
, dev
, ntohs(proto
), addr
,
1225 if (unlikely(err
< 0))
1227 } else if (dev
->hard_header_len
) {
1228 /* net device doesn't like empty head */
1229 if (unlikely(tp_len
<= dev
->hard_header_len
)) {
1230 pr_err("packet size is too short (%d < %d)\n",
1231 tp_len
, dev
->hard_header_len
);
1235 skb_push(skb
, dev
->hard_header_len
);
1236 err
= skb_store_bits(skb
, 0, data
,
1237 dev
->hard_header_len
);
1241 data
+= dev
->hard_header_len
;
1242 to_write
-= dev
->hard_header_len
;
1246 offset
= offset_in_page(data
);
1247 len_max
= PAGE_SIZE
- offset
;
1248 len
= ((to_write
> len_max
) ? len_max
: to_write
);
1250 skb
->data_len
= to_write
;
1251 skb
->len
+= to_write
;
1252 skb
->truesize
+= to_write
;
1253 atomic_add(to_write
, &po
->sk
.sk_wmem_alloc
);
1255 while (likely(to_write
)) {
1256 nr_frags
= skb_shinfo(skb
)->nr_frags
;
1258 if (unlikely(nr_frags
>= MAX_SKB_FRAGS
)) {
1259 pr_err("Packet exceed the number of skb frags(%lu)\n",
1264 page
= pgv_to_page(data
);
1266 flush_dcache_page(page
);
1268 skb_fill_page_desc(skb
, nr_frags
, page
, offset
, len
);
1271 len_max
= PAGE_SIZE
;
1272 len
= ((to_write
> len_max
) ? len_max
: to_write
);
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	bool need_rls_dev = false;
	int err, reserve = 0;
	void *ph;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;
	unsigned char *addr;
	int len_sum = 0;
	int status = 0;

	mutex_lock(&po->pg_vec_lock);

	err = -EBUSY;
	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
		need_rls_dev = true;
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;

	reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	size_max = po->tx_ring.frame_size
		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
				TP_STATUS_SEND_REQUEST);

		if (unlikely(ph == NULL)) {
			schedule();
			continue;
		}

		status = TP_STATUS_SEND_REQUEST;
		skb = sock_alloc_send_skb(&po->sk,
				LL_ALLOCATED_SPACE(dev)
				+ sizeof(struct sockaddr_ll),
				0, &err);

		if (unlikely(skb == NULL))
			goto out_status;

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
				addr);

		if (unlikely(tp_len < 0)) {
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);

		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending))))
		);

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	if (need_rls_dev)
		dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
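
/* The userspace half of the TX ring: mark a frame TP_STATUS_SEND_REQUEST
 * and kick the kernel with send().  A minimal sketch for one frame of a
 * mapped TPACKET_V2 TX ring (tx_frame, pkt and pkt_len are placeholders;
 * the socket must be bound so tpacket_snd() can resolve the device):
 *
 *	struct tpacket2_hdr *hdr = tx_frame;
 *	char *data = (char *)hdr + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *	memcpy(data, pkt, pkt_len);		// complete l2 frame
 *	hdr->tp_len = pkt_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);			// triggers tpacket_snd()
 *	// on completion the kernel sets TP_STATUS_AVAILABLE again
 */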
static inline struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					       size_t reserve, size_t len,
					       size_t linear, int noblock,
					       int *err)
{
	struct sk_buff *skb;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	bool need_rls_dev = false;
	unsigned char *addr;
	int err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	int vnet_hdr_len;
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
		need_rls_dev = true;
	}

	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto out_unlock;

		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
				       vnet_hdr_len);
		if (err < 0)
			goto out_unlock;

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
		      vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						 vnet_hdr.csum_offset + 2;

		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto out_unlock;

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
				break;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
				break;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;
				break;
			default:
				goto out_unlock;
			}

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)
				goto out_unlock;
		}
	}

	err = -EMSGSIZE;
	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN))
		goto out_unlock;

	err = -ENOBUFS;
	skb = packet_alloc_skb(sk, LL_ALLOCATED_SPACE(dev),
			       LL_RESERVED_SPACE(dev), len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
		goto out_free;

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	if (err)
		goto out_free;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_free;

	if (!gso_type && (len > dev->mtu + reserve)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_free;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {
				err = -EINVAL;
				goto out_free;
			}
		}

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;
	}

	/*
	 *	Now send it
	 */

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	if (need_rls_dev)
		dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev && need_rls_dev)
		dev_put(dev);
out:
	return err;
}
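
/* A minimal userspace sketch of the non-ring transmit path handled by
 * packet_snd(): an unbound socket can name the device per packet via
 * sockaddr_ll (ifindex 2 and frame/frame_len are example placeholders;
 * on SOCK_RAW the buffer must be a complete l2 frame):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = 2,
 *		.sll_halen    = ETH_ALEN,
 *	};
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */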
static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}
/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	struct tpacket_req req;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	memset(&req, 0, sizeof(req));

	if (po->rx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 0);

	if (po->tx_ring.pg_vec)
		packet_set_ring(sk, &req, 1, 1);

	fanout_release(sk);

	synchronize_net();
	/*
	 *	Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}
/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->fanout) {
		if (dev)
			dev_put(dev);

		return -EINVAL;
	}

	lock_sock(sk);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, true);
	po->num = protocol;
	po->prot_hook.type = protocol;
	if (po->prot_hook.dev)
		dev_put(po->prot_hook.dev);
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (!dev || (dev->flags & IFF_UP)) {
		register_prot_hook(sk);
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}
/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev)
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
	return err;
}
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);

out:
	return err;
}
static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};
/*
 *	Create a packet of type SOCK_PACKET.
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		register_prot_hook(sk);
	}

	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);

	return 0;
out:
	return err;
}
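
/* A minimal userspace counterpart of packet_create() (CAP_NET_RAW is
 * required; a zero protocol delays delivery until bind() supplies one,
 * matching the "if (proto)" test above; "eth0" is an example device):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */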
static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	int copied, err;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
		 sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = packet_recv_error(sk, msg, len);
		goto out;
	}

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN, if device have just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred so return it. Because skb_recv_datagram()
	 *	handles the blocking we don't see and worry about blocking
	 *	retries.
	 */

	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		err = -EINVAL;
		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
			goto out_free;

		len -= vnet_hdr_len;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
				goto out_free;
			else
				BUG();
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
			vnet_hdr.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
				     vnet_hdr_len);
		if (err < 0)
			goto out_free;
	}

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (vlan_tx_tag_present(skb)) {
			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
			aux.tp_status |= TP_STATUS_VLAN_VALID;
		} else {
			aux.tp_vlan_tci = 0;
		}
		aux.tp_padding = 0;
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
*sock
, struct sockaddr
*uaddr
,
2051 int *uaddr_len
, int peer
)
2053 struct net_device
*dev
;
2054 struct sock
*sk
= sock
->sk
;
2059 uaddr
->sa_family
= AF_PACKET
;
2061 dev
= dev_get_by_index_rcu(sock_net(sk
), pkt_sk(sk
)->ifindex
);
2063 strncpy(uaddr
->sa_data
, dev
->name
, 14);
2065 memset(uaddr
->sa_data
, 0, 14);
2067 *uaddr_len
= sizeof(*uaddr
);
static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}
static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
		break;
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}
static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}
static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}
static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}
static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}
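
/* A minimal userspace sketch of the membership interface serviced by
 * packet_mc_add()/packet_mc_drop(); PACKET_MR_PROMISC needs no address
 * (ifindex 2 is an example value):
 *
 *	struct packet_mreq mr = {
 *		.mr_ifindex = 2,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *		   &mr, sizeof(mr));
 */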
static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		struct tpacket_req req;

		if (optlen < sizeof(req))
			return -EINVAL;
		if (pkt_sk(sk)->has_vnet_hdr)
			return -EINVAL;
		if (copy_from_user(&req, optval, sizeof(req)))
			return -EFAULT;
		return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->tp_tstamp = val;
		return 0;
	}
	case PACKET_FANOUT:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		return fanout_add(sk, val & 0xffff, val >> 16);
	}
	default:
		return -ENOPROTOOPT;
	}
}
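
/* Note the -EBUSY checks above: PACKET_VERSION, PACKET_RESERVE and
 * PACKET_LOSS must be configured before PACKET_RX_RING/PACKET_TX_RING
 * create a ring.  A minimal userspace ordering sketch, which also probes
 * the header size for the chosen version via PACKET_HDRLEN:
 *
 *	int ver = TPACKET_V2, hdrlen = ver;
 *	socklen_t len = sizeof(hdrlen);
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &hdrlen, &len);
 *	// ... only now set up the tpacket_req ring
 */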
static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data;
	struct tpacket_stats st;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		if (len > sizeof(struct tpacket_stats))
			len = sizeof(struct tpacket_stats);
		spin_lock_bh(&sk->sk_receive_queue.lock);
		st = po->stats;
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		st.tp_packets += st.tp_drops;

		data = &st;
		break;
	case PACKET_AUXDATA:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->auxdata;

		data = &val;
		break;
	case PACKET_ORIGDEV:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->origdev;

		data = &val;
		break;
	case PACKET_VNET_HDR:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->has_vnet_hdr;

		data = &val;
		break;
	case PACKET_VERSION:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_version;
		data = &val;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		default:
			return -EINVAL;
		}
		data = &val;
		break;
	case PACKET_RESERVE:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_reserve;
		data = &val;
		break;
	case PACKET_LOSS:
		if (len > sizeof(unsigned int))
			len = sizeof(unsigned int);
		val = po->tp_loss;
		data = &val;
		break;
	case PACKET_TIMESTAMP:
		if (len > sizeof(int))
			len = sizeof(int);
		val = po->tp_tstamp;
		data = &val;
		break;
	case PACKET_FANOUT:
		if (len > sizeof(int))
			len = sizeof(int);
		val = (po->fanout ?
		       ((u32)po->fanout->id |
			((u32)po->fanout->type << 16)) :
		       0);
		data = &val;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
*this, unsigned long msg
, void *data
)
2532 struct hlist_node
*node
;
2533 struct net_device
*dev
= data
;
2534 struct net
*net
= dev_net(dev
);
2537 sk_for_each_rcu(sk
, node
, &net
->packet
.sklist
) {
2538 struct packet_sock
*po
= pkt_sk(sk
);
2541 case NETDEV_UNREGISTER
:
2543 packet_dev_mclist(dev
, po
->mclist
, -1);
2547 if (dev
->ifindex
== po
->ifindex
) {
2548 spin_lock(&po
->bind_lock
);
2550 __unregister_prot_hook(sk
, false);
2551 sk
->sk_err
= ENETDOWN
;
2552 if (!sock_flag(sk
, SOCK_DEAD
))
2553 sk
->sk_error_report(sk
);
2555 if (msg
== NETDEV_UNREGISTER
) {
2557 if (po
->prot_hook
.dev
)
2558 dev_put(po
->prot_hook
.dev
);
2559 po
->prot_hook
.dev
= NULL
;
2561 spin_unlock(&po
->bind_lock
);
2565 if (dev
->ifindex
== po
->ifindex
) {
2566 spin_lock(&po
->bind_lock
);
2568 register_prot_hook(sk
);
2569 spin_unlock(&po
->bind_lock
);
2579 static int packet_ioctl(struct socket
*sock
, unsigned int cmd
,
2582 struct sock
*sk
= sock
->sk
;
2587 int amount
= sk_wmem_alloc_get(sk
);
2589 return put_user(amount
, (int __user
*)arg
);
2593 struct sk_buff
*skb
;
2596 spin_lock_bh(&sk
->sk_receive_queue
.lock
);
2597 skb
= skb_peek(&sk
->sk_receive_queue
);
2600 spin_unlock_bh(&sk
->sk_receive_queue
.lock
);
2601 return put_user(amount
, (int __user
*)arg
);
2604 return sock_get_timestamp(sk
, (struct timeval __user
*)arg
);
2606 return sock_get_timestampns(sk
, (struct timespec __user
*)arg
);
2616 case SIOCGIFBRDADDR
:
2617 case SIOCSIFBRDADDR
:
2618 case SIOCGIFNETMASK
:
2619 case SIOCSIFNETMASK
:
2620 case SIOCGIFDSTADDR
:
2621 case SIOCSIFDSTADDR
:
2623 return inet_dgram_ops
.ioctl(sock
, cmd
, arg
);
2627 return -ENOIOCTLCMD
;
static unsigned int packet_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned int mask = datagram_poll(file, sock, wait);

	spin_lock_bh(&sk->sk_receive_queue.lock);
	if (po->rx_ring.pg_vec) {
		if (!packet_previous_frame(po, &po->rx_ring, TP_STATUS_KERNEL))
			mask |= POLLIN | POLLRDNORM;
	}
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	spin_lock_bh(&sk->sk_write_queue.lock);
	if (po->tx_ring.pg_vec) {
		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
			mask |= POLLOUT | POLLWRNORM;
	}
	spin_unlock_bh(&sk->sk_write_queue.lock);
	return mask;
}
/* Dirty? Well, I still did not learn better way to account
 * socket.
 */

static void packet_mm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_inc(&pkt_sk(sk)->mapped);
}

static void packet_mm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct socket *sock = file->private_data;
	struct sock *sk = sock->sk;

	if (sk)
		atomic_dec(&pkt_sk(sk)->mapped);
}

static const struct vm_operations_struct packet_mmap_ops = {
	.open	=	packet_mm_open,
	.close	=	packet_mm_close,
};
static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}
static inline char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer = NULL;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);

	if (buffer)
		return buffer;

	/*
	 * __get_free_pages failed, fall back to vmalloc
	 */
	buffer = vzalloc((1 << order) * PAGE_SIZE);

	if (buffer)
		return buffer;

	/*
	 * vmalloc failed, lets dig into swap here
	 */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *)__get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/*
	 * complete and utter failure
	 */
	return NULL;
}
static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}
static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
		int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (atomic_read(&rb->pending))
			goto out;
	}

	if (req->tp_block_nr) {
		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
			goto out;
		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
					po->tp_reserve))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
		if (unlikely(rb->frames_per_block <= 0))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
	}
	/* Done */
	else {
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		po->num = 0;
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);

	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
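
/* A worked example of the geometry checks above (values are arbitrary
 * but valid): tp_block_size = 4096 (page aligned), tp_frame_size = 2048
 * (TPACKET_ALIGNMENT aligned), so frames_per_block = 2; with
 * tp_block_nr = 64, tp_frame_nr must be exactly 2 * 64 = 128:
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 128,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */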
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
= {
2944 .family
= PF_PACKET
,
2945 .owner
= THIS_MODULE
,
2946 .release
= packet_release
,
2947 .bind
= packet_bind_spkt
,
2948 .connect
= sock_no_connect
,
2949 .socketpair
= sock_no_socketpair
,
2950 .accept
= sock_no_accept
,
2951 .getname
= packet_getname_spkt
,
2952 .poll
= datagram_poll
,
2953 .ioctl
= packet_ioctl
,
2954 .listen
= sock_no_listen
,
2955 .shutdown
= sock_no_shutdown
,
2956 .setsockopt
= sock_no_setsockopt
,
2957 .getsockopt
= sock_no_getsockopt
,
2958 .sendmsg
= packet_sendmsg_spkt
,
2959 .recvmsg
= packet_recvmsg
,
2960 .mmap
= sock_no_mmap
,
2961 .sendpage
= sock_no_sendpage
,
static const struct proto_ops packet_ops = {
	.family =	PF_PACKET,
	.owner =	THIS_MODULE,
	.release =	packet_release,
	.bind =		packet_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	packet_getname,
	.poll =		packet_poll,
	.ioctl =	packet_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	packet_setsockopt,
	.getsockopt =	packet_getsockopt,
	.sendmsg =	packet_sendmsg,
	.recvmsg =	packet_recvmsg,
	.mmap =		packet_mmap,
	.sendpage =	sock_no_sendpage,
};
= {
2986 .family
= PF_PACKET
,
2987 .create
= packet_create
,
2988 .owner
= THIS_MODULE
,
2991 static struct notifier_block packet_netdev_notifier
= {
2992 .notifier_call
= packet_notifier
,
2995 #ifdef CONFIG_PROC_FS
2997 static void *packet_seq_start(struct seq_file
*seq
, loff_t
*pos
)
3000 struct net
*net
= seq_file_net(seq
);
3003 return seq_hlist_start_head_rcu(&net
->packet
.sklist
, *pos
);
3006 static void *packet_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
3008 struct net
*net
= seq_file_net(seq
);
3009 return seq_hlist_next_rcu(v
, &net
->packet
.sklist
, pos
);
3012 static void packet_seq_stop(struct seq_file
*seq
, void *v
)
3018 static int packet_seq_show(struct seq_file
*seq
, void *v
)
3020 if (v
== SEQ_START_TOKEN
)
3021 seq_puts(seq
, "sk RefCnt Type Proto Iface R Rmem User Inode\n");
3023 struct sock
*s
= sk_entry(v
);
3024 const struct packet_sock
*po
= pkt_sk(s
);
3027 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
3029 atomic_read(&s
->sk_refcnt
),
3034 atomic_read(&s
->sk_rmem_alloc
),
3042 static const struct seq_operations packet_seq_ops
= {
3043 .start
= packet_seq_start
,
3044 .next
= packet_seq_next
,
3045 .stop
= packet_seq_stop
,
3046 .show
= packet_seq_show
,
3049 static int packet_seq_open(struct inode
*inode
, struct file
*file
)
3051 return seq_open_net(inode
, file
, &packet_seq_ops
,
3052 sizeof(struct seq_net_private
));
3055 static const struct file_operations packet_seq_fops
= {
3056 .owner
= THIS_MODULE
,
3057 .open
= packet_seq_open
,
3059 .llseek
= seq_lseek
,
3060 .release
= seq_release_net
,
static int __net_init packet_net_init(struct net *net)
{
	spin_lock_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
		return -ENOMEM;

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	proc_net_remove(net, "packet");
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};
static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

static int __init packet_init(void)
{
	int rc = proto_register(&packet_proto, 0);

	if (rc != 0)
		goto out;

	sock_register(&packet_family_ops);
	register_pernet_subsys(&packet_net_ops);
	register_netdevice_notifier(&packet_netdev_notifier);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);