/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The User Datagram Protocol (UDP).
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() calls
 *		Alan Cox	:	stopped close while in use off icmp
 *					messages. Not a fix but a botch that
 *					for udp at least is 'valid'.
 *		Alan Cox	:	Fixed icmp handling properly
 *		Alan Cox	:	Correct error for oversized datagrams
 *		Alan Cox	:	Tidied select() semantics.
 *		Alan Cox	:	udp_err() fixed properly, also now
 *					select and read wake correctly on errors
 *		Alan Cox	:	udp_send verify_area moved to avoid mem leak
 *		Alan Cox	:	UDP can count its memory
 *		Alan Cox	:	send to an unknown connection causes
 *					an ECONNREFUSED off the icmp, but
 *					does NOT close.
 *		Alan Cox	:	Switched to new sk_buff handlers. No more backlog!
 *		Alan Cox	:	Using generic datagram code. Even smaller and the PEEK
 *					bug no longer crashes it.
 *		Fred Van Kempen	:	Net2e support for sk->broadcast.
 *		Alan Cox	:	Uses skb_free_datagram
 *		Alan Cox	:	Added get/set sockopt support.
 *		Alan Cox	:	Broadcasting without option set returns EACCES.
 *		Alan Cox	:	No wakeup calls. Instead we now use the callbacks.
 *		Alan Cox	:	Use ip_tos and ip_ttl
 *		Alan Cox	:	SNMP Mibs
 *		Alan Cox	:	MSG_DONTROUTE, and 0.0.0.0 support.
 *		Matt Dillon	:	UDP length checks.
 *		Alan Cox	:	Smarter af_inet used properly.
 *		Alan Cox	:	Use new kernel side addressing.
 *		Alan Cox	:	Incorrect return on truncated datagram receive.
 *		Arnt Gulbrandsen:	New udp_send and stuff
 *		Alan Cox	:	Cache last socket
 *		Alan Cox	:	Route cache
 *		Jon Peatfield	:	Minor efficiency fix to sendto().
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Nonblocking error fix.
 *		Willy Konynenberg:	Transparent proxying support.
 *		Mike McLagan	:	Routing by source
 *		David S. Miller	:	New socket lookup architecture.
 *					Last socket cache retained as it
 *					does have a high hit rate.
 *		Olaf Kirch	:	Don't linearise iovec on sendmsg.
 *		Andi Kleen	:	Some cleanups, cache destination entry
 *					for connect.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Melvin Smith	:	Check msg_name not msg_namelen in sendto(),
 *					return ENOTCONN for unconnected sockets (POSIX)
 *		Janos Farkas	:	don't deliver multi/broadcasts to a different
 *					bound-to-device socket
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 *		Arnaldo C. Melo	:	convert /proc/net/udp to seq_file
 *		YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *		Alexey Kuznetsov:		allow both IPv4 and IPv6 sockets to bind
 *						a single port at the same time.
 *		Derek Atkins <derek@ihtfp.com>: Add Encapsulation Support
 *		James Chapman	:	Add L2TP encapsulation type.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "UDP: " fmt

#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/igmp.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/tcp_states.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
struct udp_table udp_table __read_mostly;
EXPORT_SYMBOL(udp_table);

long sysctl_udp_mem[3] __read_mostly;
EXPORT_SYMBOL(sysctl_udp_mem);

int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);

int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);

atomic_long_t udp_memory_allocated;
EXPORT_SYMBOL(udp_memory_allocated);

#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
static int udp_lib_lport_inuse(struct net *net, __u16 num,
                               const struct udp_hslot *hslot,
                               unsigned long *bitmap,
                               struct sock *sk,
                               int (*saddr_comp)(const struct sock *sk1,
                                                 const struct sock *sk2),
                               unsigned int log)
{
        struct sock *sk2;
        struct hlist_nulls_node *node;
        kuid_t uid = sock_i_uid(sk);

        sk_nulls_for_each(sk2, node, &hslot->head)
                if (net_eq(sock_net(sk2), net) &&
                    sk2 != sk &&
                    (bitmap || udp_sk(sk2)->udp_port_hash == num) &&
                    (!sk2->sk_reuse || !sk->sk_reuse) &&
                    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
                     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
                    (!sk2->sk_reuseport || !sk->sk_reuseport ||
                     !uid_eq(uid, sock_i_uid(sk2))) &&
                    (*saddr_comp)(sk, sk2)) {
                        if (bitmap)
                                __set_bit(udp_sk(sk2)->udp_port_hash >> log,
                                          bitmap);
                        else
                                return 1;
                }
        return 0;
}
/*
 * Note: we still hold the spinlock of the primary hash chain, so no other
 * writer can insert/delete a socket with local_port == num.
 */
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
                                struct udp_hslot *hslot2,
                                struct sock *sk,
                                int (*saddr_comp)(const struct sock *sk1,
                                                  const struct sock *sk2))
{
        struct sock *sk2;
        struct hlist_nulls_node *node;
        kuid_t uid = sock_i_uid(sk);
        int res = 0;

        spin_lock(&hslot2->lock);
        udp_portaddr_for_each_entry(sk2, node, &hslot2->head)
                if (net_eq(sock_net(sk2), net) &&
                    sk2 != sk &&
                    (udp_sk(sk2)->udp_port_hash == num) &&
                    (!sk2->sk_reuse || !sk->sk_reuse) &&
                    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
                     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
                    (!sk2->sk_reuseport || !sk->sk_reuseport ||
                     !uid_eq(uid, sock_i_uid(sk2))) &&
                    (*saddr_comp)(sk, sk2)) {
                        res = 1;
                        break;
                }
        spin_unlock(&hslot2->lock);
        return res;
}
/**
 *  udp_lib_get_port  -  UDP/-Lite port lookup for IPv4 and IPv6
 *
 *  @sk:          socket struct in question
 *  @snum:        port number to look up
 *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
 *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
 *                   with NULL address
 */
int udp_lib_get_port(struct sock *sk, unsigned short snum,
                     int (*saddr_comp)(const struct sock *sk1,
                                       const struct sock *sk2),
                     unsigned int hash2_nulladdr)
{
        struct udp_hslot *hslot, *hslot2;
        struct udp_table *udptable = sk->sk_prot->h.udp_table;
        int    error = 1;
        struct net *net = sock_net(sk);

        if (!snum) {
                int low, high, remaining;
                unsigned int rand;
                unsigned short first, last;
                DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);

                inet_get_local_port_range(&low, &high);
                remaining = (high - low) + 1;

                rand = net_random();
                first = (((u64)rand * remaining) >> 32) + low;
                /*
                 * force rand to be an odd multiple of UDP_HTABLE_SIZE
                 */
                rand = (rand | 1) * (udptable->mask + 1);
                last = first + udptable->mask + 1;
                do {
                        hslot = udp_hashslot(udptable, net, first);
                        bitmap_zero(bitmap, PORTS_PER_CHAIN);
                        spin_lock_bh(&hslot->lock);
                        udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
                                            saddr_comp, udptable->log);

                        snum = first;
                        /*
                         * Iterate on all possible values of snum for this hash.
                         * Using steps of an odd multiple of UDP_HTABLE_SIZE
                         * give us randomization and full range coverage.
                         */
                        do {
                                if (low <= snum && snum <= high &&
                                    !test_bit(snum >> udptable->log, bitmap) &&
                                    !inet_is_reserved_local_port(snum))
                                        goto found;
                                snum += rand;
                        } while (snum != first);
                        spin_unlock_bh(&hslot->lock);
                } while (++first != last);
                goto fail;
        } else {
                hslot = udp_hashslot(udptable, net, snum);
                spin_lock_bh(&hslot->lock);
                if (hslot->count > 10) {
                        int exist;
                        unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;

                        slot2          &= udptable->mask;
                        hash2_nulladdr &= udptable->mask;

                        hslot2 = udp_hashslot2(udptable, slot2);
                        if (hslot->count < hslot2->count)
                                goto scan_primary_hash;

                        exist = udp_lib_lport_inuse2(net, snum, hslot2,
                                                     sk, saddr_comp);
                        if (!exist && (hash2_nulladdr != slot2)) {
                                hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
                                exist = udp_lib_lport_inuse2(net, snum, hslot2,
                                                             sk, saddr_comp);
                        }
                        if (exist)
                                goto fail_unlock;
                        else
                                goto found;
                }
scan_primary_hash:
                if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
                                        saddr_comp, 0))
                        goto fail_unlock;
        }
found:
        inet_sk(sk)->inet_num = snum;
        udp_sk(sk)->udp_port_hash = snum;
        udp_sk(sk)->udp_portaddr_hash ^= snum;
        if (sk_unhashed(sk)) {
                sk_nulls_add_node_rcu(sk, &hslot->head);
                hslot->count++;
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

                hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
                spin_lock(&hslot2->lock);
                hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
                                         &hslot2->head);
                hslot2->count++;
                spin_unlock(&hslot2->lock);
        }
        error = 0;
fail_unlock:
        spin_unlock_bh(&hslot->lock);
fail:
        return error;
}
EXPORT_SYMBOL(udp_lib_get_port);
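/*
 * Usage sketch (user space, not part of this file): the sk_reuseport and
 * UID checks in udp_lib_lport_inuse() are what let two sockets owned by
 * the same user share one port.  A minimal illustration, assuming a
 * kernel with SO_REUSEPORT support:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *
 *	int udp_listener(unsigned short port)
 *	{
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		int one = 1;
 *		struct sockaddr_in sa = { 0 };
 *
 *		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *		sa.sin_family = AF_INET;
 *		sa.sin_port = htons(port);
 *		sa.sin_addr.s_addr = htonl(INADDR_ANY);
 *		return bind(fd, (struct sockaddr *)&sa, sizeof(sa)) ? -1 : fd;
 *	}
 *
 * Calling udp_listener() twice with the same port from one UID succeeds:
 * both sockets set sk_reuseport and sock_i_uid() matches, so the conflict
 * test above evaluates false and the bind is allowed.
 */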
static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
        struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);

        return (!ipv6_only_sock(sk2) &&
                (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr ||
                 inet1->inet_rcv_saddr == inet2->inet_rcv_saddr));
}
static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
                                       unsigned int port)
{
        return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
}
int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
        unsigned int hash2_nulladdr =
                udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
        unsigned int hash2_partial =
                udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);

        /* precompute partial secondary hash */
        udp_sk(sk)->udp_portaddr_hash = hash2_partial;
        return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
}
static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr,
                                unsigned short hnum,
                                __be16 sport, __be32 daddr, __be16 dport, int dif)
{
        int score = -1;

        if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
                        !ipv6_only_sock(sk)) {
                struct inet_sock *inet = inet_sk(sk);

                score = (sk->sk_family == PF_INET ? 2 : 1);
                if (inet->inet_rcv_saddr) {
                        if (inet->inet_rcv_saddr != daddr)
                                return -1;
                        score += 4;
                }
                if (inet->inet_daddr) {
                        if (inet->inet_daddr != saddr)
                                return -1;
                        score += 4;
                }
                if (inet->inet_dport) {
                        if (inet->inet_dport != sport)
                                return -1;
                        score += 4;
                }
                if (sk->sk_bound_dev_if) {
                        if (sk->sk_bound_dev_if != dif)
                                return -1;
                        score += 4;
                }
        }
        return score;
}
/*
 * In this second variant, we check (daddr, dport) matches (inet_rcv_saddr, inet_num)
 */
static inline int compute_score2(struct sock *sk, struct net *net,
                                 __be32 saddr, __be16 sport,
                                 __be32 daddr, unsigned int hnum, int dif)
{
        int score = -1;

        if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) {
                struct inet_sock *inet = inet_sk(sk);

                if (inet->inet_rcv_saddr != daddr)
                        return -1;
                if (inet->inet_num != hnum)
                        return -1;

                score = (sk->sk_family == PF_INET ? 2 : 1);
                if (inet->inet_daddr) {
                        if (inet->inet_daddr != saddr)
                                return -1;
                        score += 4;
                }
                if (inet->inet_dport) {
                        if (inet->inet_dport != sport)
                                return -1;
                        score += 4;
                }
                if (sk->sk_bound_dev_if) {
                        if (sk->sk_bound_dev_if != dif)
                                return -1;
                        score += 4;
                }
        }
        return score;
}
/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
                __be32 saddr, __be16 sport,
                __be32 daddr, unsigned int hnum, int dif,
                struct udp_hslot *hslot2, unsigned int slot2)
{
        struct sock *sk, *result;
        struct hlist_nulls_node *node;
        int score, badness, matches = 0, reuseport = 0;
        u32 hash = 0;

begin:
        result = NULL;
        badness = 0;
        udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
                score = compute_score2(sk, net, saddr, sport,
                                       daddr, hnum, dif);
                if (score > badness) {
                        result = sk;
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
                                hash = inet_ehashfn(net, daddr, hnum,
                                                    saddr, htons(sport));
                                matches = 1;
                        }
                } else if (score == badness && reuseport) {
                        matches++;
                        if (((u64)hash * matches) >> 32 == 0)
                                result = sk;
                        hash = next_pseudo_random32(hash);
                }
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(node) != slot2)
                goto begin;
        if (result) {
                if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
                        result = NULL;
                else if (unlikely(compute_score2(result, net, saddr, sport,
                                  daddr, hnum, dif) < badness)) {
                        sock_put(result);
                        goto begin;
                }
        }
        return result;
}
/* UDP is nearly always wildcards out the wazoo, it makes no sense to try
 * harder than this. -DaveM
 */
struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
                __be16 sport, __be32 daddr, __be16 dport,
                int dif, struct udp_table *udptable)
{
        struct sock *sk, *result;
        struct hlist_nulls_node *node;
        unsigned short hnum = ntohs(dport);
        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
        int score, badness, matches = 0, reuseport = 0;
        u32 hash = 0;

        rcu_read_lock();
        if (hslot->count > 10) {
                hash2 = udp4_portaddr_hash(net, daddr, hnum);
                slot2 = hash2 & udptable->mask;
                hslot2 = &udptable->hash2[slot2];
                if (hslot->count < hslot2->count)
                        goto begin;

                result = udp4_lib_lookup2(net, saddr, sport,
                                          daddr, hnum, dif,
                                          hslot2, slot2);
                if (!result) {
                        hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
                        slot2 = hash2 & udptable->mask;
                        hslot2 = &udptable->hash2[slot2];
                        if (hslot->count < hslot2->count)
                                goto begin;

                        result = udp4_lib_lookup2(net, saddr, sport,
                                                  htonl(INADDR_ANY), hnum, dif,
                                                  hslot2, slot2);
                }
                rcu_read_unlock();
                return result;
        }
begin:
        result = NULL;
        badness = 0;
        sk_nulls_for_each_rcu(sk, node, &hslot->head) {
                score = compute_score(sk, net, saddr, hnum, sport,
                                      daddr, dport, dif);
                if (score > badness) {
                        result = sk;
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
                                hash = inet_ehashfn(net, daddr, hnum,
                                                    saddr, htons(sport));
                                matches = 1;
                        }
                } else if (score == badness && reuseport) {
                        matches++;
                        if (((u64)hash * matches) >> 32 == 0)
                                result = sk;
                        hash = next_pseudo_random32(hash);
                }
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(node) != slot)
                goto begin;

        if (result) {
                if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
                        result = NULL;
                else if (unlikely(compute_score(result, net, saddr, hnum, sport,
                                  daddr, dport, dif) < badness)) {
                        sock_put(result);
                        goto begin;
                }
        }
        rcu_read_unlock();
        return result;
}
EXPORT_SYMBOL_GPL(__udp4_lib_lookup);
static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
                                                 __be16 sport, __be16 dport,
                                                 struct udp_table *udptable)
{
        struct sock *sk;
        const struct iphdr *iph = ip_hdr(skb);

        if (unlikely(sk = skb_steal_sock(skb)))
                return sk;
        else
                return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
                                         iph->daddr, dport, inet_iif(skb),
                                         udptable);
}

struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
                             __be32 daddr, __be16 dport, int dif)
{
        return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp4_lib_lookup);
static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
                                             __be16 loc_port, __be32 loc_addr,
                                             __be16 rmt_port, __be32 rmt_addr,
                                             int dif)
{
        struct hlist_nulls_node *node;
        struct sock *s = sk;
        unsigned short hnum = ntohs(loc_port);

        sk_nulls_for_each_from(s, node) {
                struct inet_sock *inet = inet_sk(s);

                if (!net_eq(sock_net(s), net) ||
                    udp_sk(s)->udp_port_hash != hnum ||
                    (inet->inet_daddr && inet->inet_daddr != rmt_addr) ||
                    (inet->inet_dport != rmt_port && inet->inet_dport) ||
                    (inet->inet_rcv_saddr &&
                     inet->inet_rcv_saddr != loc_addr) ||
                    ipv6_only_sock(s) ||
                    (s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
                        continue;
                if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif))
                        continue;
                goto found;
        }
        s = NULL;
found:
        return s;
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.
 * Header points to the ip header of the error packet. We move
 * on past this. Then (as it used to claim before adjustment)
 * header points to the first 8 bytes of the udp header.  We need
 * to find the appropriate port.
 */
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
        struct inet_sock *inet;
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct sock *sk;
        int harderr;
        int err;
        struct net *net = dev_net(skb->dev);

        sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
                        iph->saddr, uh->source, skb->dev->ifindex, udptable);
        if (sk == NULL) {
                ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
                return; /* No socket for error */
        }

        err = 0;
        harderr = 0;
        inet = inet_sk(sk);

        switch (type) {
        default:
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        case ICMP_SOURCE_QUENCH:
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                harderr = 1;
                break;
        case ICMP_DEST_UNREACH:
                if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
                        ipv4_sk_update_pmtu(skb, sk, info);
                        if (inet->pmtudisc != IP_PMTUDISC_DONT) {
                                err = EMSGSIZE;
                                harderr = 1;
                                break;
                        }
                        goto out;
                }
                err = EHOSTUNREACH;
                if (code <= NR_ICMP_UNREACH) {
                        harderr = icmp_err_convert[code].fatal;
                        err = icmp_err_convert[code].errno;
                }
                break;
        case ICMP_REDIRECT:
                ipv4_sk_redirect(skb, sk);
                goto out;
        }

        /*
         *      RFC1122: OK.  Passes ICMP errors back to application, as per
         *      4.1.3.3.
         */
        if (!inet->recverr) {
                if (!harderr || sk->sk_state != TCP_ESTABLISHED)
                        goto out;
        } else
                ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));

        sk->sk_err = err;
        sk->sk_error_report(sk);
out:
        sock_put(sk);
}
void udp_err(struct sk_buff *skb, u32 info)
{
        __udp4_lib_err(skb, info, &udp_table);
}
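/*
 * Usage sketch (user space, not part of this file): the ip_icmp_error()
 * call in __udp4_lib_err() queues the ICMP error for applications that
 * enabled IP_RECVERR; they read it back from the socket error queue.
 * A minimal illustration:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/errqueue.h>
 *
 *	void drain_errqueue(int fd)
 *	{
 *		char cbuf[512];
 *		struct msghdr msg = { 0 };
 *		struct cmsghdr *cm;
 *
 *		msg.msg_control = cbuf;
 *		msg.msg_controllen = sizeof(cbuf);
 *		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *			return;
 *		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
 *			if (cm->cmsg_level == IPPROTO_IP &&
 *			    cm->cmsg_type == IP_RECVERR) {
 *				struct sock_extended_err *se =
 *					(struct sock_extended_err *)CMSG_DATA(cm);
 *				;	and se->ee_type / se->ee_code carry
 *					the ICMP type and code
 *			}
 *	}
 *
 * IP_RECVERR must have been enabled first with setsockopt(fd, IPPROTO_IP,
 * IP_RECVERR, ...); without it, the !inet->recverr branch above reports
 * only hard errors, and only on connected sockets.
 */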
/*
 * Throw away all pending data and cancel the corking. Socket is locked.
 */
void udp_flush_pending_frames(struct sock *sk)
{
        struct udp_sock *up = udp_sk(sk);

        if (up->pending) {
                up->len = 0;
                up->pending = 0;
                ip_flush_pending_frames(sk);
        }
}
EXPORT_SYMBOL(udp_flush_pending_frames);
/**
 *	udp4_hwcsum  -  handle outgoing HW checksumming
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 *	@src:	source IP address
 *	@dst:	destination IP address
 */
static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
        struct udphdr *uh = udp_hdr(skb);
        struct sk_buff *frags = skb_shinfo(skb)->frag_list;
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
        int hlen = len;
        __wsum csum = 0;

        if (!frags) {
                /*
                 * Only one fragment on the socket.
                 */
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
                uh->check = ~csum_tcpudp_magic(src, dst, len,
                                               IPPROTO_UDP, 0);
        } else {
                /*
                 * HW-checksum won't work as there are two or more
                 * fragments on the socket so that all csums of sk_buffs
                 * should be together
                 */
                do {
                        csum = csum_add(csum, frags->csum);
                        hlen -= frags->len;
                } while ((frags = frags->next));

                csum = skb_checksum(skb, offset, hlen, csum);
                skb->ip_summed = CHECKSUM_NONE;

                uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum);
                if (uh->check == 0)
                        uh->check = CSUM_MANGLED_0;
        }
}
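/*
 * For reference: the value csum_tcpudp_magic() produces above is the
 * RFC 768 one's-complement sum over the pseudo-header (saddr, daddr,
 * a zero byte, IPPROTO_UDP, UDP length) plus the payload sum.  An
 * illustrative plain-C sketch of the arithmetic (the kernel uses
 * optimized per-arch helpers instead):
 *
 *	static unsigned int csum_fold32(unsigned int sum)
 *	{
 *		while (sum >> 16)
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return sum;
 *	}
 *
 *	unsigned short udp_pseudo_csum(unsigned int src, unsigned int dst,
 *				       unsigned short len,
 *				       unsigned int payload_sum)
 *	{
 *		unsigned int sum = payload_sum;
 *
 *		sum += (src >> 16) + (src & 0xffff);
 *		sum += (dst >> 16) + (dst & 0xffff);
 *		sum += 17 + len;
 *		return (unsigned short)~csum_fold32(sum);
 *	}
 *
 * (17 is IPPROTO_UDP; the pseudo-header pad byte is zero.)  A transmitted
 * checksum of 0 means "no checksum" for UDP over IPv4, which is why a
 * computed 0 is written out as CSUM_MANGLED_0 (0xffff) above.
 */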
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct udphdr *uh;
        int err = 0;
        int is_udplite = IS_UDPLITE(sk);
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
        __wsum csum = 0;

        /*
         * Create a UDP header
         */
        uh = udp_hdr(skb);
        uh->source = inet->inet_sport;
        uh->dest = fl4->fl4_dport;
        uh->len = htons(len);
        uh->check = 0;

        if (is_udplite)                                  /*     UDP-Lite      */
                csum = udplite_csum(skb);

        else if (sk->sk_no_check == UDP_CSUM_NOXMIT) {   /* UDP csum disabled */

                skb->ip_summed = CHECKSUM_NONE;
                goto send;

        } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */

                udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
                goto send;

        } else
                csum = udp_csum(skb);

        /* add protocol-dependent pseudo-header */
        uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
                                      sk->sk_protocol, csum);
        if (uh->check == 0)
                uh->check = CSUM_MANGLED_0;

send:
        err = ip_send_skb(sock_net(sk), skb);
        if (err) {
                if (err == -ENOBUFS && !inet->recverr) {
                        UDP_INC_STATS_USER(sock_net(sk),
                                           UDP_MIB_SNDBUFERRORS, is_udplite);
                        err = 0;
                }
        } else
                UDP_INC_STATS_USER(sock_net(sk),
                                   UDP_MIB_OUTDATAGRAMS, is_udplite);
        return err;
}
/*
 * Push out all pending data as one UDP datagram. Socket is locked.
 */
static int udp_push_pending_frames(struct sock *sk)
{
        struct udp_sock  *up = udp_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
        struct sk_buff *skb;
        int err = 0;

        skb = ip_finish_skb(sk, fl4);
        if (!skb)
                goto out;

        err = udp_send_skb(skb, fl4);

out:
        up->len = 0;
        up->pending = 0;
        return err;
}
int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct udp_sock *up = udp_sk(sk);
        struct flowi4 fl4_stack;
        struct flowi4 *fl4;
        int ulen = len;
        struct ipcm_cookie ipc;
        struct rtable *rt = NULL;
        int free = 0;
        int connected = 0;
        __be32 daddr, faddr, saddr;
        __be16 dport;
        u8  tos;
        int err, is_udplite = IS_UDPLITE(sk);
        int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
        int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
        struct sk_buff *skb;
        struct ip_options_data opt_copy;

        if (len > 0xFFFF)
                return -EMSGSIZE;

        /*
         *      Check the flags.
         */

        if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
                return -EOPNOTSUPP;

        ipc.opt = NULL;
        ipc.tx_flags = 0;

        getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;

        fl4 = &inet->cork.fl.u.ip4;
        if (up->pending) {
                /*
                 * There are pending frames.
                 * The socket lock must be held while it's corked.
                 */
                lock_sock(sk);
                if (likely(up->pending)) {
                        if (unlikely(up->pending != AF_INET)) {
                                release_sock(sk);
                                return -EINVAL;
                        }
                        goto do_append_data;
                }
                release_sock(sk);
        }
        ulen += sizeof(struct udphdr);

        /*
         *      Get and verify the address.
         */
        if (msg->msg_name) {
                struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
                if (msg->msg_namelen < sizeof(*usin))
                        return -EINVAL;
                if (usin->sin_family != AF_INET) {
                        if (usin->sin_family != AF_UNSPEC)
                                return -EAFNOSUPPORT;
                }

                daddr = usin->sin_addr.s_addr;
                dport = usin->sin_port;
                if (dport == 0)
                        return -EINVAL;
        } else {
                if (sk->sk_state != TCP_ESTABLISHED)
                        return -EDESTADDRREQ;
                daddr = inet->inet_daddr;
                dport = inet->inet_dport;
                /* Open fast path for connected socket.
                   Route will not be used, if at least one option is set.
                 */
                connected = 1;
        }
        ipc.addr = inet->inet_saddr;

        ipc.oif = sk->sk_bound_dev_if;

        sock_tx_timestamp(sk, &ipc.tx_flags);

        if (msg->msg_controllen) {
                err = ip_cmsg_send(sock_net(sk), msg, &ipc);
                if (err)
                        return err;
                if (ipc.opt)
                        free = 1;
                connected = 0;
        }
        if (!ipc.opt) {
                struct ip_options_rcu *inet_opt;

                rcu_read_lock();
                inet_opt = rcu_dereference(inet->inet_opt);
                if (inet_opt) {
                        memcpy(&opt_copy, inet_opt,
                               sizeof(*inet_opt) + inet_opt->opt.optlen);
                        ipc.opt = &opt_copy.opt;
                }
                rcu_read_unlock();
        }

        saddr = ipc.addr;
        ipc.addr = faddr = daddr;

        if (ipc.opt && ipc.opt->opt.srr) {
                if (!daddr)
                        return -EINVAL;
                faddr = ipc.opt->opt.faddr;
                connected = 0;
        }
        tos = RT_TOS(inet->tos);
        if (sock_flag(sk, SOCK_LOCALROUTE) ||
            (msg->msg_flags & MSG_DONTROUTE) ||
            (ipc.opt && ipc.opt->opt.is_strictroute)) {
                tos |= RTO_ONLINK;
                connected = 0;
        }

        if (ipv4_is_multicast(daddr)) {
                if (!ipc.oif)
                        ipc.oif = inet->mc_index;
                if (!saddr)
                        saddr = inet->mc_addr;
                connected = 0;
        } else if (!ipc.oif)
                ipc.oif = inet->uc_index;

        if (connected)
                rt = (struct rtable *)sk_dst_check(sk, 0);

        if (rt == NULL) {
                struct net *net = sock_net(sk);

                fl4 = &fl4_stack;
                flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
                                   RT_SCOPE_UNIVERSE, sk->sk_protocol,
                                   inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
                                   faddr, saddr, dport, inet->inet_sport);

                security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
                rt = ip_route_output_flow(net, fl4, sk);
                if (IS_ERR(rt)) {
                        err = PTR_ERR(rt);
                        rt = NULL;
                        if (err == -ENETUNREACH)
                                IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
                        goto out;
                }

                err = -EACCES;
                if ((rt->rt_flags & RTCF_BROADCAST) &&
                    !sock_flag(sk, SOCK_BROADCAST))
                        goto out;
                if (connected)
                        sk_dst_set(sk, dst_clone(&rt->dst));
        }

        if (msg->msg_flags&MSG_CONFIRM)
                goto do_confirm;
back_from_confirm:

        saddr = fl4->saddr;
        if (!ipc.addr)
                daddr = ipc.addr = fl4->daddr;

        /* Lockless fast path for the non-corking case. */
        if (!corkreq) {
                skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen,
                                  sizeof(struct udphdr), &ipc, &rt,
                                  msg->msg_flags);
                err = PTR_ERR(skb);
                if (!IS_ERR_OR_NULL(skb))
                        err = udp_send_skb(skb, fl4);
                goto out;
        }

        lock_sock(sk);
        if (unlikely(up->pending)) {
                /* The socket is already corked while preparing it. */
                /* ... which is an evident application bug. --ANK */
                release_sock(sk);

                LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n"));
                err = -EINVAL;
                goto out;
        }
        /*
         *      Now cork the socket to pend data.
         */
        fl4 = &inet->cork.fl.u.ip4;
        fl4->daddr = daddr;
        fl4->saddr = saddr;
        fl4->fl4_dport = dport;
        fl4->fl4_sport = inet->inet_sport;
        up->pending = AF_INET;

do_append_data:
        up->len += ulen;
        err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen,
                             sizeof(struct udphdr), &ipc, &rt,
                             corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
        if (err)
                udp_flush_pending_frames(sk);
        else if (!corkreq)
                err = udp_push_pending_frames(sk);
        else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
                up->pending = 0;
        release_sock(sk);

out:
        ip_rt_put(rt);
        if (free)
                kfree(ipc.opt);
        if (!err)
                return len;
        /*
         * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
         * ENOBUFS might not be good (it's not tunable per se), but otherwise
         * we don't have a good statistic (IpOutDiscards but it can be too many
         * things).  We could add another new stat but at least for now that
         * seems like overkill.
         */
        if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                UDP_INC_STATS_USER(sock_net(sk),
                                   UDP_MIB_SNDBUFERRORS, is_udplite);
        }
        return err;

do_confirm:
        dst_confirm(&rt->dst);
        if (!(msg->msg_flags&MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
        goto out;
}
EXPORT_SYMBOL(udp_sendmsg);
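/*
 * Usage sketch (user space, not part of this file): the corking path in
 * udp_sendmsg() is driven by UDP_CORK or MSG_MORE.  Both of the following
 * coalesce two writes into a single datagram:
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
 *	send(fd, "hello ", 6, 0);
 *	send(fd, "world", 5, 0);
 *	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
 *
 * or, equivalently, send(fd, "hello ", 6, MSG_MORE) followed by a final
 * send() without MSG_MORE.  Clearing the cork, or the last send without
 * MSG_MORE, ends in udp_push_pending_frames() above.
 */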
int udp_sendpage(struct sock *sk, struct page *page, int offset,
                 size_t size, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct udp_sock *up = udp_sk(sk);
        int ret;

        if (!up->pending) {
                struct msghdr msg = { .msg_flags = flags|MSG_MORE };

                /* Call udp_sendmsg to specify destination address which
                 * sendpage interface can't pass.
                 * This will succeed only when the socket is connected.
                 */
                ret = udp_sendmsg(NULL, sk, &msg, 0);
                if (ret < 0)
                        return ret;
        }

        lock_sock(sk);

        if (unlikely(!up->pending)) {
                release_sock(sk);

                LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n"));
                return -EINVAL;
        }

        ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
                             page, offset, size, flags);
        if (ret == -EOPNOTSUPP) {
                release_sock(sk);
                return sock_no_sendpage(sk->sk_socket, page, offset,
                                        size, flags);
        }
        if (ret < 0) {
                udp_flush_pending_frames(sk);
                goto out;
        }

        up->len += size;
        if (!(up->corkflag || (flags&MSG_MORE)))
                ret = udp_push_pending_frames(sk);
        if (!ret)
                ret = size;
out:
        release_sock(sk);
        return ret;
}
/**
 *	first_packet_length	- return length of first packet in receive queue
 *	@sk: socket
 *
 *	Drops all bad checksum frames, until a valid one is found.
 *	Returns the length of found skb, or 0 if none is found.
 */
static unsigned int first_packet_length(struct sock *sk)
{
        struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue;
        struct sk_buff *skb;
        unsigned int res;

        __skb_queue_head_init(&list_kill);

        spin_lock_bh(&rcvq->lock);
        while ((skb = skb_peek(rcvq)) != NULL &&
                udp_lib_checksum_complete(skb)) {
                UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
                                 IS_UDPLITE(sk));
                UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
                                 IS_UDPLITE(sk));
                atomic_inc(&sk->sk_drops);
                __skb_unlink(skb, rcvq);
                __skb_queue_tail(&list_kill, skb);
        }
        res = skb ? skb->len : 0;
        spin_unlock_bh(&rcvq->lock);

        if (!skb_queue_empty(&list_kill)) {
                bool slow = lock_sock_fast(sk);

                __skb_queue_purge(&list_kill);
                sk_mem_reclaim_partial(sk);
                unlock_sock_fast(sk, slow);
        }
        return res;
}
/*
 *	IOCTL requests applicable to the UDP protocol
 */
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        switch (cmd) {
        case SIOCOUTQ:
        {
                int amount = sk_wmem_alloc_get(sk);

                return put_user(amount, (int __user *)arg);
        }

        case SIOCINQ:
        {
                unsigned int amount = first_packet_length(sk);

                if (amount)
                        /*
                         * We will only return the amount
                         * of this packet since that is all
                         * that will be read.
                         */
                        amount -= sizeof(struct udphdr);

                return put_user(amount, (int __user *)arg);
        }

        default:
                return -ENOIOCTLCMD;
        }

        return 0;
}
EXPORT_SYMBOL(udp_ioctl);
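/*
 * Usage sketch (user space, not part of this file): SIOCINQ as handled
 * above returns the payload length of the next datagram only, so a
 * caller can size its buffer before reading:
 *
 *	int n = 0;
 *
 *	if (ioctl(fd, SIOCINQ, &n) == 0 && n > 0)
 *		read a datagram of n payload bytes
 *
 * On Linux, FIONREAD is the same request for UDP sockets.  Note the
 * value counts only the first queued datagram, not the whole receive
 * queue.
 */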
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */
int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t len, int noblock, int flags, int *addr_len)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
        struct sk_buff *skb;
        unsigned int ulen, copied;
        int peeked, off = 0;
        int err;
        int is_udplite = IS_UDPLITE(sk);
        bool slow;

        /*
         *      Check any passed addresses
         */
        if (addr_len)
                *addr_len = sizeof(*sin);

        if (flags & MSG_ERRQUEUE)
                return ip_recv_error(sk, msg, len);

try_again:
        skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
                                  &peeked, &off, &err);
        if (!skb)
                goto out;

        ulen = skb->len - sizeof(struct udphdr);
        copied = len;
        if (copied > ulen)
                copied = ulen;
        else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;

        /*
         * If checksum is needed at all, try to do it while copying the
         * data.  If the data is truncated, or if we only want a partial
         * coverage checksum (UDP-Lite), do it before the copy.
         */

        if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
        }

        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
                                              msg->msg_iov, copied);
        else {
                err = skb_copy_and_csum_datagram_iovec(skb,
                                                       sizeof(struct udphdr),
                                                       msg->msg_iov);

                if (err == -EINVAL)
                        goto csum_copy_err;
        }

        if (unlikely(err)) {
                trace_kfree_skb(skb, udp_recvmsg);
                if (!peeked) {
                        atomic_inc(&sk->sk_drops);
                        UDP_INC_STATS_USER(sock_net(sk),
                                           UDP_MIB_INERRORS, is_udplite);
                }
                goto out_free;
        }

        if (!peeked)
                UDP_INC_STATS_USER(sock_net(sk),
                                   UDP_MIB_INDATAGRAMS, is_udplite);

        sock_recv_ts_and_drops(msg, sk, skb);

        /* Copy the address. */
        if (sin) {
                sin->sin_family = AF_INET;
                sin->sin_port = udp_hdr(skb)->source;
                sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
                memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
        }
        if (inet->cmsg_flags)
                ip_cmsg_recv(msg, skb);

        err = copied;
        if (flags & MSG_TRUNC)
                err = ulen;

out_free:
        skb_free_datagram_locked(sk, skb);
out:
        return err;

csum_copy_err:
        slow = lock_sock_fast(sk);
        if (!skb_kill_datagram(sk, skb, flags)) {
                UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
                UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        }
        unlock_sock_fast(sk, slow);

        if (noblock)
                return -EAGAIN;

        /* starting over for a new packet */
        msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
}
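/*
 * Usage sketch (user space, not part of this file): the MSG_TRUNC
 * handling above ("err = ulen") lets a caller learn the full datagram
 * size even with a short buffer:
 *
 *	char tiny[1];
 *	ssize_t full = recv(fd, tiny, sizeof(tiny), MSG_TRUNC | MSG_PEEK);
 *
 * With MSG_TRUNC the return value is the real length; MSG_PEEK keeps the
 * datagram queued so it can then be re-read into a buffer of exactly
 * that size.
 */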
int udp_disconnect(struct sock *sk, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        /*
         *      1003.1g - break association.
         */

        sk->sk_state = TCP_CLOSE;
        inet->inet_daddr = 0;
        inet->inet_dport = 0;
        sock_rps_reset_rxhash(sk);
        sk->sk_bound_dev_if = 0;
        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
                sk->sk_prot->unhash(sk);
                inet->inet_sport = 0;
        }
        sk_dst_reset(sk);
        return 0;
}
EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
{
        if (sk_hashed(sk)) {
                struct udp_table *udptable = sk->sk_prot->h.udp_table;
                struct udp_hslot *hslot, *hslot2;

                hslot  = udp_hashslot(udptable, sock_net(sk),
                                      udp_sk(sk)->udp_port_hash);
                hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);

                spin_lock_bh(&hslot->lock);
                if (sk_nulls_del_node_init_rcu(sk)) {
                        hslot->count--;
                        inet_sk(sk)->inet_num = 0;
                        sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

                        spin_lock(&hslot2->lock);
                        hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
                        hslot2->count--;
                        spin_unlock(&hslot2->lock);
                }
                spin_unlock_bh(&hslot->lock);
        }
}
EXPORT_SYMBOL(udp_lib_unhash);
/*
 * inet_rcv_saddr was changed, we must rehash secondary hash
 */
void udp_lib_rehash(struct sock *sk, u16 newhash)
{
        if (sk_hashed(sk)) {
                struct udp_table *udptable = sk->sk_prot->h.udp_table;
                struct udp_hslot *hslot, *hslot2, *nhslot2;

                hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
                nhslot2 = udp_hashslot2(udptable, newhash);
                udp_sk(sk)->udp_portaddr_hash = newhash;
                if (hslot2 != nhslot2) {
                        hslot = udp_hashslot(udptable, sock_net(sk),
                                             udp_sk(sk)->udp_port_hash);
                        /* we must lock primary chain too */
                        spin_lock_bh(&hslot->lock);

                        spin_lock(&hslot2->lock);
                        hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
                        hslot2->count--;
                        spin_unlock(&hslot2->lock);

                        spin_lock(&nhslot2->lock);
                        hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
                                                 &nhslot2->head);
                        nhslot2->count++;
                        spin_unlock(&nhslot2->lock);

                        spin_unlock_bh(&hslot->lock);
                }
        }
}
EXPORT_SYMBOL(udp_lib_rehash);
static void udp_v4_rehash(struct sock *sk)
{
        u16 new_hash = udp4_portaddr_hash(sock_net(sk),
                                          inet_sk(sk)->inet_rcv_saddr,
                                          inet_sk(sk)->inet_num);
        udp_lib_rehash(sk, new_hash);
}
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        int rc;

        if (inet_sk(sk)->inet_daddr)
                sock_rps_save_rxhash(sk, skb);

        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0) {
                int is_udplite = IS_UDPLITE(sk);

                /* Note that an ENOMEM error is charged twice */
                if (rc == -ENOMEM)
                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
                                         is_udplite);
                UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
                kfree_skb(skb);
                trace_udp_fail_queue_rcv_skb(rc, sk);
                return -1;
        }

        return 0;
}
static struct static_key udp_encap_needed __read_mostly;
void udp_encap_enable(void)
{
        if (!static_key_enabled(&udp_encap_needed))
                static_key_slow_inc(&udp_encap_needed);
}
EXPORT_SYMBOL(udp_encap_enable);
/*
 * returns:
 *  -1: error
 *   0: success
 *  >0: "udp encap" protocol resubmission
 *
 * Note that in the success and error cases, the skb is assumed to
 * have either been requeued or freed.
 */
int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        struct udp_sock *up = udp_sk(sk);
        int rc;
        int is_udplite = IS_UDPLITE(sk);

        /*
         *      Charge it to the socket, dropping if the queue is full.
         */
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto drop;
        nf_reset(skb);

        if (static_key_false(&udp_encap_needed) && up->encap_type) {
                int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

                /*
                 * This is an encapsulation socket so pass the skb to
                 * the socket's udp_encap_rcv() hook. Otherwise, just
                 * fall through and pass this up the UDP socket.
                 * up->encap_rcv() returns the following value:
                 * =0 if skb was successfully passed to the encap
                 *    handler or was discarded by it.
                 * >0 if skb should be passed on to UDP.
                 * <0 if skb should be resubmitted as proto -N
                 */

                /* if we're overly short, let UDP handle it */
                encap_rcv = ACCESS_ONCE(up->encap_rcv);
                if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
                        int ret;

                        ret = encap_rcv(sk, skb);
                        if (ret <= 0) {
                                UDP_INC_STATS_BH(sock_net(sk),
                                                 UDP_MIB_INDATAGRAMS,
                                                 is_udplite);
                                return -ret;
                        }
                }

                /* FALLTHROUGH -- it's a UDP Packet */
        }

        /*
         *      UDP-Lite specific tests, ignored on UDP sockets
         */
        if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

                /*
                 * MIB statistics other than incrementing the error count are
                 * disabled for the following two types of errors: these depend
                 * on the application settings, not on the functioning of the
                 * protocol stack as such.
                 *
                 * RFC 3828 here recommends (sec 3.3): "There should also be a
                 * way ... to ... at least let the receiving application block
                 * delivery of packets with coverage values less than a value
                 * provided by the application."
                 */
                if (up->pcrlen == 0) {          /* full coverage was set  */
                        LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n",
                                       UDP_SKB_CB(skb)->cscov, skb->len);
                        goto drop;
                }
                /* The next case involves violating the min. coverage requested
                 * by the receiver. This is subtle: if receiver wants x and x is
                 * greater than the buffersize/MTU then receiver will complain
                 * that it wants x while sender emits packets of smaller size y.
                 * Therefore the above ...()->partial_cov statement is essential.
                 */
                if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
                        LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n",
                                       UDP_SKB_CB(skb)->cscov, up->pcrlen);
                        goto drop;
                }
        }

        if (rcu_access_pointer(sk->sk_filter) &&
            udp_lib_checksum_complete(skb))
                goto csum_error;

        if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
                goto drop;

        rc = 0;

        ipv4_pktinfo_prepare(skb);
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk))
                rc = __udp_queue_rcv_skb(sk, skb);
        else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
                bh_unlock_sock(sk);
                goto drop;
        }
        bh_unlock_sock(sk);

        return rc;

csum_error:
        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        atomic_inc(&sk->sk_drops);
        kfree_skb(skb);
        return -1;
}
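/*
 * Usage sketch (user space, not part of this file): the encap_rcv hook
 * above is armed when a daemon marks its socket as an encapsulation
 * socket, e.g. an IKE daemon doing UDP-encapsulated ESP (RFC 3948):
 *
 *	int type = UDP_ENCAP_ESPINUDP;
 *
 *	setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &type, sizeof(type));
 *
 * From then on matching packets are handed to xfrm4_udp_encap_rcv()
 * instead of being queued as normal data; see udp_lib_setsockopt() below
 * for where the hook is installed.
 */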
static void flush_stack(struct sock **stack, unsigned int count,
                        struct sk_buff *skb, unsigned int final)
{
        unsigned int i;
        struct sk_buff *skb1 = NULL;
        struct sock *sk;

        for (i = 0; i < count; i++) {
                sk = stack[i];
                if (likely(skb1 == NULL))
                        skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);

                if (!skb1) {
                        atomic_inc(&sk->sk_drops);
                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
                                         IS_UDPLITE(sk));
                        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
                                         IS_UDPLITE(sk));
                }

                if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
                        skb1 = NULL;
        }
        if (unlikely(skb1))
                kfree_skb(skb1);
}
/*
 *	Multicasts and broadcasts go to each listener.
 *
 *	Note: called only from the BH handler context.
 */
static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
                                    struct udphdr *uh,
                                    __be32 saddr, __be32 daddr,
                                    struct udp_table *udptable)
{
        struct sock *sk, *stack[256 / sizeof(struct sock *)];
        struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
        int dif;
        unsigned int i, count = 0;

        spin_lock(&hslot->lock);
        sk = sk_nulls_head(&hslot->head);
        dif = skb->dev->ifindex;
        sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
        while (sk) {
                stack[count++] = sk;
                sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
                                       daddr, uh->source, saddr, dif);
                if (unlikely(count == ARRAY_SIZE(stack))) {
                        if (!sk)
                                break;
                        flush_stack(stack, count, skb, ~0);
                        count = 0;
                }
        }
        /*
         * before releasing chain lock, we must take a reference on sockets
         */
        for (i = 0; i < count; i++)
                sock_hold(stack[i]);

        spin_unlock(&hslot->lock);

        /*
         * do the slow work with no lock held
         */
        if (count) {
                flush_stack(stack, count, skb, count - 1);

                for (i = 0; i < count; i++)
                        sock_put(stack[i]);
        } else {
                kfree_skb(skb);
        }
        return 0;
}
/* Initialize UDP checksum. If exited with zero value (success),
 * CHECKSUM_UNNECESSARY means, that no more checks are required.
 * Otherwise, csum completion requires checksumming packet body,
 * including udp header and folding it to skb->csum.
 */
static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
                                 int proto)
{
        const struct iphdr *iph;
        int err;

        UDP_SKB_CB(skb)->partial_cov = 0;
        UDP_SKB_CB(skb)->cscov = skb->len;

        if (proto == IPPROTO_UDPLITE) {
                err = udplite_checksum_init(skb, uh);
                if (err)
                        return err;
        }

        iph = ip_hdr(skb);
        if (uh->check == 0) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
                if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
                                       proto, skb->csum))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
        if (!skb_csum_unnecessary(skb))
                skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                               skb->len, proto, 0);
        /* Probably, we should checksum udp header (it should be in cache
         * in any case) and data in tiny packets (< rx copybreak).
         */

        return 0;
}
/*
 *	All we need to do is get the socket, and then do a checksum.
 */
int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                   int proto)
{
        struct sock *sk;
        struct udphdr *uh;
        unsigned short ulen;
        struct rtable *rt = skb_rtable(skb);
        __be32 saddr, daddr;
        struct net *net = dev_net(skb->dev);

        /*
         *  Validate the packet.
         */
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                goto drop;              /* No space for header. */

        uh   = udp_hdr(skb);
        ulen = ntohs(uh->len);
        saddr = ip_hdr(skb)->saddr;
        daddr = ip_hdr(skb)->daddr;

        if (ulen > skb->len)
                goto short_packet;

        if (proto == IPPROTO_UDP) {
                /* UDP validates ulen. */
                if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen))
                        goto short_packet;
                uh = udp_hdr(skb);
        }

        if (udp4_csum_init(skb, uh, proto))
                goto csum_error;

        if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
                return __udp4_lib_mcast_deliver(net, skb, uh,
                                saddr, daddr, udptable);

        sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);

        if (sk != NULL) {
                int ret = udp_queue_rcv_skb(sk, skb);
                sock_put(sk);

                /* a return value > 0 means to resubmit the input, but
                 * it wants the return to be -protocol, or 0
                 */
                if (ret > 0)
                        return -ret;
                return 0;
        }

        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto drop;
        nf_reset(skb);

        /* No socket. Drop packet silently, if checksum is wrong */
        if (udp_lib_checksum_complete(skb))
                goto csum_error;

        UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

        /*
         * Hmm.  We got an UDP packet to a port to which we
         * don't wanna listen.  Ignore it.
         */
        kfree_skb(skb);
        return 0;

short_packet:
        LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n",
                       proto == IPPROTO_UDPLITE ? "Lite" : "",
                       &saddr, ntohs(uh->source),
                       ulen, skb->len,
                       &daddr, ntohs(uh->dest));
        goto drop;

csum_error:
        /*
         * RFC1122: OK.  Discards the bad packet silently (as far as
         * the network is concerned, anyway) as per 4.1.3.4 (MUST).
         */
        LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n",
                       proto == IPPROTO_UDPLITE ? "Lite" : "",
                       &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
                       ulen);
        UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
        UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
        kfree_skb(skb);
        return 0;
}
int udp_rcv(struct sk_buff *skb)
{
        return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
void udp_destroy_sock(struct sock *sk)
{
        struct udp_sock *up = udp_sk(sk);
        bool slow = lock_sock_fast(sk);
        udp_flush_pending_frames(sk);
        unlock_sock_fast(sk, slow);
        if (static_key_false(&udp_encap_needed) && up->encap_type) {
                void (*encap_destroy)(struct sock *sk);
                encap_destroy = ACCESS_ONCE(up->encap_destroy);
                if (encap_destroy)
                        encap_destroy(sk);
        }
}
/*
 *	Socket option code for UDP
 */
int udp_lib_setsockopt(struct sock *sk, int level, int optname,
                       char __user *optval, unsigned int optlen,
                       int (*push_pending_frames)(struct sock *))
{
        struct udp_sock *up = udp_sk(sk);
        int val;
        int err = 0;
        int is_udplite = IS_UDPLITE(sk);

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        switch (optname) {
        case UDP_CORK:
                if (val != 0) {
                        up->corkflag = 1;
                } else {
                        up->corkflag = 0;
                        lock_sock(sk);
                        (*push_pending_frames)(sk);
                        release_sock(sk);
                }
                break;

        case UDP_ENCAP:
                switch (val) {
                case 0:
                case UDP_ENCAP_ESPINUDP:
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        up->encap_rcv = xfrm4_udp_encap_rcv;
                        /* FALLTHROUGH */
                case UDP_ENCAP_L2TPINUDP:
                        up->encap_type = val;
                        udp_encap_enable();
                        break;
                default:
                        err = -ENOPROTOOPT;
                        break;
                }
                break;

        /*
         *	UDP-Lite's partial checksum coverage (RFC 3828).
         */
        /* The sender sets actual checksum coverage length via this option.
         * The case coverage > packet length is handled by send module. */
        case UDPLITE_SEND_CSCOV:
                if (!is_udplite)         /* Disable the option on UDP sockets */
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Illegal coverage: use default (8) */
                        val = 8;
                else if (val > USHRT_MAX)
                        val = USHRT_MAX;
                up->pcslen = val;
                up->pcflag |= UDPLITE_SEND_CC;
                break;

        /* The receiver specifies a minimum checksum coverage value. To make
         * sense, this should be set to at least 8 (as done below). If zero is
         * used, this again means full checksum coverage.                     */
        case UDPLITE_RECV_CSCOV:
                if (!is_udplite)         /* Disable the option on UDP sockets */
                        return -ENOPROTOOPT;
                if (val != 0 && val < 8) /* Avoid silly minimal values.       */
                        val = 8;
                else if (val > USHRT_MAX)
                        val = USHRT_MAX;
                up->pcrlen = val;
                up->pcflag |= UDPLITE_RECV_CC;
                break;

        default:
                err = -ENOPROTOOPT;
                break;
        }

        return err;
}
EXPORT_SYMBOL(udp_lib_setsockopt);
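/*
 * Usage sketch (user space, not part of this file): RFC 3828 partial
 * coverage is negotiated per socket, on UDP-Lite sockets only:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cov = 20;
 *
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(fd, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 *
 * The sender then checksums only the first 20 bytes of each datagram,
 * and the receiver drops datagrams whose coverage is below 20 via the
 * pcrlen test in udp_queue_rcv_skb().
 */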
int udp_setsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, unsigned int optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
                                          udp_push_pending_frames);
        return ip_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_setsockopt(sk, level, optname, optval, optlen,
                                          udp_push_pending_frames);
        return compat_ip_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                       char __user *optval, int __user *optlen)
{
        struct udp_sock *up = udp_sk(sk);
        int val, len;

        if (get_user(len, optlen))
                return -EFAULT;

        len = min_t(unsigned int, len, sizeof(int));

        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case UDP_CORK:
                val = up->corkflag;
                break;

        case UDP_ENCAP:
                val = up->encap_type;
                break;

        /* The following two cannot be changed on UDP sockets, the return is
         * always 0 (which corresponds to the full checksum coverage of UDP). */
        case UDPLITE_SEND_CSCOV:
                val = up->pcslen;
                break;

        case UDPLITE_RECV_CSCOV:
                val = up->pcrlen;
                break;

        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_getsockopt(sk, level, optname, optval, optlen);
        return ip_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_udp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        if (level == SOL_UDP || level == SOL_UDPLITE)
                return udp_lib_getsockopt(sk, level, optname, optval, optlen);
        return compat_ip_getsockopt(sk, level, optname, optval, optlen);
}
#endif
/**
 *	udp_poll - wait for a UDP event.
 *	@file - file struct
 *	@sock - socket
 *	@wait - poll table
 *
 *	This is the same as datagram poll, except for the special case of
 *	blocking sockets. If application is using a blocking fd
 *	and a packet with checksum error is in the queue;
 *	then it could get return from select indicating data available
 *	but then block when reading it. Add special case code
 *	to work around these arguably broken applications.
 */
unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        unsigned int mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;

        /* Check for false positives due to checksum errors */
        if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
            !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk))
                mask &= ~(POLLIN | POLLRDNORM);

        return mask;
}
EXPORT_SYMBOL(udp_poll);
struct proto udp_prot = {
        .name              = "UDP",
        .owner             = THIS_MODULE,
        .close             = udp_lib_close,
        .connect           = ip4_datagram_connect,
        .disconnect        = udp_disconnect,
        .ioctl             = udp_ioctl,
        .destroy           = udp_destroy_sock,
        .setsockopt        = udp_setsockopt,
        .getsockopt        = udp_getsockopt,
        .sendmsg           = udp_sendmsg,
        .recvmsg           = udp_recvmsg,
        .sendpage          = udp_sendpage,
        .backlog_rcv       = __udp_queue_rcv_skb,
        .release_cb        = ip4_datagram_release_cb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
        .rehash            = udp_v4_rehash,
        .get_port          = udp_v4_get_port,
        .memory_allocated  = &udp_memory_allocated,
        .sysctl_mem        = sysctl_udp_mem,
        .sysctl_wmem       = &sysctl_udp_wmem_min,
        .sysctl_rmem       = &sysctl_udp_rmem_min,
        .obj_size          = sizeof(struct udp_sock),
        .slab_flags        = SLAB_DESTROY_BY_RCU,
        .h.udp_table       = &udp_table,
#ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_udp_setsockopt,
        .compat_getsockopt = compat_udp_getsockopt,
#endif
        .clear_sk          = sk_prot_clear_portaddr_nulls,
};
EXPORT_SYMBOL(udp_prot);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS

static struct sock *udp_get_first(struct seq_file *seq, int start)
{
        struct sock *sk;
        struct udp_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);

        for (state->bucket = start; state->bucket <= state->udp_table->mask;
             ++state->bucket) {
                struct hlist_nulls_node *node;
                struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];

                if (hlist_nulls_empty(&hslot->head))
                        continue;

                spin_lock_bh(&hslot->lock);
                sk_nulls_for_each(sk, node, &hslot->head) {
                        if (!net_eq(sock_net(sk), net))
                                continue;
                        if (sk->sk_family == state->family)
                                goto found;
                }
                spin_unlock_bh(&hslot->lock);
        }
        sk = NULL;
found:
        return sk;
}
static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
{
        struct udp_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);

        do {
                sk = sk_nulls_next(sk);
        } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family));

        if (!sk) {
                if (state->bucket <= state->udp_table->mask)
                        spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
                return udp_get_first(seq, state->bucket + 1);
        }
        return sk;
}
static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos)
{
        struct sock *sk = udp_get_first(seq, 0);

        if (sk)
                while (pos && (sk = udp_get_next(seq, sk)) != NULL)
                        --pos;
        return pos ? NULL : sk;
}
static void *udp_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct udp_iter_state *state = seq->private;
        state->bucket = MAX_UDP_PORTS;

        return *pos ? udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
}
static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct sock *sk;

        if (v == SEQ_START_TOKEN)
                sk = udp_get_idx(seq, 0);
        else
                sk = udp_get_next(seq, v);

        ++*pos;
        return sk;
}
static void udp_seq_stop(struct seq_file *seq, void *v)
{
        struct udp_iter_state *state = seq->private;

        if (state->bucket <= state->udp_table->mask)
                spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
}
int udp_seq_open(struct inode *inode, struct file *file)
{
        struct udp_seq_afinfo *afinfo = PDE_DATA(inode);
        struct udp_iter_state *s;
        int err;

        err = seq_open_net(inode, file, &afinfo->seq_ops,
                           sizeof(struct udp_iter_state));
        if (err < 0)
                return err;

        s = ((struct seq_file *)file->private_data)->private;
        s->family       = afinfo->family;
        s->udp_table    = afinfo->udp_table;
        return err;
}
EXPORT_SYMBOL(udp_seq_open);
/* ------------------------------------------------------------------------ */
int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
{
        struct proc_dir_entry *p;
        int rc = 0;

        afinfo->seq_ops.start   = udp_seq_start;
        afinfo->seq_ops.next    = udp_seq_next;
        afinfo->seq_ops.stop    = udp_seq_stop;

        p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
                             afinfo->seq_fops, afinfo);
        if (!p)
                rc = -ENOMEM;
        return rc;
}
EXPORT_SYMBOL(udp_proc_register);

void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
        remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(udp_proc_unregister);
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
                int bucket, int *len)
{
        struct inet_sock *inet = inet_sk(sp);
        __be32 dest = inet->inet_daddr;
        __be32 src  = inet->inet_rcv_saddr;
        __u16 destp       = ntohs(inet->inet_dport);
        __u16 srcp        = ntohs(inet->inet_sport);

        seq_printf(f, "%5d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
                bucket, src, srcp, dest, destp, sp->sk_state,
                sk_wmem_alloc_get(sp),
                sk_rmem_alloc_get(sp),
                0, 0L, 0,
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
                0, sock_i_ino(sp),
                atomic_read(&sp->sk_refcnt), sp,
                atomic_read(&sp->sk_drops), len);
}
int udp4_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_printf(seq, "%-127s\n",
                           "  sl  local_address rem_address   st tx_queue "
                           "rx_queue tr tm->when retrnsmt   uid  timeout "
                           "inode ref pointer drops");
        else {
                struct udp_iter_state *state = seq->private;
                int len;

                udp4_format_sock(v, seq, state->bucket, &len);
                seq_printf(seq, "%*s\n", 127 - len, "");
        }
        return 0;
}
static const struct file_operations udp_afinfo_seq_fops = {
        .owner    = THIS_MODULE,
        .open     = udp_seq_open,
        .read     = seq_read,
        .llseek   = seq_lseek,
        .release  = seq_release_net
};
/* ------------------------------------------------------------------------ */
static struct udp_seq_afinfo udp4_seq_afinfo = {
        .name           = "udp",
        .family         = AF_INET,
        .udp_table      = &udp_table,
        .seq_fops       = &udp_afinfo_seq_fops,
        .seq_ops        = {
                .show           = udp4_seq_show,
        },
};
static int __net_init udp4_proc_init_net(struct net *net)
{
        return udp_proc_register(net, &udp4_seq_afinfo);
}

static void __net_exit udp4_proc_exit_net(struct net *net)
{
        udp_proc_unregister(net, &udp4_seq_afinfo);
}

static struct pernet_operations udp4_net_ops = {
        .init = udp4_proc_init_net,
        .exit = udp4_proc_exit_net,
};

int __init udp4_proc_init(void)
{
        return register_pernet_subsys(&udp4_net_ops);
}

void udp4_proc_exit(void)
{
        unregister_pernet_subsys(&udp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
static __initdata unsigned long uhash_entries;
static int __init set_uhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtoul(str, 0, &uhash_entries);
        if (ret)
                return 0;

        if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN)
                uhash_entries = UDP_HTABLE_SIZE_MIN;
        return 1;
}
__setup("uhash_entries=", set_uhash_entries);
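/*
 * Example: booting with "uhash_entries=65536" on the kernel command line
 * requests a 65536-slot UDP hash; values below UDP_HTABLE_SIZE_MIN are
 * rounded up by the handler above.
 */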
void __init udp_table_init(struct udp_table *table, const char *name)
{
        unsigned int i;

        table->hash = alloc_large_system_hash(name,
                                              2 * sizeof(struct udp_hslot),
                                              uhash_entries,
                                              21, /* one slot per 2 MB */
                                              0,
                                              &table->log,
                                              &table->mask,
                                              UDP_HTABLE_SIZE_MIN,
                                              64 * 1024);

        table->hash2 = table->hash + (table->mask + 1);
        for (i = 0; i <= table->mask; i++) {
                INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i);
                table->hash[i].count = 0;
                spin_lock_init(&table->hash[i].lock);
        }
        for (i = 0; i <= table->mask; i++) {
                INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i);
                table->hash2[i].count = 0;
                spin_lock_init(&table->hash2[i].lock);
        }
}
void __init udp_init(void)
{
        unsigned long limit;

        udp_table_init(&udp_table, "UDP");
        limit = nr_free_buffer_pages() / 8;
        limit = max(limit, 128UL);
        sysctl_udp_mem[0] = limit / 4 * 3;
        sysctl_udp_mem[1] = limit;
        sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2;

        sysctl_udp_rmem_min = SK_MEM_QUANTUM;
        sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}
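/*
 * Worked example (illustrative numbers): if nr_free_buffer_pages()
 * returns 1048576 (about 4 GB of 4 KB pages), limit = 131072, giving
 * sysctl_udp_mem = { 98304, 131072, 196608 } pages -- i.e. memory
 * pressure begins around 512 MB of UDP buffer memory and the hard
 * limit sits near 768 MB.
 */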
int udp4_ufo_send_check(struct sk_buff *skb)
{
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                return -EINVAL;

        if (likely(!skb->encapsulation)) {
                const struct iphdr *iph;
                struct udphdr *uh;

                iph = ip_hdr(skb);
                uh = udp_hdr(skb);

                uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
                                               IPPROTO_UDP, 0);
                skb->csum_start = skb_transport_header(skb) - skb->head;
                skb->csum_offset = offsetof(struct udphdr, check);
                skb->ip_summed = CHECKSUM_PARTIAL;
        }
        return 0;
}
static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
                                              netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        int mac_len = skb->mac_len;
        int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
        __be16 protocol = skb->protocol;
        netdev_features_t enc_features;
        int outer_hlen;

        if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
                goto out;

        skb->encapsulation = 0;
        __skb_pull(skb, tnl_hlen);
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb_inner_network_offset(skb));
        skb->mac_len = skb_inner_network_offset(skb);
        skb->protocol = htons(ETH_P_TEB);

        /* segment inner packet. */
        enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
        segs = skb_mac_gso_segment(skb, enc_features);
        if (!segs || IS_ERR(segs))
                goto out;

        outer_hlen = skb_tnl_header_len(skb);
        skb = segs;
        do {
                struct udphdr *uh;
                int udp_offset = outer_hlen - tnl_hlen;

                skb->mac_len = mac_len;

                skb_push(skb, outer_hlen);
                skb_reset_mac_header(skb);
                skb_set_network_header(skb, mac_len);
                skb_set_transport_header(skb, udp_offset);
                uh = udp_hdr(skb);
                uh->len = htons(skb->len - udp_offset);

                /* csum segment if tunnel sets skb with csum. */
                if (unlikely(uh->check)) {
                        struct iphdr *iph = ip_hdr(skb);

                        uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                       skb->len - udp_offset,
                                                       IPPROTO_UDP, 0);
                        uh->check = csum_fold(skb_checksum(skb, udp_offset,
                                                           skb->len - udp_offset, 0));
                        if (uh->check == 0)
                                uh->check = CSUM_MANGLED_0;
                }

                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = protocol;
        } while ((skb = skb->next));
out:
        return segs;
}
struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                  netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int mss;

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;

                if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
                                      SKB_GSO_UDP_TUNNEL |
                                      SKB_GSO_GRE) ||
                             !(type & (SKB_GSO_UDP))))
                        goto out;

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        /* Fragment the skb. IP headers of the fragments are updated in
         * inet_gso_segment()
         */
        if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
                segs = skb_udp_tunnel_segment(skb, features);
        else {
                int offset;
                __wsum csum;

                /* Do software UFO. Complete and fill in the UDP checksum as
                 * HW cannot do checksum of UDP packets sent as multiple
                 * IP fragments.
                 */
                offset = skb_checksum_start_offset(skb);
                csum = skb_checksum(skb, offset, skb->len - offset, 0);
                offset += skb->csum_offset;
                *(__sum16 *)(skb->data + offset) = csum_fold(csum);
                skb->ip_summed = CHECKSUM_NONE;

                segs = skb_segment(skb, features);
        }
out:
        return segs;
}