/*
 *	UDP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/udp.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/udp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <net/sock_reuseport.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if defined(CONFIG_NET_L3_MASTER_DEV)
	if (!net->ipv4.sysctl_udp_l3mdev_accept &&
	    skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
		return true;
#endif
	return false;
}
static u32 udp6_ehashfn(const struct net *net,
			const struct in6_addr *laddr,
			const u16 lport,
			const struct in6_addr *faddr,
			const __be16 fport)
{
	static u32 udp6_ehash_secret __read_mostly;
	static u32 udp_ipv6_hash_secret __read_mostly;

	u32 lhash, fhash;

	net_get_random_once(&udp6_ehash_secret,
			    sizeof(udp6_ehash_secret));
	net_get_random_once(&udp_ipv6_hash_secret,
			    sizeof(udp_ipv6_hash_secret));

	lhash = (__force u32)laddr->s6_addr32[3];
	fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);

	return __inet6_ehashfn(lhash, lport, fhash, fport,
			       udp_ipv6_hash_secret + net_hash_mix(net));
}
static u32 udp6_portaddr_hash(const struct net *net,
			      const struct in6_addr *addr6,
			      unsigned int port)
{
	unsigned int hash, mix = net_hash_mix(net);

	if (ipv6_addr_any(addr6))
		hash = jhash_1word(0, mix);
	else if (ipv6_addr_v4mapped(addr6))
		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
	else
		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);

	return hash ^ port;
}
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
	unsigned int hash2_nulladdr =
		udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
	unsigned int hash2_partial =
		udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);

	/* precompute partial secondary hash */
	udp_sk(sk)->udp_portaddr_hash = hash2_partial;
	return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
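/* Note (summary comment, not in the original source): only the address part
 * of the secondary hash can be precomputed above; udp_lib_get_port() XORs in
 * the port once it has actually been chosen, since snum may still be 0
 * (auto-assignment) at this point.
 */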
static void udp_v6_rehash(struct sock *sk)
{
	u16 new_hash = udp6_portaddr_hash(sock_net(sk),
					  &sk->sk_v6_rcv_saddr,
					  inet_sk(sk)->inet_num);

	udp_lib_rehash(sk, new_hash);
}
static int compute_score(struct sock *sk, struct net *net,
			 const struct in6_addr *saddr, __be16 sport,
			 const struct in6_addr *daddr, unsigned short hnum,
			 int dif, int sdif, bool exact_dif)
{
	int score;
	struct inet_sock *inet;

	if (!net_eq(sock_net(sk), net) ||
	    udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6)
		return -1;

	score = 0;
	inet = inet_sk(sk);

	if (inet->inet_dport) {
		if (inet->inet_dport != sport)
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
			return -1;
		score++;
	}

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
			return -1;
		score++;
	}

	if (sk->sk_bound_dev_if || exact_dif) {
		bool dev_match = (sk->sk_bound_dev_if == dif ||
				  sk->sk_bound_dev_if == sdif);

		if (exact_dif && !dev_match)
			return -1;
		if (sk->sk_bound_dev_if && dev_match)
			score++;
	}

	if (sk->sk_incoming_cpu == raw_smp_processor_id())
		score++;

	return score;
}
/* called with rcu_read_lock() */
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum,
		int dif, int sdif, bool exact_dif,
		struct udp_hslot *hslot2, struct sk_buff *skb)
{
	struct sock *sk, *result;
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		score = compute_score(sk, net, saddr, sport,
				      daddr, hnum, dif, sdif, exact_dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp6_ehashfn(net, daddr, hnum,
						    saddr, sport);

				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			badness = score;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
/* rcu_read_lock() must be held */
struct sock *__udp6_lib_lookup(struct net *net,
			       const struct in6_addr *saddr, __be16 sport,
			       const struct in6_addr *daddr, __be16 dport,
			       int dif, int sdif, struct udp_table *udptable,
			       struct sk_buff *skb)
{
	struct sock *sk, *result;
	unsigned short hnum = ntohs(dport);
	unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
	struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
	bool exact_dif = udp6_lib_exact_dif_match(net, skb);
	int score, badness, matches = 0, reuseport = 0;
	u32 hash = 0;

	if (hslot->count > 10) {
		hash2 = udp6_portaddr_hash(net, daddr, hnum);
		slot2 = hash2 & udptable->mask;
		hslot2 = &udptable->hash2[slot2];
		if (hslot->count < hslot2->count)
			goto begin;

		result = udp6_lib_lookup2(net, saddr, sport,
					  daddr, hnum, dif, sdif, exact_dif,
					  hslot2, skb);
		if (!result) {
			unsigned int old_slot2 = slot2;

			hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
			slot2 = hash2 & udptable->mask;
			/* avoid searching the same slot again. */
			if (unlikely(slot2 == old_slot2))
				goto begin;

			hslot2 = &udptable->hash2[slot2];
			if (hslot->count < hslot2->count)
				goto begin;

			result = udp6_lib_lookup2(net, saddr, sport,
						  daddr, hnum, dif, sdif,
						  exact_dif, hslot2, skb);
		}
		return result;
	}
begin:
	result = NULL;
	badness = -1;
	sk_for_each_rcu(sk, &hslot->head) {
		score = compute_score(sk, net, saddr, sport, daddr, hnum, dif,
				      sdif, exact_dif);
		if (score > badness) {
			reuseport = sk->sk_reuseport;
			if (reuseport) {
				hash = udp6_ehashfn(net, daddr, hnum,
						    saddr, sport);
				result = reuseport_select_sock(sk, hash, skb,
							sizeof(struct udphdr));
				if (result)
					return result;
				matches = 1;
			}
			result = sk;
			badness = score;
		} else if (score == badness && reuseport) {
			matches++;
			if (reciprocal_scale(hash, matches) == 0)
				result = sk;
			hash = next_pseudo_random32(hash);
		}
	}
	return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
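/* Summary note (not in the original source): when the per-port chain holds
 * more than ten sockets, the lookup above first tries the secondary hash on
 * (daddr, port), then the wildcard (in6addr_any, port) slot, and only falls
 * back to scanning the plain per-port chain with compute_score() when the
 * secondary slots would be no cheaper to search.
 */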
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
					  __be16 sport, __be16 dport,
					  struct udp_table *udptable)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), udptable, skb);
}

struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
				 __be16 sport, __be16 dport)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);

	return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
				 &iph->daddr, dport, inet6_iif(skb),
				 inet6_sdif(skb), &udp_table, skb);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
/* Must be called under rcu_read_lock().
 * Does increment socket refcount.
 */
#if IS_ENABLED(CONFIG_NETFILTER_XT_MATCH_SOCKET) || \
    IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TPROXY) || \
    IS_ENABLED(CONFIG_NF_SOCKET_IPV6)
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	struct sock *sk;

	sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
			       dif, 0, &udp_table, NULL);
	if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
		sk = NULL;
	return sk;
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
#endif
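/* Illustrative caller pattern for the lookup above (sketch only, not part of
 * this file):
 *
 *	rcu_read_lock();
 *	sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, dif);
 *	rcu_read_unlock();
 *	if (sk) {
 *		...use the socket...
 *		sock_put(sk);	(drops the reference taken by the lookup)
 *	}
 */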
/* do not use the scratch area len for jumbograms: their length exceeds the
 * scratch area space; note that the IP6CB flags are still in the first
 * cacheline, so checking for jumbograms is cheap
 */
static int udp6_skb_len(struct sk_buff *skb)
{
	return unlikely(inet6_is_jumbogram(skb)) ? skb->len : udp_skb_len(skb);
}
/*
 *	This should be easy, if there is something there we
 *	return it, otherwise we block.
 */

int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	unsigned int ulen, copied;
	int peeked, peeking, off;
	int err;
	int is_udplite = IS_UDPLITE(sk);
	bool checksum_valid = false;
	int is_udp4;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

try_again:
	peeking = flags & MSG_PEEK;
	off = sk_peek_offset(sk, flags);
	skb = __skb_recv_udp(sk, flags, noblock, &peeked, &off, &err);
	if (!skb)
		return err;

	ulen = udp6_skb_len(skb);
	copied = len;
	if (copied > ulen - off)
		copied = ulen - off;
	else if (copied < ulen)
		msg->msg_flags |= MSG_TRUNC;

	is_udp4 = (skb->protocol == htons(ETH_P_IP));

	/*
	 * If checksum is needed at all, try to do it while copying the
	 * data.  If the data is truncated, or if we only want a partial
	 * coverage checksum (UDP-Lite), do it before the copy.
	 */

	if (copied < ulen || peeking ||
	    (is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
		checksum_valid = udp_skb_csum_unnecessary(skb) ||
				!__udp_lib_checksum_complete(skb);
		if (!checksum_valid)
			goto csum_copy_err;
	}

	if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
		if (udp_skb_is_linear(skb))
			err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
		else
			err = skb_copy_datagram_msg(skb, off, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, off, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (unlikely(err)) {
		if (!peeked) {
			atomic_inc(&sk->sk_drops);
			if (is_udp4)
				UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					      is_udplite);
			else
				UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
					       is_udplite);
		}
		kfree_skb(skb);
		return err;
	}
	if (!peeked) {
		if (is_udp4)
			UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
				      is_udplite);
		else
			UDP6_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
				       is_udplite);
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	/* Copy the address. */
	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = udp_hdr(skb)->source;
		sin6->sin6_flowinfo = 0;

		if (is_udp4) {
			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin6->sin6_addr);
			sin6->sin6_scope_id = 0;
		} else {
			sin6->sin6_addr = ipv6_hdr(skb)->saddr;
			sin6->sin6_scope_id =
				ipv6_iface_scope_id(&sin6->sin6_addr,
						    inet6_iif(skb));
		}
		*addr_len = sizeof(*sin6);
	}

	if (np->rxopt.all)
		ip6_datagram_recv_common_ctl(sk, msg, skb);

	if (is_udp4) {
		if (inet->cmsg_flags)
			ip_cmsg_recv_offset(msg, sk, skb,
					    sizeof(struct udphdr), off);
	} else {
		if (np->rxopt.all)
			ip6_datagram_recv_specific_ctl(sk, msg, skb);
	}

	err = copied;
	if (flags & MSG_TRUNC)
		err = ulen;

	skb_consume_udp(sk, skb, peeking ? -err : err);
	return err;

csum_copy_err:
	if (!__sk_queue_drop_skb(sk, &udp_sk(sk)->reader_queue, skb, flags,
				 udp_skb_destructor)) {
		if (is_udp4) {
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_CSUMERRORS, is_udplite);
			UDP_INC_STATS(sock_net(sk),
				      UDP_MIB_INERRORS, is_udplite);
		} else {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_CSUMERRORS, is_udplite);
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_INERRORS, is_udplite);
		}
		kfree_skb(skb);
	}

	/* starting over for a new packet, but check if we need to yield */
	cond_resched();
	msg->msg_flags &= ~MSG_TRUNC;
	goto try_again;
}
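/* User-space view of the above (summary note, not in the original source):
 * a datagram larger than the supplied buffer is truncated and MSG_TRUNC is
 * set in the returned msg_flags; if MSG_TRUNC was also passed in the flags
 * argument, the full datagram length is returned instead of the number of
 * bytes copied.
 */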
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info,
		    struct udp_table *udptable)
{
	struct ipv6_pinfo *np;
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct in6_addr *saddr = &hdr->saddr;
	const struct in6_addr *daddr = &hdr->daddr;
	struct udphdr *uh = (struct udphdr *)(skb->data+offset);
	struct sock *sk;
	int harderr;
	int err;
	struct net *net = dev_net(skb->dev);

	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
			       inet6_iif(skb), 0, udptable, skb);
	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	harderr = icmpv6_err_convert(type, code, &err);
	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		if (!ip6_sk_accept_pmtu(sk))
			goto out;
		ip6_sk_update_pmtu(skb, sk, info);
		if (np->pmtudisc != IPV6_PMTUDISC_DONT)
			harderr = 1;
	}
	if (type == NDISC_REDIRECT) {
		ip6_sk_redirect(skb, sk);
		goto out;
	}

	if (!np->recverr) {
		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
			goto out;
	} else {
		ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
	}

	sk->sk_err = err;
	sk->sk_error_report(sk);
out:
	return;
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	} else {
		sk_mark_napi_id_once(sk, skb);
	}

	rc = __udp_enqueue_schedule_skb(sk, skb);
	if (rc < 0) {
		int is_udplite = IS_UDPLITE(sk);

		/* Note that an ENOMEM error is charged twice */
		if (rc == -ENOMEM)
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_RCVBUFERRORS, is_udplite);
		UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
		kfree_skb(skb);
		return -1;
	}

	return 0;
}
static __inline__ void udpv6_err(struct sk_buff *skb,
				 struct inet6_skb_parm *opt, u8 type,
				 u8 code, int offset, __be32 info)
{
	__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}

static struct static_key udpv6_encap_needed __read_mostly;
void udpv6_encap_enable(void)
{
	if (!static_key_enabled(&udpv6_encap_needed))
		static_key_slow_inc(&udpv6_encap_needed);
}
EXPORT_SYMBOL(udpv6_encap_enable);
static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int is_udplite = IS_UDPLITE(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;

	if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
		int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		encap_rcv = ACCESS_ONCE(up->encap_rcv);
		if (encap_rcv) {
			int ret;

			/* Verify checksum before giving to encap */
			if (udp_lib_checksum_complete(skb))
				goto csum_error;

			ret = encap_rcv(sk, skb);
			if (ret <= 0) {
				__UDP_INC_STATS(sock_net(sk),
						UDP_MIB_INDATAGRAMS,
						is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		if (up->pcrlen == 0) {          /* full coverage was set  */
			net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
					    UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
			net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
					    UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	prefetch(&sk->sk_rmem_alloc);
	if (rcu_access_pointer(sk->sk_filter) &&
	    udp_lib_checksum_complete(skb))
		goto csum_error;

	if (sk_filter_trim_cap(sk, skb, sizeof(struct udphdr)))
		goto drop;

	udp_csum_pull_header(skb);

	return __udpv6_queue_rcv_skb(sk, skb);

csum_error:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	atomic_inc(&sk->sk_drops);
	kfree_skb(skb);
	return -1;
}
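/* Illustrative sketch (not part of this file): a tunnel driver enables the
 * encapsulation path handled above roughly as follows, typically through the
 * udp_tunnel helpers (setup_udp_tunnel_sock()):
 *
 *	udp_sk(sk)->encap_type = my_encap_type;
 *	udp_sk(sk)->encap_rcv  = my_encap_rcv;	(returns <0, 0 or >0, see above)
 *	udpv6_encap_enable();
 */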
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
				   __be16 loc_port, const struct in6_addr *loc_addr,
				   __be16 rmt_port, const struct in6_addr *rmt_addr,
				   int dif, unsigned short hnum)
{
	struct inet_sock *inet = inet_sk(sk);

	if (!net_eq(sock_net(sk), net))
		return false;

	if (udp_sk(sk)->udp_port_hash != hnum ||
	    sk->sk_family != PF_INET6 ||
	    (inet->inet_dport && inet->inet_dport != rmt_port) ||
	    (!ipv6_addr_any(&sk->sk_v6_daddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
	    (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
	    (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
	     !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
		return false;
	if (!inet6_mc_check(sk, loc_addr, rmt_addr))
		return false;
	return true;
}
static void udp6_csum_zero_error(struct sk_buff *skb)
{
	/* RFC 2460 section 8.1 says that we SHOULD log
	 * this error. Well, it is reasonable.
	 */
	net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
			    &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
			    &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
/*
 * Note: called only from the BH handler context,
 * so we don't need to lock the hashes.
 */
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
		const struct in6_addr *saddr, const struct in6_addr *daddr,
		struct udp_table *udptable, int proto)
{
	struct sock *sk, *first = NULL;
	const struct udphdr *uh = udp_hdr(skb);
	unsigned short hnum = ntohs(uh->dest);
	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
	unsigned int offset = offsetof(typeof(*sk), sk_node);
	unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
	int dif = inet6_iif(skb);
	struct hlist_node *node;
	struct sk_buff *nskb;

	if (use_hash2) {
		hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
			    udptable->mask;
		hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
		hslot = &udptable->hash2[hash2];
		offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
	}

	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
					    uh->source, saddr, dif, hnum))
			continue;
		/* If zero checksum and no_check is not on for
		 * the socket then skip it.
		 */
		if (!uh->check && !udp_sk(sk)->no_check6_rx)
			continue;
		if (!first) {
			first = sk;
			continue;
		}
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!nskb)) {
			atomic_inc(&sk->sk_drops);
			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
			continue;
		}

		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
			consume_skb(nskb);
	}

	/* Also lookup *:port if we are using hash2 and haven't done so yet. */
	if (use_hash2 && hash2 != hash2_any) {
		hash2 = hash2_any;
		goto start_lookup;
	}

	if (first) {
		if (udpv6_queue_rcv_skb(first, skb) > 0)
			consume_skb(skb);
	} else {
		kfree_skb(skb);
		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
				 proto == IPPROTO_UDPLITE);
	}
	return 0;
}
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
		   int proto)
{
	const struct in6_addr *saddr, *daddr;
	struct net *net = dev_net(skb->dev);
	struct udphdr *uh;
	struct sock *sk;
	u32 ulen = 0;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto discard;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = &ipv6_hdr(skb)->daddr;
	uh = udp_hdr(skb);

	ulen = ntohs(uh->len);
	if (ulen > skb->len)
		goto short_packet;

	if (proto == IPPROTO_UDP) {
		/* UDP validates ulen. */

		/* Check for jumbo payload */
		if (ulen == 0)
			ulen = skb->len;

		if (ulen < sizeof(*uh))
			goto short_packet;

		if (ulen < skb->len) {
			if (pskb_trim_rcsum(skb, ulen))
				goto short_packet;
			saddr = &ipv6_hdr(skb)->saddr;
			daddr = &ipv6_hdr(skb)->daddr;
			uh = udp_hdr(skb);
		}
	}

	if (udp6_csum_init(skb, uh, proto))
		goto csum_error;

	/* Check if the socket is already available, e.g. due to early demux */
	sk = skb_steal_sock(skb);
	if (sk) {
		struct dst_entry *dst = skb_dst(skb);
		int ret;

		if (unlikely(sk->sk_rx_dst != dst))
			udp_sk_rx_dst_set(sk, dst);

		ret = udpv6_queue_rcv_skb(sk, skb);
		sock_put(sk);

		/* a return value > 0 means to resubmit the input */
		if (ret > 0)
			return ret;
		return 0;
	}

	/*
	 *	Multicast receive code
	 */
	if (ipv6_addr_is_multicast(daddr))
		return __udp6_lib_mcast_deliver(net, skb,
				saddr, daddr, udptable, proto);

	/* Unicast */
	sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
	if (sk) {
		int ret;

		if (!uh->check && !udp_sk(sk)->no_check6_rx) {
			udp6_csum_zero_error(skb);
			goto csum_error;
		}

		if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
			skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
						 ip6_compute_pseudo);

		ret = udpv6_queue_rcv_skb(sk, skb);

		/* a return value > 0 means to resubmit the input */
		if (ret > 0)
			return ret;

		return 0;
	}

	if (!uh->check) {
		udp6_csum_zero_error(skb);
		goto csum_error;
	}

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard;

	if (udp_lib_checksum_complete(skb))
		goto csum_error;

	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);

	kfree_skb(skb);
	return 0;

short_packet:
	net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
			    proto == IPPROTO_UDPLITE ? "-Lite" : "",
			    saddr, ntohs(uh->source),
			    ulen, skb->len,
			    daddr, ntohs(uh->dest));
	goto discard;

csum_error:
	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
	kfree_skb(skb);
	return 0;
}
static struct sock *__udp6_lib_demux_lookup(struct net *net,
			__be16 loc_port, const struct in6_addr *loc_addr,
			__be16 rmt_port, const struct in6_addr *rmt_addr,
			int dif, int sdif)
{
	unsigned short hnum = ntohs(loc_port);
	unsigned int hash2 = udp6_portaddr_hash(net, loc_addr, hnum);
	unsigned int slot2 = hash2 & udp_table.mask;
	struct udp_hslot *hslot2 = &udp_table.hash2[slot2];
	const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum);
	struct sock *sk;

	udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
		if (sk->sk_state == TCP_ESTABLISHED &&
		    INET6_MATCH(sk, net, rmt_addr, loc_addr, ports, dif, sdif))
			return sk;
		/* Only check first socket in chain */
		break;
	}
	return NULL;
}
static void udp_v6_early_demux(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	const struct udphdr *uh;
	struct sock *sk;
	struct dst_entry *dst;
	int dif = skb->dev->ifindex;
	int sdif = inet6_sdif(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) +
	    sizeof(struct udphdr)))
		return;

	uh = udp_hdr(skb);

	if (skb->pkt_type == PACKET_HOST)
		sk = __udp6_lib_demux_lookup(net, uh->dest,
					     &ipv6_hdr(skb)->daddr,
					     uh->source, &ipv6_hdr(skb)->saddr,
					     dif, sdif);
	else
		return;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return;

	skb->sk = sk;
	skb->destructor = sock_efree;
	dst = READ_ONCE(sk->sk_rx_dst);

	if (dst)
		dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
	if (dst) {
		/* set noref for now.
		 * any place which wants to hold dst has to call
		 * dst_hold_safe()
		 */
		skb_dst_set_noref(skb, dst);
	}
}
static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
	return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
/*
 *	Throw away all pending data and cancel the corking. Socket is locked.
 */
static void udp_v6_flush_pending_frames(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	if (up->pending == AF_INET)
		udp_flush_pending_frames(sk);
	else if (up->pending) {
		up->len = 0;
		up->pending = 0;
		ip6_flush_pending_frames(sk);
	}
}
/**
 *	udp6_hwcsum_outgoing  -  handle outgoing HW checksumming
 *	@sk:	socket we are sending on
 *	@skb:	sk_buff containing the filled-in UDP header
 *		(checksum field must be zeroed out)
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *frags = skb_shinfo(skb)->frag_list;
	__wsum csum = 0;

	if (!frags) {
		/* Only one fragment on the socket.  */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		csum = skb->csum;

		skb->ip_summed = CHECKSUM_NONE;

		do {
			csum = csum_add(csum, frags->csum);
		} while ((frags = frags->next));

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6)
{
	struct sock *sk = skb->sk;
	struct udphdr *uh;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	int offset = skb_transport_offset(skb);
	int len = skb->len - offset;

	/*
	 * Create a UDP header
	 */
	uh = udp_hdr(skb);
	uh->source = fl6->fl6_sport;
	uh->dest = fl6->fl6_dport;
	uh->len = htons(len);
	uh->check = 0;

	if (is_udplite)
		csum = udplite_csum(skb);
	else if (udp_sk(sk)->no_check6_tx) {	/* UDP csum disabled */
		skb->ip_summed = CHECKSUM_NONE;
		goto send;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
		udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
		goto send;
	} else
		csum = udp_csum(skb);

	/* add protocol-dependent pseudo-header */
	uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
				    len, fl6->flowi6_proto, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

send:
	err = ip6_send_skb(skb);
	if (err) {
		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
			UDP6_INC_STATS(sock_net(sk),
				       UDP_MIB_SNDBUFERRORS, is_udplite);
			err = 0;
		}
	} else {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_OUTDATAGRAMS, is_udplite);
	}
	return err;
}

static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udp_sock *up = udp_sk(sk);
	struct flowi6 fl6;
	int err = 0;

	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);

	/* ip6_finish_skb will release the cork, so make a copy of
	 * fl6 first.
	 */
	fl6 = inet_sk(sk)->cork.fl.u.ip6;

	skb = ip6_finish_skb(sk);
	if (!skb)
		goto out;

	err = udp_v6_send_skb(skb, &fl6);

out:
	up->len = 0;
	up->pending = 0;
	return err;
}
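/* Reminder about the transmit checksum handling above (summary note, not in
 * the original source): in the single-fragment hardware-offload case the
 * checksum field is seeded with the inverted pseudo-header sum and the device
 * completes it (CHECKSUM_PARTIAL); otherwise the full sum is folded in
 * software with csum_ipv6_magic(), and a result of 0 is transmitted as
 * CSUM_MANGLED_0 (0xffff), since 0 means "no checksum" for UDP over IPv6.
 */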
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct ipv6_txoptions opt_space;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct in6_addr *daddr, *final_p, final;
	struct ipv6_txoptions *opt = NULL;
	struct ipv6_txoptions *opt_to_free = NULL;
	struct ip6_flowlabel *flowlabel = NULL;
	struct flowi6 fl6;
	struct dst_entry *dst;
	struct ipcm6_cookie ipc6;
	int addr_len = msg->msg_namelen;
	int ulen = len;
	int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
	int err;
	int connected = 0;
	int is_udplite = IS_UDPLITE(sk);
	int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
	struct sockcm_cookie sockc;

	ipc6.hlimit = -1;
	ipc6.tclass = -1;
	ipc6.dontfrag = -1;
	sockc.tsflags = sk->sk_tsflags;

	/* destination address check */
	if (sin6) {
		if (addr_len < offsetof(struct sockaddr, sa_data))
			return -EINVAL;

		switch (sin6->sin6_family) {
		case AF_INET6:
			if (addr_len < SIN6_LEN_RFC2133)
				return -EINVAL;
			daddr = &sin6->sin6_addr;
			if (ipv6_addr_any(daddr) &&
			    ipv6_addr_v4mapped(&np->saddr))
				ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
						       daddr);
			break;
		case AF_INET:
			goto do_udp_sendmsg;
		case AF_UNSPEC:
			msg->msg_name = sin6 = NULL;
			msg->msg_namelen = addr_len = 0;
			daddr = NULL;
			break;
		default:
			return -EINVAL;
		}
	} else if (!up->pending) {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;
		daddr = &sk->sk_v6_daddr;
	} else
		daddr = NULL;

	if (daddr) {
		if (ipv6_addr_v4mapped(daddr)) {
			struct sockaddr_in sin;
			sin.sin_family = AF_INET;
			sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
			sin.sin_addr.s_addr = daddr->s6_addr32[3];
			msg->msg_name = &sin;
			msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
			if (__ipv6_only_sock(sk))
				return -ENETUNREACH;
			return udp_sendmsg(sk, msg, len);
		}
	}

	if (up->pending == AF_INET)
		return udp_sendmsg(sk, msg, len);

	/* Rough check on arithmetic overflow,
	   better check is made in ip6_append_data().
	   */
	if (len > INT_MAX - sizeof(struct udphdr))
		return -EMSGSIZE;

	getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
	if (up->pending) {
		/*
		 * There are pending frames.
		 * The socket lock must be held while it's corked.
		 */
		lock_sock(sk);
		if (likely(up->pending)) {
			if (unlikely(up->pending != AF_INET6)) {
				release_sock(sk);
				return -EAFNOSUPPORT;
			}
			dst = NULL;
			goto do_append_data;
		}
		release_sock(sk);
	}
	ulen += sizeof(struct udphdr);

	memset(&fl6, 0, sizeof(fl6));

	if (sin6) {
		if (sin6->sin6_port == 0)
			return -EINVAL;

		fl6.fl6_dport = sin6->sin6_port;
		daddr = &sin6->sin6_addr;

		if (np->sndflow) {
			fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
			if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
				flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
				if (!flowlabel)
					return -EINVAL;
			}
		}

		/*
		 * Otherwise it will be difficult to maintain
		 * sk->sk_dst_cache.
		 */
		if (sk->sk_state == TCP_ESTABLISHED &&
		    ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
			daddr = &sk->sk_v6_daddr;

		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    sin6->sin6_scope_id &&
		    __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
			fl6.flowi6_oif = sin6->sin6_scope_id;
	} else {
		if (sk->sk_state != TCP_ESTABLISHED)
			return -EDESTADDRREQ;

		fl6.fl6_dport = inet->inet_dport;
		daddr = &sk->sk_v6_daddr;
		fl6.flowlabel = np->flow_label;
		connected = 1;
	}

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = sk->sk_bound_dev_if;

	if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;

	fl6.flowi6_mark = sk->sk_mark;
	fl6.flowi6_uid = sk->sk_uid;

	if (msg->msg_controllen) {
		opt = &opt_space;
		memset(opt, 0, sizeof(struct ipv6_txoptions));
		opt->tot_len = sizeof(*opt);
		ipc6.opt = opt;

		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6, &sockc);
		if (err < 0) {
			fl6_sock_release(flowlabel);
			return err;
		}
		if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
		}
		if (!(opt->opt_nflen|opt->opt_flen))
			opt = NULL;
		connected = 0;
	}
	if (!opt) {
		opt = txopt_get(np);
		opt_to_free = opt;
	}
	if (flowlabel)
		opt = fl6_merge_options(&opt_space, flowlabel, opt);
	opt = ipv6_fixup_options(&opt_space, opt);
	ipc6.opt = opt;

	fl6.flowi6_proto = sk->sk_protocol;
	if (!ipv6_addr_any(daddr))
		fl6.daddr = *daddr;
	else
		fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
	if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, opt, &final);
	if (final_p)
		connected = 0;

	if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
		fl6.flowi6_oif = np->mcast_oif;
		connected = 0;
	} else if (!fl6.flowi6_oif)
		fl6.flowi6_oif = np->ucast_oif;

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	if (ipc6.tclass < 0)
		ipc6.tclass = np->tclass;

	fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);

	dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto out;
	}

	if (ipc6.hlimit < 0)
		ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);

	if (msg->msg_flags&MSG_CONFIRM)
		goto do_confirm;
back_from_confirm:

	/* Lockless fast path for the non-corking case */
	if (!corkreq) {
		struct sk_buff *skb;

		skb = ip6_make_skb(sk, getfrag, msg, ulen,
				   sizeof(struct udphdr), &ipc6,
				   &fl6, (struct rt6_info *)dst,
				   msg->msg_flags, &sockc);
		err = PTR_ERR(skb);
		if (!IS_ERR_OR_NULL(skb))
			err = udp_v6_send_skb(skb, &fl6);
		goto release_dst;
	}

	lock_sock(sk);
	if (unlikely(up->pending)) {
		/* The socket is already corked while preparing it. */
		/* ... which is an evident application bug. --ANK */
		release_sock(sk);

		net_dbg_ratelimited("udp cork app bug 2\n");
		err = -EINVAL;
		goto out;
	}

	up->pending = AF_INET6;

do_append_data:
	if (ipc6.dontfrag < 0)
		ipc6.dontfrag = np->dontfrag;
	up->len += ulen;
	err = ip6_append_data(sk, getfrag, msg, ulen, sizeof(struct udphdr),
			      &ipc6, &fl6, (struct rt6_info *)dst,
			      corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, &sockc);
	if (err)
		udp_v6_flush_pending_frames(sk);
	else if (!corkreq)
		err = udp_v6_push_pending_frames(sk);
	else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
		up->pending = 0;

	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	release_sock(sk);

release_dst:
	if (dst) {
		if (connected) {
			ip6_dst_store(sk, dst,
				      ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
				      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
				      ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
				      &np->saddr :
#endif
				      NULL);
		} else {
			dst_release(dst);
		}
		dst = NULL;
	}

out:
	dst_release(dst);
	fl6_sock_release(flowlabel);
	txopt_put(opt_to_free);
	if (!err)
		return len;
	/*
	 * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space.  Reporting
	 * ENOBUFS might not be good (it's not tunable per se), but otherwise
	 * we don't have a good statistic (IpOutDiscards but it can be too many
	 * things).  We could add another new stat but at least for now that
	 * seems like overkill.
	 */
	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		UDP6_INC_STATS(sock_net(sk),
			       UDP_MIB_SNDBUFERRORS, is_udplite);
	}
	return err;

do_confirm:
	if (msg->msg_flags & MSG_PROBE)
		dst_confirm_neigh(dst, &fl6.daddr);
	if (!(msg->msg_flags&MSG_PROBE) || len)
		goto back_from_confirm;
	err = 0;
	goto out;
}
void udpv6_destroy_sock(struct sock *sk)
{
	struct udp_sock *up = udp_sk(sk);

	lock_sock(sk);
	udp_v6_flush_pending_frames(sk);
	release_sock(sk);

	if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
		void (*encap_destroy)(struct sock *sk);

		encap_destroy = ACCESS_ONCE(up->encap_destroy);
		if (encap_destroy)
			encap_destroy(sk);
	}

	inet6_destroy_sock(sk);
}
/*
 *	Socket option code for UDP
 */
int udpv6_setsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, unsigned int optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_setsockopt(sk, level, optname, optval, optlen,
					  udp_v6_push_pending_frames);
	return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
1447 int compat_udpv6_setsockopt(struct sock
*sk
, int level
, int optname
,
1448 char __user
*optval
, unsigned int optlen
)
1450 if (level
== SOL_UDP
|| level
== SOL_UDPLITE
)
1451 return udp_lib_setsockopt(sk
, level
, optname
, optval
, optlen
,
1452 udp_v6_push_pending_frames
);
1453 return compat_ipv6_setsockopt(sk
, level
, optname
, optval
, optlen
);
int udpv6_getsockopt(struct sock *sk, int level, int optname,
		     char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	if (level == SOL_UDP || level == SOL_UDPLITE)
		return udp_lib_getsockopt(sk, level, optname, optval, optlen);
	return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
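/* Illustrative user-space sketch (not part of this file): SOL_UDP options
 * such as UDP_CORK end up in udp_lib_setsockopt()/udp_lib_getsockopt() above:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_UDP, UDP_CORK, &on, sizeof(on));
 *	...several send() calls are merged into one datagram...
 *	on = 0;
 *	setsockopt(fd, SOL_UDP, UDP_CORK, &on, sizeof(on));
 */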
static struct inet6_protocol udpv6_protocol = {
	.early_demux		=	udp_v6_early_demux,
	.early_demux_handler	=	udp_v6_early_demux,
	.handler		=	udpv6_rcv,
	.err_handler		=	udpv6_err,
	.flags			=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
	} else {
		int bucket = ((struct udp_iter_state *)seq->private)->bucket;
		struct inet_sock *inet = inet_sk(v);
		__u16 srcp = ntohs(inet->inet_sport);
		__u16 destp = ntohs(inet->inet_dport);

		ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
	}
	return 0;
}

static const struct file_operations udp6_afinfo_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= udp_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net
};

static struct udp_seq_afinfo udp6_seq_afinfo = {
	.name		= "udp6",
	.family		= AF_INET6,
	.udp_table	= &udp_table,
	.seq_fops	= &udp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= udp6_seq_show,
	},
};

int __net_init udp6_proc_init(struct net *net)
{
	return udp_proc_register(net, &udp6_seq_afinfo);
}

void udp6_proc_exit(struct net *net)
{
	udp_proc_unregister(net, &udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
/* ------------------------------------------------------------------------ */

struct proto udpv6_prot = {
	.name			= "UDPv6",
	.owner			= THIS_MODULE,
	.close			= udp_lib_close,
	.connect		= ip6_datagram_connect,
	.disconnect		= udp_disconnect,
	.init			= udp_init_sock,
	.destroy		= udpv6_destroy_sock,
	.setsockopt		= udpv6_setsockopt,
	.getsockopt		= udpv6_getsockopt,
	.sendmsg		= udpv6_sendmsg,
	.recvmsg		= udpv6_recvmsg,
	.release_cb		= ip6_datagram_release_cb,
	.hash			= udp_lib_hash,
	.unhash			= udp_lib_unhash,
	.rehash			= udp_v6_rehash,
	.get_port		= udp_v6_get_port,
	.memory_allocated	= &udp_memory_allocated,
	.sysctl_mem		= sysctl_udp_mem,
	.sysctl_wmem		= &sysctl_udp_wmem_min,
	.sysctl_rmem		= &sysctl_udp_rmem_min,
	.obj_size		= sizeof(struct udp6_sock),
	.h.udp_table		= &udp_table,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_udpv6_setsockopt,
	.compat_getsockopt	= compat_udpv6_getsockopt,
#endif
	.diag_destroy		= udp_abort,
};
static struct inet_protosw udpv6_protosw = {
	.type		= SOCK_DGRAM,
	.protocol	= IPPROTO_UDP,
	.prot		= &udpv6_prot,
	.ops		= &inet6_dgram_ops,
	.flags		= INET_PROTOSW_PERMANENT,
};
int __init udpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
	if (ret)
		goto out;

	ret = inet6_register_protosw(&udpv6_protosw);
	if (ret)
		goto out_udpv6_protocol;
out:
	return ret;

out_udpv6_protocol:
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
	goto out;
}

void udpv6_exit(void)
{
	inet6_unregister_protosw(&udpv6_protosw);
	inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}