/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
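/*
 * Illustrative only (compiled out, not part of the kernel build): a minimal
 * userspace sketch of the IPV6_V6ONLY option mentioned in the changelog
 * above. With the option cleared (the default), an IPv6 listener also
 * accepts IPv4 connections as v4-mapped addresses; setting it lets separate
 * IPv4 and IPv6 sockets share one port. "listen_v6only" is a hypothetical
 * helper name, not kernel code.
 */
#if 0
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int listen_v6only(unsigned short port)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	int on = 1;
	struct sockaddr_in6 a;

	if (fd < 0)
		return -1;
	/* restrict this socket to IPv6 traffic only */
	setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &on, sizeof(on));

	memset(&a, 0, sizeof(a));
	a.sin6_family = AF_INET6;
	a.sin6_addr = in6addr_any;
	a.sin6_port = htons(port);
	if (bind(fd, (struct sockaddr *)&a, sizeof(a)) < 0)
		return -1;
	return listen(fd, 128) ? -1 : fd;
}
#endif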
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
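/*
 * Illustrative only: secure_tcpv6_sequence_number() conceptually mixes the
 * connection 4-tuple with a boot-time secret and a slowly advancing clock,
 * roughly as sketched below. "keyed_hash" and "net_secret" are stand-ins,
 * not the real implementation, which lives in net/core/secure_seq.c.
 */
#if 0
static u32 isn_sketch(const __be32 *saddr, const __be32 *daddr,
		      __be16 sport, __be16 dport)
{
	/* keyed hash over the 4-tuple; unpredictable to off-path hosts */
	u32 hash = keyed_hash(saddr, daddr, sport, dport, net_secret);

	/* add a ticking clock so successive ISNs for one tuple advance */
	return hash + (ktime_to_ns(ktime_get_real()) >> 6);
}
#endif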
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
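/*
 * Illustrative only: the IPV6_ADDR_MAPPED branch above is what an AF_INET6
 * socket hits when userspace connects to a v4-mapped address such as
 * "::ffff:192.0.2.1" (192.0.2.1 is a documentation example address); the
 * connection is then handed to the IPv4 code under the ipv6_mapped ops.
 * A minimal userspace sketch, compiled out:
 */
#if 0
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int connect_v4_mapped(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	struct sockaddr_in6 a;

	memset(&a, 0, sizeof(a));
	a.sin6_family = AF_INET6;
	a.sin6_port = htons(80);
	/* v4-mapped form of 192.0.2.1 */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);

	return connect(fd, (struct sockaddr *)&a, sizeof(a));
}
#endif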
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else
			set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, it SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct flowi6 *fl6,
			      struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp);

	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6->daddr = treq->rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	struct flowi6 fl6;

	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
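/*
 * Illustrative only: tcp_v6_parse_md5_keys() above is reached from
 * userspace via setsockopt(TCP_MD5SIG) with a struct tcp_md5sig from
 * <linux/tcp.h>; a key set on a v4-mapped sin6_addr is stored as an
 * AF_INET key, as the v4-mapped branches show. A minimal sketch,
 * compiled out ("set_md5_key" is a hypothetical helper):
 */
#if 0
#include <linux/tcp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const struct sockaddr_in6 *peer,
		       const void *key, int keylen)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer)); /* who the key is for */
	md5.tcpm_keylen = keylen;                    /* 0 would delete the key */
	memcpy(md5.tcpm_key, key, keylen);           /* <= TCP_MD5SIG_MAXKEYLEN */

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif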
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
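/*
 * For reference, a sketch (compiled out) mirroring the pseudo-header block
 * hashed above: struct tcp6_pseudohdr, defined in net/tcp.h, covers the
 * addresses, segment length and protocol from RFC 2460 - the same inputs
 * csum_ipv6_magic() consumes in tcp_v6_check() above.
 */
#if 0
struct tcp6_pseudohdr_sketch {		/* mirrors struct tcp6_pseudohdr */
	struct in6_addr	saddr;		/* source address */
	struct in6_addr	daddr;		/* destination address */
	__be32		len;		/* TCP segment length */
	__be32		protocol;	/* IPPROTO_TCP, including padding */
};
#endif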
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}
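/*
 * Illustrative only: in the CHECKSUM_PARTIAL case above the stack seeds
 * th->check with the complemented pseudo-header sum; the device (or
 * skb_checksum_help() as a software fallback) then sums everything from
 * csum_start onward, the seeded field included, and writes the folded
 * result at csum_start + csum_offset. A sketch of that completion step,
 * compiled out:
 */
#if 0
static void complete_partial_csum(struct sk_buff *skb)
{
	/* sum the transport header + payload, including the seeded check */
	__wsum csum = csum_partial(skb->head + skb->csum_start,
				   skb->len - skb_transport_offset(skb), 0);

	/* fold and store where csum_offset points (the TCP check field) */
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) =
		csum_fold(csum);
}
#endif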
static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->daddr,
					    ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}
*sk
, struct sk_buff
*skb
,
960 struct request_sock
*req
)
962 tcp_v6_send_ack(skb
, tcp_rsk(req
)->snt_isn
+ 1, tcp_rsk(req
)->rcv_isn
+ 1, req
->rcv_wnd
, req
->ts_recent
,
963 tcp_v6_md5_do_lookup(sk
, &ipv6_hdr(skb
)->daddr
), 0);
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
			if (!tcp_peer_is_proven(req, dst, true)) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (tcp_v6_send_synack(sk, dst, &fl6, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
* tcp_v6_syn_recv_sock(struct sock
*sk
, struct sk_buff
*skb
,
1182 struct request_sock
*req
,
1183 struct dst_entry
*dst
)
1185 struct inet6_request_sock
*treq
;
1186 struct ipv6_pinfo
*newnp
, *np
= inet6_sk(sk
);
1187 struct tcp6_sock
*newtcp6sk
;
1188 struct inet_sock
*newinet
;
1189 struct tcp_sock
*newtp
;
1191 #ifdef CONFIG_TCP_MD5SIG
1192 struct tcp_md5sig_key
*key
;
1196 if (skb
->protocol
== htons(ETH_P_IP
)) {
1201 newsk
= tcp_v4_syn_recv_sock(sk
, skb
, req
, dst
);
1206 newtcp6sk
= (struct tcp6_sock
*)newsk
;
1207 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
1209 newinet
= inet_sk(newsk
);
1210 newnp
= inet6_sk(newsk
);
1211 newtp
= tcp_sk(newsk
);
1213 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
1215 ipv6_addr_set_v4mapped(newinet
->inet_daddr
, &newnp
->daddr
);
1217 ipv6_addr_set_v4mapped(newinet
->inet_saddr
, &newnp
->saddr
);
1219 newnp
->rcv_saddr
= newnp
->saddr
;
1221 inet_csk(newsk
)->icsk_af_ops
= &ipv6_mapped
;
1222 newsk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
1223 #ifdef CONFIG_TCP_MD5SIG
1224 newtp
->af_specific
= &tcp_sock_ipv6_mapped_specific
;
1227 newnp
->ipv6_ac_list
= NULL
;
1228 newnp
->ipv6_fl_list
= NULL
;
1229 newnp
->pktoptions
= NULL
;
1231 newnp
->mcast_oif
= inet6_iif(skb
);
1232 newnp
->mcast_hops
= ipv6_hdr(skb
)->hop_limit
;
1233 newnp
->rcv_tclass
= ipv6_tclass(ipv6_hdr(skb
));
1236 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1237 * here, tcp_create_openreq_child now does this for us, see the comment in
1238 * that function for the gory details. -acme
1241 /* It is tricky place. Until this moment IPv4 tcp
1242 worked with IPv6 icsk.icsk_af_ops.
1245 tcp_sync_mss(newsk
, inet_csk(newsk
)->icsk_pmtu_cookie
);
1250 treq
= inet6_rsk(req
);
1252 if (sk_acceptq_is_full(sk
))
1256 dst
= inet6_csk_route_req(sk
, &fl6
, req
);
1261 newsk
= tcp_create_openreq_child(sk
, req
, skb
);
1266 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1267 * count here, tcp_create_openreq_child now does this for us, see the
1268 * comment in that function for the gory details. -acme
1271 newsk
->sk_gso_type
= SKB_GSO_TCPV6
;
1272 __ip6_dst_store(newsk
, dst
, NULL
, NULL
);
1274 newtcp6sk
= (struct tcp6_sock
*)newsk
;
1275 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
1277 newtp
= tcp_sk(newsk
);
1278 newinet
= inet_sk(newsk
);
1279 newnp
= inet6_sk(newsk
);
1281 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
1283 newnp
->daddr
= treq
->rmt_addr
;
1284 newnp
->saddr
= treq
->loc_addr
;
1285 newnp
->rcv_saddr
= treq
->loc_addr
;
1286 newsk
->sk_bound_dev_if
= treq
->iif
;
1288 /* Now IPv6 options...
1290 First: no IPv4 options.
1292 newinet
->inet_opt
= NULL
;
1293 newnp
->ipv6_ac_list
= NULL
;
1294 newnp
->ipv6_fl_list
= NULL
;
1297 newnp
->rxopt
.all
= np
->rxopt
.all
;
1299 /* Clone pktoptions received with SYN */
1300 newnp
->pktoptions
= NULL
;
1301 if (treq
->pktopts
!= NULL
) {
1302 newnp
->pktoptions
= skb_clone(treq
->pktopts
, GFP_ATOMIC
);
1303 consume_skb(treq
->pktopts
);
1304 treq
->pktopts
= NULL
;
1305 if (newnp
->pktoptions
)
1306 skb_set_owner_r(newnp
->pktoptions
, newsk
);
1309 newnp
->mcast_oif
= inet6_iif(skb
);
1310 newnp
->mcast_hops
= ipv6_hdr(skb
)->hop_limit
;
1311 newnp
->rcv_tclass
= ipv6_tclass(ipv6_hdr(skb
));
1313 /* Clone native IPv6 options from listening socket (if any)
1315 Yes, keeping reference count would be much more clever,
1316 but we make one more one thing there: reattach optmem
1320 newnp
->opt
= ipv6_dup_options(newsk
, np
->opt
);
1322 inet_csk(newsk
)->icsk_ext_hdr_len
= 0;
1324 inet_csk(newsk
)->icsk_ext_hdr_len
= (newnp
->opt
->opt_nflen
+
1325 newnp
->opt
->opt_flen
);
1327 tcp_mtup_init(newsk
);
1328 tcp_sync_mss(newsk
, dst_mtu(dst
));
1329 newtp
->advmss
= dst_metric_advmss(dst
);
1330 if (tcp_sk(sk
)->rx_opt
.user_mss
&&
1331 tcp_sk(sk
)->rx_opt
.user_mss
< newtp
->advmss
)
1332 newtp
->advmss
= tcp_sk(sk
)->rx_opt
.user_mss
;
1334 tcp_initialize_rcv_mss(newsk
);
1335 if (tcp_rsk(req
)->snt_synack
)
1336 tcp_valid_rtt_meas(newsk
,
1337 tcp_time_stamp
- tcp_rsk(req
)->snt_synack
);
1338 newtp
->total_retrans
= req
->retrans
;
1340 newinet
->inet_daddr
= newinet
->inet_saddr
= LOOPBACK4_IPV6
;
1341 newinet
->inet_rcv_saddr
= LOOPBACK4_IPV6
;
1343 #ifdef CONFIG_TCP_MD5SIG
1344 /* Copy over the MD5 key from the original socket */
1345 if ((key
= tcp_v6_md5_do_lookup(sk
, &newnp
->daddr
)) != NULL
) {
1346 /* We're using one, so create a matching key
1347 * on the newsk structure. If we fail to get
1348 * memory, then we end up not copying the key
1351 tcp_md5_do_add(newsk
, (union tcp_md5_addr
*)&newnp
->daddr
,
1352 AF_INET6
, key
->key
, key
->keylen
, GFP_ATOMIC
);
1356 if (__inet_inherit_port(sk
, newsk
) < 0) {
1360 __inet6_hash(newsk
, NULL
);
1365 NET_INC_STATS_BH(sock_net(sk
), LINUX_MIB_LISTENOVERFLOWS
);
1369 NET_INC_STATS_BH(sock_net(sk
), LINUX_MIB_LISTENDROPS
);
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have it's spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk->sk_state != TCP_TIME_WAIT) {
			struct dst_entry *dst = sk->sk_rx_dst;
			struct inet_sock *icsk = inet_sk(sk);
			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    icsk->rx_dst_ifindex == inet6_iif(skb))
				skb_dst_set_noref(skb, dst);
		}
	}
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v6_mtu_reduced,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
static const struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}