/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
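/*
 * Note: csum_ipv6_magic() folds in the IPv6 pseudo-header (source and
 * destination addresses, upper-layer length and next-header value, per
 * RFC 2460 section 8.1), so callers of tcp_v6_check() only supply the
 * partial sum over the TCP header and payload in 'base'.
 */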
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
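/*
 * The initial sequence number comes from a keyed hash over the
 * {saddr, daddr, sport, dport} tuple (plus a clock component inside
 * secure_tcpv6_sequence_number()), which keeps ISNs unpredictable to
 * off-path attackers.
 */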
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			usin->sin6_addr = flowlabel->dst;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	np->daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = np->daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		np->rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
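/*
 * Recap of the connect() path above: validate the address, flow label
 * and scope id; if the destination is v4-mapped, temporarily switch the
 * socket to the ipv6_mapped ops and delegate to tcp_v4_connect()
 * (reverting on failure); otherwise do the native IPv6 route lookup,
 * source-address selection, ISN generation and SYN transmission.
 */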
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			fl6.daddr = np->daddr;
			fl6.saddr = np->saddr;
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp,
			      u16 queue_mapping)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = treq->rmt_addr;
	fl6.saddr = treq->loc_addr;
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		fl6.daddr = treq->rmt_addr;
		skb_set_queue_mapping(skb, queue_mapping);
		err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp, 0);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
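/*
 * TCP MD5 signatures (RFC 2385) cover a pseudo-header similar to the
 * checksum one: both addresses, the protocol number and the segment
 * length, followed by the TCP header, any payload and finally the key.
 */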
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct request_sock *req,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
{
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return 1;
	}
	return 0;
}
#endif
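/*
 * Inbound MD5 policy: drop when a key is configured but no option is
 * present, when a signature arrives unexpectedly, or when the
 * recomputed digest differs; only the 0-return paths above let the
 * segment continue into normal receive processing.
 */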
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
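/*
 * GRO/GSO pairing: tcp6_gro_receive() only coalesces segments whose
 * checksum already verified, and tcp6_gro_complete() re-seeds the
 * pseudo-header checksum and marks the merged skb SKB_GSO_TCPV6 so it
 * can be resegmented on output.
 */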
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup even if it is for RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
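/*
 * tcp_v6_send_response() builds RSTs and bare ACKs on behalf of
 * connections that may no longer have a socket, which is why it
 * transmits through the per-namespace control socket
 * (net->ipv6.tcp_sk) rather than the originating sock.
 */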
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	hash_location = tcp_parse_md5sig_option(th);
	if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we do not lose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					    &tcp_hashinfo, &ipv6h->daddr,
					    ntohs(th->source), inet6_iif(skb));
		if (!sk1)
			return;

		rcu_read_lock();
		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto release_sk1;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto release_sk1;
	} else {
		key = sk ? tcp_v6_md5_do_lookup(sk, &ipv6h->saddr) : NULL;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);

#ifdef CONFIG_TCP_MD5SIG
release_sk1:
	if (sk1) {
		rcu_read_unlock();
		sock_put(sk1);
	}
#endif
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key, u8 tclass)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	bool want_cookie = false;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = false;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	treq->rmt_addr = ipv6_hdr(skb)->saddr;
	treq->loc_addr = ipv6_hdr(skb)->daddr;
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, skb);

	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_release;

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext,
			       skb_get_queue_mapping(skb)) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
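/*
 * Note that every failure path above returns 0: a listener never
 * answers a dropped SYN with a RST; the client simply retransmits.
 */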
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newnp->rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_tclass  = ipv6_tclass(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newnp->daddr = treq->rmt_addr;
	newnp->saddr = treq->loc_addr;
	newnp->rcv_saddr = treq->loc_addr;
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		consume_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newnp->daddr,
			       AF_INET6, key->key, key->keylen, GFP_ATOMIC);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
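/*
 * The skb->len <= 76 cutoff forces immediate software verification for
 * short segments (likely pure ACKs, cheap to sum); larger packets keep
 * a deferred checksum, presumably so it can be folded into the
 * copy-to-user pass if the data is actually consumed.
 */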
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxtclass)
			np->rcv_tclass = ipv6_tclass(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = net_dma_find_channel();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb,
					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	/*
	 *	Discard frame
	 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
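/*
 * do_time_wait dispatch: TCP_TW_SYN may revive the connection on a new
 * listener (the timewait sock is descheduled and processing restarts at
 * 'process'), TCP_TW_ACK re-sends the final ACK, TCP_TW_RST answers via
 * no_tcp_socket, and TCP_TW_SUCCESS consumes the segment silently.
 */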
static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}

static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	const struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv6_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
#endif

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 const struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}
static const struct file_operations tcp6_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= &tcp6_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

	/* Unwind in the reverse order of registration, so that a failure
	 * at any step only undoes what was actually registered.
	 */
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}