/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static void	__tcp_v6_send_check(struct sk_buff *skb,
				    const struct in6_addr *saddr,
				    const struct in6_addr *daddr);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(int len,
				       const struct in6_addr *saddr,
				       const struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
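/*
 * Illustrative sketch (not part of this file): tcp_v6_check() simply folds
 * the IPv6 pseudo-header (saddr, daddr, length, next-header = TCP) into a
 * partial checksum, so a caller that has already summed the TCP segment can
 * finish the checksum like this:
 *
 *	__wsum partial = csum_partial(th, th->doff << 2, 0);
 *	th->check = tcp_v6_check(len, &ip6h->saddr, &ip6h->daddr, partial);
 *
 * __tcp_v6_send_check() later in this file does exactly this dance for the
 * non-offloaded (software checksum) case.
 */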
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct rt6_info *rt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &np->daddr);
	ipv6_addr_copy(&fl6.saddr,
		       (saddr ? saddr : &np->saddr));
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	final_p = fl6_update_dst(&fl6, np->opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl6.saddr;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	rt = (struct rt6_info *) dst;
	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
		struct inet_peer *peer = rt6_get_peer(rt);
		/*
		 * VJ's idea. We save last timestamp seen from
		 * the destination in peer table, when entering state
		 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
		 * when trying new connection.
		 */
		if (peer) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
				tp->rx_opt.ts_recent = peer->tcp_ts;
			}
		}
	}

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
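/*
 * Usage note (illustrative, from the socket API side): an AF_INET6 socket
 * that connect()s to a v4-mapped address such as ::ffff:192.0.2.1 is
 * transparently handed to tcp_v4_connect() above, and icsk_af_ops is
 * switched to ipv6_mapped. A hedged userspace sketch:
 *
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a.sin6_addr);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * Setting IPV6_V6ONLY on the socket makes the mapped case fail with
 * -ENETUNREACH instead (the __ipv6_only_sock() check above).
 */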
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			  th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl6.daddr, &np->daddr);
			ipv6_addr_copy(&fl6.saddr, &np->saddr);
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p, final;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int err;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
	ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
	fl6.flowlabel = 0;
	fl6.flowi6_oif = treq->iif;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = inet_rsk(req)->rmt_port;
	fl6.fl6_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, flowi6_to_flowi(&fl6));

	opt = np->opt;
	final_p = fl6_update_dst(&fl6, opt, &final);

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto done;
	}
	skb = tcp_make_synack(sk, dst, req, rvp);
	err = -ENOMEM;
	if (skb) {
		__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);

		ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl6, opt);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   const struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		}
		if (tp->md5sig_info->entries6 == 0 &&
		    tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				kfree(newkey);
				if (tp->md5sig_info->entries6 == 0)
					tcp_free_md5sig_pool();
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}
static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
				tcp_free_md5sig_pool();
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			return 0;
		}
	}
	return -ENOENT;
}
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
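/*
 * Userspace sketch (illustrative) of the option this parses: TCP_MD5SIG
 * installs a per-peer key before connect()/listen(). Passing tcpm_keylen == 0
 * deletes the key, matching the do_del branches above.
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	memcpy(&md5.tcpm_addr, &peer_sin6, sizeof(peer_sin6));
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */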
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					const struct in6_addr *daddr,
					const struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
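/*
 * For reference (sketch of the field layout used by the hashing above): the
 * tcp6_pseudohdr block mirrors the RFC 2460 checksum pseudo-header, and
 * RFC 2385 requires hashing exactly this block, then the TCP header with a
 * zeroed checksum field, then the payload, then the key:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;
 *		__be32		protocol;	(includes padding)
 *	};
 *
 * That ordering is the sequence of tcp_md5_hash_*() calls in the two
 * functions below.
 */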
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout = 	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif
static void __tcp_v6_send_check(struct sk_buff *skb,
				const struct in6_addr *saddr, const struct in6_addr *daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v6_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	__tcp_v6_send_check(skb, &np->saddr, &np->daddr);
}
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	return 0;
}
static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.flowi6_oif = inet6_iif(skb);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass a socket to ip6_dst_lookup_flow even when it is for RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, NULL);
		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
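/*
 * Option layout sketch for the reply built above (each *topt++ writes one
 * 32-bit word, so doff stays word-aligned):
 *
 *	ts:  NOP NOP TIMESTAMP(8) len(10) | tcp_time_stamp | echoed ts
 *	md5: NOP NOP MD5SIG(19)   len(18) | 16-byte digest (4 words)
 *
 * tot_len was sized with TCPOLEN_TSTAMP_ALIGNED / TCPOLEN_MD5SIG_ALIGNED up
 * front, so the skb_push() above reserves exactly these words.
 */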
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
	struct dst_entry *dst = NULL;
	int want_cookie = 0;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
		if (!want_cookie)
			goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

		want_cookie = 0;	/* not our kind of cookie */
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie || tmp_opt.tstamp_ok)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (!isn) {
		struct inet_peer *peer = NULL;

		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		if (want_cookie) {
			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
			req->cookie_ts = tmp_opt.tstamp_ok;
			goto have_isn;
		}

		/* VJ's idea. We save last timestamp seen
		 * from the destination in peer table, when entering
		 * state TIME-WAIT, and check against it before
		 * accepting new connection request.
		 *
		 * If "isn" is not zero, this request hit alive
		 * timewait bucket, so that all the necessary checks
		 * are made in the function processing timewait state.
		 */
		if (tmp_opt.saw_tstamp &&
		    tcp_death_row.sysctl_tw_recycle &&
		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
		    ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
				    &treq->rmt_addr)) {
			inet_peer_refcheck(peer);
			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
			    (s32)(peer->tcp_ts - req->ts_recent) >
							TCP_PAWS_WINDOW) {
				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
				goto drop_and_release;
			}
		}
		/* Kill the following clause, if you dislike this way. */
		else if (!sysctl_tcp_syncookies &&
			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
			  (sysctl_max_syn_backlog >> 2)) &&
			 (!peer || !peer->tcp_ts_stamp) &&
			 (!dst || !dst_metric(dst, RTAX_RTT))) {
			/* Without syncookies last quarter of
			 * backlog is filled with destinations,
			 * proven to be alive.
			 * It means that we continue to communicate
			 * to destinations, already remembered
			 * to the moment of synflood.
			 */
			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
			goto drop_and_release;
		}

		isn = tcp_v6_init_sequence(skb);
	}
have_isn:
	tcp_rsk(req)->snt_isn = isn;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_release:
	dst_release(dst);
drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
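/*
 * Flow summary (descriptive comment, added for readability): a SYN that
 * survives the queue-pressure checks gets a request_sock. With syncookies
 * active the ISN encodes the connection tuple via cookie_v6_init_sequence()
 * and nothing is queued; otherwise the request is hashed with a
 * TCP_TIMEOUT_INIT timer and the SYN|ACK goes out through
 * tcp_v6_send_synack() above.
 */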
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, req);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	tcp_initialize_rcv_mss(newsk);
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
	newtp->total_retrans = req->retrans;

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		sock_put(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		sock_rps_save_rxhash(sk, skb->rxhash);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb->rxhash);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb->rxhash);

	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
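/*
 * Userspace view of the ipv6_pktoptions latching above (illustrative
 * sketch): a process that set e.g. IPV6_RECVPKTINFO can fetch the options
 * latched from the most recent segment via the Stevens-style
 * IPV6_PKTOPTIONS getter:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
 *	...
 *	char cbuf[256];
 *	socklen_t len = sizeof(cbuf);
 *	getsockopt(fd, IPPROTO_IPV6, IPV6_PKTOPTIONS, cbuf, &len);
 *
 * The xchg() against np->pktoptions is what makes that getter see only the
 * last queued segment's options.
 */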
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	const struct ipv6hdr *hdr;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	hdr = ipv6_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (unlikely(sk_add_backlog(sk, skb))) {
		bh_unlock_sock(sk);
		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
{
	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_peer *peer;

	if (!rt ||
	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
		peer = inet_getpeer_v6(&np->daddr, 1);
		*release_it = true;
	} else {
		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);
		peer = rt->rt6i_peer;
		*release_it = false;
	}

	return peer;
}

static void *tcp_v6_tw_get_peer(struct sock *sk)
{
	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
	struct inet_timewait_sock *tw = inet_twsk(sk);

	if (tw->tw_family == AF_INET)
		return tcp_v4_tw_get_peer(sk);

	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
}
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
	.twsk_getpeer	= tcp_v6_tw_get_peer,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v6_get_peer,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.get_peer	   = tcp_v4_get_peer,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}
*sk
)
2003 #ifdef CONFIG_TCP_MD5SIG
2004 /* Clean up the MD5 key list */
2005 if (tcp_sk(sk
)->md5sig_info
)
2006 tcp_v6_clear_md5_list(sk
);
2008 tcp_v4_destroy_sock(sk
);
2009 inet6_destroy_sock(sk
);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
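/*
 * Reading it back (illustrative): this seq_file backs /proc/net/tcp6.
 * Addresses are printed as four raw %08X words of s6_addr32, so on
 * little-endian machines a parser must byte-swap within each 32-bit group
 * rather than read the hex string as one big-endian number, e.g.
 *
 *	$ cat /proc/net/tcp6   # header line matches tcp6_seq_show() above
 */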
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}