/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;

static void	tcp_v6_send_reset(struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sk_buff *skb,
				      struct request_sock *req);
static void	tcp_v6_send_check(struct sock *sk, int len,
				  struct sk_buff *skb);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&tcp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}
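/*
 * Hash the socket into the established table.  v4-mapped sockets are
 * handed to the IPv4 hash routine; everything else goes into the IPv6
 * hash with BHs disabled.
 */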
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&tcp_hashinfo, sk);
		local_bh_enable();
	}
}
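/*
 * Compute the TCP checksum over the IPv6 pseudo-header (source and
 * destination address, length, next-header) plus the partial sum of
 * the TCP segment itself, per RFC 2460 section 8.1.
 */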
static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
				   struct in6_addr *saddr,
				   struct in6_addr *daddr,
				   unsigned long base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
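/*
 * Pick the initial sequence number for a new connection using the
 * secure generator keyed on the address/port 4-tuple; the IPv4
 * variant is used for v4-mapped peers.
 */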
static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IPV6)) {
		return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
						    skb->nh.ipv6h->saddr.s6_addr32,
						    skb->h.th->dest,
						    skb->h.th->source);
	} else {
		return secure_tcp_sequence_number(skb->nh.iph->daddr,
						  skb->nh.iph->saddr,
						  skb->h.th->dest,
						  skb->h.th->source);
	}
}
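/*
 * Open an active connection; this is the protocol hook behind
 * connect(2) on an AF_INET6 TCP socket.  For reference, a minimal
 * userspace trigger might look like this (illustrative sketch only,
 * not part of this file):
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
 *				  .sin6_port = htons(80) };
 *	inet_pton(AF_INET6, "2001:db8::1", &a.sin6_addr);
 *	connect(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * The function validates the destination, falls back to
 * tcp_v4_connect() for v4-mapped addresses, routes the flow, binds a
 * local port via inet6_hash_connect() and sends the SYN.
 */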
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return(-EAFNOSUPPORT);

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto failure;

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
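/*
 * ICMPv6 error handler.  Locates the owning socket, handles
 * ICMPV6_PKT_TOOBIG by syncing the path MTU, and propagates other
 * errors to the socket or to a pending connection request.
 */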
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       int type, int code, int offset, __u32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;

	sk = inet6_lookup(&tcp_hashinfo, &hdr->daddr, th->dest, &hdr->saddr,
			  th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put((struct inet_timewait_sock *)sk);
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	if (dst == NULL) {
		opt = np->opt;
		if (opt == NULL &&
		    np->rxopt.bits.osrcrt == 2 &&
		    treq->pktopts) {
			struct sk_buff *pktopts = treq->pktopts;
			struct inet6_skb_parm *rxopt = IP6CB(pktopts);
			if (rxopt->srcrt)
				opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(pktopts->nh.raw + rxopt->srcrt));
		}

		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}

		err = ip6_dst_lookup(sk, &dst, &fl);
		if (err)
			goto done;
		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);
		if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto done;
	}

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = skb->h.th;

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		if (err == NET_XMIT_CN)
			err = 0;
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
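/*
 * Release the pktoptions skb held for a pending request, if any.
 */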
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}
static struct request_sock_ops tcp6_request_sock_ops = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
};
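/*
 * Fill in the TCP checksum field.  With hardware offload
 * (CHECKSUM_PARTIAL) only the pseudo-header sum is computed and the
 * device finishes the job; otherwise the full checksum is computed
 * here in software.
 */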
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = skb->h.th;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff<<2,
							 skb->csum));
	}
}
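/*
 * Prepare a GSO frame for transmission: seed the checksum field with
 * the pseudo-header sum and mark the skb CHECKSUM_PARTIAL so that
 * segmentation can finish each segment's checksum.
 */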
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = skb->nh.ipv6h;
	th = skb->h.th;

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}
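/*
 * Send a RST in response to a segment that has no socket.  The reply
 * is built on a fresh skb and routed back over the reversed flow.
 */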
static void tcp_v6_send_reset(struct sk_buff *skb)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */
	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

	t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));
	}

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}
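/*
 * Send a bare ACK (used for TIME_WAIT sockets and pending requests),
 * optionally carrying a timestamp option.
 */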
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
	struct tcphdr *th = skb->h.th, *t1;
	struct sk_buff *buff;
	struct flowi fl;
	int tot_len = sizeof(struct tcphdr);

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len/4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	if (ts) {
		u32 *ptr = (u32*)(t1 + 1);
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tcp_time_stamp);
		*ptr = htonl(ts);
	}

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(tcp6_socket->sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}
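/*
 * ACK helpers for TIME_WAIT sockets and pending connection requests.
 */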
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent);

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
			tcp_rsk(req)->rcv_isn + 1,
			req->rcv_wnd, req->ts_recent);
}
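/*
 * Match an incoming segment on a listening socket against pending
 * connection requests and the established hash (including TIME_WAIT).
 */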
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = skb->h.th;
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &skb->nh.ipv6h->saddr,
				   &skb->nh.ipv6h->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&tcp_hashinfo, &skb->nh.ipv6h->saddr,
					 th->source, &skb->nh.ipv6h->daddr,
					 ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put((struct inet_timewait_sock *)nsk);
		return NULL;
	}

#if 0 /*def CONFIG_SYN_COOKIES*/
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/*
	 *	There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &skb->nh.ipv6h->saddr);
	ipv6_addr_copy(&treq->loc_addr, &skb->nh.ipv6h->daddr);
	TCP_ECN_create_request(req, skb->h.th);
	treq->pktopts = NULL;
	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		treq->pktopts = skb;
	}
	treq->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		treq->iif = inet6_iif(skb);

	if (isn == 0)
		isn = tcp_v6_init_sequence(sk, skb);

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req, NULL))
		goto drop;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}
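/*
 * Create the child socket for a completed handshake.  The v4-mapped
 * case is delegated to tcp_v4_syn_recv_sock() and then dressed up
 * with IPv6 state; the native IPv6 case clones options from the
 * listener and routes the new flow.
 */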
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (np->rxopt.bits.osrcrt == 2 &&
	    opt == NULL && treq->pktopts) {
		struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts);
		if (rxopt->srcrt)
			opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt));
	}

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more thing there: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&tcp_hashinfo, newsk);
	inet_inherit_port(&tcp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
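/*
 * Validate the checksum of an incoming segment, trusting a
 * CHECKSUM_COMPLETE value from the device when it verifies.
 */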
static int tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~tcp_v6_check(skb->h.th, skb->len, &skb->nh.ipv6h->saddr,
				  &skb->nh.ipv6h->daddr, 0);

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb, 0))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution. --ANK
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}
**pskb
)
1185 struct sk_buff
*skb
= *pskb
;
1190 if (skb
->pkt_type
!= PACKET_HOST
)
1194 * Count it even if it's bad.
1196 TCP_INC_STATS_BH(TCP_MIB_INSEGS
);
1198 if (!pskb_may_pull(skb
, sizeof(struct tcphdr
)))
1203 if (th
->doff
< sizeof(struct tcphdr
)/4)
1205 if (!pskb_may_pull(skb
, th
->doff
*4))
1208 if ((skb
->ip_summed
!= CHECKSUM_UNNECESSARY
&&
1209 tcp_v6_checksum_init(skb
)))
1213 TCP_SKB_CB(skb
)->seq
= ntohl(th
->seq
);
1214 TCP_SKB_CB(skb
)->end_seq
= (TCP_SKB_CB(skb
)->seq
+ th
->syn
+ th
->fin
+
1215 skb
->len
- th
->doff
*4);
1216 TCP_SKB_CB(skb
)->ack_seq
= ntohl(th
->ack_seq
);
1217 TCP_SKB_CB(skb
)->when
= 0;
1218 TCP_SKB_CB(skb
)->flags
= ipv6_get_dsfield(skb
->nh
.ipv6h
);
1219 TCP_SKB_CB(skb
)->sacked
= 0;
1221 sk
= __inet6_lookup(&tcp_hashinfo
, &skb
->nh
.ipv6h
->saddr
, th
->source
,
1222 &skb
->nh
.ipv6h
->daddr
, ntohs(th
->dest
),
1229 if (sk
->sk_state
== TCP_TIME_WAIT
)
1232 if (!xfrm6_policy_check(sk
, XFRM_POLICY_IN
, skb
))
1233 goto discard_and_relse
;
1235 if (sk_filter(sk
, skb
, 0))
1236 goto discard_and_relse
;
1242 if (!sock_owned_by_user(sk
)) {
1243 #ifdef CONFIG_NET_DMA
1244 struct tcp_sock
*tp
= tcp_sk(sk
);
1245 if (tp
->ucopy
.dma_chan
)
1246 ret
= tcp_v6_do_rcv(sk
, skb
);
1250 if (!tcp_prequeue(sk
, skb
))
1251 ret
= tcp_v6_do_rcv(sk
, skb
);
1254 sk_add_backlog(sk
, skb
);
1258 return ret
? -1 : 0;
1261 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
1264 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1266 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1268 tcp_v6_send_reset(skb
);
1285 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
)) {
1286 inet_twsk_put((struct inet_timewait_sock
*)sk
);
1290 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1291 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1292 inet_twsk_put((struct inet_timewait_sock
*)sk
);
1296 switch (tcp_timewait_state_process((struct inet_timewait_sock
*)sk
,
1302 sk2
= inet6_lookup_listener(&tcp_hashinfo
,
1303 &skb
->nh
.ipv6h
->daddr
,
1304 ntohs(th
->dest
), inet6_iif(skb
));
1306 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
1307 inet_twsk_deschedule(tw
, &tcp_death_row
);
1312 /* Fall through to ACK */
1315 tcp_v6_timewait_ack(sk
, skb
);
1319 case TCP_TW_SUCCESS
:;
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
/*
 *	TCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
static int tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,    /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0,  /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
#ifdef CONFIG_PROC_FS
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};

int __init tcp6_proc_init(void)
{
	return tcp_proc_register(&tcp6_seq_afinfo);
}

void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
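/*
 * Glue for registration with the IPv6 stack: the inet6 protocol
 * handler, the SOCK_STREAM protosw entry, and the init routine that
 * wires them up and creates the control socket used for RSTs/ACKs.
 */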
static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	inet6_register_protosw(&tcpv6_protosw);

	if (inet_csk_ctl_sock_create(&tcp6_socket, PF_INET6, SOCK_RAW,
				     IPPROTO_TCP) < 0)
		panic("Failed to create the TCPv6 control socket.\n");
}