3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
28 #include <linux/module.h>
29 #include <linux/config.h>
30 #include <linux/errno.h>
31 #include <linux/types.h>
32 #include <linux/socket.h>
33 #include <linux/sockios.h>
34 #include <linux/net.h>
35 #include <linux/jiffies.h>
37 #include <linux/in6.h>
38 #include <linux/netdevice.h>
39 #include <linux/init.h>
40 #include <linux/jhash.h>
41 #include <linux/ipsec.h>
42 #include <linux/times.h>
44 #include <linux/ipv6.h>
45 #include <linux/icmpv6.h>
46 #include <linux/random.h>
49 #include <net/ndisc.h>
50 #include <net/inet6_hashtables.h>
51 #include <net/inet6_connection_sock.h>
53 #include <net/transp_v6.h>
54 #include <net/addrconf.h>
55 #include <net/ip6_route.h>
56 #include <net/ip6_checksum.h>
57 #include <net/inet_ecn.h>
58 #include <net/protocol.h>
60 #include <net/addrconf.h>
62 #include <net/dsfield.h>
63 #include <net/timewait_sock.h>
65 #include <asm/uaccess.h>
67 #include <linux/proc_fs.h>
68 #include <linux/seq_file.h>
70 static void tcp_v6_send_reset(struct sk_buff
*skb
);
71 static void tcp_v6_reqsk_send_ack(struct sk_buff
*skb
, struct request_sock
*req
);
72 static void tcp_v6_send_check(struct sock
*sk
, int len
,
75 static int tcp_v6_do_rcv(struct sock
*sk
, struct sk_buff
*skb
);
77 static struct inet_connection_sock_af_ops ipv6_mapped
;
78 static struct inet_connection_sock_af_ops ipv6_specific
;
/* Bind @sk to local port @snum (0 = let the allocator pick a port).
 * Returns 0 on success, negative errno on failure. */
80 static int tcp_v6_get_port(struct sock
*sk
, unsigned short snum
)
/* Delegate to the generic connection-sock port allocator over the shared
 * TCP hash table; inet6_csk_bind_conflict supplies the IPv6-aware test
 * for v4/v6 bind clashes on the same port. */
82 return inet_csk_get_port(&tcp_hashinfo
, sk
, snum
,
83 inet6_csk_bind_conflict
);
/* Insert @sk into the TCP socket hash tables so incoming segments can
 * find it. No-op for closed sockets. */
86 static void tcp_v6_hash(struct sock
*sk
)
88 if (sk
->sk_state
!= TCP_CLOSE
) {
/* Socket using the v4-mapped ops vector: presumably hashed through the
 * IPv4 path instead -- the branch body (original lines 90-93) is missing
 * from this excerpt, so confirm against the full source. */
89 if (inet_csk(sk
)->icsk_af_ops
== &ipv6_mapped
) {
/* Native IPv6 socket: insert into the shared IPv6-aware hash table. */
94 __inet6_hash(&tcp_hashinfo
, sk
);
/* Compute the TCP checksum for a segment of @len bytes between @saddr
 * and @daddr, folding in the IPv6 pseudo-header. The final parameter
 * (partial checksum "base") is on an original line missing from this
 * excerpt. Returns the 16-bit checksum value. */
99 static __inline__ u16
tcp_v6_check(struct tcphdr
*th
, int len
,
100 struct in6_addr
*saddr
,
101 struct in6_addr
*daddr
,
/* csum_ipv6_magic folds the pseudo-header (addresses, length, protocol)
 * into the running checksum @base. */
104 return csum_ipv6_magic(saddr
, daddr
, len
, IPPROTO_TCP
, base
);
/* Pick the initial TCP sequence number (ISN) for a connection described
 * by @skb, dispatching on the packet's link-layer protocol so v4-mapped
 * connections use the IPv4 ISN generator. Trailing arguments of both
 * calls (ports) are on original lines missing from this excerpt. */
107 static __u32
tcp_v6_init_sequence(struct sock
*sk
, struct sk_buff
*skb
)
109 if (skb
->protocol
== htons(ETH_P_IPV6
)) {
/* Native IPv6: derive the ISN from the v6 address pair. Note the
 * arguments are swapped (their daddr is our saddr) because @skb is the
 * *incoming* packet. */
110 return secure_tcpv6_sequence_number(skb
->nh
.ipv6h
->daddr
.s6_addr32
,
111 skb
->nh
.ipv6h
->saddr
.s6_addr32
,
/* v4-mapped: fall back to the IPv4 generator using the IPv4 header. */
115 return secure_tcp_sequence_number(skb
->nh
.iph
->daddr
,
122 static int tcp_v6_connect(struct sock
*sk
, struct sockaddr
*uaddr
,
125 struct sockaddr_in6
*usin
= (struct sockaddr_in6
*) uaddr
;
126 struct inet_sock
*inet
= inet_sk(sk
);
127 struct ipv6_pinfo
*np
= inet6_sk(sk
);
128 struct tcp_sock
*tp
= tcp_sk(sk
);
129 struct in6_addr
*saddr
= NULL
, *final_p
= NULL
, final
;
131 struct dst_entry
*dst
;
135 if (addr_len
< SIN6_LEN_RFC2133
)
138 if (usin
->sin6_family
!= AF_INET6
)
139 return(-EAFNOSUPPORT
);
141 memset(&fl
, 0, sizeof(fl
));
144 fl
.fl6_flowlabel
= usin
->sin6_flowinfo
&IPV6_FLOWINFO_MASK
;
145 IP6_ECN_flow_init(fl
.fl6_flowlabel
);
146 if (fl
.fl6_flowlabel
&IPV6_FLOWLABEL_MASK
) {
147 struct ip6_flowlabel
*flowlabel
;
148 flowlabel
= fl6_sock_lookup(sk
, fl
.fl6_flowlabel
);
149 if (flowlabel
== NULL
)
151 ipv6_addr_copy(&usin
->sin6_addr
, &flowlabel
->dst
);
152 fl6_sock_release(flowlabel
);
157 * connect() to INADDR_ANY means loopback (BSD'ism).
160 if(ipv6_addr_any(&usin
->sin6_addr
))
161 usin
->sin6_addr
.s6_addr
[15] = 0x1;
163 addr_type
= ipv6_addr_type(&usin
->sin6_addr
);
165 if(addr_type
& IPV6_ADDR_MULTICAST
)
168 if (addr_type
&IPV6_ADDR_LINKLOCAL
) {
169 if (addr_len
>= sizeof(struct sockaddr_in6
) &&
170 usin
->sin6_scope_id
) {
171 /* If interface is set while binding, indices
174 if (sk
->sk_bound_dev_if
&&
175 sk
->sk_bound_dev_if
!= usin
->sin6_scope_id
)
178 sk
->sk_bound_dev_if
= usin
->sin6_scope_id
;
181 /* Connect to link-local address requires an interface */
182 if (!sk
->sk_bound_dev_if
)
186 if (tp
->rx_opt
.ts_recent_stamp
&&
187 !ipv6_addr_equal(&np
->daddr
, &usin
->sin6_addr
)) {
188 tp
->rx_opt
.ts_recent
= 0;
189 tp
->rx_opt
.ts_recent_stamp
= 0;
193 ipv6_addr_copy(&np
->daddr
, &usin
->sin6_addr
);
194 np
->flow_label
= fl
.fl6_flowlabel
;
200 if (addr_type
== IPV6_ADDR_MAPPED
) {
201 u32 exthdrlen
= tp
->ext_header_len
;
202 struct sockaddr_in sin
;
204 SOCK_DEBUG(sk
, "connect: ipv4 mapped\n");
206 if (__ipv6_only_sock(sk
))
209 sin
.sin_family
= AF_INET
;
210 sin
.sin_port
= usin
->sin6_port
;
211 sin
.sin_addr
.s_addr
= usin
->sin6_addr
.s6_addr32
[3];
213 inet_csk(sk
)->icsk_af_ops
= &ipv6_mapped
;
214 sk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
216 err
= tcp_v4_connect(sk
, (struct sockaddr
*)&sin
, sizeof(sin
));
219 tp
->ext_header_len
= exthdrlen
;
220 inet_csk(sk
)->icsk_af_ops
= &ipv6_specific
;
221 sk
->sk_backlog_rcv
= tcp_v6_do_rcv
;
224 ipv6_addr_set(&np
->saddr
, 0, 0, htonl(0x0000FFFF),
226 ipv6_addr_set(&np
->rcv_saddr
, 0, 0, htonl(0x0000FFFF),
233 if (!ipv6_addr_any(&np
->rcv_saddr
))
234 saddr
= &np
->rcv_saddr
;
236 fl
.proto
= IPPROTO_TCP
;
237 ipv6_addr_copy(&fl
.fl6_dst
, &np
->daddr
);
238 ipv6_addr_copy(&fl
.fl6_src
,
239 (saddr
? saddr
: &np
->saddr
));
240 fl
.oif
= sk
->sk_bound_dev_if
;
241 fl
.fl_ip_dport
= usin
->sin6_port
;
242 fl
.fl_ip_sport
= inet
->sport
;
244 if (np
->opt
&& np
->opt
->srcrt
) {
245 struct rt0_hdr
*rt0
= (struct rt0_hdr
*)np
->opt
->srcrt
;
246 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
247 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
251 err
= ip6_dst_lookup(sk
, &dst
, &fl
);
255 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
257 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
262 ipv6_addr_copy(&np
->rcv_saddr
, saddr
);
265 /* set the source address */
266 ipv6_addr_copy(&np
->saddr
, saddr
);
267 inet
->rcv_saddr
= LOOPBACK4_IPV6
;
269 ip6_dst_store(sk
, dst
, NULL
);
270 sk
->sk_route_caps
= dst
->dev
->features
&
271 ~(NETIF_F_IP_CSUM
| NETIF_F_TSO
);
273 tp
->ext_header_len
= 0;
275 tp
->ext_header_len
= np
->opt
->opt_flen
+ np
->opt
->opt_nflen
;
277 tp
->rx_opt
.mss_clamp
= IPV6_MIN_MTU
- sizeof(struct tcphdr
) - sizeof(struct ipv6hdr
);
279 inet
->dport
= usin
->sin6_port
;
281 tcp_set_state(sk
, TCP_SYN_SENT
);
282 err
= inet6_hash_connect(&tcp_death_row
, sk
);
287 tp
->write_seq
= secure_tcpv6_sequence_number(np
->saddr
.s6_addr32
,
292 err
= tcp_connect(sk
);
299 tcp_set_state(sk
, TCP_CLOSE
);
303 sk
->sk_route_caps
= 0;
307 static void tcp_v6_err(struct sk_buff
*skb
, struct inet6_skb_parm
*opt
,
308 int type
, int code
, int offset
, __u32 info
)
310 struct ipv6hdr
*hdr
= (struct ipv6hdr
*)skb
->data
;
311 const struct tcphdr
*th
= (struct tcphdr
*)(skb
->data
+offset
);
312 struct ipv6_pinfo
*np
;
318 sk
= inet6_lookup(&tcp_hashinfo
, &hdr
->daddr
, th
->dest
, &hdr
->saddr
,
319 th
->source
, skb
->dev
->ifindex
);
322 ICMP6_INC_STATS_BH(__in6_dev_get(skb
->dev
), ICMP6_MIB_INERRORS
);
326 if (sk
->sk_state
== TCP_TIME_WAIT
) {
327 inet_twsk_put((struct inet_timewait_sock
*)sk
);
332 if (sock_owned_by_user(sk
))
333 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS
);
335 if (sk
->sk_state
== TCP_CLOSE
)
339 seq
= ntohl(th
->seq
);
340 if (sk
->sk_state
!= TCP_LISTEN
&&
341 !between(seq
, tp
->snd_una
, tp
->snd_nxt
)) {
342 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS
);
348 if (type
== ICMPV6_PKT_TOOBIG
) {
349 struct dst_entry
*dst
= NULL
;
351 if (sock_owned_by_user(sk
))
353 if ((1 << sk
->sk_state
) & (TCPF_LISTEN
| TCPF_CLOSE
))
356 /* icmp should have updated the destination cache entry */
357 dst
= __sk_dst_check(sk
, np
->dst_cookie
);
360 struct inet_sock
*inet
= inet_sk(sk
);
363 /* BUGGG_FUTURE: Again, it is not clear how
364 to handle rthdr case. Ignore this complexity
367 memset(&fl
, 0, sizeof(fl
));
368 fl
.proto
= IPPROTO_TCP
;
369 ipv6_addr_copy(&fl
.fl6_dst
, &np
->daddr
);
370 ipv6_addr_copy(&fl
.fl6_src
, &np
->saddr
);
371 fl
.oif
= sk
->sk_bound_dev_if
;
372 fl
.fl_ip_dport
= inet
->dport
;
373 fl
.fl_ip_sport
= inet
->sport
;
375 if ((err
= ip6_dst_lookup(sk
, &dst
, &fl
))) {
376 sk
->sk_err_soft
= -err
;
380 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0) {
381 sk
->sk_err_soft
= -err
;
388 if (tp
->pmtu_cookie
> dst_mtu(dst
)) {
389 tcp_sync_mss(sk
, dst_mtu(dst
));
390 tcp_simple_retransmit(sk
);
391 } /* else let the usual retransmit timer handle it */
396 icmpv6_err_convert(type
, code
, &err
);
398 /* Might be for an request_sock */
399 switch (sk
->sk_state
) {
400 struct request_sock
*req
, **prev
;
402 if (sock_owned_by_user(sk
))
405 req
= inet6_csk_search_req(sk
, &prev
, th
->dest
, &hdr
->daddr
,
406 &hdr
->saddr
, inet6_iif(skb
));
410 /* ICMPs are not backlogged, hence we cannot get
411 * an established socket here.
413 BUG_TRAP(req
->sk
== NULL
);
415 if (seq
!= tcp_rsk(req
)->snt_isn
) {
416 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS
);
420 inet_csk_reqsk_queue_drop(sk
, req
, prev
);
424 case TCP_SYN_RECV
: /* Cannot happen.
425 It can, it SYNs are crossed. --ANK */
426 if (!sock_owned_by_user(sk
)) {
427 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS
);
429 sk
->sk_error_report(sk
); /* Wake people up to see the error (see connect in sock.c) */
433 sk
->sk_err_soft
= err
;
437 if (!sock_owned_by_user(sk
) && np
->recverr
) {
439 sk
->sk_error_report(sk
);
441 sk
->sk_err_soft
= err
;
449 static int tcp_v6_send_synack(struct sock
*sk
, struct request_sock
*req
,
450 struct dst_entry
*dst
)
452 struct inet6_request_sock
*treq
= inet6_rsk(req
);
453 struct ipv6_pinfo
*np
= inet6_sk(sk
);
454 struct sk_buff
* skb
;
455 struct ipv6_txoptions
*opt
= NULL
;
456 struct in6_addr
* final_p
= NULL
, final
;
460 memset(&fl
, 0, sizeof(fl
));
461 fl
.proto
= IPPROTO_TCP
;
462 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
463 ipv6_addr_copy(&fl
.fl6_src
, &treq
->loc_addr
);
464 fl
.fl6_flowlabel
= 0;
466 fl
.fl_ip_dport
= inet_rsk(req
)->rmt_port
;
467 fl
.fl_ip_sport
= inet_sk(sk
)->sport
;
472 np
->rxopt
.bits
.osrcrt
== 2 &&
474 struct sk_buff
*pktopts
= treq
->pktopts
;
475 struct inet6_skb_parm
*rxopt
= IP6CB(pktopts
);
477 opt
= ipv6_invert_rthdr(sk
, (struct ipv6_rt_hdr
*)(pktopts
->nh
.raw
+ rxopt
->srcrt
));
480 if (opt
&& opt
->srcrt
) {
481 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) opt
->srcrt
;
482 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
483 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
487 err
= ip6_dst_lookup(sk
, &dst
, &fl
);
491 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
492 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
496 skb
= tcp_make_synack(sk
, dst
, req
);
498 struct tcphdr
*th
= skb
->h
.th
;
500 th
->check
= tcp_v6_check(th
, skb
->len
,
501 &treq
->loc_addr
, &treq
->rmt_addr
,
502 csum_partial((char *)th
, skb
->len
, skb
->csum
));
504 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
505 err
= ip6_xmit(sk
, skb
, &fl
, opt
, 0);
506 if (err
== NET_XMIT_CN
)
511 if (opt
&& opt
!= np
->opt
)
512 sock_kfree_s(sk
, opt
, opt
->tot_len
);
/* request_sock destructor: release the SYN's cloned packet options
 * (pktopts skb), if any were stashed on the IPv6 request. */
516 static void tcp_v6_reqsk_destructor(struct request_sock
*req
)
518 if (inet6_rsk(req
)->pktopts
)
519 kfree_skb(inet6_rsk(req
)->pktopts
);
/* Ops vector for embryonic (SYN_RECV) TCPv6 connections: how to retransmit
 * the SYN-ACK, ack/reset the peer, and tear the request down. */
522 static struct request_sock_ops tcp6_request_sock_ops
= {
524 .obj_size
= sizeof(struct tcp6_request_sock
),
525 .rtx_syn_ack
= tcp_v6_send_synack
,
526 .send_ack
= tcp_v6_reqsk_send_ack
,
527 .destructor
= tcp_v6_reqsk_destructor
,
528 .send_reset
= tcp_v6_send_reset
/* Ops vector for TCPv6 TIME_WAIT minisockets; tcp_twsk_unique decides
 * whether a new connection may reuse a TIME_WAIT 4-tuple. */
531 static struct timewait_sock_ops tcp6_timewait_sock_ops
= {
532 .twsk_obj_size
= sizeof(struct tcp6_timewait_sock
),
533 .twsk_unique
= tcp_twsk_unique
,
/* Fill in the TCP checksum of an outgoing @len-byte segment on @sk,
 * choosing between hardware checksum offload and a full software sum. */
536 static void tcp_v6_send_check(struct sock
*sk
, int len
, struct sk_buff
*skb
)
538 struct ipv6_pinfo
*np
= inet6_sk(sk
);
539 struct tcphdr
*th
= skb
->h
.th
;
541 if (skb
->ip_summed
== CHECKSUM_HW
) {
/* HW offload path: seed th->check with the complemented pseudo-header
 * sum; the NIC completes the checksum over the payload. */
542 th
->check
= ~csum_ipv6_magic(&np
->saddr
, &np
->daddr
, len
, IPPROTO_TCP
, 0);
/* Tell the driver where inside the TCP header to store the result. */
543 skb
->csum
= offsetof(struct tcphdr
, check
);
/* Software path: full checksum = pseudo-header + header (doff words,
 * i.e. doff<<2 bytes); the payload sum continuation is on original
 * line 547, missing from this excerpt. */
545 th
->check
= csum_ipv6_magic(&np
->saddr
, &np
->daddr
, len
, IPPROTO_TCP
,
546 csum_partial((char *)th
, th
->doff
<<2,
/* Build and transmit a bare RST in response to the offending segment
 * @skb. No socket context is required (may be called for segments that
 * matched no socket). Several original lines (early returns, alloc_skb
 * GFP argument/failure check, the ACK-bit conditionals around lines
 * 583-591) are missing from this excerpt. */
552 static void tcp_v6_send_reset(struct sk_buff
*skb
)
554 struct tcphdr
*th
= skb
->h
.th
, *t1
;
555 struct sk_buff
*buff
;
/* Never reset in reply to a non-unicast destination (RFC conformance:
 * don't RST broadcasts/multicasts). */
561 if (!ipv6_unicast_destination(skb
))
565 * We need to grab some memory, and put together an RST,
566 * and then put it into the queue to be sent.
569 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + sizeof(struct tcphdr
),
/* Reserve headroom for the link, IPv6 and TCP headers, then push just
 * the TCP header -- an RST carries no payload. */
574 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + sizeof(struct tcphdr
));
576 t1
= (struct tcphdr
*) skb_push(buff
,sizeof(struct tcphdr
));
578 /* Swap the send and the receive. */
579 memset(t1
, 0, sizeof(*t1
));
580 t1
->dest
= th
->source
;
581 t1
->source
= th
->dest
;
582 t1
->doff
= sizeof(*t1
)/4;
/* If the incoming segment carried an ACK, the RST's SEQ echoes that
 * ack_seq (the guarding conditional is on missing original lines). */
586 t1
->seq
= th
->ack_seq
;
/* Otherwise ACK everything the peer sent: SEQ + SYN + FIN + data len. */
589 t1
->ack_seq
= htonl(ntohl(th
->seq
) + th
->syn
+ th
->fin
590 + skb
->len
- (th
->doff
<<2));
593 buff
->csum
= csum_partial((char *)t1
, sizeof(*t1
), 0);
/* Route the reply back the way the segment came: our dst is their src. */
595 memset(&fl
, 0, sizeof(fl
));
596 ipv6_addr_copy(&fl
.fl6_dst
, &skb
->nh
.ipv6h
->saddr
);
597 ipv6_addr_copy(&fl
.fl6_src
, &skb
->nh
.ipv6h
->daddr
);
599 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
600 sizeof(*t1
), IPPROTO_TCP
,
603 fl
.proto
= IPPROTO_TCP
;
604 fl
.oif
= inet6_iif(skb
);
605 fl
.fl_ip_dport
= t1
->dest
;
606 fl
.fl_ip_sport
= t1
->source
;
608 /* sk = NULL, but it is safe for now. RST socket required. */
609 if (!ip6_dst_lookup(NULL
, &buff
->dst
, &fl
)) {
/* Only transmit if IPsec policy permits; count the outgoing RST. */
611 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
612 ip6_xmit(NULL
, buff
, &fl
, NULL
, 0);
613 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS
);
614 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS
);
/* Build and transmit a bare ACK in reply to @skb with the given @seq,
 * @ack, window @win and, if @ts is nonzero, a TCP timestamp option.
 * Used for TIME_WAIT and request-sock ACKs. Some original lines
 * (tot_len adjustment when ts!=0, alloc failure check, the echoed
 * timestamp word) are missing from this excerpt. */
622 static void tcp_v6_send_ack(struct sk_buff
*skb
, u32 seq
, u32 ack
, u32 win
, u32 ts
)
624 struct tcphdr
*th
= skb
->h
.th
, *t1
;
625 struct sk_buff
*buff
;
/* Base header length; presumably grown by TCPOLEN_TSTAMP_ALIGNED when
 * @ts is set (that line is missing here) -- confirm in full source. */
627 int tot_len
= sizeof(struct tcphdr
);
632 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
,
637 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
);
639 t1
= (struct tcphdr
*) skb_push(buff
,tot_len
);
641 /* Swap the send and the receive. */
642 memset(t1
, 0, sizeof(*t1
));
643 t1
->dest
= th
->source
;
644 t1
->source
= th
->dest
;
645 t1
->doff
= tot_len
/4;
646 t1
->seq
= htonl(seq
);
647 t1
->ack_seq
= htonl(ack
);
649 t1
->window
= htons(win
);
/* Timestamp option: two NOPs for alignment, then kind/length, our
 * current timestamp, and (on a missing line) the echoed @ts. */
652 u32
*ptr
= (u32
*)(t1
+ 1);
653 *ptr
++ = htonl((TCPOPT_NOP
<< 24) | (TCPOPT_NOP
<< 16) |
654 (TCPOPT_TIMESTAMP
<< 8) | TCPOLEN_TIMESTAMP
);
655 *ptr
++ = htonl(tcp_time_stamp
);
659 buff
->csum
= csum_partial((char *)t1
, tot_len
, 0);
/* Route the ACK back toward the sender of @skb. */
661 memset(&fl
, 0, sizeof(fl
));
662 ipv6_addr_copy(&fl
.fl6_dst
, &skb
->nh
.ipv6h
->saddr
);
663 ipv6_addr_copy(&fl
.fl6_src
, &skb
->nh
.ipv6h
->daddr
);
665 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
666 tot_len
, IPPROTO_TCP
,
669 fl
.proto
= IPPROTO_TCP
;
670 fl
.oif
= inet6_iif(skb
);
671 fl
.fl_ip_dport
= t1
->dest
;
672 fl
.fl_ip_sport
= t1
->source
;
/* Stateless transmit, gated by IPsec policy, as in tcp_v6_send_reset(). */
674 if (!ip6_dst_lookup(NULL
, &buff
->dst
, &fl
)) {
675 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
676 ip6_xmit(NULL
, buff
, &fl
, NULL
, 0);
677 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS
);
/* ACK a segment received for a TIME_WAIT socket: replay snd_nxt/rcv_nxt,
 * the receive window (unscaled via tw_rcv_wscale) and the remembered
 * peer timestamp. */
685 static void tcp_v6_timewait_ack(struct sock
*sk
, struct sk_buff
*skb
)
687 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
688 const struct tcp_timewait_sock
*tcptw
= tcp_twsk(sk
);
690 tcp_v6_send_ack(skb
, tcptw
->tw_snd_nxt
, tcptw
->tw_rcv_nxt
,
691 tcptw
->tw_rcv_wnd
>> tw
->tw_rcv_wscale
,
692 tcptw
->tw_ts_recent
);
/* ACK on behalf of an embryonic connection: SEQ/ACK are the stored
 * ISNs + 1 (accounting for the SYN), window and timestamp come from
 * the request sock. */
697 static void tcp_v6_reqsk_send_ack(struct sk_buff
*skb
, struct request_sock
*req
)
699 tcp_v6_send_ack(skb
, tcp_rsk(req
)->snt_isn
+ 1, tcp_rsk(req
)->rcv_isn
+ 1, req
->rcv_wnd
, req
->ts_recent
);
/* For a segment arriving on listening socket @sk, find the socket that
 * should actually process it: a pending request sock, an already
 * established child, or @sk itself. Some glue lines (the `if (req)`
 * guard, nsk declaration/NULL checks, returns) are on original lines
 * missing from this excerpt. */
703 static struct sock
*tcp_v6_hnd_req(struct sock
*sk
,struct sk_buff
*skb
)
705 struct request_sock
*req
, **prev
;
706 const struct tcphdr
*th
= skb
->h
.th
;
709 /* Find possible connection requests. */
710 req
= inet6_csk_search_req(sk
, &prev
, th
->source
,
711 &skb
->nh
.ipv6h
->saddr
,
712 &skb
->nh
.ipv6h
->daddr
, inet6_iif(skb
));
/* Matching request found (guard on missing line): let tcp_check_req
 * validate the ACK and possibly create the child socket. */
714 return tcp_check_req(sk
, skb
, req
, prev
);
/* Otherwise look for an established socket with this 4-tuple (e.g. a
 * child created while this segment was in flight). */
716 nsk
= __inet6_lookup_established(&tcp_hashinfo
, &skb
->nh
.ipv6h
->saddr
,
717 th
->source
, &skb
->nh
.ipv6h
->daddr
,
718 ntohs(th
->dest
), inet6_iif(skb
));
721 if (nsk
->sk_state
!= TCP_TIME_WAIT
) {
/* TIME_WAIT hit: drop the reference taken by the lookup. */
725 inet_twsk_put((struct inet_timewait_sock
*)nsk
);
/* SYN-cookie hook, compiled out in this version. */
729 #if 0 /*def CONFIG_SYN_COOKIES*/
730 if (!th
->rst
&& !th
->syn
&& th
->ack
)
731 sk
= cookie_v6_check(sk
, skb
, &(IPCB(skb
)->opt
));
736 /* FIXME: this is substantially similar to the ipv4 code.
737 * Can some kind of merge be done? -- erics
739 static int tcp_v6_conn_request(struct sock
*sk
, struct sk_buff
*skb
)
741 struct inet6_request_sock
*treq
;
742 struct ipv6_pinfo
*np
= inet6_sk(sk
);
743 struct tcp_options_received tmp_opt
;
744 struct tcp_sock
*tp
= tcp_sk(sk
);
745 struct request_sock
*req
= NULL
;
746 __u32 isn
= TCP_SKB_CB(skb
)->when
;
748 if (skb
->protocol
== htons(ETH_P_IP
))
749 return tcp_v4_conn_request(sk
, skb
);
751 if (!ipv6_unicast_destination(skb
))
755 * There are no SYN attacks on IPv6, yet...
757 if (inet_csk_reqsk_queue_is_full(sk
) && !isn
) {
759 printk(KERN_INFO
"TCPv6: dropping request, synflood is possible\n");
763 if (sk_acceptq_is_full(sk
) && inet_csk_reqsk_queue_young(sk
) > 1)
766 req
= inet6_reqsk_alloc(&tcp6_request_sock_ops
);
770 tcp_clear_options(&tmp_opt
);
771 tmp_opt
.mss_clamp
= IPV6_MIN_MTU
- sizeof(struct tcphdr
) - sizeof(struct ipv6hdr
);
772 tmp_opt
.user_mss
= tp
->rx_opt
.user_mss
;
774 tcp_parse_options(skb
, &tmp_opt
, 0);
776 tmp_opt
.tstamp_ok
= tmp_opt
.saw_tstamp
;
777 tcp_openreq_init(req
, &tmp_opt
, skb
);
779 treq
= inet6_rsk(req
);
780 ipv6_addr_copy(&treq
->rmt_addr
, &skb
->nh
.ipv6h
->saddr
);
781 ipv6_addr_copy(&treq
->loc_addr
, &skb
->nh
.ipv6h
->daddr
);
782 TCP_ECN_create_request(req
, skb
->h
.th
);
783 treq
->pktopts
= NULL
;
784 if (ipv6_opt_accepted(sk
, skb
) ||
785 np
->rxopt
.bits
.rxinfo
|| np
->rxopt
.bits
.rxoinfo
||
786 np
->rxopt
.bits
.rxhlim
|| np
->rxopt
.bits
.rxohlim
) {
787 atomic_inc(&skb
->users
);
790 treq
->iif
= sk
->sk_bound_dev_if
;
792 /* So that link locals have meaning */
793 if (!sk
->sk_bound_dev_if
&&
794 ipv6_addr_type(&treq
->rmt_addr
) & IPV6_ADDR_LINKLOCAL
)
795 treq
->iif
= inet6_iif(skb
);
798 isn
= tcp_v6_init_sequence(sk
,skb
);
800 tcp_rsk(req
)->snt_isn
= isn
;
802 if (tcp_v6_send_synack(sk
, req
, NULL
))
805 inet6_csk_reqsk_queue_hash_add(sk
, req
, TCP_TIMEOUT_INIT
);
812 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS
);
813 return 0; /* don't send reset */
816 static struct sock
* tcp_v6_syn_recv_sock(struct sock
*sk
, struct sk_buff
*skb
,
817 struct request_sock
*req
,
818 struct dst_entry
*dst
)
820 struct inet6_request_sock
*treq
= inet6_rsk(req
);
821 struct ipv6_pinfo
*newnp
, *np
= inet6_sk(sk
);
822 struct tcp6_sock
*newtcp6sk
;
823 struct inet_sock
*newinet
;
824 struct tcp_sock
*newtp
;
826 struct ipv6_txoptions
*opt
;
828 if (skb
->protocol
== htons(ETH_P_IP
)) {
833 newsk
= tcp_v4_syn_recv_sock(sk
, skb
, req
, dst
);
838 newtcp6sk
= (struct tcp6_sock
*)newsk
;
839 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
841 newinet
= inet_sk(newsk
);
842 newnp
= inet6_sk(newsk
);
843 newtp
= tcp_sk(newsk
);
845 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
847 ipv6_addr_set(&newnp
->daddr
, 0, 0, htonl(0x0000FFFF),
850 ipv6_addr_set(&newnp
->saddr
, 0, 0, htonl(0x0000FFFF),
853 ipv6_addr_copy(&newnp
->rcv_saddr
, &newnp
->saddr
);
855 inet_csk(newsk
)->icsk_af_ops
= &ipv6_mapped
;
856 newsk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
857 newnp
->pktoptions
= NULL
;
859 newnp
->mcast_oif
= inet6_iif(skb
);
860 newnp
->mcast_hops
= skb
->nh
.ipv6h
->hop_limit
;
863 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
864 * here, tcp_create_openreq_child now does this for us, see the comment in
865 * that function for the gory details. -acme
868 /* It is tricky place. Until this moment IPv4 tcp
869 worked with IPv6 icsk.icsk_af_ops.
872 tcp_sync_mss(newsk
, newtp
->pmtu_cookie
);
879 if (sk_acceptq_is_full(sk
))
882 if (np
->rxopt
.bits
.osrcrt
== 2 &&
883 opt
== NULL
&& treq
->pktopts
) {
884 struct inet6_skb_parm
*rxopt
= IP6CB(treq
->pktopts
);
886 opt
= ipv6_invert_rthdr(sk
, (struct ipv6_rt_hdr
*)(treq
->pktopts
->nh
.raw
+ rxopt
->srcrt
));
890 struct in6_addr
*final_p
= NULL
, final
;
893 memset(&fl
, 0, sizeof(fl
));
894 fl
.proto
= IPPROTO_TCP
;
895 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
896 if (opt
&& opt
->srcrt
) {
897 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) opt
->srcrt
;
898 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
899 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
902 ipv6_addr_copy(&fl
.fl6_src
, &treq
->loc_addr
);
903 fl
.oif
= sk
->sk_bound_dev_if
;
904 fl
.fl_ip_dport
= inet_rsk(req
)->rmt_port
;
905 fl
.fl_ip_sport
= inet_sk(sk
)->sport
;
907 if (ip6_dst_lookup(sk
, &dst
, &fl
))
911 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
913 if ((xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
917 newsk
= tcp_create_openreq_child(sk
, req
, skb
);
922 * No need to charge this sock to the relevant IPv6 refcnt debug socks
923 * count here, tcp_create_openreq_child now does this for us, see the
924 * comment in that function for the gory details. -acme
927 ip6_dst_store(newsk
, dst
, NULL
);
928 newsk
->sk_route_caps
= dst
->dev
->features
&
929 ~(NETIF_F_IP_CSUM
| NETIF_F_TSO
);
931 newtcp6sk
= (struct tcp6_sock
*)newsk
;
932 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
934 newtp
= tcp_sk(newsk
);
935 newinet
= inet_sk(newsk
);
936 newnp
= inet6_sk(newsk
);
938 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
940 ipv6_addr_copy(&newnp
->daddr
, &treq
->rmt_addr
);
941 ipv6_addr_copy(&newnp
->saddr
, &treq
->loc_addr
);
942 ipv6_addr_copy(&newnp
->rcv_saddr
, &treq
->loc_addr
);
943 newsk
->sk_bound_dev_if
= treq
->iif
;
945 /* Now IPv6 options...
947 First: no IPv4 options.
952 newnp
->rxopt
.all
= np
->rxopt
.all
;
954 /* Clone pktoptions received with SYN */
955 newnp
->pktoptions
= NULL
;
956 if (treq
->pktopts
!= NULL
) {
957 newnp
->pktoptions
= skb_clone(treq
->pktopts
, GFP_ATOMIC
);
958 kfree_skb(treq
->pktopts
);
959 treq
->pktopts
= NULL
;
960 if (newnp
->pktoptions
)
961 skb_set_owner_r(newnp
->pktoptions
, newsk
);
964 newnp
->mcast_oif
= inet6_iif(skb
);
965 newnp
->mcast_hops
= skb
->nh
.ipv6h
->hop_limit
;
967 /* Clone native IPv6 options from listening socket (if any)
969 Yes, keeping reference count would be much more clever,
970 but we make one more one thing there: reattach optmem
974 newnp
->opt
= ipv6_dup_options(newsk
, opt
);
976 sock_kfree_s(sk
, opt
, opt
->tot_len
);
979 newtp
->ext_header_len
= 0;
981 newtp
->ext_header_len
= newnp
->opt
->opt_nflen
+
982 newnp
->opt
->opt_flen
;
984 tcp_sync_mss(newsk
, dst_mtu(dst
));
985 newtp
->advmss
= dst_metric(dst
, RTAX_ADVMSS
);
986 tcp_initialize_rcv_mss(newsk
);
988 newinet
->daddr
= newinet
->saddr
= newinet
->rcv_saddr
= LOOPBACK4_IPV6
;
990 __inet6_hash(&tcp_hashinfo
, newsk
);
991 inet_inherit_port(&tcp_hashinfo
, sk
, newsk
);
996 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS
);
998 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS
);
999 if (opt
&& opt
!= np
->opt
)
1000 sock_kfree_s(sk
, opt
, opt
->tot_len
);
/* Validate/prepare the receive checksum state of an incoming segment.
 * Returns 0 when the checksum is (or will be) verified; the remaining
 * return paths are on original lines missing from this excerpt. */
1005 static int tcp_v6_checksum_init(struct sk_buff
*skb
)
1007 if (skb
->ip_summed
== CHECKSUM_HW
) {
/* Hardware already summed the payload: fold in the pseudo-header; a
 * zero result means the checksum verifies. */
1008 if (!tcp_v6_check(skb
->h
.th
,skb
->len
,&skb
->nh
.ipv6h
->saddr
,
1009 &skb
->nh
.ipv6h
->daddr
,skb
->csum
)) {
1010 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
/* Software path: precompute the complemented pseudo-header sum so a
 * later full check only needs the payload. */
1015 skb
->csum
= ~tcp_v6_check(skb
->h
.th
,skb
->len
,&skb
->nh
.ipv6h
->saddr
,
1016 &skb
->nh
.ipv6h
->daddr
, 0);
/* Short packets are cheap to verify right away. */
1018 if (skb
->len
<= 76) {
1019 return __skb_checksum_complete(skb
);
1024 /* The socket must have it's spinlock held when we get
1027 * We have a potential double-lock case here, so even when
1028 * doing backlog processing we use the BH locking scheme.
1029 * This is because we cannot sleep with the original spinlock
1032 static int tcp_v6_do_rcv(struct sock
*sk
, struct sk_buff
*skb
)
1034 struct ipv6_pinfo
*np
= inet6_sk(sk
);
1035 struct tcp_sock
*tp
;
1036 struct sk_buff
*opt_skb
= NULL
;
1038 /* Imagine: socket is IPv6. IPv4 packet arrives,
1039 goes to IPv4 receive handler and backlogged.
1040 From backlog it always goes here. Kerboom...
1041 Fortunately, tcp_rcv_established and rcv_established
1042 handle them correctly, but it is not case with
1043 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1046 if (skb
->protocol
== htons(ETH_P_IP
))
1047 return tcp_v4_do_rcv(sk
, skb
);
1049 if (sk_filter(sk
, skb
, 0))
1053 * socket locking is here for SMP purposes as backlog rcv
1054 * is currently called with bh processing disabled.
1057 /* Do Stevens' IPV6_PKTOPTIONS.
1059 Yes, guys, it is the only place in our code, where we
1060 may make it not affecting IPv4.
1061 The rest of code is protocol independent,
1062 and I do not like idea to uglify IPv4.
1064 Actually, all the idea behind IPV6_PKTOPTIONS
1065 looks not very well thought. For now we latch
1066 options, received in the last packet, enqueued
1067 by tcp. Feel free to propose better solution.
1071 opt_skb
= skb_clone(skb
, GFP_ATOMIC
);
1073 if (sk
->sk_state
== TCP_ESTABLISHED
) { /* Fast path */
1074 TCP_CHECK_TIMER(sk
);
1075 if (tcp_rcv_established(sk
, skb
, skb
->h
.th
, skb
->len
))
1077 TCP_CHECK_TIMER(sk
);
1079 goto ipv6_pktoptions
;
1083 if (skb
->len
< (skb
->h
.th
->doff
<<2) || tcp_checksum_complete(skb
))
1086 if (sk
->sk_state
== TCP_LISTEN
) {
1087 struct sock
*nsk
= tcp_v6_hnd_req(sk
, skb
);
1092 * Queue it on the new socket if the new socket is active,
1093 * otherwise we just shortcircuit this and continue with
1097 if (tcp_child_process(sk
, nsk
, skb
))
1100 __kfree_skb(opt_skb
);
1105 TCP_CHECK_TIMER(sk
);
1106 if (tcp_rcv_state_process(sk
, skb
, skb
->h
.th
, skb
->len
))
1108 TCP_CHECK_TIMER(sk
);
1110 goto ipv6_pktoptions
;
1114 tcp_v6_send_reset(skb
);
1117 __kfree_skb(opt_skb
);
1121 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1126 /* Do you ask, what is it?
1128 1. skb was enqueued by tcp.
1129 2. skb is added to tail of read queue, rather than out of order.
1130 3. socket is not in passive state.
1131 4. Finally, it really contains options, which user wants to receive.
1134 if (TCP_SKB_CB(opt_skb
)->end_seq
== tp
->rcv_nxt
&&
1135 !((1 << sk
->sk_state
) & (TCPF_CLOSE
| TCPF_LISTEN
))) {
1136 if (np
->rxopt
.bits
.rxinfo
|| np
->rxopt
.bits
.rxoinfo
)
1137 np
->mcast_oif
= inet6_iif(opt_skb
);
1138 if (np
->rxopt
.bits
.rxhlim
|| np
->rxopt
.bits
.rxohlim
)
1139 np
->mcast_hops
= opt_skb
->nh
.ipv6h
->hop_limit
;
1140 if (ipv6_opt_accepted(sk
, opt_skb
)) {
1141 skb_set_owner_r(opt_skb
, sk
);
1142 opt_skb
= xchg(&np
->pktoptions
, opt_skb
);
1144 __kfree_skb(opt_skb
);
1145 opt_skb
= xchg(&np
->pktoptions
, NULL
);
1154 static int tcp_v6_rcv(struct sk_buff
**pskb
, unsigned int *nhoffp
)
1156 struct sk_buff
*skb
= *pskb
;
1161 if (skb
->pkt_type
!= PACKET_HOST
)
1165 * Count it even if it's bad.
1167 TCP_INC_STATS_BH(TCP_MIB_INSEGS
);
1169 if (!pskb_may_pull(skb
, sizeof(struct tcphdr
)))
1174 if (th
->doff
< sizeof(struct tcphdr
)/4)
1176 if (!pskb_may_pull(skb
, th
->doff
*4))
1179 if ((skb
->ip_summed
!= CHECKSUM_UNNECESSARY
&&
1180 tcp_v6_checksum_init(skb
)))
1184 TCP_SKB_CB(skb
)->seq
= ntohl(th
->seq
);
1185 TCP_SKB_CB(skb
)->end_seq
= (TCP_SKB_CB(skb
)->seq
+ th
->syn
+ th
->fin
+
1186 skb
->len
- th
->doff
*4);
1187 TCP_SKB_CB(skb
)->ack_seq
= ntohl(th
->ack_seq
);
1188 TCP_SKB_CB(skb
)->when
= 0;
1189 TCP_SKB_CB(skb
)->flags
= ipv6_get_dsfield(skb
->nh
.ipv6h
);
1190 TCP_SKB_CB(skb
)->sacked
= 0;
1192 sk
= __inet6_lookup(&tcp_hashinfo
, &skb
->nh
.ipv6h
->saddr
, th
->source
,
1193 &skb
->nh
.ipv6h
->daddr
, ntohs(th
->dest
),
1200 if (sk
->sk_state
== TCP_TIME_WAIT
)
1203 if (!xfrm6_policy_check(sk
, XFRM_POLICY_IN
, skb
))
1204 goto discard_and_relse
;
1206 if (sk_filter(sk
, skb
, 0))
1207 goto discard_and_relse
;
1213 if (!sock_owned_by_user(sk
)) {
1214 if (!tcp_prequeue(sk
, skb
))
1215 ret
= tcp_v6_do_rcv(sk
, skb
);
1217 sk_add_backlog(sk
, skb
);
1221 return ret
? -1 : 0;
1224 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
1227 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1229 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1231 tcp_v6_send_reset(skb
);
1248 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
)) {
1249 inet_twsk_put((struct inet_timewait_sock
*)sk
);
1253 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1254 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1255 inet_twsk_put((struct inet_timewait_sock
*)sk
);
1259 switch (tcp_timewait_state_process((struct inet_timewait_sock
*)sk
,
1265 sk2
= inet6_lookup_listener(&tcp_hashinfo
,
1266 &skb
->nh
.ipv6h
->daddr
,
1267 ntohs(th
->dest
), inet6_iif(skb
));
1269 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
1270 inet_twsk_deschedule(tw
, &tcp_death_row
);
1275 /* Fall through to ACK */
1278 tcp_v6_timewait_ack(sk
, skb
);
1282 case TCP_TW_SUCCESS
:;
/* Stub for the remember_stamp af_op: IPv6 has no peer-timestamp cache
 * in this version, so nothing is recorded (the `return 0;` is on an
 * original line missing from this excerpt). */
1287 static int tcp_v6_remember_stamp(struct sock
*sk
)
1289 /* Alas, not yet... */
/* Address-family ops for native IPv6 TCP sockets: IPv6 transmit,
 * checksum, header rebuild and sockopt handlers. */
1293 static struct inet_connection_sock_af_ops ipv6_specific
= {
1294 .queue_xmit
= inet6_csk_xmit
,
1295 .send_check
= tcp_v6_send_check
,
1296 .rebuild_header
= inet6_sk_rebuild_header
,
1297 .conn_request
= tcp_v6_conn_request
,
1298 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1299 .remember_stamp
= tcp_v6_remember_stamp
,
1300 .net_header_len
= sizeof(struct ipv6hdr
),
1302 .setsockopt
= ipv6_setsockopt
,
1303 .getsockopt
= ipv6_getsockopt
,
1304 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1305 .sockaddr_len
= sizeof(struct sockaddr_in6
)
1309 * TCP over IPv4 via INET6 API
/* Address-family ops for v4-mapped sockets: IPv4 transmit/checksum/
 * header handling, but IPv6 sockopts and sockaddr presentation so the
 * socket still looks like AF_INET6 to userspace. */
1312 static struct inet_connection_sock_af_ops ipv6_mapped
= {
1313 .queue_xmit
= ip_queue_xmit
,
1314 .send_check
= tcp_v4_send_check
,
1315 .rebuild_header
= inet_sk_rebuild_header
,
1316 .conn_request
= tcp_v6_conn_request
,
1317 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1318 .remember_stamp
= tcp_v4_remember_stamp
,
1319 .net_header_len
= sizeof(struct iphdr
),
1321 .setsockopt
= ipv6_setsockopt
,
1322 .getsockopt
= ipv6_getsockopt
,
1323 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1324 .sockaddr_len
= sizeof(struct sockaddr_in6
)
1329 /* NOTE: A lot of things set to zero explicitly by call to
1330 * sk_alloc() so need not be done here.
/* Per-socket initializer invoked at socket creation: queues, timers,
 * RTO/congestion defaults, ops vector and buffer sizes. Returns 0
 * (the return is on an original line missing from this excerpt). */
1332 static int tcp_v6_init_sock(struct sock
*sk
)
1334 struct inet_connection_sock
*icsk
= inet_csk(sk
);
1335 struct tcp_sock
*tp
= tcp_sk(sk
);
1337 skb_queue_head_init(&tp
->out_of_order_queue
);
1338 tcp_init_xmit_timers(sk
);
1339 tcp_prequeue_init(tp
);
/* Conservative initial RTO and mean deviation until real RTT samples
 * arrive. */
1341 icsk
->icsk_rto
= TCP_TIMEOUT_INIT
;
1342 tp
->mdev
= TCP_TIMEOUT_INIT
;
1344 /* So many TCP implementations out there (incorrectly) count the
1345 * initial SYN frame in their delayed-ACK and congestion control
1346 * algorithms that we must have the following bandaid to talk
1347 * efficiently to them. -DaveM
1351 /* See draft-stevens-tcpca-spec-01 for discussion of the
1352 * initialization of these values.
1354 tp
->snd_ssthresh
= 0x7fffffff;
1355 tp
->snd_cwnd_clamp
= ~0;
/* RFC-mandated default MSS until the peer advertises one. */
1356 tp
->mss_cache
= 536;
1358 tp
->reordering
= sysctl_tcp_reordering
;
1360 sk
->sk_state
= TCP_CLOSE
;
/* Native IPv6 ops by default; switched to ipv6_mapped on a v4-mapped
 * connect elsewhere in this file. */
1362 icsk
->icsk_af_ops
= &ipv6_specific
;
1363 icsk
->icsk_ca_ops
= &tcp_init_congestion_ops
;
1364 sk
->sk_write_space
= sk_stream_write_space
;
1365 sock_set_flag(sk
, SOCK_USE_WRITE_QUEUE
);
/* Default send/receive buffers from the tcp_{w,r}mem sysctls. */
1367 sk
->sk_sndbuf
= sysctl_tcp_wmem
[1];
1368 sk
->sk_rcvbuf
= sysctl_tcp_rmem
[1];
1370 atomic_inc(&tcp_sockets_allocated
);
/* Socket destructor: run the common (IPv4-named but shared) TCP
 * teardown, then release the IPv6-specific socket state. */
1375 static int tcp_v6_destroy_sock(struct sock
*sk
)
1377 tcp_v4_destroy_sock(sk
);
1378 return inet6_destroy_sock(sk
);
1381 /* Proc filesystem TCPv6 sock list dumping. */
/* Emit one /proc/net/tcp6 row for a pending open request @req on
 * listener @sk: v6 address/port pairs, remaining SYN-ACK timer and the
 * listener's uid. The seq_printf call itself and its trailing arguments
 * sit on original lines missing from this excerpt. */
1382 static void get_openreq6(struct seq_file
*seq
,
1383 struct sock
*sk
, struct request_sock
*req
, int i
, int uid
)
/* Jiffies until the request expires (SYN-ACK retransmit deadline). */
1385 int ttd
= req
->expires
- jiffies
;
1386 struct in6_addr
*src
= &inet6_rsk(req
)->loc_addr
;
1387 struct in6_addr
*dest
= &inet6_rsk(req
)->rmt_addr
;
1393 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1394 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1396 src
->s6_addr32
[0], src
->s6_addr32
[1],
1397 src
->s6_addr32
[2], src
->s6_addr32
[3],
1398 ntohs(inet_sk(sk
)->sport
),
1399 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
1400 dest
->s6_addr32
[2], dest
->s6_addr32
[3],
1401 ntohs(inet_rsk(req
)->rmt_port
),
1403 0,0, /* could print option size, but that is af dependent. */
1404 1, /* timers active (only the expire timer) */
1405 jiffies_to_clock_t(ttd
),
1408 0, /* non standard timer */
1409 0, /* open_requests have no inode */
/* Emit one /proc/net/tcp6 line for a full socket (listening or
 * established state).
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	/* Translate whichever socket timer is pending into the /proc "tr"
	 * code: 1 = retransmit, 4 = zero-window probe, 2 = sk_timer
	 * (presumably keepalive/SYNACK — matches the IPv4 dump), 0 = none.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;	/* prints as zero remaining */
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   icsk->icsk_rto,
		   icsk->icsk_ack.ato,
		   /* delayed-ACK state: quick count in high bits, pingpong flag */
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   /* -1 means "no ssthresh yet" to userspace */
		   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
		   );
}
/* Emit one /proc/net/tcp6 line for a TIME_WAIT mini-socket. */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	/* Remaining TIME_WAIT lifetime in jiffies; clamp if expired. */
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	/* Timewait sockets carry no queues, uid or inode, so most fields
	 * are printed as zero; timer code 3 marks the TIME_WAIT timer.
	 */
	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
1496 #ifdef CONFIG_PROC_FS
/* seq_file ->show() for /proc/net/tcp6: prints the column header for the
 * start token, otherwise dispatches on the iterator state to the dumper
 * matching the kind of (mini-)socket `v` points at.
 */
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "  sl  "
			   "local_address                         "
			   "remote_address                        "
			   "st tx_queue rx_queue tr tm->when retrnsmt"
			   "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		/* `v` is a full struct sock here. */
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		/* `v` is a request_sock; its listener is in the iterator. */
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		/* `v` is an inet_timewait_sock. */
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}
/* fops filled in by tcp_proc_register(); afinfo describes the AF_INET6
 * flavour of the shared TCP /proc dumping machinery.
 */
static struct file_operations tcp6_seq_fops;
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.owner		= THIS_MODULE,
	.name		= "tcp6",	/* appears as /proc/net/tcp6 */
	.family		= AF_INET6,
	.seq_show	= tcp6_seq_show,
	.seq_fops	= &tcp6_seq_fops,
};
1537 int __init
tcp6_proc_init(void)
1539 return tcp_proc_register(&tcp6_seq_afinfo
);
/* Counterpart of tcp6_proc_init(): remove the tcp6 proc entry. */
void tcp6_proc_exit(void)
{
	tcp_proc_unregister(&tcp6_seq_afinfo);
}
/* TCP-over-IPv6 protocol descriptor: wires the socket-layer operations
 * to the v6-specific entry points, sharing the protocol-generic TCP
 * helpers (and sysctl/memory accounting) with IPv4.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.sendmsg		= tcp_sendmsg,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= tcp_unhash,
	.get_port		= tcp_v6_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	/* Accounting state is shared with the IPv4 stack. */
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
};
/* inet6-layer protocol hooks for IPPROTO_TCP: receive and ICMPv6 error
 * handlers; NOPOLICY/FINAL flags per the inet6 protocol conventions.
 */
static struct inet6_protocol tcpv6_protocol = {
	.handler	= tcp_v6_rcv,
	.err_handler	= tcp_v6_err,
	.flags		= INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* socket()-layer switch entry mapping AF_INET6/SOCK_STREAM to TCPv6. */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,	/* no special capability required */
	.no_check	=	0,	/* checksums always on */
	.flags		=	INET_PROTOSW_PERMANENT,
};
/* Boot-time hookup of TCPv6: attach the IPPROTO_TCP handlers to the
 * inet6 layer, then publish the socket-creation switch entry.
 */
void __init tcpv6_init(void)
{
	/* register inet6 protocol */
	if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
		printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
	/* make AF_INET6/SOCK_STREAM sockets resolve to tcpv6_prot */
	inet6_register_protosw(&tcpv6_protosw);
}