3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
59 #include <net/addrconf.h>
61 #include <net/dsfield.h>
62 #include <net/timewait_sock.h>
64 #include <asm/uaccess.h>
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
69 /* Socket used for sending RSTs and ACKs */
70 static struct socket
*tcp6_socket
;
72 static void tcp_v6_send_reset(struct sk_buff
*skb
);
73 static void tcp_v6_reqsk_send_ack(struct sk_buff
*skb
, struct request_sock
*req
);
74 static void tcp_v6_send_check(struct sock
*sk
, int len
,
77 static int tcp_v6_do_rcv(struct sock
*sk
, struct sk_buff
*skb
);
79 static struct inet_connection_sock_af_ops ipv6_mapped
;
80 static struct inet_connection_sock_af_ops ipv6_specific
;
82 static int tcp_v6_get_port(struct sock
*sk
, unsigned short snum
)
84 return inet_csk_get_port(&tcp_hashinfo
, sk
, snum
,
85 inet6_csk_bind_conflict
);
88 static void tcp_v6_hash(struct sock
*sk
)
90 if (sk
->sk_state
!= TCP_CLOSE
) {
91 if (inet_csk(sk
)->icsk_af_ops
== &ipv6_mapped
) {
96 __inet6_hash(&tcp_hashinfo
, sk
);
101 static __inline__ u16
tcp_v6_check(struct tcphdr
*th
, int len
,
102 struct in6_addr
*saddr
,
103 struct in6_addr
*daddr
,
106 return csum_ipv6_magic(saddr
, daddr
, len
, IPPROTO_TCP
, base
);
109 static __u32
tcp_v6_init_sequence(struct sock
*sk
, struct sk_buff
*skb
)
111 if (skb
->protocol
== htons(ETH_P_IPV6
)) {
112 return secure_tcpv6_sequence_number(skb
->nh
.ipv6h
->daddr
.s6_addr32
,
113 skb
->nh
.ipv6h
->saddr
.s6_addr32
,
117 return secure_tcp_sequence_number(skb
->nh
.iph
->daddr
,
124 static int tcp_v6_connect(struct sock
*sk
, struct sockaddr
*uaddr
,
127 struct sockaddr_in6
*usin
= (struct sockaddr_in6
*) uaddr
;
128 struct inet_sock
*inet
= inet_sk(sk
);
129 struct inet_connection_sock
*icsk
= inet_csk(sk
);
130 struct ipv6_pinfo
*np
= inet6_sk(sk
);
131 struct tcp_sock
*tp
= tcp_sk(sk
);
132 struct in6_addr
*saddr
= NULL
, *final_p
= NULL
, final
;
134 struct dst_entry
*dst
;
138 if (addr_len
< SIN6_LEN_RFC2133
)
141 if (usin
->sin6_family
!= AF_INET6
)
142 return(-EAFNOSUPPORT
);
144 memset(&fl
, 0, sizeof(fl
));
147 fl
.fl6_flowlabel
= usin
->sin6_flowinfo
&IPV6_FLOWINFO_MASK
;
148 IP6_ECN_flow_init(fl
.fl6_flowlabel
);
149 if (fl
.fl6_flowlabel
&IPV6_FLOWLABEL_MASK
) {
150 struct ip6_flowlabel
*flowlabel
;
151 flowlabel
= fl6_sock_lookup(sk
, fl
.fl6_flowlabel
);
152 if (flowlabel
== NULL
)
154 ipv6_addr_copy(&usin
->sin6_addr
, &flowlabel
->dst
);
155 fl6_sock_release(flowlabel
);
160 * connect() to INADDR_ANY means loopback (BSD'ism).
163 if(ipv6_addr_any(&usin
->sin6_addr
))
164 usin
->sin6_addr
.s6_addr
[15] = 0x1;
166 addr_type
= ipv6_addr_type(&usin
->sin6_addr
);
168 if(addr_type
& IPV6_ADDR_MULTICAST
)
171 if (addr_type
&IPV6_ADDR_LINKLOCAL
) {
172 if (addr_len
>= sizeof(struct sockaddr_in6
) &&
173 usin
->sin6_scope_id
) {
174 /* If interface is set while binding, indices
177 if (sk
->sk_bound_dev_if
&&
178 sk
->sk_bound_dev_if
!= usin
->sin6_scope_id
)
181 sk
->sk_bound_dev_if
= usin
->sin6_scope_id
;
184 /* Connect to link-local address requires an interface */
185 if (!sk
->sk_bound_dev_if
)
189 if (tp
->rx_opt
.ts_recent_stamp
&&
190 !ipv6_addr_equal(&np
->daddr
, &usin
->sin6_addr
)) {
191 tp
->rx_opt
.ts_recent
= 0;
192 tp
->rx_opt
.ts_recent_stamp
= 0;
196 ipv6_addr_copy(&np
->daddr
, &usin
->sin6_addr
);
197 np
->flow_label
= fl
.fl6_flowlabel
;
203 if (addr_type
== IPV6_ADDR_MAPPED
) {
204 u32 exthdrlen
= icsk
->icsk_ext_hdr_len
;
205 struct sockaddr_in sin
;
207 SOCK_DEBUG(sk
, "connect: ipv4 mapped\n");
209 if (__ipv6_only_sock(sk
))
212 sin
.sin_family
= AF_INET
;
213 sin
.sin_port
= usin
->sin6_port
;
214 sin
.sin_addr
.s_addr
= usin
->sin6_addr
.s6_addr32
[3];
216 icsk
->icsk_af_ops
= &ipv6_mapped
;
217 sk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
219 err
= tcp_v4_connect(sk
, (struct sockaddr
*)&sin
, sizeof(sin
));
222 icsk
->icsk_ext_hdr_len
= exthdrlen
;
223 icsk
->icsk_af_ops
= &ipv6_specific
;
224 sk
->sk_backlog_rcv
= tcp_v6_do_rcv
;
227 ipv6_addr_set(&np
->saddr
, 0, 0, htonl(0x0000FFFF),
229 ipv6_addr_set(&np
->rcv_saddr
, 0, 0, htonl(0x0000FFFF),
236 if (!ipv6_addr_any(&np
->rcv_saddr
))
237 saddr
= &np
->rcv_saddr
;
239 fl
.proto
= IPPROTO_TCP
;
240 ipv6_addr_copy(&fl
.fl6_dst
, &np
->daddr
);
241 ipv6_addr_copy(&fl
.fl6_src
,
242 (saddr
? saddr
: &np
->saddr
));
243 fl
.oif
= sk
->sk_bound_dev_if
;
244 fl
.fl_ip_dport
= usin
->sin6_port
;
245 fl
.fl_ip_sport
= inet
->sport
;
247 if (np
->opt
&& np
->opt
->srcrt
) {
248 struct rt0_hdr
*rt0
= (struct rt0_hdr
*)np
->opt
->srcrt
;
249 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
250 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
254 err
= ip6_dst_lookup(sk
, &dst
, &fl
);
258 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
260 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
265 ipv6_addr_copy(&np
->rcv_saddr
, saddr
);
268 /* set the source address */
269 ipv6_addr_copy(&np
->saddr
, saddr
);
270 inet
->rcv_saddr
= LOOPBACK4_IPV6
;
272 sk
->sk_gso_type
= SKB_GSO_TCPV6
;
273 ip6_dst_store(sk
, dst
, NULL
);
275 icsk
->icsk_ext_hdr_len
= 0;
277 icsk
->icsk_ext_hdr_len
= (np
->opt
->opt_flen
+
280 tp
->rx_opt
.mss_clamp
= IPV6_MIN_MTU
- sizeof(struct tcphdr
) - sizeof(struct ipv6hdr
);
282 inet
->dport
= usin
->sin6_port
;
284 tcp_set_state(sk
, TCP_SYN_SENT
);
285 err
= inet6_hash_connect(&tcp_death_row
, sk
);
290 tp
->write_seq
= secure_tcpv6_sequence_number(np
->saddr
.s6_addr32
,
295 err
= tcp_connect(sk
);
302 tcp_set_state(sk
, TCP_CLOSE
);
306 sk
->sk_route_caps
= 0;
310 static void tcp_v6_err(struct sk_buff
*skb
, struct inet6_skb_parm
*opt
,
311 int type
, int code
, int offset
, __u32 info
)
313 struct ipv6hdr
*hdr
= (struct ipv6hdr
*)skb
->data
;
314 const struct tcphdr
*th
= (struct tcphdr
*)(skb
->data
+offset
);
315 struct ipv6_pinfo
*np
;
321 sk
= inet6_lookup(&tcp_hashinfo
, &hdr
->daddr
, th
->dest
, &hdr
->saddr
,
322 th
->source
, skb
->dev
->ifindex
);
325 ICMP6_INC_STATS_BH(__in6_dev_get(skb
->dev
), ICMP6_MIB_INERRORS
);
329 if (sk
->sk_state
== TCP_TIME_WAIT
) {
330 inet_twsk_put((struct inet_timewait_sock
*)sk
);
335 if (sock_owned_by_user(sk
))
336 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS
);
338 if (sk
->sk_state
== TCP_CLOSE
)
342 seq
= ntohl(th
->seq
);
343 if (sk
->sk_state
!= TCP_LISTEN
&&
344 !between(seq
, tp
->snd_una
, tp
->snd_nxt
)) {
345 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS
);
351 if (type
== ICMPV6_PKT_TOOBIG
) {
352 struct dst_entry
*dst
= NULL
;
354 if (sock_owned_by_user(sk
))
356 if ((1 << sk
->sk_state
) & (TCPF_LISTEN
| TCPF_CLOSE
))
359 /* icmp should have updated the destination cache entry */
360 dst
= __sk_dst_check(sk
, np
->dst_cookie
);
363 struct inet_sock
*inet
= inet_sk(sk
);
366 /* BUGGG_FUTURE: Again, it is not clear how
367 to handle rthdr case. Ignore this complexity
370 memset(&fl
, 0, sizeof(fl
));
371 fl
.proto
= IPPROTO_TCP
;
372 ipv6_addr_copy(&fl
.fl6_dst
, &np
->daddr
);
373 ipv6_addr_copy(&fl
.fl6_src
, &np
->saddr
);
374 fl
.oif
= sk
->sk_bound_dev_if
;
375 fl
.fl_ip_dport
= inet
->dport
;
376 fl
.fl_ip_sport
= inet
->sport
;
378 if ((err
= ip6_dst_lookup(sk
, &dst
, &fl
))) {
379 sk
->sk_err_soft
= -err
;
383 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0) {
384 sk
->sk_err_soft
= -err
;
391 if (inet_csk(sk
)->icsk_pmtu_cookie
> dst_mtu(dst
)) {
392 tcp_sync_mss(sk
, dst_mtu(dst
));
393 tcp_simple_retransmit(sk
);
394 } /* else let the usual retransmit timer handle it */
399 icmpv6_err_convert(type
, code
, &err
);
401 /* Might be for an request_sock */
402 switch (sk
->sk_state
) {
403 struct request_sock
*req
, **prev
;
405 if (sock_owned_by_user(sk
))
408 req
= inet6_csk_search_req(sk
, &prev
, th
->dest
, &hdr
->daddr
,
409 &hdr
->saddr
, inet6_iif(skb
));
413 /* ICMPs are not backlogged, hence we cannot get
414 * an established socket here.
416 BUG_TRAP(req
->sk
== NULL
);
418 if (seq
!= tcp_rsk(req
)->snt_isn
) {
419 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS
);
423 inet_csk_reqsk_queue_drop(sk
, req
, prev
);
427 case TCP_SYN_RECV
: /* Cannot happen.
428 It can, it SYNs are crossed. --ANK */
429 if (!sock_owned_by_user(sk
)) {
430 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS
);
432 sk
->sk_error_report(sk
); /* Wake people up to see the error (see connect in sock.c) */
436 sk
->sk_err_soft
= err
;
440 if (!sock_owned_by_user(sk
) && np
->recverr
) {
442 sk
->sk_error_report(sk
);
444 sk
->sk_err_soft
= err
;
452 static int tcp_v6_send_synack(struct sock
*sk
, struct request_sock
*req
,
453 struct dst_entry
*dst
)
455 struct inet6_request_sock
*treq
= inet6_rsk(req
);
456 struct ipv6_pinfo
*np
= inet6_sk(sk
);
457 struct sk_buff
* skb
;
458 struct ipv6_txoptions
*opt
= NULL
;
459 struct in6_addr
* final_p
= NULL
, final
;
463 memset(&fl
, 0, sizeof(fl
));
464 fl
.proto
= IPPROTO_TCP
;
465 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
466 ipv6_addr_copy(&fl
.fl6_src
, &treq
->loc_addr
);
467 fl
.fl6_flowlabel
= 0;
469 fl
.fl_ip_dport
= inet_rsk(req
)->rmt_port
;
470 fl
.fl_ip_sport
= inet_sk(sk
)->sport
;
475 np
->rxopt
.bits
.osrcrt
== 2 &&
477 struct sk_buff
*pktopts
= treq
->pktopts
;
478 struct inet6_skb_parm
*rxopt
= IP6CB(pktopts
);
480 opt
= ipv6_invert_rthdr(sk
, (struct ipv6_rt_hdr
*)(pktopts
->nh
.raw
+ rxopt
->srcrt
));
483 if (opt
&& opt
->srcrt
) {
484 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) opt
->srcrt
;
485 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
486 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
490 err
= ip6_dst_lookup(sk
, &dst
, &fl
);
494 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
495 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
499 skb
= tcp_make_synack(sk
, dst
, req
);
501 struct tcphdr
*th
= skb
->h
.th
;
503 th
->check
= tcp_v6_check(th
, skb
->len
,
504 &treq
->loc_addr
, &treq
->rmt_addr
,
505 csum_partial((char *)th
, skb
->len
, skb
->csum
));
507 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
508 err
= ip6_xmit(sk
, skb
, &fl
, opt
, 0);
509 if (err
== NET_XMIT_CN
)
514 if (opt
&& opt
!= np
->opt
)
515 sock_kfree_s(sk
, opt
, opt
->tot_len
);
520 static void tcp_v6_reqsk_destructor(struct request_sock
*req
)
522 if (inet6_rsk(req
)->pktopts
)
523 kfree_skb(inet6_rsk(req
)->pktopts
);
526 static struct request_sock_ops tcp6_request_sock_ops
= {
528 .obj_size
= sizeof(struct tcp6_request_sock
),
529 .rtx_syn_ack
= tcp_v6_send_synack
,
530 .send_ack
= tcp_v6_reqsk_send_ack
,
531 .destructor
= tcp_v6_reqsk_destructor
,
532 .send_reset
= tcp_v6_send_reset
535 static struct timewait_sock_ops tcp6_timewait_sock_ops
= {
536 .twsk_obj_size
= sizeof(struct tcp6_timewait_sock
),
537 .twsk_unique
= tcp_twsk_unique
,
540 static void tcp_v6_send_check(struct sock
*sk
, int len
, struct sk_buff
*skb
)
542 struct ipv6_pinfo
*np
= inet6_sk(sk
);
543 struct tcphdr
*th
= skb
->h
.th
;
545 if (skb
->ip_summed
== CHECKSUM_HW
) {
546 th
->check
= ~csum_ipv6_magic(&np
->saddr
, &np
->daddr
, len
, IPPROTO_TCP
, 0);
547 skb
->csum
= offsetof(struct tcphdr
, check
);
549 th
->check
= csum_ipv6_magic(&np
->saddr
, &np
->daddr
, len
, IPPROTO_TCP
,
550 csum_partial((char *)th
, th
->doff
<<2,
556 static void tcp_v6_send_reset(struct sk_buff
*skb
)
558 struct tcphdr
*th
= skb
->h
.th
, *t1
;
559 struct sk_buff
*buff
;
565 if (!ipv6_unicast_destination(skb
))
569 * We need to grab some memory, and put together an RST,
570 * and then put it into the queue to be sent.
573 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + sizeof(struct tcphdr
),
578 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + sizeof(struct tcphdr
));
580 t1
= (struct tcphdr
*) skb_push(buff
,sizeof(struct tcphdr
));
582 /* Swap the send and the receive. */
583 memset(t1
, 0, sizeof(*t1
));
584 t1
->dest
= th
->source
;
585 t1
->source
= th
->dest
;
586 t1
->doff
= sizeof(*t1
)/4;
590 t1
->seq
= th
->ack_seq
;
593 t1
->ack_seq
= htonl(ntohl(th
->seq
) + th
->syn
+ th
->fin
594 + skb
->len
- (th
->doff
<<2));
597 buff
->csum
= csum_partial((char *)t1
, sizeof(*t1
), 0);
599 memset(&fl
, 0, sizeof(fl
));
600 ipv6_addr_copy(&fl
.fl6_dst
, &skb
->nh
.ipv6h
->saddr
);
601 ipv6_addr_copy(&fl
.fl6_src
, &skb
->nh
.ipv6h
->daddr
);
603 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
604 sizeof(*t1
), IPPROTO_TCP
,
607 fl
.proto
= IPPROTO_TCP
;
608 fl
.oif
= inet6_iif(skb
);
609 fl
.fl_ip_dport
= t1
->dest
;
610 fl
.fl_ip_sport
= t1
->source
;
612 /* sk = NULL, but it is safe for now. RST socket required. */
613 if (!ip6_dst_lookup(NULL
, &buff
->dst
, &fl
)) {
615 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
616 ip6_xmit(tcp6_socket
->sk
, buff
, &fl
, NULL
, 0);
617 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS
);
618 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS
);
626 static void tcp_v6_send_ack(struct sk_buff
*skb
, u32 seq
, u32 ack
, u32 win
, u32 ts
)
628 struct tcphdr
*th
= skb
->h
.th
, *t1
;
629 struct sk_buff
*buff
;
631 int tot_len
= sizeof(struct tcphdr
);
636 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
,
641 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
);
643 t1
= (struct tcphdr
*) skb_push(buff
,tot_len
);
645 /* Swap the send and the receive. */
646 memset(t1
, 0, sizeof(*t1
));
647 t1
->dest
= th
->source
;
648 t1
->source
= th
->dest
;
649 t1
->doff
= tot_len
/4;
650 t1
->seq
= htonl(seq
);
651 t1
->ack_seq
= htonl(ack
);
653 t1
->window
= htons(win
);
656 u32
*ptr
= (u32
*)(t1
+ 1);
657 *ptr
++ = htonl((TCPOPT_NOP
<< 24) | (TCPOPT_NOP
<< 16) |
658 (TCPOPT_TIMESTAMP
<< 8) | TCPOLEN_TIMESTAMP
);
659 *ptr
++ = htonl(tcp_time_stamp
);
663 buff
->csum
= csum_partial((char *)t1
, tot_len
, 0);
665 memset(&fl
, 0, sizeof(fl
));
666 ipv6_addr_copy(&fl
.fl6_dst
, &skb
->nh
.ipv6h
->saddr
);
667 ipv6_addr_copy(&fl
.fl6_src
, &skb
->nh
.ipv6h
->daddr
);
669 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
670 tot_len
, IPPROTO_TCP
,
673 fl
.proto
= IPPROTO_TCP
;
674 fl
.oif
= inet6_iif(skb
);
675 fl
.fl_ip_dport
= t1
->dest
;
676 fl
.fl_ip_sport
= t1
->source
;
678 if (!ip6_dst_lookup(NULL
, &buff
->dst
, &fl
)) {
679 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
680 ip6_xmit(tcp6_socket
->sk
, buff
, &fl
, NULL
, 0);
681 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS
);
689 static void tcp_v6_timewait_ack(struct sock
*sk
, struct sk_buff
*skb
)
691 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
692 const struct tcp_timewait_sock
*tcptw
= tcp_twsk(sk
);
694 tcp_v6_send_ack(skb
, tcptw
->tw_snd_nxt
, tcptw
->tw_rcv_nxt
,
695 tcptw
->tw_rcv_wnd
>> tw
->tw_rcv_wscale
,
696 tcptw
->tw_ts_recent
);
701 static void tcp_v6_reqsk_send_ack(struct sk_buff
*skb
, struct request_sock
*req
)
703 tcp_v6_send_ack(skb
, tcp_rsk(req
)->snt_isn
+ 1, tcp_rsk(req
)->rcv_isn
+ 1, req
->rcv_wnd
, req
->ts_recent
);
707 static struct sock
*tcp_v6_hnd_req(struct sock
*sk
,struct sk_buff
*skb
)
709 struct request_sock
*req
, **prev
;
710 const struct tcphdr
*th
= skb
->h
.th
;
713 /* Find possible connection requests. */
714 req
= inet6_csk_search_req(sk
, &prev
, th
->source
,
715 &skb
->nh
.ipv6h
->saddr
,
716 &skb
->nh
.ipv6h
->daddr
, inet6_iif(skb
));
718 return tcp_check_req(sk
, skb
, req
, prev
);
720 nsk
= __inet6_lookup_established(&tcp_hashinfo
, &skb
->nh
.ipv6h
->saddr
,
721 th
->source
, &skb
->nh
.ipv6h
->daddr
,
722 ntohs(th
->dest
), inet6_iif(skb
));
725 if (nsk
->sk_state
!= TCP_TIME_WAIT
) {
729 inet_twsk_put((struct inet_timewait_sock
*)nsk
);
733 #if 0 /*def CONFIG_SYN_COOKIES*/
734 if (!th
->rst
&& !th
->syn
&& th
->ack
)
735 sk
= cookie_v6_check(sk
, skb
, &(IPCB(skb
)->opt
));
740 /* FIXME: this is substantially similar to the ipv4 code.
741 * Can some kind of merge be done? -- erics
743 static int tcp_v6_conn_request(struct sock
*sk
, struct sk_buff
*skb
)
745 struct inet6_request_sock
*treq
;
746 struct ipv6_pinfo
*np
= inet6_sk(sk
);
747 struct tcp_options_received tmp_opt
;
748 struct tcp_sock
*tp
= tcp_sk(sk
);
749 struct request_sock
*req
= NULL
;
750 __u32 isn
= TCP_SKB_CB(skb
)->when
;
752 if (skb
->protocol
== htons(ETH_P_IP
))
753 return tcp_v4_conn_request(sk
, skb
);
755 if (!ipv6_unicast_destination(skb
))
759 * There are no SYN attacks on IPv6, yet...
761 if (inet_csk_reqsk_queue_is_full(sk
) && !isn
) {
763 printk(KERN_INFO
"TCPv6: dropping request, synflood is possible\n");
767 if (sk_acceptq_is_full(sk
) && inet_csk_reqsk_queue_young(sk
) > 1)
770 req
= inet6_reqsk_alloc(&tcp6_request_sock_ops
);
774 tcp_clear_options(&tmp_opt
);
775 tmp_opt
.mss_clamp
= IPV6_MIN_MTU
- sizeof(struct tcphdr
) - sizeof(struct ipv6hdr
);
776 tmp_opt
.user_mss
= tp
->rx_opt
.user_mss
;
778 tcp_parse_options(skb
, &tmp_opt
, 0);
780 tmp_opt
.tstamp_ok
= tmp_opt
.saw_tstamp
;
781 tcp_openreq_init(req
, &tmp_opt
, skb
);
783 treq
= inet6_rsk(req
);
784 ipv6_addr_copy(&treq
->rmt_addr
, &skb
->nh
.ipv6h
->saddr
);
785 ipv6_addr_copy(&treq
->loc_addr
, &skb
->nh
.ipv6h
->daddr
);
786 TCP_ECN_create_request(req
, skb
->h
.th
);
787 treq
->pktopts
= NULL
;
788 if (ipv6_opt_accepted(sk
, skb
) ||
789 np
->rxopt
.bits
.rxinfo
|| np
->rxopt
.bits
.rxoinfo
||
790 np
->rxopt
.bits
.rxhlim
|| np
->rxopt
.bits
.rxohlim
) {
791 atomic_inc(&skb
->users
);
794 treq
->iif
= sk
->sk_bound_dev_if
;
796 /* So that link locals have meaning */
797 if (!sk
->sk_bound_dev_if
&&
798 ipv6_addr_type(&treq
->rmt_addr
) & IPV6_ADDR_LINKLOCAL
)
799 treq
->iif
= inet6_iif(skb
);
802 isn
= tcp_v6_init_sequence(sk
,skb
);
804 tcp_rsk(req
)->snt_isn
= isn
;
806 if (tcp_v6_send_synack(sk
, req
, NULL
))
809 inet6_csk_reqsk_queue_hash_add(sk
, req
, TCP_TIMEOUT_INIT
);
816 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS
);
817 return 0; /* don't send reset */
820 static struct sock
* tcp_v6_syn_recv_sock(struct sock
*sk
, struct sk_buff
*skb
,
821 struct request_sock
*req
,
822 struct dst_entry
*dst
)
824 struct inet6_request_sock
*treq
= inet6_rsk(req
);
825 struct ipv6_pinfo
*newnp
, *np
= inet6_sk(sk
);
826 struct tcp6_sock
*newtcp6sk
;
827 struct inet_sock
*newinet
;
828 struct tcp_sock
*newtp
;
830 struct ipv6_txoptions
*opt
;
832 if (skb
->protocol
== htons(ETH_P_IP
)) {
837 newsk
= tcp_v4_syn_recv_sock(sk
, skb
, req
, dst
);
842 newtcp6sk
= (struct tcp6_sock
*)newsk
;
843 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
845 newinet
= inet_sk(newsk
);
846 newnp
= inet6_sk(newsk
);
847 newtp
= tcp_sk(newsk
);
849 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
851 ipv6_addr_set(&newnp
->daddr
, 0, 0, htonl(0x0000FFFF),
854 ipv6_addr_set(&newnp
->saddr
, 0, 0, htonl(0x0000FFFF),
857 ipv6_addr_copy(&newnp
->rcv_saddr
, &newnp
->saddr
);
859 inet_csk(newsk
)->icsk_af_ops
= &ipv6_mapped
;
860 newsk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
861 newnp
->pktoptions
= NULL
;
863 newnp
->mcast_oif
= inet6_iif(skb
);
864 newnp
->mcast_hops
= skb
->nh
.ipv6h
->hop_limit
;
867 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
868 * here, tcp_create_openreq_child now does this for us, see the comment in
869 * that function for the gory details. -acme
872 /* It is tricky place. Until this moment IPv4 tcp
873 worked with IPv6 icsk.icsk_af_ops.
876 tcp_sync_mss(newsk
, inet_csk(newsk
)->icsk_pmtu_cookie
);
883 if (sk_acceptq_is_full(sk
))
886 if (np
->rxopt
.bits
.osrcrt
== 2 &&
887 opt
== NULL
&& treq
->pktopts
) {
888 struct inet6_skb_parm
*rxopt
= IP6CB(treq
->pktopts
);
890 opt
= ipv6_invert_rthdr(sk
, (struct ipv6_rt_hdr
*)(treq
->pktopts
->nh
.raw
+ rxopt
->srcrt
));
894 struct in6_addr
*final_p
= NULL
, final
;
897 memset(&fl
, 0, sizeof(fl
));
898 fl
.proto
= IPPROTO_TCP
;
899 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
900 if (opt
&& opt
->srcrt
) {
901 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) opt
->srcrt
;
902 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
903 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
906 ipv6_addr_copy(&fl
.fl6_src
, &treq
->loc_addr
);
907 fl
.oif
= sk
->sk_bound_dev_if
;
908 fl
.fl_ip_dport
= inet_rsk(req
)->rmt_port
;
909 fl
.fl_ip_sport
= inet_sk(sk
)->sport
;
911 if (ip6_dst_lookup(sk
, &dst
, &fl
))
915 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
917 if ((xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
921 newsk
= tcp_create_openreq_child(sk
, req
, skb
);
926 * No need to charge this sock to the relevant IPv6 refcnt debug socks
927 * count here, tcp_create_openreq_child now does this for us, see the
928 * comment in that function for the gory details. -acme
931 sk
->sk_gso_type
= SKB_GSO_TCPV6
;
932 ip6_dst_store(newsk
, dst
, NULL
);
934 newtcp6sk
= (struct tcp6_sock
*)newsk
;
935 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
937 newtp
= tcp_sk(newsk
);
938 newinet
= inet_sk(newsk
);
939 newnp
= inet6_sk(newsk
);
941 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
943 ipv6_addr_copy(&newnp
->daddr
, &treq
->rmt_addr
);
944 ipv6_addr_copy(&newnp
->saddr
, &treq
->loc_addr
);
945 ipv6_addr_copy(&newnp
->rcv_saddr
, &treq
->loc_addr
);
946 newsk
->sk_bound_dev_if
= treq
->iif
;
948 /* Now IPv6 options...
950 First: no IPv4 options.
955 newnp
->rxopt
.all
= np
->rxopt
.all
;
957 /* Clone pktoptions received with SYN */
958 newnp
->pktoptions
= NULL
;
959 if (treq
->pktopts
!= NULL
) {
960 newnp
->pktoptions
= skb_clone(treq
->pktopts
, GFP_ATOMIC
);
961 kfree_skb(treq
->pktopts
);
962 treq
->pktopts
= NULL
;
963 if (newnp
->pktoptions
)
964 skb_set_owner_r(newnp
->pktoptions
, newsk
);
967 newnp
->mcast_oif
= inet6_iif(skb
);
968 newnp
->mcast_hops
= skb
->nh
.ipv6h
->hop_limit
;
970 /* Clone native IPv6 options from listening socket (if any)
972 Yes, keeping reference count would be much more clever,
973 but we make one more one thing there: reattach optmem
977 newnp
->opt
= ipv6_dup_options(newsk
, opt
);
979 sock_kfree_s(sk
, opt
, opt
->tot_len
);
982 inet_csk(newsk
)->icsk_ext_hdr_len
= 0;
984 inet_csk(newsk
)->icsk_ext_hdr_len
= (newnp
->opt
->opt_nflen
+
985 newnp
->opt
->opt_flen
);
987 tcp_mtup_init(newsk
);
988 tcp_sync_mss(newsk
, dst_mtu(dst
));
989 newtp
->advmss
= dst_metric(dst
, RTAX_ADVMSS
);
990 tcp_initialize_rcv_mss(newsk
);
992 newinet
->daddr
= newinet
->saddr
= newinet
->rcv_saddr
= LOOPBACK4_IPV6
;
994 __inet6_hash(&tcp_hashinfo
, newsk
);
995 inet_inherit_port(&tcp_hashinfo
, sk
, newsk
);
1000 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS
);
1002 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS
);
1003 if (opt
&& opt
!= np
->opt
)
1004 sock_kfree_s(sk
, opt
, opt
->tot_len
);
1009 static int tcp_v6_checksum_init(struct sk_buff
*skb
)
1011 if (skb
->ip_summed
== CHECKSUM_HW
) {
1012 if (!tcp_v6_check(skb
->h
.th
,skb
->len
,&skb
->nh
.ipv6h
->saddr
,
1013 &skb
->nh
.ipv6h
->daddr
,skb
->csum
)) {
1014 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1019 skb
->csum
= ~tcp_v6_check(skb
->h
.th
,skb
->len
,&skb
->nh
.ipv6h
->saddr
,
1020 &skb
->nh
.ipv6h
->daddr
, 0);
1022 if (skb
->len
<= 76) {
1023 return __skb_checksum_complete(skb
);
1028 /* The socket must have it's spinlock held when we get
1031 * We have a potential double-lock case here, so even when
1032 * doing backlog processing we use the BH locking scheme.
1033 * This is because we cannot sleep with the original spinlock
1036 static int tcp_v6_do_rcv(struct sock
*sk
, struct sk_buff
*skb
)
1038 struct ipv6_pinfo
*np
= inet6_sk(sk
);
1039 struct tcp_sock
*tp
;
1040 struct sk_buff
*opt_skb
= NULL
;
1042 /* Imagine: socket is IPv6. IPv4 packet arrives,
1043 goes to IPv4 receive handler and backlogged.
1044 From backlog it always goes here. Kerboom...
1045 Fortunately, tcp_rcv_established and rcv_established
1046 handle them correctly, but it is not case with
1047 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1050 if (skb
->protocol
== htons(ETH_P_IP
))
1051 return tcp_v4_do_rcv(sk
, skb
);
1053 if (sk_filter(sk
, skb
, 0))
1057 * socket locking is here for SMP purposes as backlog rcv
1058 * is currently called with bh processing disabled.
1061 /* Do Stevens' IPV6_PKTOPTIONS.
1063 Yes, guys, it is the only place in our code, where we
1064 may make it not affecting IPv4.
1065 The rest of code is protocol independent,
1066 and I do not like idea to uglify IPv4.
1068 Actually, all the idea behind IPV6_PKTOPTIONS
1069 looks not very well thought. For now we latch
1070 options, received in the last packet, enqueued
1071 by tcp. Feel free to propose better solution.
1075 opt_skb
= skb_clone(skb
, GFP_ATOMIC
);
1077 if (sk
->sk_state
== TCP_ESTABLISHED
) { /* Fast path */
1078 TCP_CHECK_TIMER(sk
);
1079 if (tcp_rcv_established(sk
, skb
, skb
->h
.th
, skb
->len
))
1081 TCP_CHECK_TIMER(sk
);
1083 goto ipv6_pktoptions
;
1087 if (skb
->len
< (skb
->h
.th
->doff
<<2) || tcp_checksum_complete(skb
))
1090 if (sk
->sk_state
== TCP_LISTEN
) {
1091 struct sock
*nsk
= tcp_v6_hnd_req(sk
, skb
);
1096 * Queue it on the new socket if the new socket is active,
1097 * otherwise we just shortcircuit this and continue with
1101 if (tcp_child_process(sk
, nsk
, skb
))
1104 __kfree_skb(opt_skb
);
1109 TCP_CHECK_TIMER(sk
);
1110 if (tcp_rcv_state_process(sk
, skb
, skb
->h
.th
, skb
->len
))
1112 TCP_CHECK_TIMER(sk
);
1114 goto ipv6_pktoptions
;
1118 tcp_v6_send_reset(skb
);
1121 __kfree_skb(opt_skb
);
1125 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1130 /* Do you ask, what is it?
1132 1. skb was enqueued by tcp.
1133 2. skb is added to tail of read queue, rather than out of order.
1134 3. socket is not in passive state.
1135 4. Finally, it really contains options, which user wants to receive.
1138 if (TCP_SKB_CB(opt_skb
)->end_seq
== tp
->rcv_nxt
&&
1139 !((1 << sk
->sk_state
) & (TCPF_CLOSE
| TCPF_LISTEN
))) {
1140 if (np
->rxopt
.bits
.rxinfo
|| np
->rxopt
.bits
.rxoinfo
)
1141 np
->mcast_oif
= inet6_iif(opt_skb
);
1142 if (np
->rxopt
.bits
.rxhlim
|| np
->rxopt
.bits
.rxohlim
)
1143 np
->mcast_hops
= opt_skb
->nh
.ipv6h
->hop_limit
;
1144 if (ipv6_opt_accepted(sk
, opt_skb
)) {
1145 skb_set_owner_r(opt_skb
, sk
);
1146 opt_skb
= xchg(&np
->pktoptions
, opt_skb
);
1148 __kfree_skb(opt_skb
);
1149 opt_skb
= xchg(&np
->pktoptions
, NULL
);
1158 static int tcp_v6_rcv(struct sk_buff
**pskb
)
1160 struct sk_buff
*skb
= *pskb
;
1165 if (skb
->pkt_type
!= PACKET_HOST
)
1169 * Count it even if it's bad.
1171 TCP_INC_STATS_BH(TCP_MIB_INSEGS
);
1173 if (!pskb_may_pull(skb
, sizeof(struct tcphdr
)))
1178 if (th
->doff
< sizeof(struct tcphdr
)/4)
1180 if (!pskb_may_pull(skb
, th
->doff
*4))
1183 if ((skb
->ip_summed
!= CHECKSUM_UNNECESSARY
&&
1184 tcp_v6_checksum_init(skb
)))
1188 TCP_SKB_CB(skb
)->seq
= ntohl(th
->seq
);
1189 TCP_SKB_CB(skb
)->end_seq
= (TCP_SKB_CB(skb
)->seq
+ th
->syn
+ th
->fin
+
1190 skb
->len
- th
->doff
*4);
1191 TCP_SKB_CB(skb
)->ack_seq
= ntohl(th
->ack_seq
);
1192 TCP_SKB_CB(skb
)->when
= 0;
1193 TCP_SKB_CB(skb
)->flags
= ipv6_get_dsfield(skb
->nh
.ipv6h
);
1194 TCP_SKB_CB(skb
)->sacked
= 0;
1196 sk
= __inet6_lookup(&tcp_hashinfo
, &skb
->nh
.ipv6h
->saddr
, th
->source
,
1197 &skb
->nh
.ipv6h
->daddr
, ntohs(th
->dest
),
1204 if (sk
->sk_state
== TCP_TIME_WAIT
)
1207 if (!xfrm6_policy_check(sk
, XFRM_POLICY_IN
, skb
))
1208 goto discard_and_relse
;
1210 if (sk_filter(sk
, skb
, 0))
1211 goto discard_and_relse
;
1217 if (!sock_owned_by_user(sk
)) {
1218 #ifdef CONFIG_NET_DMA
1219 struct tcp_sock
*tp
= tcp_sk(sk
);
1220 if (tp
->ucopy
.dma_chan
)
1221 ret
= tcp_v6_do_rcv(sk
, skb
);
1225 if (!tcp_prequeue(sk
, skb
))
1226 ret
= tcp_v6_do_rcv(sk
, skb
);
1229 sk_add_backlog(sk
, skb
);
1233 return ret
? -1 : 0;
1236 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
1239 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1241 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1243 tcp_v6_send_reset(skb
);
1260 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
)) {
1261 inet_twsk_put((struct inet_timewait_sock
*)sk
);
1265 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1266 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1267 inet_twsk_put((struct inet_timewait_sock
*)sk
);
1271 switch (tcp_timewait_state_process((struct inet_timewait_sock
*)sk
,
1277 sk2
= inet6_lookup_listener(&tcp_hashinfo
,
1278 &skb
->nh
.ipv6h
->daddr
,
1279 ntohs(th
->dest
), inet6_iif(skb
));
1281 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
1282 inet_twsk_deschedule(tw
, &tcp_death_row
);
1287 /* Fall through to ACK */
1290 tcp_v6_timewait_ack(sk
, skb
);
1294 case TCP_TW_SUCCESS
:;
/*
 * Stub: timestamp caching (peer info) is not implemented for IPv6.
 * Always reports "nothing remembered".
 */
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
1305 static struct inet_connection_sock_af_ops ipv6_specific
= {
1306 .queue_xmit
= inet6_csk_xmit
,
1307 .send_check
= tcp_v6_send_check
,
1308 .rebuild_header
= inet6_sk_rebuild_header
,
1309 .conn_request
= tcp_v6_conn_request
,
1310 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1311 .remember_stamp
= tcp_v6_remember_stamp
,
1312 .net_header_len
= sizeof(struct ipv6hdr
),
1313 .setsockopt
= ipv6_setsockopt
,
1314 .getsockopt
= ipv6_getsockopt
,
1315 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1316 .sockaddr_len
= sizeof(struct sockaddr_in6
),
1317 #ifdef CONFIG_COMPAT
1318 .compat_setsockopt
= compat_ipv6_setsockopt
,
1319 .compat_getsockopt
= compat_ipv6_getsockopt
,
1324 * TCP over IPv4 via INET6 API
1327 static struct inet_connection_sock_af_ops ipv6_mapped
= {
1328 .queue_xmit
= ip_queue_xmit
,
1329 .send_check
= tcp_v4_send_check
,
1330 .rebuild_header
= inet_sk_rebuild_header
,
1331 .conn_request
= tcp_v6_conn_request
,
1332 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1333 .remember_stamp
= tcp_v4_remember_stamp
,
1334 .net_header_len
= sizeof(struct iphdr
),
1335 .setsockopt
= ipv6_setsockopt
,
1336 .getsockopt
= ipv6_getsockopt
,
1337 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1338 .sockaddr_len
= sizeof(struct sockaddr_in6
),
1339 #ifdef CONFIG_COMPAT
1340 .compat_setsockopt
= compat_ipv6_setsockopt
,
1341 .compat_getsockopt
= compat_ipv6_getsockopt
,
1345 /* NOTE: A lot of things set to zero explicitly by call to
1346 * sk_alloc() so need not be done here.
1348 static int tcp_v6_init_sock(struct sock
*sk
)
1350 struct inet_connection_sock
*icsk
= inet_csk(sk
);
1351 struct tcp_sock
*tp
= tcp_sk(sk
);
1353 skb_queue_head_init(&tp
->out_of_order_queue
);
1354 tcp_init_xmit_timers(sk
);
1355 tcp_prequeue_init(tp
);
1357 icsk
->icsk_rto
= TCP_TIMEOUT_INIT
;
1358 tp
->mdev
= TCP_TIMEOUT_INIT
;
1360 /* So many TCP implementations out there (incorrectly) count the
1361 * initial SYN frame in their delayed-ACK and congestion control
1362 * algorithms that we must have the following bandaid to talk
1363 * efficiently to them. -DaveM
1367 /* See draft-stevens-tcpca-spec-01 for discussion of the
1368 * initialization of these values.
1370 tp
->snd_ssthresh
= 0x7fffffff;
1371 tp
->snd_cwnd_clamp
= ~0;
1372 tp
->mss_cache
= 536;
1374 tp
->reordering
= sysctl_tcp_reordering
;
1376 sk
->sk_state
= TCP_CLOSE
;
1378 icsk
->icsk_af_ops
= &ipv6_specific
;
1379 icsk
->icsk_ca_ops
= &tcp_init_congestion_ops
;
1380 icsk
->icsk_sync_mss
= tcp_sync_mss
;
1381 sk
->sk_write_space
= sk_stream_write_space
;
1382 sock_set_flag(sk
, SOCK_USE_WRITE_QUEUE
);
1384 sk
->sk_sndbuf
= sysctl_tcp_wmem
[1];
1385 sk
->sk_rcvbuf
= sysctl_tcp_rmem
[1];
1387 atomic_inc(&tcp_sockets_allocated
);
/*
 * Tear down an IPv6 TCP socket: run the shared IPv4/IPv6 TCP cleanup
 * first, then release the IPv6-specific socket state.
 */
static int tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
1398 /* Proc filesystem TCPv6 sock list dumping. */
1399 static void get_openreq6(struct seq_file
*seq
,
1400 struct sock
*sk
, struct request_sock
*req
, int i
, int uid
)
1402 int ttd
= req
->expires
- jiffies
;
1403 struct in6_addr
*src
= &inet6_rsk(req
)->loc_addr
;
1404 struct in6_addr
*dest
= &inet6_rsk(req
)->rmt_addr
;
1410 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1411 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1413 src
->s6_addr32
[0], src
->s6_addr32
[1],
1414 src
->s6_addr32
[2], src
->s6_addr32
[3],
1415 ntohs(inet_sk(sk
)->sport
),
1416 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
1417 dest
->s6_addr32
[2], dest
->s6_addr32
[3],
1418 ntohs(inet_rsk(req
)->rmt_port
),
1420 0,0, /* could print option size, but that is af dependent. */
1421 1, /* timers active (only the expire timer) */
1422 jiffies_to_clock_t(ttd
),
1425 0, /* non standard timer */
1426 0, /* open_requests have no inode */
1430 static void get_tcp6_sock(struct seq_file
*seq
, struct sock
*sp
, int i
)
1432 struct in6_addr
*dest
, *src
;
1435 unsigned long timer_expires
;
1436 struct inet_sock
*inet
= inet_sk(sp
);
1437 struct tcp_sock
*tp
= tcp_sk(sp
);
1438 const struct inet_connection_sock
*icsk
= inet_csk(sp
);
1439 struct ipv6_pinfo
*np
= inet6_sk(sp
);
1442 src
= &np
->rcv_saddr
;
1443 destp
= ntohs(inet
->dport
);
1444 srcp
= ntohs(inet
->sport
);
1446 if (icsk
->icsk_pending
== ICSK_TIME_RETRANS
) {
1448 timer_expires
= icsk
->icsk_timeout
;
1449 } else if (icsk
->icsk_pending
== ICSK_TIME_PROBE0
) {
1451 timer_expires
= icsk
->icsk_timeout
;
1452 } else if (timer_pending(&sp
->sk_timer
)) {
1454 timer_expires
= sp
->sk_timer
.expires
;
1457 timer_expires
= jiffies
;
1461 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1462 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
1464 src
->s6_addr32
[0], src
->s6_addr32
[1],
1465 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
1466 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
1467 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
1469 tp
->write_seq
-tp
->snd_una
,
1470 (sp
->sk_state
== TCP_LISTEN
) ? sp
->sk_ack_backlog
: (tp
->rcv_nxt
- tp
->copied_seq
),
1472 jiffies_to_clock_t(timer_expires
- jiffies
),
1473 icsk
->icsk_retransmits
,
1475 icsk
->icsk_probes_out
,
1477 atomic_read(&sp
->sk_refcnt
), sp
,
1480 (icsk
->icsk_ack
.quick
<< 1 ) | icsk
->icsk_ack
.pingpong
,
1481 tp
->snd_cwnd
, tp
->snd_ssthresh
>=0xFFFF?-1:tp
->snd_ssthresh
1485 static void get_timewait6_sock(struct seq_file
*seq
,
1486 struct inet_timewait_sock
*tw
, int i
)
1488 struct in6_addr
*dest
, *src
;
1490 struct inet6_timewait_sock
*tw6
= inet6_twsk((struct sock
*)tw
);
1491 int ttd
= tw
->tw_ttd
- jiffies
;
1496 dest
= &tw6
->tw_v6_daddr
;
1497 src
= &tw6
->tw_v6_rcv_saddr
;
1498 destp
= ntohs(tw
->tw_dport
);
1499 srcp
= ntohs(tw
->tw_sport
);
1502 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1503 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1505 src
->s6_addr32
[0], src
->s6_addr32
[1],
1506 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
1507 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
1508 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
1509 tw
->tw_substate
, 0, 0,
1510 3, jiffies_to_clock_t(ttd
), 0, 0, 0, 0,
1511 atomic_read(&tw
->tw_refcnt
), tw
);
1514 #ifdef CONFIG_PROC_FS
1515 static int tcp6_seq_show(struct seq_file
*seq
, void *v
)
1517 struct tcp_iter_state
*st
;
1519 if (v
== SEQ_START_TOKEN
) {
1524 "st tx_queue rx_queue tr tm->when retrnsmt"
1525 " uid timeout inode\n");
1530 switch (st
->state
) {
1531 case TCP_SEQ_STATE_LISTENING
:
1532 case TCP_SEQ_STATE_ESTABLISHED
:
1533 get_tcp6_sock(seq
, v
, st
->num
);
1535 case TCP_SEQ_STATE_OPENREQ
:
1536 get_openreq6(seq
, st
->syn_wait_sk
, v
, st
->num
, st
->uid
);
1538 case TCP_SEQ_STATE_TIME_WAIT
:
1539 get_timewait6_sock(seq
, v
, st
->num
);
1546 static struct file_operations tcp6_seq_fops
;
1547 static struct tcp_seq_afinfo tcp6_seq_afinfo
= {
1548 .owner
= THIS_MODULE
,
1551 .seq_show
= tcp6_seq_show
,
1552 .seq_fops
= &tcp6_seq_fops
,
1555 int __init
tcp6_proc_init(void)
1557 return tcp_proc_register(&tcp6_seq_afinfo
);
1560 void tcp6_proc_exit(void)
1562 tcp_proc_unregister(&tcp6_seq_afinfo
);
1566 struct proto tcpv6_prot
= {
1568 .owner
= THIS_MODULE
,
1570 .connect
= tcp_v6_connect
,
1571 .disconnect
= tcp_disconnect
,
1572 .accept
= inet_csk_accept
,
1574 .init
= tcp_v6_init_sock
,
1575 .destroy
= tcp_v6_destroy_sock
,
1576 .shutdown
= tcp_shutdown
,
1577 .setsockopt
= tcp_setsockopt
,
1578 .getsockopt
= tcp_getsockopt
,
1579 .sendmsg
= tcp_sendmsg
,
1580 .recvmsg
= tcp_recvmsg
,
1581 .backlog_rcv
= tcp_v6_do_rcv
,
1582 .hash
= tcp_v6_hash
,
1583 .unhash
= tcp_unhash
,
1584 .get_port
= tcp_v6_get_port
,
1585 .enter_memory_pressure
= tcp_enter_memory_pressure
,
1586 .sockets_allocated
= &tcp_sockets_allocated
,
1587 .memory_allocated
= &tcp_memory_allocated
,
1588 .memory_pressure
= &tcp_memory_pressure
,
1589 .orphan_count
= &tcp_orphan_count
,
1590 .sysctl_mem
= sysctl_tcp_mem
,
1591 .sysctl_wmem
= sysctl_tcp_wmem
,
1592 .sysctl_rmem
= sysctl_tcp_rmem
,
1593 .max_header
= MAX_TCP_HEADER
,
1594 .obj_size
= sizeof(struct tcp6_sock
),
1595 .twsk_prot
= &tcp6_timewait_sock_ops
,
1596 .rsk_prot
= &tcp6_request_sock_ops
,
1597 #ifdef CONFIG_COMPAT
1598 .compat_setsockopt
= compat_tcp_setsockopt
,
1599 .compat_getsockopt
= compat_tcp_getsockopt
,
1603 static struct inet6_protocol tcpv6_protocol
= {
1604 .handler
= tcp_v6_rcv
,
1605 .err_handler
= tcp_v6_err
,
1606 .gso_segment
= tcp_tso_segment
,
1607 .flags
= INET6_PROTO_NOPOLICY
|INET6_PROTO_FINAL
,
1610 static struct inet_protosw tcpv6_protosw
= {
1611 .type
= SOCK_STREAM
,
1612 .protocol
= IPPROTO_TCP
,
1613 .prot
= &tcpv6_prot
,
1614 .ops
= &inet6_stream_ops
,
1617 .flags
= INET_PROTOSW_PERMANENT
|
1621 void __init
tcpv6_init(void)
1623 /* register inet6 protocol */
1624 if (inet6_add_protocol(&tcpv6_protocol
, IPPROTO_TCP
) < 0)
1625 printk(KERN_ERR
"tcpv6_init: Could not register protocol\n");
1626 inet6_register_protosw(&tcpv6_protosw
);
1628 if (inet_csk_ctl_sock_create(&tcp6_socket
, PF_INET6
, SOCK_RAW
,
1630 panic("Failed to create the TCPv6 control socket.\n");