3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
11 * linux/net/ipv4/tcp.c
12 * linux/net/ipv4/tcp_input.c
13 * linux/net/ipv4/tcp_output.c
16 * Hideaki YOSHIFUJI : sin6_scope_id support
17 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
18 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
19 * a single port at the same time.
20 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
28 #include <linux/module.h>
29 #include <linux/errno.h>
30 #include <linux/types.h>
31 #include <linux/socket.h>
32 #include <linux/sockios.h>
33 #include <linux/net.h>
34 #include <linux/jiffies.h>
36 #include <linux/in6.h>
37 #include <linux/netdevice.h>
38 #include <linux/init.h>
39 #include <linux/jhash.h>
40 #include <linux/ipsec.h>
41 #include <linux/times.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
59 #include <net/addrconf.h>
61 #include <net/dsfield.h>
62 #include <net/timewait_sock.h>
64 #include <asm/uaccess.h>
66 #include <linux/proc_fs.h>
67 #include <linux/seq_file.h>
69 #include <linux/crypto.h>
70 #include <linux/scatterlist.h>
72 /* Socket used for sending RSTs and ACKs */
73 static struct socket
*tcp6_socket
;
75 static void tcp_v6_send_reset(struct sock
*sk
, struct sk_buff
*skb
);
76 static void tcp_v6_reqsk_send_ack(struct sk_buff
*skb
, struct request_sock
*req
);
77 static void tcp_v6_send_check(struct sock
*sk
, int len
,
80 static int tcp_v6_do_rcv(struct sock
*sk
, struct sk_buff
*skb
);
82 static struct inet_connection_sock_af_ops ipv6_mapped
;
83 static struct inet_connection_sock_af_ops ipv6_specific
;
84 #ifdef CONFIG_TCP_MD5SIG
85 static struct tcp_sock_af_ops tcp_sock_ipv6_specific
;
86 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific
;
89 static int tcp_v6_get_port(struct sock
*sk
, unsigned short snum
)
91 return inet_csk_get_port(&tcp_hashinfo
, sk
, snum
,
92 inet6_csk_bind_conflict
);
95 static void tcp_v6_hash(struct sock
*sk
)
97 if (sk
->sk_state
!= TCP_CLOSE
) {
98 if (inet_csk(sk
)->icsk_af_ops
== &ipv6_mapped
) {
103 __inet6_hash(&tcp_hashinfo
, sk
);
108 static __inline__ __sum16
tcp_v6_check(struct tcphdr
*th
, int len
,
109 struct in6_addr
*saddr
,
110 struct in6_addr
*daddr
,
113 return csum_ipv6_magic(saddr
, daddr
, len
, IPPROTO_TCP
, base
);
116 static __u32
tcp_v6_init_sequence(struct sk_buff
*skb
)
118 return secure_tcpv6_sequence_number(ipv6_hdr(skb
)->daddr
.s6_addr32
,
119 ipv6_hdr(skb
)->saddr
.s6_addr32
,
121 tcp_hdr(skb
)->source
);
124 static int tcp_v6_connect(struct sock
*sk
, struct sockaddr
*uaddr
,
127 struct sockaddr_in6
*usin
= (struct sockaddr_in6
*) uaddr
;
128 struct inet_sock
*inet
= inet_sk(sk
);
129 struct inet_connection_sock
*icsk
= inet_csk(sk
);
130 struct ipv6_pinfo
*np
= inet6_sk(sk
);
131 struct tcp_sock
*tp
= tcp_sk(sk
);
132 struct in6_addr
*saddr
= NULL
, *final_p
= NULL
, final
;
134 struct dst_entry
*dst
;
138 if (addr_len
< SIN6_LEN_RFC2133
)
141 if (usin
->sin6_family
!= AF_INET6
)
142 return(-EAFNOSUPPORT
);
144 memset(&fl
, 0, sizeof(fl
));
147 fl
.fl6_flowlabel
= usin
->sin6_flowinfo
&IPV6_FLOWINFO_MASK
;
148 IP6_ECN_flow_init(fl
.fl6_flowlabel
);
149 if (fl
.fl6_flowlabel
&IPV6_FLOWLABEL_MASK
) {
150 struct ip6_flowlabel
*flowlabel
;
151 flowlabel
= fl6_sock_lookup(sk
, fl
.fl6_flowlabel
);
152 if (flowlabel
== NULL
)
154 ipv6_addr_copy(&usin
->sin6_addr
, &flowlabel
->dst
);
155 fl6_sock_release(flowlabel
);
160 * connect() to INADDR_ANY means loopback (BSD'ism).
163 if(ipv6_addr_any(&usin
->sin6_addr
))
164 usin
->sin6_addr
.s6_addr
[15] = 0x1;
166 addr_type
= ipv6_addr_type(&usin
->sin6_addr
);
168 if(addr_type
& IPV6_ADDR_MULTICAST
)
171 if (addr_type
&IPV6_ADDR_LINKLOCAL
) {
172 if (addr_len
>= sizeof(struct sockaddr_in6
) &&
173 usin
->sin6_scope_id
) {
174 /* If interface is set while binding, indices
177 if (sk
->sk_bound_dev_if
&&
178 sk
->sk_bound_dev_if
!= usin
->sin6_scope_id
)
181 sk
->sk_bound_dev_if
= usin
->sin6_scope_id
;
184 /* Connect to link-local address requires an interface */
185 if (!sk
->sk_bound_dev_if
)
189 if (tp
->rx_opt
.ts_recent_stamp
&&
190 !ipv6_addr_equal(&np
->daddr
, &usin
->sin6_addr
)) {
191 tp
->rx_opt
.ts_recent
= 0;
192 tp
->rx_opt
.ts_recent_stamp
= 0;
196 ipv6_addr_copy(&np
->daddr
, &usin
->sin6_addr
);
197 np
->flow_label
= fl
.fl6_flowlabel
;
203 if (addr_type
== IPV6_ADDR_MAPPED
) {
204 u32 exthdrlen
= icsk
->icsk_ext_hdr_len
;
205 struct sockaddr_in sin
;
207 SOCK_DEBUG(sk
, "connect: ipv4 mapped\n");
209 if (__ipv6_only_sock(sk
))
212 sin
.sin_family
= AF_INET
;
213 sin
.sin_port
= usin
->sin6_port
;
214 sin
.sin_addr
.s_addr
= usin
->sin6_addr
.s6_addr32
[3];
216 icsk
->icsk_af_ops
= &ipv6_mapped
;
217 sk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
218 #ifdef CONFIG_TCP_MD5SIG
219 tp
->af_specific
= &tcp_sock_ipv6_mapped_specific
;
222 err
= tcp_v4_connect(sk
, (struct sockaddr
*)&sin
, sizeof(sin
));
225 icsk
->icsk_ext_hdr_len
= exthdrlen
;
226 icsk
->icsk_af_ops
= &ipv6_specific
;
227 sk
->sk_backlog_rcv
= tcp_v6_do_rcv
;
228 #ifdef CONFIG_TCP_MD5SIG
229 tp
->af_specific
= &tcp_sock_ipv6_specific
;
233 ipv6_addr_set_v4mapped(inet
->saddr
, &np
->saddr
);
234 ipv6_addr_set_v4mapped(inet
->rcv_saddr
, &np
->rcv_saddr
);
240 if (!ipv6_addr_any(&np
->rcv_saddr
))
241 saddr
= &np
->rcv_saddr
;
243 fl
.proto
= IPPROTO_TCP
;
244 ipv6_addr_copy(&fl
.fl6_dst
, &np
->daddr
);
245 ipv6_addr_copy(&fl
.fl6_src
,
246 (saddr
? saddr
: &np
->saddr
));
247 fl
.oif
= sk
->sk_bound_dev_if
;
248 fl
.fl_ip_dport
= usin
->sin6_port
;
249 fl
.fl_ip_sport
= inet
->sport
;
251 if (np
->opt
&& np
->opt
->srcrt
) {
252 struct rt0_hdr
*rt0
= (struct rt0_hdr
*)np
->opt
->srcrt
;
253 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
254 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
258 security_sk_classify_flow(sk
, &fl
);
260 err
= ip6_dst_lookup(sk
, &dst
, &fl
);
264 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
266 if ((err
= __xfrm_lookup(&dst
, &fl
, sk
, 1)) < 0) {
268 err
= ip6_dst_blackhole(sk
, &dst
, &fl
);
275 ipv6_addr_copy(&np
->rcv_saddr
, saddr
);
278 /* set the source address */
279 ipv6_addr_copy(&np
->saddr
, saddr
);
280 inet
->rcv_saddr
= LOOPBACK4_IPV6
;
282 sk
->sk_gso_type
= SKB_GSO_TCPV6
;
283 __ip6_dst_store(sk
, dst
, NULL
, NULL
);
285 icsk
->icsk_ext_hdr_len
= 0;
287 icsk
->icsk_ext_hdr_len
= (np
->opt
->opt_flen
+
290 tp
->rx_opt
.mss_clamp
= IPV6_MIN_MTU
- sizeof(struct tcphdr
) - sizeof(struct ipv6hdr
);
292 inet
->dport
= usin
->sin6_port
;
294 tcp_set_state(sk
, TCP_SYN_SENT
);
295 err
= inet6_hash_connect(&tcp_death_row
, sk
);
300 tp
->write_seq
= secure_tcpv6_sequence_number(np
->saddr
.s6_addr32
,
305 err
= tcp_connect(sk
);
312 tcp_set_state(sk
, TCP_CLOSE
);
316 sk
->sk_route_caps
= 0;
320 static void tcp_v6_err(struct sk_buff
*skb
, struct inet6_skb_parm
*opt
,
321 int type
, int code
, int offset
, __be32 info
)
323 struct ipv6hdr
*hdr
= (struct ipv6hdr
*)skb
->data
;
324 const struct tcphdr
*th
= (struct tcphdr
*)(skb
->data
+offset
);
325 struct ipv6_pinfo
*np
;
331 sk
= inet6_lookup(&tcp_hashinfo
, &hdr
->daddr
, th
->dest
, &hdr
->saddr
,
332 th
->source
, skb
->dev
->ifindex
);
335 ICMP6_INC_STATS_BH(__in6_dev_get(skb
->dev
), ICMP6_MIB_INERRORS
);
339 if (sk
->sk_state
== TCP_TIME_WAIT
) {
340 inet_twsk_put(inet_twsk(sk
));
345 if (sock_owned_by_user(sk
))
346 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS
);
348 if (sk
->sk_state
== TCP_CLOSE
)
352 seq
= ntohl(th
->seq
);
353 if (sk
->sk_state
!= TCP_LISTEN
&&
354 !between(seq
, tp
->snd_una
, tp
->snd_nxt
)) {
355 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS
);
361 if (type
== ICMPV6_PKT_TOOBIG
) {
362 struct dst_entry
*dst
= NULL
;
364 if (sock_owned_by_user(sk
))
366 if ((1 << sk
->sk_state
) & (TCPF_LISTEN
| TCPF_CLOSE
))
369 /* icmp should have updated the destination cache entry */
370 dst
= __sk_dst_check(sk
, np
->dst_cookie
);
373 struct inet_sock
*inet
= inet_sk(sk
);
376 /* BUGGG_FUTURE: Again, it is not clear how
377 to handle rthdr case. Ignore this complexity
380 memset(&fl
, 0, sizeof(fl
));
381 fl
.proto
= IPPROTO_TCP
;
382 ipv6_addr_copy(&fl
.fl6_dst
, &np
->daddr
);
383 ipv6_addr_copy(&fl
.fl6_src
, &np
->saddr
);
384 fl
.oif
= sk
->sk_bound_dev_if
;
385 fl
.fl_ip_dport
= inet
->dport
;
386 fl
.fl_ip_sport
= inet
->sport
;
387 security_skb_classify_flow(skb
, &fl
);
389 if ((err
= ip6_dst_lookup(sk
, &dst
, &fl
))) {
390 sk
->sk_err_soft
= -err
;
394 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0) {
395 sk
->sk_err_soft
= -err
;
402 if (inet_csk(sk
)->icsk_pmtu_cookie
> dst_mtu(dst
)) {
403 tcp_sync_mss(sk
, dst_mtu(dst
));
404 tcp_simple_retransmit(sk
);
405 } /* else let the usual retransmit timer handle it */
410 icmpv6_err_convert(type
, code
, &err
);
412 /* Might be for an request_sock */
413 switch (sk
->sk_state
) {
414 struct request_sock
*req
, **prev
;
416 if (sock_owned_by_user(sk
))
419 req
= inet6_csk_search_req(sk
, &prev
, th
->dest
, &hdr
->daddr
,
420 &hdr
->saddr
, inet6_iif(skb
));
424 /* ICMPs are not backlogged, hence we cannot get
425 * an established socket here.
427 BUG_TRAP(req
->sk
== NULL
);
429 if (seq
!= tcp_rsk(req
)->snt_isn
) {
430 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS
);
434 inet_csk_reqsk_queue_drop(sk
, req
, prev
);
438 case TCP_SYN_RECV
: /* Cannot happen.
439 It can, it SYNs are crossed. --ANK */
440 if (!sock_owned_by_user(sk
)) {
442 sk
->sk_error_report(sk
); /* Wake people up to see the error (see connect in sock.c) */
446 sk
->sk_err_soft
= err
;
450 if (!sock_owned_by_user(sk
) && np
->recverr
) {
452 sk
->sk_error_report(sk
);
454 sk
->sk_err_soft
= err
;
462 static int tcp_v6_send_synack(struct sock
*sk
, struct request_sock
*req
,
463 struct dst_entry
*dst
)
465 struct inet6_request_sock
*treq
= inet6_rsk(req
);
466 struct ipv6_pinfo
*np
= inet6_sk(sk
);
467 struct sk_buff
* skb
;
468 struct ipv6_txoptions
*opt
= NULL
;
469 struct in6_addr
* final_p
= NULL
, final
;
473 memset(&fl
, 0, sizeof(fl
));
474 fl
.proto
= IPPROTO_TCP
;
475 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
476 ipv6_addr_copy(&fl
.fl6_src
, &treq
->loc_addr
);
477 fl
.fl6_flowlabel
= 0;
479 fl
.fl_ip_dport
= inet_rsk(req
)->rmt_port
;
480 fl
.fl_ip_sport
= inet_sk(sk
)->sport
;
481 security_req_classify_flow(req
, &fl
);
486 np
->rxopt
.bits
.osrcrt
== 2 &&
488 struct sk_buff
*pktopts
= treq
->pktopts
;
489 struct inet6_skb_parm
*rxopt
= IP6CB(pktopts
);
491 opt
= ipv6_invert_rthdr(sk
,
492 (struct ipv6_rt_hdr
*)(skb_network_header(pktopts
) +
496 if (opt
&& opt
->srcrt
) {
497 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) opt
->srcrt
;
498 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
499 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
503 err
= ip6_dst_lookup(sk
, &dst
, &fl
);
507 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
508 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
512 skb
= tcp_make_synack(sk
, dst
, req
);
514 struct tcphdr
*th
= tcp_hdr(skb
);
516 th
->check
= tcp_v6_check(th
, skb
->len
,
517 &treq
->loc_addr
, &treq
->rmt_addr
,
518 csum_partial((char *)th
, skb
->len
, skb
->csum
));
520 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
521 err
= ip6_xmit(sk
, skb
, &fl
, opt
, 0);
522 err
= net_xmit_eval(err
);
526 if (opt
&& opt
!= np
->opt
)
527 sock_kfree_s(sk
, opt
, opt
->tot_len
);
532 static void tcp_v6_reqsk_destructor(struct request_sock
*req
)
534 if (inet6_rsk(req
)->pktopts
)
535 kfree_skb(inet6_rsk(req
)->pktopts
);
538 #ifdef CONFIG_TCP_MD5SIG
539 static struct tcp_md5sig_key
*tcp_v6_md5_do_lookup(struct sock
*sk
,
540 struct in6_addr
*addr
)
542 struct tcp_sock
*tp
= tcp_sk(sk
);
547 if (!tp
->md5sig_info
|| !tp
->md5sig_info
->entries6
)
550 for (i
= 0; i
< tp
->md5sig_info
->entries6
; i
++) {
551 if (ipv6_addr_cmp(&tp
->md5sig_info
->keys6
[i
].addr
, addr
) == 0)
552 return &tp
->md5sig_info
->keys6
[i
].base
;
557 static struct tcp_md5sig_key
*tcp_v6_md5_lookup(struct sock
*sk
,
558 struct sock
*addr_sk
)
560 return tcp_v6_md5_do_lookup(sk
, &inet6_sk(addr_sk
)->daddr
);
563 static struct tcp_md5sig_key
*tcp_v6_reqsk_md5_lookup(struct sock
*sk
,
564 struct request_sock
*req
)
566 return tcp_v6_md5_do_lookup(sk
, &inet6_rsk(req
)->rmt_addr
);
569 static int tcp_v6_md5_do_add(struct sock
*sk
, struct in6_addr
*peer
,
570 char *newkey
, u8 newkeylen
)
572 /* Add key to the list */
573 struct tcp6_md5sig_key
*key
;
574 struct tcp_sock
*tp
= tcp_sk(sk
);
575 struct tcp6_md5sig_key
*keys
;
577 key
= (struct tcp6_md5sig_key
*) tcp_v6_md5_do_lookup(sk
, peer
);
579 /* modify existing entry - just update that one */
580 kfree(key
->base
.key
);
581 key
->base
.key
= newkey
;
582 key
->base
.keylen
= newkeylen
;
584 /* reallocate new list if current one is full. */
585 if (!tp
->md5sig_info
) {
586 tp
->md5sig_info
= kzalloc(sizeof(*tp
->md5sig_info
), GFP_ATOMIC
);
587 if (!tp
->md5sig_info
) {
591 sk
->sk_route_caps
&= ~NETIF_F_GSO_MASK
;
593 tcp_alloc_md5sig_pool();
594 if (tp
->md5sig_info
->alloced6
== tp
->md5sig_info
->entries6
) {
595 keys
= kmalloc((sizeof (tp
->md5sig_info
->keys6
[0]) *
596 (tp
->md5sig_info
->entries6
+ 1)), GFP_ATOMIC
);
599 tcp_free_md5sig_pool();
604 if (tp
->md5sig_info
->entries6
)
605 memmove(keys
, tp
->md5sig_info
->keys6
,
606 (sizeof (tp
->md5sig_info
->keys6
[0]) *
607 tp
->md5sig_info
->entries6
));
609 kfree(tp
->md5sig_info
->keys6
);
610 tp
->md5sig_info
->keys6
= keys
;
611 tp
->md5sig_info
->alloced6
++;
614 ipv6_addr_copy(&tp
->md5sig_info
->keys6
[tp
->md5sig_info
->entries6
].addr
,
616 tp
->md5sig_info
->keys6
[tp
->md5sig_info
->entries6
].base
.key
= newkey
;
617 tp
->md5sig_info
->keys6
[tp
->md5sig_info
->entries6
].base
.keylen
= newkeylen
;
619 tp
->md5sig_info
->entries6
++;
624 static int tcp_v6_md5_add_func(struct sock
*sk
, struct sock
*addr_sk
,
625 u8
*newkey
, __u8 newkeylen
)
627 return tcp_v6_md5_do_add(sk
, &inet6_sk(addr_sk
)->daddr
,
631 static int tcp_v6_md5_do_del(struct sock
*sk
, struct in6_addr
*peer
)
633 struct tcp_sock
*tp
= tcp_sk(sk
);
636 for (i
= 0; i
< tp
->md5sig_info
->entries6
; i
++) {
637 if (ipv6_addr_cmp(&tp
->md5sig_info
->keys6
[i
].addr
, peer
) == 0) {
639 kfree(tp
->md5sig_info
->keys6
[i
].base
.key
);
640 tp
->md5sig_info
->entries6
--;
642 if (tp
->md5sig_info
->entries6
== 0) {
643 kfree(tp
->md5sig_info
->keys6
);
644 tp
->md5sig_info
->keys6
= NULL
;
645 tp
->md5sig_info
->alloced6
= 0;
647 tcp_free_md5sig_pool();
651 /* shrink the database */
652 if (tp
->md5sig_info
->entries6
!= i
)
653 memmove(&tp
->md5sig_info
->keys6
[i
],
654 &tp
->md5sig_info
->keys6
[i
+1],
655 (tp
->md5sig_info
->entries6
- i
)
656 * sizeof (tp
->md5sig_info
->keys6
[0]));
663 static void tcp_v6_clear_md5_list (struct sock
*sk
)
665 struct tcp_sock
*tp
= tcp_sk(sk
);
668 if (tp
->md5sig_info
->entries6
) {
669 for (i
= 0; i
< tp
->md5sig_info
->entries6
; i
++)
670 kfree(tp
->md5sig_info
->keys6
[i
].base
.key
);
671 tp
->md5sig_info
->entries6
= 0;
672 tcp_free_md5sig_pool();
675 kfree(tp
->md5sig_info
->keys6
);
676 tp
->md5sig_info
->keys6
= NULL
;
677 tp
->md5sig_info
->alloced6
= 0;
679 if (tp
->md5sig_info
->entries4
) {
680 for (i
= 0; i
< tp
->md5sig_info
->entries4
; i
++)
681 kfree(tp
->md5sig_info
->keys4
[i
].base
.key
);
682 tp
->md5sig_info
->entries4
= 0;
683 tcp_free_md5sig_pool();
686 kfree(tp
->md5sig_info
->keys4
);
687 tp
->md5sig_info
->keys4
= NULL
;
688 tp
->md5sig_info
->alloced4
= 0;
691 static int tcp_v6_parse_md5_keys (struct sock
*sk
, char __user
*optval
,
694 struct tcp_md5sig cmd
;
695 struct sockaddr_in6
*sin6
= (struct sockaddr_in6
*)&cmd
.tcpm_addr
;
698 if (optlen
< sizeof(cmd
))
701 if (copy_from_user(&cmd
, optval
, sizeof(cmd
)))
704 if (sin6
->sin6_family
!= AF_INET6
)
707 if (!cmd
.tcpm_keylen
) {
708 if (!tcp_sk(sk
)->md5sig_info
)
710 if (ipv6_addr_v4mapped(&sin6
->sin6_addr
))
711 return tcp_v4_md5_do_del(sk
, sin6
->sin6_addr
.s6_addr32
[3]);
712 return tcp_v6_md5_do_del(sk
, &sin6
->sin6_addr
);
715 if (cmd
.tcpm_keylen
> TCP_MD5SIG_MAXKEYLEN
)
718 if (!tcp_sk(sk
)->md5sig_info
) {
719 struct tcp_sock
*tp
= tcp_sk(sk
);
720 struct tcp_md5sig_info
*p
;
722 p
= kzalloc(sizeof(struct tcp_md5sig_info
), GFP_KERNEL
);
727 sk
->sk_route_caps
&= ~NETIF_F_GSO_MASK
;
730 newkey
= kmemdup(cmd
.tcpm_key
, cmd
.tcpm_keylen
, GFP_KERNEL
);
733 if (ipv6_addr_v4mapped(&sin6
->sin6_addr
)) {
734 return tcp_v4_md5_do_add(sk
, sin6
->sin6_addr
.s6_addr32
[3],
735 newkey
, cmd
.tcpm_keylen
);
737 return tcp_v6_md5_do_add(sk
, &sin6
->sin6_addr
, newkey
, cmd
.tcpm_keylen
);
740 static int tcp_v6_do_calc_md5_hash(char *md5_hash
, struct tcp_md5sig_key
*key
,
741 struct in6_addr
*saddr
,
742 struct in6_addr
*daddr
,
743 struct tcphdr
*th
, int protocol
,
746 struct scatterlist sg
[4];
750 struct tcp_md5sig_pool
*hp
;
751 struct tcp6_pseudohdr
*bp
;
752 struct hash_desc
*desc
;
754 unsigned int nbytes
= 0;
756 hp
= tcp_get_md5sig_pool();
758 printk(KERN_WARNING
"%s(): hash pool not found...\n", __FUNCTION__
);
759 goto clear_hash_noput
;
761 bp
= &hp
->md5_blk
.ip6
;
762 desc
= &hp
->md5_desc
;
764 /* 1. TCP pseudo-header (RFC2460) */
765 ipv6_addr_copy(&bp
->saddr
, saddr
);
766 ipv6_addr_copy(&bp
->daddr
, daddr
);
767 bp
->len
= htonl(tcplen
);
768 bp
->protocol
= htonl(protocol
);
770 sg_set_buf(&sg
[block
++], bp
, sizeof(*bp
));
771 nbytes
+= sizeof(*bp
);
773 /* 2. TCP header, excluding options */
776 sg_set_buf(&sg
[block
++], th
, sizeof(*th
));
777 nbytes
+= sizeof(*th
);
779 /* 3. TCP segment data (if any) */
780 data_len
= tcplen
- (th
->doff
<< 2);
782 u8
*data
= (u8
*)th
+ (th
->doff
<< 2);
783 sg_set_buf(&sg
[block
++], data
, data_len
);
788 sg_set_buf(&sg
[block
++], key
->key
, key
->keylen
);
789 nbytes
+= key
->keylen
;
791 /* Now store the hash into the packet */
792 err
= crypto_hash_init(desc
);
794 printk(KERN_WARNING
"%s(): hash_init failed\n", __FUNCTION__
);
797 err
= crypto_hash_update(desc
, sg
, nbytes
);
799 printk(KERN_WARNING
"%s(): hash_update failed\n", __FUNCTION__
);
802 err
= crypto_hash_final(desc
, md5_hash
);
804 printk(KERN_WARNING
"%s(): hash_final failed\n", __FUNCTION__
);
808 /* Reset header, and free up the crypto */
809 tcp_put_md5sig_pool();
814 tcp_put_md5sig_pool();
816 memset(md5_hash
, 0, 16);
820 static int tcp_v6_calc_md5_hash(char *md5_hash
, struct tcp_md5sig_key
*key
,
822 struct dst_entry
*dst
,
823 struct request_sock
*req
,
824 struct tcphdr
*th
, int protocol
,
827 struct in6_addr
*saddr
, *daddr
;
830 saddr
= &inet6_sk(sk
)->saddr
;
831 daddr
= &inet6_sk(sk
)->daddr
;
833 saddr
= &inet6_rsk(req
)->loc_addr
;
834 daddr
= &inet6_rsk(req
)->rmt_addr
;
836 return tcp_v6_do_calc_md5_hash(md5_hash
, key
,
838 th
, protocol
, tcplen
);
841 static int tcp_v6_inbound_md5_hash (struct sock
*sk
, struct sk_buff
*skb
)
843 __u8
*hash_location
= NULL
;
844 struct tcp_md5sig_key
*hash_expected
;
845 struct ipv6hdr
*ip6h
= ipv6_hdr(skb
);
846 struct tcphdr
*th
= tcp_hdr(skb
);
847 int length
= (th
->doff
<< 2) - sizeof (*th
);
852 hash_expected
= tcp_v6_md5_do_lookup(sk
, &ip6h
->saddr
);
854 /* If the TCP option is too short, we can short cut */
855 if (length
< TCPOLEN_MD5SIG
)
856 return hash_expected
? 1 : 0;
872 if (opsize
< 2 || opsize
> length
)
874 if (opcode
== TCPOPT_MD5SIG
) {
884 /* do we have a hash as expected? */
885 if (!hash_expected
) {
888 if (net_ratelimit()) {
889 printk(KERN_INFO
"MD5 Hash NOT expected but found "
890 "(" NIP6_FMT
", %u)->"
891 "(" NIP6_FMT
", %u)\n",
892 NIP6(ip6h
->saddr
), ntohs(th
->source
),
893 NIP6(ip6h
->daddr
), ntohs(th
->dest
));
898 if (!hash_location
) {
899 if (net_ratelimit()) {
900 printk(KERN_INFO
"MD5 Hash expected but NOT found "
901 "(" NIP6_FMT
", %u)->"
902 "(" NIP6_FMT
", %u)\n",
903 NIP6(ip6h
->saddr
), ntohs(th
->source
),
904 NIP6(ip6h
->daddr
), ntohs(th
->dest
));
909 /* check the signature */
910 genhash
= tcp_v6_do_calc_md5_hash(newhash
,
912 &ip6h
->saddr
, &ip6h
->daddr
,
915 if (genhash
|| memcmp(hash_location
, newhash
, 16) != 0) {
916 if (net_ratelimit()) {
917 printk(KERN_INFO
"MD5 Hash %s for "
918 "(" NIP6_FMT
", %u)->"
919 "(" NIP6_FMT
", %u)\n",
920 genhash
? "failed" : "mismatch",
921 NIP6(ip6h
->saddr
), ntohs(th
->source
),
922 NIP6(ip6h
->daddr
), ntohs(th
->dest
));
930 static struct request_sock_ops tcp6_request_sock_ops __read_mostly
= {
932 .obj_size
= sizeof(struct tcp6_request_sock
),
933 .rtx_syn_ack
= tcp_v6_send_synack
,
934 .send_ack
= tcp_v6_reqsk_send_ack
,
935 .destructor
= tcp_v6_reqsk_destructor
,
936 .send_reset
= tcp_v6_send_reset
#ifdef CONFIG_TCP_MD5SIG
/* Per-request MD5 operations: only a key lookup is needed here. */
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
        .md5_lookup     = tcp_v6_reqsk_md5_lookup,
};
#endif
945 static struct timewait_sock_ops tcp6_timewait_sock_ops
= {
946 .twsk_obj_size
= sizeof(struct tcp6_timewait_sock
),
947 .twsk_unique
= tcp_twsk_unique
,
948 .twsk_destructor
= tcp_twsk_destructor
,
951 static void tcp_v6_send_check(struct sock
*sk
, int len
, struct sk_buff
*skb
)
953 struct ipv6_pinfo
*np
= inet6_sk(sk
);
954 struct tcphdr
*th
= tcp_hdr(skb
);
956 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
957 th
->check
= ~csum_ipv6_magic(&np
->saddr
, &np
->daddr
, len
, IPPROTO_TCP
, 0);
958 skb
->csum_start
= skb_transport_header(skb
) - skb
->head
;
959 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
961 th
->check
= csum_ipv6_magic(&np
->saddr
, &np
->daddr
, len
, IPPROTO_TCP
,
962 csum_partial((char *)th
, th
->doff
<<2,
967 static int tcp_v6_gso_send_check(struct sk_buff
*skb
)
969 struct ipv6hdr
*ipv6h
;
972 if (!pskb_may_pull(skb
, sizeof(*th
)))
975 ipv6h
= ipv6_hdr(skb
);
979 th
->check
= ~csum_ipv6_magic(&ipv6h
->saddr
, &ipv6h
->daddr
, skb
->len
,
981 skb
->csum_start
= skb_transport_header(skb
) - skb
->head
;
982 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
983 skb
->ip_summed
= CHECKSUM_PARTIAL
;
987 static void tcp_v6_send_reset(struct sock
*sk
, struct sk_buff
*skb
)
989 struct tcphdr
*th
= tcp_hdr(skb
), *t1
;
990 struct sk_buff
*buff
;
992 int tot_len
= sizeof(*th
);
993 #ifdef CONFIG_TCP_MD5SIG
994 struct tcp_md5sig_key
*key
;
1000 if (!ipv6_unicast_destination(skb
))
1003 #ifdef CONFIG_TCP_MD5SIG
1005 key
= tcp_v6_md5_do_lookup(sk
, &ipv6_hdr(skb
)->daddr
);
1010 tot_len
+= TCPOLEN_MD5SIG_ALIGNED
;
1014 * We need to grab some memory, and put together an RST,
1015 * and then put it into the queue to be sent.
1018 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
,
1023 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
);
1025 t1
= (struct tcphdr
*) skb_push(buff
, tot_len
);
1026 skb_reset_transport_header(buff
);
1028 /* Swap the send and the receive. */
1029 memset(t1
, 0, sizeof(*t1
));
1030 t1
->dest
= th
->source
;
1031 t1
->source
= th
->dest
;
1032 t1
->doff
= tot_len
/ 4;
1036 t1
->seq
= th
->ack_seq
;
1039 t1
->ack_seq
= htonl(ntohl(th
->seq
) + th
->syn
+ th
->fin
1040 + skb
->len
- (th
->doff
<<2));
1043 #ifdef CONFIG_TCP_MD5SIG
1045 __be32
*opt
= (__be32
*)(t1
+ 1);
1046 opt
[0] = htonl((TCPOPT_NOP
<< 24) |
1047 (TCPOPT_NOP
<< 16) |
1048 (TCPOPT_MD5SIG
<< 8) |
1050 tcp_v6_do_calc_md5_hash((__u8
*)&opt
[1], key
,
1051 &ipv6_hdr(skb
)->daddr
,
1052 &ipv6_hdr(skb
)->saddr
,
1053 t1
, IPPROTO_TCP
, tot_len
);
1057 buff
->csum
= csum_partial((char *)t1
, sizeof(*t1
), 0);
1059 memset(&fl
, 0, sizeof(fl
));
1060 ipv6_addr_copy(&fl
.fl6_dst
, &ipv6_hdr(skb
)->saddr
);
1061 ipv6_addr_copy(&fl
.fl6_src
, &ipv6_hdr(skb
)->daddr
);
1063 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
1064 sizeof(*t1
), IPPROTO_TCP
,
1067 fl
.proto
= IPPROTO_TCP
;
1068 fl
.oif
= inet6_iif(skb
);
1069 fl
.fl_ip_dport
= t1
->dest
;
1070 fl
.fl_ip_sport
= t1
->source
;
1071 security_skb_classify_flow(skb
, &fl
);
1073 /* sk = NULL, but it is safe for now. RST socket required. */
1074 if (!ip6_dst_lookup(NULL
, &buff
->dst
, &fl
)) {
1076 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
1077 ip6_xmit(tcp6_socket
->sk
, buff
, &fl
, NULL
, 0);
1078 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS
);
1079 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS
);
1087 static void tcp_v6_send_ack(struct tcp_timewait_sock
*tw
,
1088 struct sk_buff
*skb
, u32 seq
, u32 ack
, u32 win
, u32 ts
)
1090 struct tcphdr
*th
= tcp_hdr(skb
), *t1
;
1091 struct sk_buff
*buff
;
1093 int tot_len
= sizeof(struct tcphdr
);
1095 #ifdef CONFIG_TCP_MD5SIG
1096 struct tcp_md5sig_key
*key
;
1097 struct tcp_md5sig_key tw_key
;
1100 #ifdef CONFIG_TCP_MD5SIG
1101 if (!tw
&& skb
->sk
) {
1102 key
= tcp_v6_md5_do_lookup(skb
->sk
, &ipv6_hdr(skb
)->daddr
);
1103 } else if (tw
&& tw
->tw_md5_keylen
) {
1104 tw_key
.key
= tw
->tw_md5_key
;
1105 tw_key
.keylen
= tw
->tw_md5_keylen
;
1113 tot_len
+= TCPOLEN_TSTAMP_ALIGNED
;
1114 #ifdef CONFIG_TCP_MD5SIG
1116 tot_len
+= TCPOLEN_MD5SIG_ALIGNED
;
1119 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
,
1124 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
);
1126 t1
= (struct tcphdr
*) skb_push(buff
,tot_len
);
1128 /* Swap the send and the receive. */
1129 memset(t1
, 0, sizeof(*t1
));
1130 t1
->dest
= th
->source
;
1131 t1
->source
= th
->dest
;
1132 t1
->doff
= tot_len
/4;
1133 t1
->seq
= htonl(seq
);
1134 t1
->ack_seq
= htonl(ack
);
1136 t1
->window
= htons(win
);
1138 topt
= (__be32
*)(t1
+ 1);
1141 *topt
++ = htonl((TCPOPT_NOP
<< 24) | (TCPOPT_NOP
<< 16) |
1142 (TCPOPT_TIMESTAMP
<< 8) | TCPOLEN_TIMESTAMP
);
1143 *topt
++ = htonl(tcp_time_stamp
);
1147 #ifdef CONFIG_TCP_MD5SIG
1149 *topt
++ = htonl((TCPOPT_NOP
<< 24) | (TCPOPT_NOP
<< 16) |
1150 (TCPOPT_MD5SIG
<< 8) | TCPOLEN_MD5SIG
);
1151 tcp_v6_do_calc_md5_hash((__u8
*)topt
, key
,
1152 &ipv6_hdr(skb
)->daddr
,
1153 &ipv6_hdr(skb
)->saddr
,
1154 t1
, IPPROTO_TCP
, tot_len
);
1158 buff
->csum
= csum_partial((char *)t1
, tot_len
, 0);
1160 memset(&fl
, 0, sizeof(fl
));
1161 ipv6_addr_copy(&fl
.fl6_dst
, &ipv6_hdr(skb
)->saddr
);
1162 ipv6_addr_copy(&fl
.fl6_src
, &ipv6_hdr(skb
)->daddr
);
1164 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
1165 tot_len
, IPPROTO_TCP
,
1168 fl
.proto
= IPPROTO_TCP
;
1169 fl
.oif
= inet6_iif(skb
);
1170 fl
.fl_ip_dport
= t1
->dest
;
1171 fl
.fl_ip_sport
= t1
->source
;
1172 security_skb_classify_flow(skb
, &fl
);
1174 if (!ip6_dst_lookup(NULL
, &buff
->dst
, &fl
)) {
1175 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
1176 ip6_xmit(tcp6_socket
->sk
, buff
, &fl
, NULL
, 0);
1177 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS
);
1185 static void tcp_v6_timewait_ack(struct sock
*sk
, struct sk_buff
*skb
)
1187 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
1188 struct tcp_timewait_sock
*tcptw
= tcp_twsk(sk
);
1190 tcp_v6_send_ack(tcptw
, skb
, tcptw
->tw_snd_nxt
, tcptw
->tw_rcv_nxt
,
1191 tcptw
->tw_rcv_wnd
>> tw
->tw_rcv_wscale
,
1192 tcptw
->tw_ts_recent
);
1197 static void tcp_v6_reqsk_send_ack(struct sk_buff
*skb
, struct request_sock
*req
)
1199 tcp_v6_send_ack(NULL
, skb
, tcp_rsk(req
)->snt_isn
+ 1, tcp_rsk(req
)->rcv_isn
+ 1, req
->rcv_wnd
, req
->ts_recent
);
1203 static struct sock
*tcp_v6_hnd_req(struct sock
*sk
,struct sk_buff
*skb
)
1205 struct request_sock
*req
, **prev
;
1206 const struct tcphdr
*th
= tcp_hdr(skb
);
1209 /* Find possible connection requests. */
1210 req
= inet6_csk_search_req(sk
, &prev
, th
->source
,
1211 &ipv6_hdr(skb
)->saddr
,
1212 &ipv6_hdr(skb
)->daddr
, inet6_iif(skb
));
1214 return tcp_check_req(sk
, skb
, req
, prev
);
1216 nsk
= __inet6_lookup_established(&tcp_hashinfo
, &ipv6_hdr(skb
)->saddr
,
1217 th
->source
, &ipv6_hdr(skb
)->daddr
,
1218 ntohs(th
->dest
), inet6_iif(skb
));
1221 if (nsk
->sk_state
!= TCP_TIME_WAIT
) {
1225 inet_twsk_put(inet_twsk(nsk
));
1229 #if 0 /*def CONFIG_SYN_COOKIES*/
1230 if (!th
->rst
&& !th
->syn
&& th
->ack
)
1231 sk
= cookie_v6_check(sk
, skb
, &(IPCB(skb
)->opt
));
1236 /* FIXME: this is substantially similar to the ipv4 code.
1237 * Can some kind of merge be done? -- erics
1239 static int tcp_v6_conn_request(struct sock
*sk
, struct sk_buff
*skb
)
1241 struct inet6_request_sock
*treq
;
1242 struct ipv6_pinfo
*np
= inet6_sk(sk
);
1243 struct tcp_options_received tmp_opt
;
1244 struct tcp_sock
*tp
= tcp_sk(sk
);
1245 struct request_sock
*req
= NULL
;
1246 __u32 isn
= TCP_SKB_CB(skb
)->when
;
1248 if (skb
->protocol
== htons(ETH_P_IP
))
1249 return tcp_v4_conn_request(sk
, skb
);
1251 if (!ipv6_unicast_destination(skb
))
1255 * There are no SYN attacks on IPv6, yet...
1257 if (inet_csk_reqsk_queue_is_full(sk
) && !isn
) {
1258 if (net_ratelimit())
1259 printk(KERN_INFO
"TCPv6: dropping request, synflood is possible\n");
1263 if (sk_acceptq_is_full(sk
) && inet_csk_reqsk_queue_young(sk
) > 1)
1266 req
= inet6_reqsk_alloc(&tcp6_request_sock_ops
);
1270 #ifdef CONFIG_TCP_MD5SIG
1271 tcp_rsk(req
)->af_specific
= &tcp_request_sock_ipv6_ops
;
1274 tcp_clear_options(&tmp_opt
);
1275 tmp_opt
.mss_clamp
= IPV6_MIN_MTU
- sizeof(struct tcphdr
) - sizeof(struct ipv6hdr
);
1276 tmp_opt
.user_mss
= tp
->rx_opt
.user_mss
;
1278 tcp_parse_options(skb
, &tmp_opt
, 0);
1280 tmp_opt
.tstamp_ok
= tmp_opt
.saw_tstamp
;
1281 tcp_openreq_init(req
, &tmp_opt
, skb
);
1283 treq
= inet6_rsk(req
);
1284 ipv6_addr_copy(&treq
->rmt_addr
, &ipv6_hdr(skb
)->saddr
);
1285 ipv6_addr_copy(&treq
->loc_addr
, &ipv6_hdr(skb
)->daddr
);
1286 TCP_ECN_create_request(req
, tcp_hdr(skb
));
1287 treq
->pktopts
= NULL
;
1288 if (ipv6_opt_accepted(sk
, skb
) ||
1289 np
->rxopt
.bits
.rxinfo
|| np
->rxopt
.bits
.rxoinfo
||
1290 np
->rxopt
.bits
.rxhlim
|| np
->rxopt
.bits
.rxohlim
) {
1291 atomic_inc(&skb
->users
);
1292 treq
->pktopts
= skb
;
1294 treq
->iif
= sk
->sk_bound_dev_if
;
1296 /* So that link locals have meaning */
1297 if (!sk
->sk_bound_dev_if
&&
1298 ipv6_addr_type(&treq
->rmt_addr
) & IPV6_ADDR_LINKLOCAL
)
1299 treq
->iif
= inet6_iif(skb
);
1302 isn
= tcp_v6_init_sequence(skb
);
1304 tcp_rsk(req
)->snt_isn
= isn
;
1306 security_inet_conn_request(sk
, skb
, req
);
1308 if (tcp_v6_send_synack(sk
, req
, NULL
))
1311 inet6_csk_reqsk_queue_hash_add(sk
, req
, TCP_TIMEOUT_INIT
);
1318 return 0; /* don't send reset */
1321 static struct sock
* tcp_v6_syn_recv_sock(struct sock
*sk
, struct sk_buff
*skb
,
1322 struct request_sock
*req
,
1323 struct dst_entry
*dst
)
1325 struct inet6_request_sock
*treq
= inet6_rsk(req
);
1326 struct ipv6_pinfo
*newnp
, *np
= inet6_sk(sk
);
1327 struct tcp6_sock
*newtcp6sk
;
1328 struct inet_sock
*newinet
;
1329 struct tcp_sock
*newtp
;
1331 struct ipv6_txoptions
*opt
;
1332 #ifdef CONFIG_TCP_MD5SIG
1333 struct tcp_md5sig_key
*key
;
1336 if (skb
->protocol
== htons(ETH_P_IP
)) {
1341 newsk
= tcp_v4_syn_recv_sock(sk
, skb
, req
, dst
);
1346 newtcp6sk
= (struct tcp6_sock
*)newsk
;
1347 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
1349 newinet
= inet_sk(newsk
);
1350 newnp
= inet6_sk(newsk
);
1351 newtp
= tcp_sk(newsk
);
1353 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
1355 ipv6_addr_set_v4mapped(newinet
->daddr
, &newnp
->daddr
);
1357 ipv6_addr_set_v4mapped(newinet
->saddr
, &newnp
->saddr
);
1359 ipv6_addr_copy(&newnp
->rcv_saddr
, &newnp
->saddr
);
1361 inet_csk(newsk
)->icsk_af_ops
= &ipv6_mapped
;
1362 newsk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
1363 #ifdef CONFIG_TCP_MD5SIG
1364 newtp
->af_specific
= &tcp_sock_ipv6_mapped_specific
;
1367 newnp
->pktoptions
= NULL
;
1369 newnp
->mcast_oif
= inet6_iif(skb
);
1370 newnp
->mcast_hops
= ipv6_hdr(skb
)->hop_limit
;
1373 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1374 * here, tcp_create_openreq_child now does this for us, see the comment in
1375 * that function for the gory details. -acme
1378 /* It is tricky place. Until this moment IPv4 tcp
1379 worked with IPv6 icsk.icsk_af_ops.
1382 tcp_sync_mss(newsk
, inet_csk(newsk
)->icsk_pmtu_cookie
);
1389 if (sk_acceptq_is_full(sk
))
1392 if (np
->rxopt
.bits
.osrcrt
== 2 &&
1393 opt
== NULL
&& treq
->pktopts
) {
1394 struct inet6_skb_parm
*rxopt
= IP6CB(treq
->pktopts
);
1396 opt
= ipv6_invert_rthdr(sk
,
1397 (struct ipv6_rt_hdr
*)(skb_network_header(treq
->pktopts
) +
1402 struct in6_addr
*final_p
= NULL
, final
;
1405 memset(&fl
, 0, sizeof(fl
));
1406 fl
.proto
= IPPROTO_TCP
;
1407 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
1408 if (opt
&& opt
->srcrt
) {
1409 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) opt
->srcrt
;
1410 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
1411 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
1414 ipv6_addr_copy(&fl
.fl6_src
, &treq
->loc_addr
);
1415 fl
.oif
= sk
->sk_bound_dev_if
;
1416 fl
.fl_ip_dport
= inet_rsk(req
)->rmt_port
;
1417 fl
.fl_ip_sport
= inet_sk(sk
)->sport
;
1418 security_req_classify_flow(req
, &fl
);
1420 if (ip6_dst_lookup(sk
, &dst
, &fl
))
1424 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
1426 if ((xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
1430 newsk
= tcp_create_openreq_child(sk
, req
, skb
);
1435 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1436 * count here, tcp_create_openreq_child now does this for us, see the
1437 * comment in that function for the gory details. -acme
1440 newsk
->sk_gso_type
= SKB_GSO_TCPV6
;
1441 __ip6_dst_store(newsk
, dst
, NULL
, NULL
);
1443 newtcp6sk
= (struct tcp6_sock
*)newsk
;
1444 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
1446 newtp
= tcp_sk(newsk
);
1447 newinet
= inet_sk(newsk
);
1448 newnp
= inet6_sk(newsk
);
1450 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
1452 ipv6_addr_copy(&newnp
->daddr
, &treq
->rmt_addr
);
1453 ipv6_addr_copy(&newnp
->saddr
, &treq
->loc_addr
);
1454 ipv6_addr_copy(&newnp
->rcv_saddr
, &treq
->loc_addr
);
1455 newsk
->sk_bound_dev_if
= treq
->iif
;
1457 /* Now IPv6 options...
1459 First: no IPv4 options.
1461 newinet
->opt
= NULL
;
1462 newnp
->ipv6_fl_list
= NULL
;
1465 newnp
->rxopt
.all
= np
->rxopt
.all
;
1467 /* Clone pktoptions received with SYN */
1468 newnp
->pktoptions
= NULL
;
1469 if (treq
->pktopts
!= NULL
) {
1470 newnp
->pktoptions
= skb_clone(treq
->pktopts
, GFP_ATOMIC
);
1471 kfree_skb(treq
->pktopts
);
1472 treq
->pktopts
= NULL
;
1473 if (newnp
->pktoptions
)
1474 skb_set_owner_r(newnp
->pktoptions
, newsk
);
1477 newnp
->mcast_oif
= inet6_iif(skb
);
1478 newnp
->mcast_hops
= ipv6_hdr(skb
)->hop_limit
;
1480 /* Clone native IPv6 options from listening socket (if any)
1482 Yes, keeping reference count would be much more clever,
1483 but we make one more one thing there: reattach optmem
1487 newnp
->opt
= ipv6_dup_options(newsk
, opt
);
1489 sock_kfree_s(sk
, opt
, opt
->tot_len
);
1492 inet_csk(newsk
)->icsk_ext_hdr_len
= 0;
1494 inet_csk(newsk
)->icsk_ext_hdr_len
= (newnp
->opt
->opt_nflen
+
1495 newnp
->opt
->opt_flen
);
1497 tcp_mtup_init(newsk
);
1498 tcp_sync_mss(newsk
, dst_mtu(dst
));
1499 newtp
->advmss
= dst_metric(dst
, RTAX_ADVMSS
);
1500 tcp_initialize_rcv_mss(newsk
);
1502 newinet
->daddr
= newinet
->saddr
= newinet
->rcv_saddr
= LOOPBACK4_IPV6
;
1504 #ifdef CONFIG_TCP_MD5SIG
1505 /* Copy over the MD5 key from the original socket */
1506 if ((key
= tcp_v6_md5_do_lookup(sk
, &newnp
->daddr
)) != NULL
) {
1507 /* We're using one, so create a matching key
1508 * on the newsk structure. If we fail to get
1509 * memory, then we end up not copying the key
1512 char *newkey
= kmemdup(key
->key
, key
->keylen
, GFP_ATOMIC
);
1514 tcp_v6_md5_do_add(newsk
, &inet6_sk(sk
)->daddr
,
1515 newkey
, key
->keylen
);
1519 __inet6_hash(&tcp_hashinfo
, newsk
);
1520 inet_inherit_port(&tcp_hashinfo
, sk
, newsk
);
1525 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS
);
1527 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS
);
1528 if (opt
&& opt
!= np
->opt
)
1529 sock_kfree_s(sk
, opt
, opt
->tot_len
);
/*
 * tcp_v6_checksum_init() - validate/prepare the TCP checksum on an
 * incoming IPv6 segment.  With CHECKSUM_COMPLETE hardware csum, fold
 * in the pseudo-header and mark the skb CHECKSUM_UNNECESSARY on
 * success; otherwise seed skb->csum with the pseudo-header sum and,
 * for short packets (<= 76 bytes), verify in software immediately.
 * (Intervening lines are missing from this excerpt.)
 */
1534 static __sum16
tcp_v6_checksum_init(struct sk_buff
*skb
)
1536 if (skb
->ip_summed
== CHECKSUM_COMPLETE
) {
1537 if (!tcp_v6_check(tcp_hdr(skb
), skb
->len
, &ipv6_hdr(skb
)->saddr
,
1538 &ipv6_hdr(skb
)->daddr
, skb
->csum
)) {
1539 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
/* Hardware csum absent or failed: precompute the inverted
 * pseudo-header checksum for later completion. */
1544 skb
->csum
= ~csum_unfold(tcp_v6_check(tcp_hdr(skb
), skb
->len
,
1545 &ipv6_hdr(skb
)->saddr
,
1546 &ipv6_hdr(skb
)->daddr
, 0));
/* Cheap enough to verify small packets right away. */
1548 if (skb
->len
<= 76) {
1549 return __skb_checksum_complete(skb
);
/*
 * tcp_v6_do_rcv() - per-socket receive path (socket spinlock held by
 * caller).  Dispatches v4-mapped traffic to tcp_v4_do_rcv(), runs the
 * established fast path or the state machine, and implements Stevens'
 * IPV6_PKTOPTIONS latching via the opt_skb clone.
 * (This excerpt is missing some original lines: labels such as
 * reset/discard/csum_err and several braces are not visible.)
 */
1554 /* The socket must have it's spinlock held when we get
1557 * We have a potential double-lock case here, so even when
1558 * doing backlog processing we use the BH locking scheme.
1559 * This is because we cannot sleep with the original spinlock
1562 static int tcp_v6_do_rcv(struct sock
*sk
, struct sk_buff
*skb
)
1564 struct ipv6_pinfo
*np
= inet6_sk(sk
);
1565 struct tcp_sock
*tp
;
1566 struct sk_buff
*opt_skb
= NULL
;
1568 /* Imagine: socket is IPv6. IPv4 packet arrives,
1569 goes to IPv4 receive handler and backlogged.
1570 From backlog it always goes here. Kerboom...
1571 Fortunately, tcp_rcv_established and rcv_established
1572 handle them correctly, but it is not case with
1573 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1576 if (skb
->protocol
== htons(ETH_P_IP
))
1577 return tcp_v4_do_rcv(sk
, skb
);
1579 #ifdef CONFIG_TCP_MD5SIG
/* Drop segments failing the TCP-MD5 signature check. */
1580 if (tcp_v6_inbound_md5_hash (sk
, skb
))
1584 if (sk_filter(sk
, skb
))
1588 * socket locking is here for SMP purposes as backlog rcv
1589 * is currently called with bh processing disabled.
1592 /* Do Stevens' IPV6_PKTOPTIONS.
1594 Yes, guys, it is the only place in our code, where we
1595 may make it not affecting IPv4.
1596 The rest of code is protocol independent,
1597 and I do not like idea to uglify IPv4.
1599 Actually, all the idea behind IPV6_PKTOPTIONS
1600 looks not very well thought. For now we latch
1601 options, received in the last packet, enqueued
1602 by tcp. Feel free to propose better solution.
1606 opt_skb
= skb_clone(skb
, GFP_ATOMIC
);
1608 if (sk
->sk_state
== TCP_ESTABLISHED
) { /* Fast path */
1609 TCP_CHECK_TIMER(sk
);
1610 if (tcp_rcv_established(sk
, skb
, tcp_hdr(skb
), skb
->len
))
1612 TCP_CHECK_TIMER(sk
);
1614 goto ipv6_pktoptions
;
/* Slow path: sanity-check length/checksum before the state
 * machine (error label not visible here). */
1618 if (skb
->len
< tcp_hdrlen(skb
) || tcp_checksum_complete(skb
))
1621 if (sk
->sk_state
== TCP_LISTEN
) {
1622 struct sock
*nsk
= tcp_v6_hnd_req(sk
, skb
);
1627 * Queue it on the new socket if the new socket is active,
1628 * otherwise we just shortcircuit this and continue with
1632 if (tcp_child_process(sk
, nsk
, skb
))
1635 __kfree_skb(opt_skb
);
1640 TCP_CHECK_TIMER(sk
);
1641 if (tcp_rcv_state_process(sk
, skb
, tcp_hdr(skb
), skb
->len
))
1643 TCP_CHECK_TIMER(sk
);
1645 goto ipv6_pktoptions
;
/* reset/discard/csum_err tail (labels missing from excerpt). */
1649 tcp_v6_send_reset(sk
, skb
);
1652 __kfree_skb(opt_skb
);
1656 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1661 /* Do you ask, what is it?
1663 1. skb was enqueued by tcp.
1664 2. skb is added to tail of read queue, rather than out of order.
1665 3. socket is not in passive state.
1666 4. Finally, it really contains options, which user wants to receive.
1669 if (TCP_SKB_CB(opt_skb
)->end_seq
== tp
->rcv_nxt
&&
1670 !((1 << sk
->sk_state
) & (TCPF_CLOSE
| TCPF_LISTEN
))) {
1671 if (np
->rxopt
.bits
.rxinfo
|| np
->rxopt
.bits
.rxoinfo
)
1672 np
->mcast_oif
= inet6_iif(opt_skb
);
1673 if (np
->rxopt
.bits
.rxhlim
|| np
->rxopt
.bits
.rxohlim
)
1674 np
->mcast_hops
= ipv6_hdr(opt_skb
)->hop_limit
;
/* Latch the clone into np->pktoptions, freeing whatever was
 * latched before (xchg returns the previous skb). */
1675 if (ipv6_opt_accepted(sk
, opt_skb
)) {
1676 skb_set_owner_r(opt_skb
, sk
);
1677 opt_skb
= xchg(&np
->pktoptions
, opt_skb
);
1679 __kfree_skb(opt_skb
);
1680 opt_skb
= xchg(&np
->pktoptions
, NULL
);
/*
 * tcp_v6_rcv() - protocol entry point for incoming IPv6 TCP segments
 * (registered as tcpv6_protocol.handler).  Validates the header and
 * checksum, fills in TCP_SKB_CB, looks up the owning socket, and
 * hands the skb to the do_rcv path / prequeue / backlog depending on
 * lock ownership.  The tail handles no-socket and TIME_WAIT cases.
 * (Excerpt is missing lines: th/sk/ret declarations, several labels
 * - no_tcp_socket, discard_it, do_time_wait - and braces.)
 */
1689 static int tcp_v6_rcv(struct sk_buff
*skb
)
1695 if (skb
->pkt_type
!= PACKET_HOST
)
1699 * Count it even if it's bad.
1701 TCP_INC_STATS_BH(TCP_MIB_INSEGS
);
/* Header sanity: full tcphdr present, doff within bounds, and
 * enough linear data for the stated header length. */
1703 if (!pskb_may_pull(skb
, sizeof(struct tcphdr
)))
1708 if (th
->doff
< sizeof(struct tcphdr
)/4)
1710 if (!pskb_may_pull(skb
, th
->doff
*4))
1713 if (!skb_csum_unnecessary(skb
) && tcp_v6_checksum_init(skb
))
/* Populate the per-skb TCP control block from the header. */
1717 TCP_SKB_CB(skb
)->seq
= ntohl(th
->seq
);
1718 TCP_SKB_CB(skb
)->end_seq
= (TCP_SKB_CB(skb
)->seq
+ th
->syn
+ th
->fin
+
1719 skb
->len
- th
->doff
*4);
1720 TCP_SKB_CB(skb
)->ack_seq
= ntohl(th
->ack_seq
);
1721 TCP_SKB_CB(skb
)->when
= 0;
1722 TCP_SKB_CB(skb
)->flags
= ipv6_get_dsfield(ipv6_hdr(skb
));
1723 TCP_SKB_CB(skb
)->sacked
= 0;
1725 sk
= __inet6_lookup_skb(&tcp_hashinfo
, skb
, th
->source
, th
->dest
);
1730 if (sk
->sk_state
== TCP_TIME_WAIT
)
1733 if (!xfrm6_policy_check(sk
, XFRM_POLICY_IN
, skb
))
1734 goto discard_and_relse
;
1736 if (sk_filter(sk
, skb
))
1737 goto discard_and_relse
;
/* Deliver under the socket lock: directly if not owned by user
 * (via prequeue or do_rcv), else queue on the backlog. */
1741 bh_lock_sock_nested(sk
);
1743 if (!sock_owned_by_user(sk
)) {
1744 #ifdef CONFIG_NET_DMA
1745 struct tcp_sock
*tp
= tcp_sk(sk
);
1746 if (tp
->ucopy
.dma_chan
)
1747 ret
= tcp_v6_do_rcv(sk
, skb
);
1751 if (!tcp_prequeue(sk
, skb
))
1752 ret
= tcp_v6_do_rcv(sk
, skb
);
1755 sk_add_backlog(sk
, skb
);
1759 return ret
? -1 : 0;
/* no_tcp_socket path: policy-check, then RST bad-looking
 * segments that matched no socket. */
1762 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
1765 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1767 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1769 tcp_v6_send_reset(NULL
, skb
);
/* do_time_wait path. */
1786 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
)) {
1787 inet_twsk_put(inet_twsk(sk
));
1791 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1792 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1793 inet_twsk_put(inet_twsk(sk
));
1797 switch (tcp_timewait_state_process(inet_twsk(sk
), skb
, th
)) {
/* TCP_TW_SYN: a new SYN may revive the connection on a current
 * listener; drop the timewait sock and retry lookup. */
1802 sk2
= inet6_lookup_listener(&tcp_hashinfo
,
1803 &ipv6_hdr(skb
)->daddr
,
1804 ntohs(th
->dest
), inet6_iif(skb
));
1806 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
1807 inet_twsk_deschedule(tw
, &tcp_death_row
);
1812 /* Fall through to ACK */
1815 tcp_v6_timewait_ack(sk
, skb
);
1819 case TCP_TW_SUCCESS
:;
/*
 * tcp_v6_remember_stamp() - timestamp caching hook for the
 * icsk_af_ops table.  Intentionally a stub for IPv6 (no peer cache
 * yet); the `return 0` body is outside this mangled excerpt.
 */
1824 static int tcp_v6_remember_stamp(struct sock
*sk
)
1826 /* Alas, not yet... */
/*
 * AF-specific connection-socket operations for native IPv6 TCP
 * sockets; installed as icsk->icsk_af_ops in tcp_v6_init_sock().
 */
1830 static struct inet_connection_sock_af_ops ipv6_specific
= {
1831 .queue_xmit
= inet6_csk_xmit
,
1832 .send_check
= tcp_v6_send_check
,
1833 .rebuild_header
= inet6_sk_rebuild_header
,
1834 .conn_request
= tcp_v6_conn_request
,
1835 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1836 .remember_stamp
= tcp_v6_remember_stamp
,
1837 .net_header_len
= sizeof(struct ipv6hdr
),
1838 .setsockopt
= ipv6_setsockopt
,
1839 .getsockopt
= ipv6_getsockopt
,
1840 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1841 .sockaddr_len
= sizeof(struct sockaddr_in6
),
1842 #ifdef CONFIG_COMPAT
1843 .compat_setsockopt
= compat_ipv6_setsockopt
,
1844 .compat_getsockopt
= compat_ipv6_getsockopt
,
/* TCP-MD5 signature operations for native IPv6 sockets. */
1848 #ifdef CONFIG_TCP_MD5SIG
1849 static struct tcp_sock_af_ops tcp_sock_ipv6_specific
= {
1850 .md5_lookup
= tcp_v6_md5_lookup
,
1851 .calc_md5_hash
= tcp_v6_calc_md5_hash
,
1852 .md5_add
= tcp_v6_md5_add_func
,
1853 .md5_parse
= tcp_v6_parse_md5_keys
,
/*
 * AF ops for v4-mapped sockets (TCP over IPv4 carried on an INET6
 * socket): transmit/checksum/header handling go through the IPv4
 * routines while sockopts and sockaddr conversion stay IPv6.
 * Installed on the child in tcp_v6_syn_recv_sock()'s mapped path.
 */
1858 * TCP over IPv4 via INET6 API
1861 static struct inet_connection_sock_af_ops ipv6_mapped
= {
1862 .queue_xmit
= ip_queue_xmit
,
1863 .send_check
= tcp_v4_send_check
,
1864 .rebuild_header
= inet_sk_rebuild_header
,
1865 .conn_request
= tcp_v6_conn_request
,
1866 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1867 .remember_stamp
= tcp_v4_remember_stamp
,
1868 .net_header_len
= sizeof(struct iphdr
),
1869 .setsockopt
= ipv6_setsockopt
,
1870 .getsockopt
= ipv6_getsockopt
,
1871 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1872 .sockaddr_len
= sizeof(struct sockaddr_in6
),
1873 #ifdef CONFIG_COMPAT
1874 .compat_setsockopt
= compat_ipv6_setsockopt
,
1875 .compat_getsockopt
= compat_ipv6_getsockopt
,
/* TCP-MD5 ops for v4-mapped sockets: hash with the IPv4 routines,
 * but keep the IPv6 add/parse entry points. */
1879 #ifdef CONFIG_TCP_MD5SIG
1880 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific
= {
1881 .md5_lookup
= tcp_v4_md5_lookup
,
1882 .calc_md5_hash
= tcp_v4_calc_md5_hash
,
1883 .md5_add
= tcp_v6_md5_add_func
,
1884 .md5_parse
= tcp_v6_parse_md5_keys
,
/*
 * tcp_v6_init_sock() - .init hook of tcpv6_prot: initialize a fresh
 * IPv6 TCP socket (timers, queues, congestion defaults, AF ops).
 * (Some original lines - braces, a few initializers, the final
 * `return 0;` - are missing from this excerpt.)
 */
1888 /* NOTE: A lot of things set to zero explicitly by call to
1889 * sk_alloc() so need not be done here.
1891 static int tcp_v6_init_sock(struct sock
*sk
)
1893 struct inet_connection_sock
*icsk
= inet_csk(sk
);
1894 struct tcp_sock
*tp
= tcp_sk(sk
);
1896 skb_queue_head_init(&tp
->out_of_order_queue
);
1897 tcp_init_xmit_timers(sk
);
1898 tcp_prequeue_init(tp
);
1900 icsk
->icsk_rto
= TCP_TIMEOUT_INIT
;
1901 tp
->mdev
= TCP_TIMEOUT_INIT
;
1903 /* So many TCP implementations out there (incorrectly) count the
1904 * initial SYN frame in their delayed-ACK and congestion control
1905 * algorithms that we must have the following bandaid to talk
1906 * efficiently to them. -DaveM
1910 /* See draft-stevens-tcpca-spec-01 for discussion of the
1911 * initialization of these values.
1913 tp
->snd_ssthresh
= 0x7fffffff;
1914 tp
->snd_cwnd_clamp
= ~0;
1915 tp
->mss_cache
= 536;
1917 tp
->reordering
= sysctl_tcp_reordering
;
1919 sk
->sk_state
= TCP_CLOSE
;
/* Wire up the IPv6 AF ops and default congestion control. */
1921 icsk
->icsk_af_ops
= &ipv6_specific
;
1922 icsk
->icsk_ca_ops
= &tcp_init_congestion_ops
;
1923 icsk
->icsk_sync_mss
= tcp_sync_mss
;
1924 sk
->sk_write_space
= sk_stream_write_space
;
1925 sock_set_flag(sk
, SOCK_USE_WRITE_QUEUE
);
1927 #ifdef CONFIG_TCP_MD5SIG
1928 tp
->af_specific
= &tcp_sock_ipv6_specific
;
1931 sk
->sk_sndbuf
= sysctl_tcp_wmem
[1];
1932 sk
->sk_rcvbuf
= sysctl_tcp_rmem
[1];
1934 atomic_inc(&tcp_sockets_allocated
);
/*
 * tcp_v6_destroy_sock() - .destroy hook: release MD5 keys (if any),
 * run the shared IPv4 TCP teardown, then the generic inet6 teardown.
 */
1939 static int tcp_v6_destroy_sock(struct sock
*sk
)
1941 #ifdef CONFIG_TCP_MD5SIG
1942 /* Clean up the MD5 key list */
1943 if (tcp_sk(sk
)->md5sig_info
)
1944 tcp_v6_clear_md5_list(sk
);
1946 tcp_v4_destroy_sock(sk
);
1947 return inet6_destroy_sock(sk
);
/*
 * get_openreq6() - format one SYN_RECV open request as a
 * /proc/net/tcp6 row (addresses as raw %08X words, remaining
 * request lifetime as the timer field).  The seq_printf opening and
 * some argument lines are missing from this excerpt.
 */
1950 #ifdef CONFIG_PROC_FS
1951 /* Proc filesystem TCPv6 sock list dumping. */
1952 static void get_openreq6(struct seq_file
*seq
,
1953 struct sock
*sk
, struct request_sock
*req
, int i
, int uid
)
1955 int ttd
= req
->expires
- jiffies
;
1956 struct in6_addr
*src
= &inet6_rsk(req
)->loc_addr
;
1957 struct in6_addr
*dest
= &inet6_rsk(req
)->rmt_addr
;
1963 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1964 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1966 src
->s6_addr32
[0], src
->s6_addr32
[1],
1967 src
->s6_addr32
[2], src
->s6_addr32
[3],
1968 ntohs(inet_sk(sk
)->sport
),
1969 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
1970 dest
->s6_addr32
[2], dest
->s6_addr32
[3],
1971 ntohs(inet_rsk(req
)->rmt_port
),
1973 0,0, /* could print option size, but that is af dependent. */
1974 1, /* timers active (only the expire timer) */
1975 jiffies_to_clock_t(ttd
),
1978 0, /* non standard timer */
1979 0, /* open_requests have no inode */
/*
 * get_tcp6_sock() - format one full TCP socket (listening or
 * established) as a /proc/net/tcp6 row: addresses/ports, queue
 * depths, pending-timer kind and expiry, and congestion state.
 * (Excerpt is missing destp/srcp declarations, the timer_active
 * assignments, and some printf arguments.)
 */
1983 static void get_tcp6_sock(struct seq_file
*seq
, struct sock
*sp
, int i
)
1985 struct in6_addr
*dest
, *src
;
1988 unsigned long timer_expires
;
1989 struct inet_sock
*inet
= inet_sk(sp
);
1990 struct tcp_sock
*tp
= tcp_sk(sp
);
1991 const struct inet_connection_sock
*icsk
= inet_csk(sp
);
1992 struct ipv6_pinfo
*np
= inet6_sk(sp
);
1995 src
= &np
->rcv_saddr
;
1996 destp
= ntohs(inet
->dport
);
1997 srcp
= ntohs(inet
->sport
);
/* Pick which pending timer (retransmit, zero-window probe, or
 * keepalive sk_timer) to report; fall back to "now". */
1999 if (icsk
->icsk_pending
== ICSK_TIME_RETRANS
) {
2001 timer_expires
= icsk
->icsk_timeout
;
2002 } else if (icsk
->icsk_pending
== ICSK_TIME_PROBE0
) {
2004 timer_expires
= icsk
->icsk_timeout
;
2005 } else if (timer_pending(&sp
->sk_timer
)) {
2007 timer_expires
= sp
->sk_timer
.expires
;
2010 timer_expires
= jiffies
;
2014 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2015 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2017 src
->s6_addr32
[0], src
->s6_addr32
[1],
2018 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
2019 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
2020 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
2022 tp
->write_seq
-tp
->snd_una
,
/* rx_queue: accept backlog for listeners, unread bytes otherwise. */
2023 (sp
->sk_state
== TCP_LISTEN
) ? sp
->sk_ack_backlog
: (tp
->rcv_nxt
- tp
->copied_seq
),
2025 jiffies_to_clock_t(timer_expires
- jiffies
),
2026 icsk
->icsk_retransmits
,
2028 icsk
->icsk_probes_out
,
2030 atomic_read(&sp
->sk_refcnt
), sp
,
2033 (icsk
->icsk_ack
.quick
<< 1 ) | icsk
->icsk_ack
.pingpong
,
2034 tp
->snd_cwnd
, tp
->snd_ssthresh
>=0xFFFF?-1:tp
->snd_ssthresh
/*
 * get_timewait6_sock() - format one TIME_WAIT socket as a
 * /proc/net/tcp6 row; the timer field reports remaining timewait
 * lifetime (tw_ttd - jiffies).  destp/srcp declarations and the
 * seq_printf opening are missing from this excerpt.
 */
2038 static void get_timewait6_sock(struct seq_file
*seq
,
2039 struct inet_timewait_sock
*tw
, int i
)
2041 struct in6_addr
*dest
, *src
;
2043 struct inet6_timewait_sock
*tw6
= inet6_twsk((struct sock
*)tw
);
2044 int ttd
= tw
->tw_ttd
- jiffies
;
2049 dest
= &tw6
->tw_v6_daddr
;
2050 src
= &tw6
->tw_v6_rcv_saddr
;
2051 destp
= ntohs(tw
->tw_dport
);
2052 srcp
= ntohs(tw
->tw_sport
);
2055 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2056 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2058 src
->s6_addr32
[0], src
->s6_addr32
[1],
2059 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
2060 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
2061 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
2062 tw
->tw_substate
, 0, 0,
2063 3, jiffies_to_clock_t(ttd
), 0, 0, 0, 0,
2064 atomic_read(&tw
->tw_refcnt
), tw
);
/*
 * tcp6_seq_show() - seq_file .show callback for /proc/net/tcp6:
 * print the header row for SEQ_START_TOKEN, otherwise dispatch on
 * the iterator state to the matching row formatter.  (break/return
 * statements between cases are missing from this excerpt.)
 */
2067 static int tcp6_seq_show(struct seq_file
*seq
, void *v
)
2069 struct tcp_iter_state
*st
;
2071 if (v
== SEQ_START_TOKEN
) {
2076 "st tx_queue rx_queue tr tm->when retrnsmt"
2077 " uid timeout inode\n");
2082 switch (st
->state
) {
2083 case TCP_SEQ_STATE_LISTENING
:
2084 case TCP_SEQ_STATE_ESTABLISHED
:
2085 get_tcp6_sock(seq
, v
, st
->num
);
2087 case TCP_SEQ_STATE_OPENREQ
:
2088 get_openreq6(seq
, st
->syn_wait_sk
, v
, st
->num
, st
->uid
);
2090 case TCP_SEQ_STATE_TIME_WAIT
:
2091 get_timewait6_sock(seq
, v
, st
->num
);
/* seq_file glue describing the /proc/net/tcp6 dumper (name/family
 * initializers are outside this excerpt). */
2098 static struct tcp_seq_afinfo tcp6_seq_afinfo
= {
2099 .owner
= THIS_MODULE
,
2103 .show
= tcp6_seq_show
,
/* Register the /proc/net/tcp6 seq_file interface at boot. */
2107 int __init
tcp6_proc_init(void)
2109 return tcp_proc_register(&tcp6_seq_afinfo
);
/* Unregister the /proc/net/tcp6 seq_file interface. */
2112 void tcp6_proc_exit(void)
2114 tcp_proc_unregister(&tcp6_seq_afinfo
);
/*
 * The TCPv6 protocol descriptor: entry points and tuning knobs for
 * the socket layer.  Most handlers are the protocol-independent TCP
 * routines; only connect/init/destroy/backlog_rcv/hash/get_port are
 * IPv6-specific.  (.name/.close and a few other initializers fall
 * outside this excerpt.)
 */
2118 struct proto tcpv6_prot
= {
2120 .owner
= THIS_MODULE
,
2122 .connect
= tcp_v6_connect
,
2123 .disconnect
= tcp_disconnect
,
2124 .accept
= inet_csk_accept
,
2126 .init
= tcp_v6_init_sock
,
2127 .destroy
= tcp_v6_destroy_sock
,
2128 .shutdown
= tcp_shutdown
,
2129 .setsockopt
= tcp_setsockopt
,
2130 .getsockopt
= tcp_getsockopt
,
2131 .recvmsg
= tcp_recvmsg
,
2132 .backlog_rcv
= tcp_v6_do_rcv
,
2133 .hash
= tcp_v6_hash
,
2134 .unhash
= tcp_unhash
,
2135 .get_port
= tcp_v6_get_port
,
2136 .enter_memory_pressure
= tcp_enter_memory_pressure
,
2137 .sockets_allocated
= &tcp_sockets_allocated
,
2138 .memory_allocated
= &tcp_memory_allocated
,
2139 .memory_pressure
= &tcp_memory_pressure
,
2140 .orphan_count
= &tcp_orphan_count
,
2141 .sysctl_mem
= sysctl_tcp_mem
,
2142 .sysctl_wmem
= sysctl_tcp_wmem
,
2143 .sysctl_rmem
= sysctl_tcp_rmem
,
2144 .max_header
= MAX_TCP_HEADER
,
2145 .obj_size
= sizeof(struct tcp6_sock
),
2146 .twsk_prot
= &tcp6_timewait_sock_ops
,
2147 .rsk_prot
= &tcp6_request_sock_ops
,
2148 #ifdef CONFIG_COMPAT
2149 .compat_setsockopt
= compat_tcp_setsockopt
,
2150 .compat_getsockopt
= compat_tcp_getsockopt
,
/* IPv6 layer-4 protocol hook: input handler, ICMPv6 error handler
 * and GSO callbacks for TCP, registered in tcpv6_init(). */
2154 static struct inet6_protocol tcpv6_protocol
= {
2155 .handler
= tcp_v6_rcv
,
2156 .err_handler
= tcp_v6_err
,
2157 .gso_send_check
= tcp_v6_gso_send_check
,
2158 .gso_segment
= tcp_tso_segment
,
2159 .flags
= INET6_PROTO_NOPOLICY
|INET6_PROTO_FINAL
,
/* Socket-creation switch entry mapping SOCK_STREAM/IPPROTO_TCP on
 * AF_INET6 to tcpv6_prot and the inet6 stream ops. */
2162 static struct inet_protosw tcpv6_protosw
= {
2163 .type
= SOCK_STREAM
,
2164 .protocol
= IPPROTO_TCP
,
2165 .prot
= &tcpv6_prot
,
2166 .ops
= &inet6_stream_ops
,
2169 .flags
= INET_PROTOSW_PERMANENT
|
2173 void __init
tcpv6_init(void)
2175 /* register inet6 protocol */
2176 if (inet6_add_protocol(&tcpv6_protocol
, IPPROTO_TCP
) < 0)
2177 printk(KERN_ERR
"tcpv6_init: Could not register protocol\n");
2178 inet6_register_protosw(&tcpv6_protosw
);
2180 if (inet_csk_ctl_sock_create(&tcp6_socket
, PF_INET6
, SOCK_RAW
,
2182 panic("Failed to create the TCPv6 control socket.\n");