3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
26 #include <linux/module.h>
27 #include <linux/errno.h>
28 #include <linux/types.h>
29 #include <linux/socket.h>
30 #include <linux/sockios.h>
31 #include <linux/net.h>
32 #include <linux/jiffies.h>
34 #include <linux/in6.h>
35 #include <linux/netdevice.h>
36 #include <linux/init.h>
37 #include <linux/jhash.h>
38 #include <linux/ipsec.h>
39 #include <linux/times.h>
41 #include <linux/ipv6.h>
42 #include <linux/icmpv6.h>
43 #include <linux/random.h>
46 #include <net/ndisc.h>
47 #include <net/inet6_hashtables.h>
48 #include <net/inet6_connection_sock.h>
50 #include <net/transp_v6.h>
51 #include <net/addrconf.h>
52 #include <net/ip6_route.h>
53 #include <net/ip6_checksum.h>
54 #include <net/inet_ecn.h>
55 #include <net/protocol.h>
58 #include <net/dsfield.h>
59 #include <net/timewait_sock.h>
60 #include <net/netdma.h>
61 #include <net/inet_common.h>
63 #include <asm/uaccess.h>
65 #include <linux/proc_fs.h>
66 #include <linux/seq_file.h>
68 #include <linux/crypto.h>
69 #include <linux/scatterlist.h>
71 static void tcp_v6_send_reset(struct sock
*sk
, struct sk_buff
*skb
);
72 static void tcp_v6_reqsk_send_ack(struct sk_buff
*skb
, struct request_sock
*req
);
73 static void tcp_v6_send_check(struct sock
*sk
, int len
,
76 static int tcp_v6_do_rcv(struct sock
*sk
, struct sk_buff
*skb
);
78 static struct inet_connection_sock_af_ops ipv6_mapped
;
79 static struct inet_connection_sock_af_ops ipv6_specific
;
80 #ifdef CONFIG_TCP_MD5SIG
81 static struct tcp_sock_af_ops tcp_sock_ipv6_specific
;
82 static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific
;
85 static void tcp_v6_hash(struct sock
*sk
)
87 if (sk
->sk_state
!= TCP_CLOSE
) {
88 if (inet_csk(sk
)->icsk_af_ops
== &ipv6_mapped
) {
98 static __inline__ __sum16
tcp_v6_check(struct tcphdr
*th
, int len
,
99 struct in6_addr
*saddr
,
100 struct in6_addr
*daddr
,
103 return csum_ipv6_magic(saddr
, daddr
, len
, IPPROTO_TCP
, base
);
106 static __u32
tcp_v6_init_sequence(struct sk_buff
*skb
)
108 return secure_tcpv6_sequence_number(ipv6_hdr(skb
)->daddr
.s6_addr32
,
109 ipv6_hdr(skb
)->saddr
.s6_addr32
,
111 tcp_hdr(skb
)->source
);
114 static int tcp_v6_connect(struct sock
*sk
, struct sockaddr
*uaddr
,
117 struct sockaddr_in6
*usin
= (struct sockaddr_in6
*) uaddr
;
118 struct inet_sock
*inet
= inet_sk(sk
);
119 struct inet_connection_sock
*icsk
= inet_csk(sk
);
120 struct ipv6_pinfo
*np
= inet6_sk(sk
);
121 struct tcp_sock
*tp
= tcp_sk(sk
);
122 struct in6_addr
*saddr
= NULL
, *final_p
= NULL
, final
;
124 struct dst_entry
*dst
;
128 if (addr_len
< SIN6_LEN_RFC2133
)
131 if (usin
->sin6_family
!= AF_INET6
)
132 return(-EAFNOSUPPORT
);
134 memset(&fl
, 0, sizeof(fl
));
137 fl
.fl6_flowlabel
= usin
->sin6_flowinfo
&IPV6_FLOWINFO_MASK
;
138 IP6_ECN_flow_init(fl
.fl6_flowlabel
);
139 if (fl
.fl6_flowlabel
&IPV6_FLOWLABEL_MASK
) {
140 struct ip6_flowlabel
*flowlabel
;
141 flowlabel
= fl6_sock_lookup(sk
, fl
.fl6_flowlabel
);
142 if (flowlabel
== NULL
)
144 ipv6_addr_copy(&usin
->sin6_addr
, &flowlabel
->dst
);
145 fl6_sock_release(flowlabel
);
150 * connect() to INADDR_ANY means loopback (BSD'ism).
153 if(ipv6_addr_any(&usin
->sin6_addr
))
154 usin
->sin6_addr
.s6_addr
[15] = 0x1;
156 addr_type
= ipv6_addr_type(&usin
->sin6_addr
);
158 if(addr_type
& IPV6_ADDR_MULTICAST
)
161 if (addr_type
&IPV6_ADDR_LINKLOCAL
) {
162 if (addr_len
>= sizeof(struct sockaddr_in6
) &&
163 usin
->sin6_scope_id
) {
164 /* If interface is set while binding, indices
167 if (sk
->sk_bound_dev_if
&&
168 sk
->sk_bound_dev_if
!= usin
->sin6_scope_id
)
171 sk
->sk_bound_dev_if
= usin
->sin6_scope_id
;
174 /* Connect to link-local address requires an interface */
175 if (!sk
->sk_bound_dev_if
)
179 if (tp
->rx_opt
.ts_recent_stamp
&&
180 !ipv6_addr_equal(&np
->daddr
, &usin
->sin6_addr
)) {
181 tp
->rx_opt
.ts_recent
= 0;
182 tp
->rx_opt
.ts_recent_stamp
= 0;
186 ipv6_addr_copy(&np
->daddr
, &usin
->sin6_addr
);
187 np
->flow_label
= fl
.fl6_flowlabel
;
193 if (addr_type
== IPV6_ADDR_MAPPED
) {
194 u32 exthdrlen
= icsk
->icsk_ext_hdr_len
;
195 struct sockaddr_in sin
;
197 SOCK_DEBUG(sk
, "connect: ipv4 mapped\n");
199 if (__ipv6_only_sock(sk
))
202 sin
.sin_family
= AF_INET
;
203 sin
.sin_port
= usin
->sin6_port
;
204 sin
.sin_addr
.s_addr
= usin
->sin6_addr
.s6_addr32
[3];
206 icsk
->icsk_af_ops
= &ipv6_mapped
;
207 sk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
208 #ifdef CONFIG_TCP_MD5SIG
209 tp
->af_specific
= &tcp_sock_ipv6_mapped_specific
;
212 err
= tcp_v4_connect(sk
, (struct sockaddr
*)&sin
, sizeof(sin
));
215 icsk
->icsk_ext_hdr_len
= exthdrlen
;
216 icsk
->icsk_af_ops
= &ipv6_specific
;
217 sk
->sk_backlog_rcv
= tcp_v6_do_rcv
;
218 #ifdef CONFIG_TCP_MD5SIG
219 tp
->af_specific
= &tcp_sock_ipv6_specific
;
223 ipv6_addr_set(&np
->saddr
, 0, 0, htonl(0x0000FFFF),
225 ipv6_addr_set(&np
->rcv_saddr
, 0, 0, htonl(0x0000FFFF),
232 if (!ipv6_addr_any(&np
->rcv_saddr
))
233 saddr
= &np
->rcv_saddr
;
235 fl
.proto
= IPPROTO_TCP
;
236 ipv6_addr_copy(&fl
.fl6_dst
, &np
->daddr
);
237 ipv6_addr_copy(&fl
.fl6_src
,
238 (saddr
? saddr
: &np
->saddr
));
239 fl
.oif
= sk
->sk_bound_dev_if
;
240 fl
.fl_ip_dport
= usin
->sin6_port
;
241 fl
.fl_ip_sport
= inet
->sport
;
243 if (np
->opt
&& np
->opt
->srcrt
) {
244 struct rt0_hdr
*rt0
= (struct rt0_hdr
*)np
->opt
->srcrt
;
245 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
246 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
250 security_sk_classify_flow(sk
, &fl
);
252 err
= ip6_dst_lookup(sk
, &dst
, &fl
);
256 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
258 if ((err
= __xfrm_lookup(&dst
, &fl
, sk
, XFRM_LOOKUP_WAIT
)) < 0) {
260 err
= ip6_dst_blackhole(sk
, &dst
, &fl
);
267 ipv6_addr_copy(&np
->rcv_saddr
, saddr
);
270 /* set the source address */
271 ipv6_addr_copy(&np
->saddr
, saddr
);
272 inet
->rcv_saddr
= LOOPBACK4_IPV6
;
274 sk
->sk_gso_type
= SKB_GSO_TCPV6
;
275 __ip6_dst_store(sk
, dst
, NULL
, NULL
);
277 icsk
->icsk_ext_hdr_len
= 0;
279 icsk
->icsk_ext_hdr_len
= (np
->opt
->opt_flen
+
282 tp
->rx_opt
.mss_clamp
= IPV6_MIN_MTU
- sizeof(struct tcphdr
) - sizeof(struct ipv6hdr
);
284 inet
->dport
= usin
->sin6_port
;
286 tcp_set_state(sk
, TCP_SYN_SENT
);
287 err
= inet6_hash_connect(&tcp_death_row
, sk
);
292 tp
->write_seq
= secure_tcpv6_sequence_number(np
->saddr
.s6_addr32
,
297 err
= tcp_connect(sk
);
304 tcp_set_state(sk
, TCP_CLOSE
);
308 sk
->sk_route_caps
= 0;
312 static void tcp_v6_err(struct sk_buff
*skb
, struct inet6_skb_parm
*opt
,
313 int type
, int code
, int offset
, __be32 info
)
315 struct ipv6hdr
*hdr
= (struct ipv6hdr
*)skb
->data
;
316 const struct tcphdr
*th
= (struct tcphdr
*)(skb
->data
+offset
);
317 struct ipv6_pinfo
*np
;
323 sk
= inet6_lookup(dev_net(skb
->dev
), &tcp_hashinfo
, &hdr
->daddr
,
324 th
->dest
, &hdr
->saddr
, th
->source
, skb
->dev
->ifindex
);
327 ICMP6_INC_STATS_BH(__in6_dev_get(skb
->dev
), ICMP6_MIB_INERRORS
);
331 if (sk
->sk_state
== TCP_TIME_WAIT
) {
332 inet_twsk_put(inet_twsk(sk
));
337 if (sock_owned_by_user(sk
))
338 NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS
);
340 if (sk
->sk_state
== TCP_CLOSE
)
344 seq
= ntohl(th
->seq
);
345 if (sk
->sk_state
!= TCP_LISTEN
&&
346 !between(seq
, tp
->snd_una
, tp
->snd_nxt
)) {
347 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS
);
353 if (type
== ICMPV6_PKT_TOOBIG
) {
354 struct dst_entry
*dst
= NULL
;
356 if (sock_owned_by_user(sk
))
358 if ((1 << sk
->sk_state
) & (TCPF_LISTEN
| TCPF_CLOSE
))
361 /* icmp should have updated the destination cache entry */
362 dst
= __sk_dst_check(sk
, np
->dst_cookie
);
365 struct inet_sock
*inet
= inet_sk(sk
);
368 /* BUGGG_FUTURE: Again, it is not clear how
369 to handle rthdr case. Ignore this complexity
372 memset(&fl
, 0, sizeof(fl
));
373 fl
.proto
= IPPROTO_TCP
;
374 ipv6_addr_copy(&fl
.fl6_dst
, &np
->daddr
);
375 ipv6_addr_copy(&fl
.fl6_src
, &np
->saddr
);
376 fl
.oif
= sk
->sk_bound_dev_if
;
377 fl
.fl_ip_dport
= inet
->dport
;
378 fl
.fl_ip_sport
= inet
->sport
;
379 security_skb_classify_flow(skb
, &fl
);
381 if ((err
= ip6_dst_lookup(sk
, &dst
, &fl
))) {
382 sk
->sk_err_soft
= -err
;
386 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0) {
387 sk
->sk_err_soft
= -err
;
394 if (inet_csk(sk
)->icsk_pmtu_cookie
> dst_mtu(dst
)) {
395 tcp_sync_mss(sk
, dst_mtu(dst
));
396 tcp_simple_retransmit(sk
);
397 } /* else let the usual retransmit timer handle it */
402 icmpv6_err_convert(type
, code
, &err
);
404 /* Might be for an request_sock */
405 switch (sk
->sk_state
) {
406 struct request_sock
*req
, **prev
;
408 if (sock_owned_by_user(sk
))
411 req
= inet6_csk_search_req(sk
, &prev
, th
->dest
, &hdr
->daddr
,
412 &hdr
->saddr
, inet6_iif(skb
));
416 /* ICMPs are not backlogged, hence we cannot get
417 * an established socket here.
419 BUG_TRAP(req
->sk
== NULL
);
421 if (seq
!= tcp_rsk(req
)->snt_isn
) {
422 NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS
);
426 inet_csk_reqsk_queue_drop(sk
, req
, prev
);
430 case TCP_SYN_RECV
: /* Cannot happen.
431 It can, it SYNs are crossed. --ANK */
432 if (!sock_owned_by_user(sk
)) {
434 sk
->sk_error_report(sk
); /* Wake people up to see the error (see connect in sock.c) */
438 sk
->sk_err_soft
= err
;
442 if (!sock_owned_by_user(sk
) && np
->recverr
) {
444 sk
->sk_error_report(sk
);
446 sk
->sk_err_soft
= err
;
454 static int tcp_v6_send_synack(struct sock
*sk
, struct request_sock
*req
)
456 struct inet6_request_sock
*treq
= inet6_rsk(req
);
457 struct ipv6_pinfo
*np
= inet6_sk(sk
);
458 struct sk_buff
* skb
;
459 struct ipv6_txoptions
*opt
= NULL
;
460 struct in6_addr
* final_p
= NULL
, final
;
462 struct dst_entry
*dst
;
465 memset(&fl
, 0, sizeof(fl
));
466 fl
.proto
= IPPROTO_TCP
;
467 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
468 ipv6_addr_copy(&fl
.fl6_src
, &treq
->loc_addr
);
469 fl
.fl6_flowlabel
= 0;
471 fl
.fl_ip_dport
= inet_rsk(req
)->rmt_port
;
472 fl
.fl_ip_sport
= inet_sk(sk
)->sport
;
473 security_req_classify_flow(req
, &fl
);
476 if (opt
&& opt
->srcrt
) {
477 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) opt
->srcrt
;
478 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
479 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
483 err
= ip6_dst_lookup(sk
, &dst
, &fl
);
487 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
488 if ((err
= xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
491 skb
= tcp_make_synack(sk
, dst
, req
);
493 struct tcphdr
*th
= tcp_hdr(skb
);
495 th
->check
= tcp_v6_check(th
, skb
->len
,
496 &treq
->loc_addr
, &treq
->rmt_addr
,
497 csum_partial((char *)th
, skb
->len
, skb
->csum
));
499 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
500 err
= ip6_xmit(sk
, skb
, &fl
, opt
, 0);
501 err
= net_xmit_eval(err
);
505 if (opt
&& opt
!= np
->opt
)
506 sock_kfree_s(sk
, opt
, opt
->tot_len
);
511 static inline void syn_flood_warning(struct sk_buff
*skb
)
513 #ifdef CONFIG_SYN_COOKIES
514 if (sysctl_tcp_syncookies
)
516 "TCPv6: Possible SYN flooding on port %d. "
517 "Sending cookies.\n", ntohs(tcp_hdr(skb
)->dest
));
521 "TCPv6: Possible SYN flooding on port %d. "
522 "Dropping request.\n", ntohs(tcp_hdr(skb
)->dest
));
525 static void tcp_v6_reqsk_destructor(struct request_sock
*req
)
527 if (inet6_rsk(req
)->pktopts
)
528 kfree_skb(inet6_rsk(req
)->pktopts
);
531 #ifdef CONFIG_TCP_MD5SIG
532 static struct tcp_md5sig_key
*tcp_v6_md5_do_lookup(struct sock
*sk
,
533 struct in6_addr
*addr
)
535 struct tcp_sock
*tp
= tcp_sk(sk
);
540 if (!tp
->md5sig_info
|| !tp
->md5sig_info
->entries6
)
543 for (i
= 0; i
< tp
->md5sig_info
->entries6
; i
++) {
544 if (ipv6_addr_equal(&tp
->md5sig_info
->keys6
[i
].addr
, addr
))
545 return &tp
->md5sig_info
->keys6
[i
].base
;
550 static struct tcp_md5sig_key
*tcp_v6_md5_lookup(struct sock
*sk
,
551 struct sock
*addr_sk
)
553 return tcp_v6_md5_do_lookup(sk
, &inet6_sk(addr_sk
)->daddr
);
556 static struct tcp_md5sig_key
*tcp_v6_reqsk_md5_lookup(struct sock
*sk
,
557 struct request_sock
*req
)
559 return tcp_v6_md5_do_lookup(sk
, &inet6_rsk(req
)->rmt_addr
);
562 static int tcp_v6_md5_do_add(struct sock
*sk
, struct in6_addr
*peer
,
563 char *newkey
, u8 newkeylen
)
565 /* Add key to the list */
566 struct tcp_md5sig_key
*key
;
567 struct tcp_sock
*tp
= tcp_sk(sk
);
568 struct tcp6_md5sig_key
*keys
;
570 key
= tcp_v6_md5_do_lookup(sk
, peer
);
572 /* modify existing entry - just update that one */
575 key
->keylen
= newkeylen
;
577 /* reallocate new list if current one is full. */
578 if (!tp
->md5sig_info
) {
579 tp
->md5sig_info
= kzalloc(sizeof(*tp
->md5sig_info
), GFP_ATOMIC
);
580 if (!tp
->md5sig_info
) {
584 sk
->sk_route_caps
&= ~NETIF_F_GSO_MASK
;
586 if (tcp_alloc_md5sig_pool() == NULL
) {
590 if (tp
->md5sig_info
->alloced6
== tp
->md5sig_info
->entries6
) {
591 keys
= kmalloc((sizeof (tp
->md5sig_info
->keys6
[0]) *
592 (tp
->md5sig_info
->entries6
+ 1)), GFP_ATOMIC
);
595 tcp_free_md5sig_pool();
600 if (tp
->md5sig_info
->entries6
)
601 memmove(keys
, tp
->md5sig_info
->keys6
,
602 (sizeof (tp
->md5sig_info
->keys6
[0]) *
603 tp
->md5sig_info
->entries6
));
605 kfree(tp
->md5sig_info
->keys6
);
606 tp
->md5sig_info
->keys6
= keys
;
607 tp
->md5sig_info
->alloced6
++;
610 ipv6_addr_copy(&tp
->md5sig_info
->keys6
[tp
->md5sig_info
->entries6
].addr
,
612 tp
->md5sig_info
->keys6
[tp
->md5sig_info
->entries6
].base
.key
= newkey
;
613 tp
->md5sig_info
->keys6
[tp
->md5sig_info
->entries6
].base
.keylen
= newkeylen
;
615 tp
->md5sig_info
->entries6
++;
620 static int tcp_v6_md5_add_func(struct sock
*sk
, struct sock
*addr_sk
,
621 u8
*newkey
, __u8 newkeylen
)
623 return tcp_v6_md5_do_add(sk
, &inet6_sk(addr_sk
)->daddr
,
627 static int tcp_v6_md5_do_del(struct sock
*sk
, struct in6_addr
*peer
)
629 struct tcp_sock
*tp
= tcp_sk(sk
);
632 for (i
= 0; i
< tp
->md5sig_info
->entries6
; i
++) {
633 if (ipv6_addr_equal(&tp
->md5sig_info
->keys6
[i
].addr
, peer
)) {
635 kfree(tp
->md5sig_info
->keys6
[i
].base
.key
);
636 tp
->md5sig_info
->entries6
--;
638 if (tp
->md5sig_info
->entries6
== 0) {
639 kfree(tp
->md5sig_info
->keys6
);
640 tp
->md5sig_info
->keys6
= NULL
;
641 tp
->md5sig_info
->alloced6
= 0;
643 /* shrink the database */
644 if (tp
->md5sig_info
->entries6
!= i
)
645 memmove(&tp
->md5sig_info
->keys6
[i
],
646 &tp
->md5sig_info
->keys6
[i
+1],
647 (tp
->md5sig_info
->entries6
- i
)
648 * sizeof (tp
->md5sig_info
->keys6
[0]));
650 tcp_free_md5sig_pool();
657 static void tcp_v6_clear_md5_list (struct sock
*sk
)
659 struct tcp_sock
*tp
= tcp_sk(sk
);
662 if (tp
->md5sig_info
->entries6
) {
663 for (i
= 0; i
< tp
->md5sig_info
->entries6
; i
++)
664 kfree(tp
->md5sig_info
->keys6
[i
].base
.key
);
665 tp
->md5sig_info
->entries6
= 0;
666 tcp_free_md5sig_pool();
669 kfree(tp
->md5sig_info
->keys6
);
670 tp
->md5sig_info
->keys6
= NULL
;
671 tp
->md5sig_info
->alloced6
= 0;
673 if (tp
->md5sig_info
->entries4
) {
674 for (i
= 0; i
< tp
->md5sig_info
->entries4
; i
++)
675 kfree(tp
->md5sig_info
->keys4
[i
].base
.key
);
676 tp
->md5sig_info
->entries4
= 0;
677 tcp_free_md5sig_pool();
680 kfree(tp
->md5sig_info
->keys4
);
681 tp
->md5sig_info
->keys4
= NULL
;
682 tp
->md5sig_info
->alloced4
= 0;
685 static int tcp_v6_parse_md5_keys (struct sock
*sk
, char __user
*optval
,
688 struct tcp_md5sig cmd
;
689 struct sockaddr_in6
*sin6
= (struct sockaddr_in6
*)&cmd
.tcpm_addr
;
692 if (optlen
< sizeof(cmd
))
695 if (copy_from_user(&cmd
, optval
, sizeof(cmd
)))
698 if (sin6
->sin6_family
!= AF_INET6
)
701 if (!cmd
.tcpm_keylen
) {
702 if (!tcp_sk(sk
)->md5sig_info
)
704 if (ipv6_addr_v4mapped(&sin6
->sin6_addr
))
705 return tcp_v4_md5_do_del(sk
, sin6
->sin6_addr
.s6_addr32
[3]);
706 return tcp_v6_md5_do_del(sk
, &sin6
->sin6_addr
);
709 if (cmd
.tcpm_keylen
> TCP_MD5SIG_MAXKEYLEN
)
712 if (!tcp_sk(sk
)->md5sig_info
) {
713 struct tcp_sock
*tp
= tcp_sk(sk
);
714 struct tcp_md5sig_info
*p
;
716 p
= kzalloc(sizeof(struct tcp_md5sig_info
), GFP_KERNEL
);
721 sk
->sk_route_caps
&= ~NETIF_F_GSO_MASK
;
724 newkey
= kmemdup(cmd
.tcpm_key
, cmd
.tcpm_keylen
, GFP_KERNEL
);
727 if (ipv6_addr_v4mapped(&sin6
->sin6_addr
)) {
728 return tcp_v4_md5_do_add(sk
, sin6
->sin6_addr
.s6_addr32
[3],
729 newkey
, cmd
.tcpm_keylen
);
731 return tcp_v6_md5_do_add(sk
, &sin6
->sin6_addr
, newkey
, cmd
.tcpm_keylen
);
734 static int tcp_v6_do_calc_md5_hash(char *md5_hash
, struct tcp_md5sig_key
*key
,
735 struct in6_addr
*saddr
,
736 struct in6_addr
*daddr
,
737 struct tcphdr
*th
, int protocol
,
740 struct scatterlist sg
[4];
744 struct tcp_md5sig_pool
*hp
;
745 struct tcp6_pseudohdr
*bp
;
746 struct hash_desc
*desc
;
748 unsigned int nbytes
= 0;
750 hp
= tcp_get_md5sig_pool();
752 printk(KERN_WARNING
"%s(): hash pool not found...\n", __func__
);
753 goto clear_hash_noput
;
755 bp
= &hp
->md5_blk
.ip6
;
756 desc
= &hp
->md5_desc
;
758 /* 1. TCP pseudo-header (RFC2460) */
759 ipv6_addr_copy(&bp
->saddr
, saddr
);
760 ipv6_addr_copy(&bp
->daddr
, daddr
);
761 bp
->len
= htonl(tcplen
);
762 bp
->protocol
= htonl(protocol
);
764 sg_init_table(sg
, 4);
766 sg_set_buf(&sg
[block
++], bp
, sizeof(*bp
));
767 nbytes
+= sizeof(*bp
);
769 /* 2. TCP header, excluding options */
772 sg_set_buf(&sg
[block
++], th
, sizeof(*th
));
773 nbytes
+= sizeof(*th
);
775 /* 3. TCP segment data (if any) */
776 data_len
= tcplen
- (th
->doff
<< 2);
778 u8
*data
= (u8
*)th
+ (th
->doff
<< 2);
779 sg_set_buf(&sg
[block
++], data
, data_len
);
784 sg_set_buf(&sg
[block
++], key
->key
, key
->keylen
);
785 nbytes
+= key
->keylen
;
787 sg_mark_end(&sg
[block
- 1]);
789 /* Now store the hash into the packet */
790 err
= crypto_hash_init(desc
);
792 printk(KERN_WARNING
"%s(): hash_init failed\n", __func__
);
795 err
= crypto_hash_update(desc
, sg
, nbytes
);
797 printk(KERN_WARNING
"%s(): hash_update failed\n", __func__
);
800 err
= crypto_hash_final(desc
, md5_hash
);
802 printk(KERN_WARNING
"%s(): hash_final failed\n", __func__
);
806 /* Reset header, and free up the crypto */
807 tcp_put_md5sig_pool();
812 tcp_put_md5sig_pool();
814 memset(md5_hash
, 0, 16);
818 static int tcp_v6_calc_md5_hash(char *md5_hash
, struct tcp_md5sig_key
*key
,
820 struct dst_entry
*dst
,
821 struct request_sock
*req
,
822 struct tcphdr
*th
, int protocol
,
825 struct in6_addr
*saddr
, *daddr
;
828 saddr
= &inet6_sk(sk
)->saddr
;
829 daddr
= &inet6_sk(sk
)->daddr
;
831 saddr
= &inet6_rsk(req
)->loc_addr
;
832 daddr
= &inet6_rsk(req
)->rmt_addr
;
834 return tcp_v6_do_calc_md5_hash(md5_hash
, key
,
836 th
, protocol
, tcplen
);
839 static int tcp_v6_inbound_md5_hash (struct sock
*sk
, struct sk_buff
*skb
)
841 __u8
*hash_location
= NULL
;
842 struct tcp_md5sig_key
*hash_expected
;
843 struct ipv6hdr
*ip6h
= ipv6_hdr(skb
);
844 struct tcphdr
*th
= tcp_hdr(skb
);
845 int length
= (th
->doff
<< 2) - sizeof (*th
);
850 hash_expected
= tcp_v6_md5_do_lookup(sk
, &ip6h
->saddr
);
852 /* If the TCP option is too short, we can short cut */
853 if (length
< TCPOLEN_MD5SIG
)
854 return hash_expected
? 1 : 0;
870 if (opsize
< 2 || opsize
> length
)
872 if (opcode
== TCPOPT_MD5SIG
) {
882 /* do we have a hash as expected? */
883 if (!hash_expected
) {
886 if (net_ratelimit()) {
887 printk(KERN_INFO
"MD5 Hash NOT expected but found "
888 "(" NIP6_FMT
", %u)->"
889 "(" NIP6_FMT
", %u)\n",
890 NIP6(ip6h
->saddr
), ntohs(th
->source
),
891 NIP6(ip6h
->daddr
), ntohs(th
->dest
));
896 if (!hash_location
) {
897 if (net_ratelimit()) {
898 printk(KERN_INFO
"MD5 Hash expected but NOT found "
899 "(" NIP6_FMT
", %u)->"
900 "(" NIP6_FMT
", %u)\n",
901 NIP6(ip6h
->saddr
), ntohs(th
->source
),
902 NIP6(ip6h
->daddr
), ntohs(th
->dest
));
907 /* check the signature */
908 genhash
= tcp_v6_do_calc_md5_hash(newhash
,
910 &ip6h
->saddr
, &ip6h
->daddr
,
913 if (genhash
|| memcmp(hash_location
, newhash
, 16) != 0) {
914 if (net_ratelimit()) {
915 printk(KERN_INFO
"MD5 Hash %s for "
916 "(" NIP6_FMT
", %u)->"
917 "(" NIP6_FMT
", %u)\n",
918 genhash
? "failed" : "mismatch",
919 NIP6(ip6h
->saddr
), ntohs(th
->source
),
920 NIP6(ip6h
->daddr
), ntohs(th
->dest
));
928 struct request_sock_ops tcp6_request_sock_ops __read_mostly
= {
930 .obj_size
= sizeof(struct tcp6_request_sock
),
931 .rtx_syn_ack
= tcp_v6_send_synack
,
932 .send_ack
= tcp_v6_reqsk_send_ack
,
933 .destructor
= tcp_v6_reqsk_destructor
,
934 .send_reset
= tcp_v6_send_reset
937 #ifdef CONFIG_TCP_MD5SIG
938 static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops
= {
939 .md5_lookup
= tcp_v6_reqsk_md5_lookup
,
943 static struct timewait_sock_ops tcp6_timewait_sock_ops
= {
944 .twsk_obj_size
= sizeof(struct tcp6_timewait_sock
),
945 .twsk_unique
= tcp_twsk_unique
,
946 .twsk_destructor
= tcp_twsk_destructor
,
949 static void tcp_v6_send_check(struct sock
*sk
, int len
, struct sk_buff
*skb
)
951 struct ipv6_pinfo
*np
= inet6_sk(sk
);
952 struct tcphdr
*th
= tcp_hdr(skb
);
954 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
955 th
->check
= ~csum_ipv6_magic(&np
->saddr
, &np
->daddr
, len
, IPPROTO_TCP
, 0);
956 skb
->csum_start
= skb_transport_header(skb
) - skb
->head
;
957 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
959 th
->check
= csum_ipv6_magic(&np
->saddr
, &np
->daddr
, len
, IPPROTO_TCP
,
960 csum_partial((char *)th
, th
->doff
<<2,
965 static int tcp_v6_gso_send_check(struct sk_buff
*skb
)
967 struct ipv6hdr
*ipv6h
;
970 if (!pskb_may_pull(skb
, sizeof(*th
)))
973 ipv6h
= ipv6_hdr(skb
);
977 th
->check
= ~csum_ipv6_magic(&ipv6h
->saddr
, &ipv6h
->daddr
, skb
->len
,
979 skb
->csum_start
= skb_transport_header(skb
) - skb
->head
;
980 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
981 skb
->ip_summed
= CHECKSUM_PARTIAL
;
985 static void tcp_v6_send_reset(struct sock
*sk
, struct sk_buff
*skb
)
987 struct tcphdr
*th
= tcp_hdr(skb
), *t1
;
988 struct sk_buff
*buff
;
990 struct net
*net
= dev_net(skb
->dst
->dev
);
991 struct sock
*ctl_sk
= net
->ipv6
.tcp_sk
;
992 unsigned int tot_len
= sizeof(*th
);
993 #ifdef CONFIG_TCP_MD5SIG
994 struct tcp_md5sig_key
*key
;
1000 if (!ipv6_unicast_destination(skb
))
1003 #ifdef CONFIG_TCP_MD5SIG
1005 key
= tcp_v6_md5_do_lookup(sk
, &ipv6_hdr(skb
)->daddr
);
1010 tot_len
+= TCPOLEN_MD5SIG_ALIGNED
;
1014 * We need to grab some memory, and put together an RST,
1015 * and then put it into the queue to be sent.
1018 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
,
1023 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
);
1025 t1
= (struct tcphdr
*) skb_push(buff
, tot_len
);
1027 /* Swap the send and the receive. */
1028 memset(t1
, 0, sizeof(*t1
));
1029 t1
->dest
= th
->source
;
1030 t1
->source
= th
->dest
;
1031 t1
->doff
= tot_len
/ 4;
1035 t1
->seq
= th
->ack_seq
;
1038 t1
->ack_seq
= htonl(ntohl(th
->seq
) + th
->syn
+ th
->fin
1039 + skb
->len
- (th
->doff
<<2));
1042 #ifdef CONFIG_TCP_MD5SIG
1044 __be32
*opt
= (__be32
*)(t1
+ 1);
1045 opt
[0] = htonl((TCPOPT_NOP
<< 24) |
1046 (TCPOPT_NOP
<< 16) |
1047 (TCPOPT_MD5SIG
<< 8) |
1049 tcp_v6_do_calc_md5_hash((__u8
*)&opt
[1], key
,
1050 &ipv6_hdr(skb
)->daddr
,
1051 &ipv6_hdr(skb
)->saddr
,
1052 t1
, IPPROTO_TCP
, tot_len
);
1056 buff
->csum
= csum_partial((char *)t1
, sizeof(*t1
), 0);
1058 memset(&fl
, 0, sizeof(fl
));
1059 ipv6_addr_copy(&fl
.fl6_dst
, &ipv6_hdr(skb
)->saddr
);
1060 ipv6_addr_copy(&fl
.fl6_src
, &ipv6_hdr(skb
)->daddr
);
1062 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
1063 sizeof(*t1
), IPPROTO_TCP
,
1066 fl
.proto
= IPPROTO_TCP
;
1067 fl
.oif
= inet6_iif(skb
);
1068 fl
.fl_ip_dport
= t1
->dest
;
1069 fl
.fl_ip_sport
= t1
->source
;
1070 security_skb_classify_flow(skb
, &fl
);
1072 /* Pass a socket to ip6_dst_lookup either it is for RST
1073 * Underlying function will use this to retrieve the network
1076 if (!ip6_dst_lookup(ctl_sk
, &buff
->dst
, &fl
)) {
1078 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
1079 ip6_xmit(ctl_sk
, buff
, &fl
, NULL
, 0);
1080 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS
);
1081 TCP_INC_STATS_BH(TCP_MIB_OUTRSTS
);
1089 static void tcp_v6_send_ack(struct tcp_timewait_sock
*tw
,
1090 struct sk_buff
*skb
, u32 seq
, u32 ack
, u32 win
, u32 ts
)
1092 struct tcphdr
*th
= tcp_hdr(skb
), *t1
;
1093 struct sk_buff
*buff
;
1095 struct net
*net
= dev_net(skb
->dev
);
1096 struct sock
*ctl_sk
= net
->ipv6
.tcp_sk
;
1097 unsigned int tot_len
= sizeof(struct tcphdr
);
1099 #ifdef CONFIG_TCP_MD5SIG
1100 struct tcp_md5sig_key
*key
;
1101 struct tcp_md5sig_key tw_key
;
1104 #ifdef CONFIG_TCP_MD5SIG
1105 if (!tw
&& skb
->sk
) {
1106 key
= tcp_v6_md5_do_lookup(skb
->sk
, &ipv6_hdr(skb
)->daddr
);
1107 } else if (tw
&& tw
->tw_md5_keylen
) {
1108 tw_key
.key
= tw
->tw_md5_key
;
1109 tw_key
.keylen
= tw
->tw_md5_keylen
;
1117 tot_len
+= TCPOLEN_TSTAMP_ALIGNED
;
1118 #ifdef CONFIG_TCP_MD5SIG
1120 tot_len
+= TCPOLEN_MD5SIG_ALIGNED
;
1123 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
,
1128 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
);
1130 t1
= (struct tcphdr
*) skb_push(buff
,tot_len
);
1132 /* Swap the send and the receive. */
1133 memset(t1
, 0, sizeof(*t1
));
1134 t1
->dest
= th
->source
;
1135 t1
->source
= th
->dest
;
1136 t1
->doff
= tot_len
/4;
1137 t1
->seq
= htonl(seq
);
1138 t1
->ack_seq
= htonl(ack
);
1140 t1
->window
= htons(win
);
1142 topt
= (__be32
*)(t1
+ 1);
1145 *topt
++ = htonl((TCPOPT_NOP
<< 24) | (TCPOPT_NOP
<< 16) |
1146 (TCPOPT_TIMESTAMP
<< 8) | TCPOLEN_TIMESTAMP
);
1147 *topt
++ = htonl(tcp_time_stamp
);
1151 #ifdef CONFIG_TCP_MD5SIG
1153 *topt
++ = htonl((TCPOPT_NOP
<< 24) | (TCPOPT_NOP
<< 16) |
1154 (TCPOPT_MD5SIG
<< 8) | TCPOLEN_MD5SIG
);
1155 tcp_v6_do_calc_md5_hash((__u8
*)topt
, key
,
1156 &ipv6_hdr(skb
)->daddr
,
1157 &ipv6_hdr(skb
)->saddr
,
1158 t1
, IPPROTO_TCP
, tot_len
);
1162 buff
->csum
= csum_partial((char *)t1
, tot_len
, 0);
1164 memset(&fl
, 0, sizeof(fl
));
1165 ipv6_addr_copy(&fl
.fl6_dst
, &ipv6_hdr(skb
)->saddr
);
1166 ipv6_addr_copy(&fl
.fl6_src
, &ipv6_hdr(skb
)->daddr
);
1168 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
1169 tot_len
, IPPROTO_TCP
,
1172 fl
.proto
= IPPROTO_TCP
;
1173 fl
.oif
= inet6_iif(skb
);
1174 fl
.fl_ip_dport
= t1
->dest
;
1175 fl
.fl_ip_sport
= t1
->source
;
1176 security_skb_classify_flow(skb
, &fl
);
1178 if (!ip6_dst_lookup(ctl_sk
, &buff
->dst
, &fl
)) {
1179 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
1180 ip6_xmit(ctl_sk
, buff
, &fl
, NULL
, 0);
1181 TCP_INC_STATS_BH(TCP_MIB_OUTSEGS
);
1189 static void tcp_v6_timewait_ack(struct sock
*sk
, struct sk_buff
*skb
)
1191 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
1192 struct tcp_timewait_sock
*tcptw
= tcp_twsk(sk
);
1194 tcp_v6_send_ack(tcptw
, skb
, tcptw
->tw_snd_nxt
, tcptw
->tw_rcv_nxt
,
1195 tcptw
->tw_rcv_wnd
>> tw
->tw_rcv_wscale
,
1196 tcptw
->tw_ts_recent
);
1201 static void tcp_v6_reqsk_send_ack(struct sk_buff
*skb
, struct request_sock
*req
)
1203 tcp_v6_send_ack(NULL
, skb
, tcp_rsk(req
)->snt_isn
+ 1, tcp_rsk(req
)->rcv_isn
+ 1, req
->rcv_wnd
, req
->ts_recent
);
1207 static struct sock
*tcp_v6_hnd_req(struct sock
*sk
,struct sk_buff
*skb
)
1209 struct request_sock
*req
, **prev
;
1210 const struct tcphdr
*th
= tcp_hdr(skb
);
1213 /* Find possible connection requests. */
1214 req
= inet6_csk_search_req(sk
, &prev
, th
->source
,
1215 &ipv6_hdr(skb
)->saddr
,
1216 &ipv6_hdr(skb
)->daddr
, inet6_iif(skb
));
1218 return tcp_check_req(sk
, skb
, req
, prev
);
1220 nsk
= __inet6_lookup_established(sock_net(sk
), &tcp_hashinfo
,
1221 &ipv6_hdr(skb
)->saddr
, th
->source
,
1222 &ipv6_hdr(skb
)->daddr
, ntohs(th
->dest
), inet6_iif(skb
));
1225 if (nsk
->sk_state
!= TCP_TIME_WAIT
) {
1229 inet_twsk_put(inet_twsk(nsk
));
1233 #ifdef CONFIG_SYN_COOKIES
1234 if (!th
->rst
&& !th
->syn
&& th
->ack
)
1235 sk
= cookie_v6_check(sk
, skb
);
1240 /* FIXME: this is substantially similar to the ipv4 code.
1241 * Can some kind of merge be done? -- erics
1243 static int tcp_v6_conn_request(struct sock
*sk
, struct sk_buff
*skb
)
1245 struct inet6_request_sock
*treq
;
1246 struct ipv6_pinfo
*np
= inet6_sk(sk
);
1247 struct tcp_options_received tmp_opt
;
1248 struct tcp_sock
*tp
= tcp_sk(sk
);
1249 struct request_sock
*req
= NULL
;
1250 __u32 isn
= TCP_SKB_CB(skb
)->when
;
1251 #ifdef CONFIG_SYN_COOKIES
1252 int want_cookie
= 0;
1254 #define want_cookie 0
1257 if (skb
->protocol
== htons(ETH_P_IP
))
1258 return tcp_v4_conn_request(sk
, skb
);
1260 if (!ipv6_unicast_destination(skb
))
1263 if (inet_csk_reqsk_queue_is_full(sk
) && !isn
) {
1264 if (net_ratelimit())
1265 syn_flood_warning(skb
);
1266 #ifdef CONFIG_SYN_COOKIES
1267 if (sysctl_tcp_syncookies
)
1274 if (sk_acceptq_is_full(sk
) && inet_csk_reqsk_queue_young(sk
) > 1)
1277 req
= inet6_reqsk_alloc(&tcp6_request_sock_ops
);
1281 #ifdef CONFIG_TCP_MD5SIG
1282 tcp_rsk(req
)->af_specific
= &tcp_request_sock_ipv6_ops
;
1285 tcp_clear_options(&tmp_opt
);
1286 tmp_opt
.mss_clamp
= IPV6_MIN_MTU
- sizeof(struct tcphdr
) - sizeof(struct ipv6hdr
);
1287 tmp_opt
.user_mss
= tp
->rx_opt
.user_mss
;
1289 tcp_parse_options(skb
, &tmp_opt
, 0);
1291 if (want_cookie
&& !tmp_opt
.saw_tstamp
)
1292 tcp_clear_options(&tmp_opt
);
1294 tmp_opt
.tstamp_ok
= tmp_opt
.saw_tstamp
;
1295 tcp_openreq_init(req
, &tmp_opt
, skb
);
1297 treq
= inet6_rsk(req
);
1298 ipv6_addr_copy(&treq
->rmt_addr
, &ipv6_hdr(skb
)->saddr
);
1299 ipv6_addr_copy(&treq
->loc_addr
, &ipv6_hdr(skb
)->daddr
);
1300 treq
->pktopts
= NULL
;
1302 TCP_ECN_create_request(req
, tcp_hdr(skb
));
1305 isn
= cookie_v6_init_sequence(sk
, skb
, &req
->mss
);
1306 req
->cookie_ts
= tmp_opt
.tstamp_ok
;
1308 if (ipv6_opt_accepted(sk
, skb
) ||
1309 np
->rxopt
.bits
.rxinfo
|| np
->rxopt
.bits
.rxoinfo
||
1310 np
->rxopt
.bits
.rxhlim
|| np
->rxopt
.bits
.rxohlim
) {
1311 atomic_inc(&skb
->users
);
1312 treq
->pktopts
= skb
;
1314 treq
->iif
= sk
->sk_bound_dev_if
;
1316 /* So that link locals have meaning */
1317 if (!sk
->sk_bound_dev_if
&&
1318 ipv6_addr_type(&treq
->rmt_addr
) & IPV6_ADDR_LINKLOCAL
)
1319 treq
->iif
= inet6_iif(skb
);
1321 isn
= tcp_v6_init_sequence(skb
);
1324 tcp_rsk(req
)->snt_isn
= isn
;
1326 security_inet_conn_request(sk
, skb
, req
);
1328 if (tcp_v6_send_synack(sk
, req
))
1332 inet6_csk_reqsk_queue_hash_add(sk
, req
, TCP_TIMEOUT_INIT
);
1340 return 0; /* don't send reset */
1343 static struct sock
* tcp_v6_syn_recv_sock(struct sock
*sk
, struct sk_buff
*skb
,
1344 struct request_sock
*req
,
1345 struct dst_entry
*dst
)
1347 struct inet6_request_sock
*treq
= inet6_rsk(req
);
1348 struct ipv6_pinfo
*newnp
, *np
= inet6_sk(sk
);
1349 struct tcp6_sock
*newtcp6sk
;
1350 struct inet_sock
*newinet
;
1351 struct tcp_sock
*newtp
;
1353 struct ipv6_txoptions
*opt
;
1354 #ifdef CONFIG_TCP_MD5SIG
1355 struct tcp_md5sig_key
*key
;
1358 if (skb
->protocol
== htons(ETH_P_IP
)) {
1363 newsk
= tcp_v4_syn_recv_sock(sk
, skb
, req
, dst
);
1368 newtcp6sk
= (struct tcp6_sock
*)newsk
;
1369 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
1371 newinet
= inet_sk(newsk
);
1372 newnp
= inet6_sk(newsk
);
1373 newtp
= tcp_sk(newsk
);
1375 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
1377 ipv6_addr_set(&newnp
->daddr
, 0, 0, htonl(0x0000FFFF),
1380 ipv6_addr_set(&newnp
->saddr
, 0, 0, htonl(0x0000FFFF),
1383 ipv6_addr_copy(&newnp
->rcv_saddr
, &newnp
->saddr
);
1385 inet_csk(newsk
)->icsk_af_ops
= &ipv6_mapped
;
1386 newsk
->sk_backlog_rcv
= tcp_v4_do_rcv
;
1387 #ifdef CONFIG_TCP_MD5SIG
1388 newtp
->af_specific
= &tcp_sock_ipv6_mapped_specific
;
1391 newnp
->pktoptions
= NULL
;
1393 newnp
->mcast_oif
= inet6_iif(skb
);
1394 newnp
->mcast_hops
= ipv6_hdr(skb
)->hop_limit
;
1397 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1398 * here, tcp_create_openreq_child now does this for us, see the comment in
1399 * that function for the gory details. -acme
1402 /* It is tricky place. Until this moment IPv4 tcp
1403 worked with IPv6 icsk.icsk_af_ops.
1406 tcp_sync_mss(newsk
, inet_csk(newsk
)->icsk_pmtu_cookie
);
1413 if (sk_acceptq_is_full(sk
))
1417 struct in6_addr
*final_p
= NULL
, final
;
1420 memset(&fl
, 0, sizeof(fl
));
1421 fl
.proto
= IPPROTO_TCP
;
1422 ipv6_addr_copy(&fl
.fl6_dst
, &treq
->rmt_addr
);
1423 if (opt
&& opt
->srcrt
) {
1424 struct rt0_hdr
*rt0
= (struct rt0_hdr
*) opt
->srcrt
;
1425 ipv6_addr_copy(&final
, &fl
.fl6_dst
);
1426 ipv6_addr_copy(&fl
.fl6_dst
, rt0
->addr
);
1429 ipv6_addr_copy(&fl
.fl6_src
, &treq
->loc_addr
);
1430 fl
.oif
= sk
->sk_bound_dev_if
;
1431 fl
.fl_ip_dport
= inet_rsk(req
)->rmt_port
;
1432 fl
.fl_ip_sport
= inet_sk(sk
)->sport
;
1433 security_req_classify_flow(req
, &fl
);
1435 if (ip6_dst_lookup(sk
, &dst
, &fl
))
1439 ipv6_addr_copy(&fl
.fl6_dst
, final_p
);
1441 if ((xfrm_lookup(&dst
, &fl
, sk
, 0)) < 0)
1445 newsk
= tcp_create_openreq_child(sk
, req
, skb
);
1450 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1451 * count here, tcp_create_openreq_child now does this for us, see the
1452 * comment in that function for the gory details. -acme
1455 newsk
->sk_gso_type
= SKB_GSO_TCPV6
;
1456 __ip6_dst_store(newsk
, dst
, NULL
, NULL
);
1458 newtcp6sk
= (struct tcp6_sock
*)newsk
;
1459 inet_sk(newsk
)->pinet6
= &newtcp6sk
->inet6
;
1461 newtp
= tcp_sk(newsk
);
1462 newinet
= inet_sk(newsk
);
1463 newnp
= inet6_sk(newsk
);
1465 memcpy(newnp
, np
, sizeof(struct ipv6_pinfo
));
1467 ipv6_addr_copy(&newnp
->daddr
, &treq
->rmt_addr
);
1468 ipv6_addr_copy(&newnp
->saddr
, &treq
->loc_addr
);
1469 ipv6_addr_copy(&newnp
->rcv_saddr
, &treq
->loc_addr
);
1470 newsk
->sk_bound_dev_if
= treq
->iif
;
1472 /* Now IPv6 options...
1474 First: no IPv4 options.
1476 newinet
->opt
= NULL
;
1477 newnp
->ipv6_fl_list
= NULL
;
1480 newnp
->rxopt
.all
= np
->rxopt
.all
;
1482 /* Clone pktoptions received with SYN */
1483 newnp
->pktoptions
= NULL
;
1484 if (treq
->pktopts
!= NULL
) {
1485 newnp
->pktoptions
= skb_clone(treq
->pktopts
, GFP_ATOMIC
);
1486 kfree_skb(treq
->pktopts
);
1487 treq
->pktopts
= NULL
;
1488 if (newnp
->pktoptions
)
1489 skb_set_owner_r(newnp
->pktoptions
, newsk
);
1492 newnp
->mcast_oif
= inet6_iif(skb
);
1493 newnp
->mcast_hops
= ipv6_hdr(skb
)->hop_limit
;
1495 /* Clone native IPv6 options from listening socket (if any)
1497 Yes, keeping reference count would be much more clever,
1498 but we make one more one thing there: reattach optmem
1502 newnp
->opt
= ipv6_dup_options(newsk
, opt
);
1504 sock_kfree_s(sk
, opt
, opt
->tot_len
);
1507 inet_csk(newsk
)->icsk_ext_hdr_len
= 0;
1509 inet_csk(newsk
)->icsk_ext_hdr_len
= (newnp
->opt
->opt_nflen
+
1510 newnp
->opt
->opt_flen
);
1512 tcp_mtup_init(newsk
);
1513 tcp_sync_mss(newsk
, dst_mtu(dst
));
1514 newtp
->advmss
= dst_metric(dst
, RTAX_ADVMSS
);
1515 tcp_initialize_rcv_mss(newsk
);
1517 newinet
->daddr
= newinet
->saddr
= newinet
->rcv_saddr
= LOOPBACK4_IPV6
;
1519 #ifdef CONFIG_TCP_MD5SIG
1520 /* Copy over the MD5 key from the original socket */
1521 if ((key
= tcp_v6_md5_do_lookup(sk
, &newnp
->daddr
)) != NULL
) {
1522 /* We're using one, so create a matching key
1523 * on the newsk structure. If we fail to get
1524 * memory, then we end up not copying the key
1527 char *newkey
= kmemdup(key
->key
, key
->keylen
, GFP_ATOMIC
);
1529 tcp_v6_md5_do_add(newsk
, &inet6_sk(sk
)->daddr
,
1530 newkey
, key
->keylen
);
1534 __inet6_hash(newsk
);
1535 __inet_inherit_port(sk
, newsk
);
1540 NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS
);
1542 NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS
);
1543 if (opt
&& opt
!= np
->opt
)
1544 sock_kfree_s(sk
, opt
, opt
->tot_len
);
1549 static __sum16
tcp_v6_checksum_init(struct sk_buff
*skb
)
1551 if (skb
->ip_summed
== CHECKSUM_COMPLETE
) {
1552 if (!tcp_v6_check(tcp_hdr(skb
), skb
->len
, &ipv6_hdr(skb
)->saddr
,
1553 &ipv6_hdr(skb
)->daddr
, skb
->csum
)) {
1554 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1559 skb
->csum
= ~csum_unfold(tcp_v6_check(tcp_hdr(skb
), skb
->len
,
1560 &ipv6_hdr(skb
)->saddr
,
1561 &ipv6_hdr(skb
)->daddr
, 0));
1563 if (skb
->len
<= 76) {
1564 return __skb_checksum_complete(skb
);
1569 /* The socket must have it's spinlock held when we get
1572 * We have a potential double-lock case here, so even when
1573 * doing backlog processing we use the BH locking scheme.
1574 * This is because we cannot sleep with the original spinlock
1577 static int tcp_v6_do_rcv(struct sock
*sk
, struct sk_buff
*skb
)
1579 struct ipv6_pinfo
*np
= inet6_sk(sk
);
1580 struct tcp_sock
*tp
;
1581 struct sk_buff
*opt_skb
= NULL
;
1583 /* Imagine: socket is IPv6. IPv4 packet arrives,
1584 goes to IPv4 receive handler and backlogged.
1585 From backlog it always goes here. Kerboom...
1586 Fortunately, tcp_rcv_established and rcv_established
1587 handle them correctly, but it is not case with
1588 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1591 if (skb
->protocol
== htons(ETH_P_IP
))
1592 return tcp_v4_do_rcv(sk
, skb
);
1594 #ifdef CONFIG_TCP_MD5SIG
1595 if (tcp_v6_inbound_md5_hash (sk
, skb
))
1599 if (sk_filter(sk
, skb
))
1603 * socket locking is here for SMP purposes as backlog rcv
1604 * is currently called with bh processing disabled.
1607 /* Do Stevens' IPV6_PKTOPTIONS.
1609 Yes, guys, it is the only place in our code, where we
1610 may make it not affecting IPv4.
1611 The rest of code is protocol independent,
1612 and I do not like idea to uglify IPv4.
1614 Actually, all the idea behind IPV6_PKTOPTIONS
1615 looks not very well thought. For now we latch
1616 options, received in the last packet, enqueued
1617 by tcp. Feel free to propose better solution.
1621 opt_skb
= skb_clone(skb
, GFP_ATOMIC
);
1623 if (sk
->sk_state
== TCP_ESTABLISHED
) { /* Fast path */
1624 TCP_CHECK_TIMER(sk
);
1625 if (tcp_rcv_established(sk
, skb
, tcp_hdr(skb
), skb
->len
))
1627 TCP_CHECK_TIMER(sk
);
1629 goto ipv6_pktoptions
;
1633 if (skb
->len
< tcp_hdrlen(skb
) || tcp_checksum_complete(skb
))
1636 if (sk
->sk_state
== TCP_LISTEN
) {
1637 struct sock
*nsk
= tcp_v6_hnd_req(sk
, skb
);
1642 * Queue it on the new socket if the new socket is active,
1643 * otherwise we just shortcircuit this and continue with
1647 if (tcp_child_process(sk
, nsk
, skb
))
1650 __kfree_skb(opt_skb
);
1655 TCP_CHECK_TIMER(sk
);
1656 if (tcp_rcv_state_process(sk
, skb
, tcp_hdr(skb
), skb
->len
))
1658 TCP_CHECK_TIMER(sk
);
1660 goto ipv6_pktoptions
;
1664 tcp_v6_send_reset(sk
, skb
);
1667 __kfree_skb(opt_skb
);
1671 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1676 /* Do you ask, what is it?
1678 1. skb was enqueued by tcp.
1679 2. skb is added to tail of read queue, rather than out of order.
1680 3. socket is not in passive state.
1681 4. Finally, it really contains options, which user wants to receive.
1684 if (TCP_SKB_CB(opt_skb
)->end_seq
== tp
->rcv_nxt
&&
1685 !((1 << sk
->sk_state
) & (TCPF_CLOSE
| TCPF_LISTEN
))) {
1686 if (np
->rxopt
.bits
.rxinfo
|| np
->rxopt
.bits
.rxoinfo
)
1687 np
->mcast_oif
= inet6_iif(opt_skb
);
1688 if (np
->rxopt
.bits
.rxhlim
|| np
->rxopt
.bits
.rxohlim
)
1689 np
->mcast_hops
= ipv6_hdr(opt_skb
)->hop_limit
;
1690 if (ipv6_opt_accepted(sk
, opt_skb
)) {
1691 skb_set_owner_r(opt_skb
, sk
);
1692 opt_skb
= xchg(&np
->pktoptions
, opt_skb
);
1694 __kfree_skb(opt_skb
);
1695 opt_skb
= xchg(&np
->pktoptions
, NULL
);
1704 static int tcp_v6_rcv(struct sk_buff
*skb
)
1710 if (skb
->pkt_type
!= PACKET_HOST
)
1714 * Count it even if it's bad.
1716 TCP_INC_STATS_BH(TCP_MIB_INSEGS
);
1718 if (!pskb_may_pull(skb
, sizeof(struct tcphdr
)))
1723 if (th
->doff
< sizeof(struct tcphdr
)/4)
1725 if (!pskb_may_pull(skb
, th
->doff
*4))
1728 if (!skb_csum_unnecessary(skb
) && tcp_v6_checksum_init(skb
))
1732 TCP_SKB_CB(skb
)->seq
= ntohl(th
->seq
);
1733 TCP_SKB_CB(skb
)->end_seq
= (TCP_SKB_CB(skb
)->seq
+ th
->syn
+ th
->fin
+
1734 skb
->len
- th
->doff
*4);
1735 TCP_SKB_CB(skb
)->ack_seq
= ntohl(th
->ack_seq
);
1736 TCP_SKB_CB(skb
)->when
= 0;
1737 TCP_SKB_CB(skb
)->flags
= ipv6_get_dsfield(ipv6_hdr(skb
));
1738 TCP_SKB_CB(skb
)->sacked
= 0;
1740 sk
= __inet6_lookup(dev_net(skb
->dev
), &tcp_hashinfo
,
1741 &ipv6_hdr(skb
)->saddr
, th
->source
,
1742 &ipv6_hdr(skb
)->daddr
, ntohs(th
->dest
),
1749 if (sk
->sk_state
== TCP_TIME_WAIT
)
1752 if (!xfrm6_policy_check(sk
, XFRM_POLICY_IN
, skb
))
1753 goto discard_and_relse
;
1755 if (sk_filter(sk
, skb
))
1756 goto discard_and_relse
;
1760 bh_lock_sock_nested(sk
);
1762 if (!sock_owned_by_user(sk
)) {
1763 #ifdef CONFIG_NET_DMA
1764 struct tcp_sock
*tp
= tcp_sk(sk
);
1765 if (!tp
->ucopy
.dma_chan
&& tp
->ucopy
.pinned_list
)
1766 tp
->ucopy
.dma_chan
= get_softnet_dma();
1767 if (tp
->ucopy
.dma_chan
)
1768 ret
= tcp_v6_do_rcv(sk
, skb
);
1772 if (!tcp_prequeue(sk
, skb
))
1773 ret
= tcp_v6_do_rcv(sk
, skb
);
1776 sk_add_backlog(sk
, skb
);
1780 return ret
? -1 : 0;
1783 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
))
1786 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1788 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1790 tcp_v6_send_reset(NULL
, skb
);
1807 if (!xfrm6_policy_check(NULL
, XFRM_POLICY_IN
, skb
)) {
1808 inet_twsk_put(inet_twsk(sk
));
1812 if (skb
->len
< (th
->doff
<<2) || tcp_checksum_complete(skb
)) {
1813 TCP_INC_STATS_BH(TCP_MIB_INERRS
);
1814 inet_twsk_put(inet_twsk(sk
));
1818 switch (tcp_timewait_state_process(inet_twsk(sk
), skb
, th
)) {
1823 sk2
= inet6_lookup_listener(dev_net(skb
->dev
), &tcp_hashinfo
,
1824 &ipv6_hdr(skb
)->daddr
,
1825 ntohs(th
->dest
), inet6_iif(skb
));
1827 struct inet_timewait_sock
*tw
= inet_twsk(sk
);
1828 inet_twsk_deschedule(tw
, &tcp_death_row
);
1833 /* Fall through to ACK */
1836 tcp_v6_timewait_ack(sk
, skb
);
1840 case TCP_TW_SUCCESS
:;
/*
 * Per-peer timestamp caching hook (see tcp_v4_remember_stamp for the
 * IPv4 counterpart). Not implemented for IPv6; always reports "nothing
 * remembered" so TIME_WAIT recycling stays disabled for v6 peers.
 */
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
1851 static struct inet_connection_sock_af_ops ipv6_specific
= {
1852 .queue_xmit
= inet6_csk_xmit
,
1853 .send_check
= tcp_v6_send_check
,
1854 .rebuild_header
= inet6_sk_rebuild_header
,
1855 .conn_request
= tcp_v6_conn_request
,
1856 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1857 .remember_stamp
= tcp_v6_remember_stamp
,
1858 .net_header_len
= sizeof(struct ipv6hdr
),
1859 .setsockopt
= ipv6_setsockopt
,
1860 .getsockopt
= ipv6_getsockopt
,
1861 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1862 .sockaddr_len
= sizeof(struct sockaddr_in6
),
1863 .bind_conflict
= inet6_csk_bind_conflict
,
1864 #ifdef CONFIG_COMPAT
1865 .compat_setsockopt
= compat_ipv6_setsockopt
,
1866 .compat_getsockopt
= compat_ipv6_getsockopt
,
#ifdef CONFIG_TCP_MD5SIG
/* MD5 signature ops for native IPv6 TCP sockets (RFC 2385). */
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_calc_md5_hash,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1880 * TCP over IPv4 via INET6 API
1883 static struct inet_connection_sock_af_ops ipv6_mapped
= {
1884 .queue_xmit
= ip_queue_xmit
,
1885 .send_check
= tcp_v4_send_check
,
1886 .rebuild_header
= inet_sk_rebuild_header
,
1887 .conn_request
= tcp_v6_conn_request
,
1888 .syn_recv_sock
= tcp_v6_syn_recv_sock
,
1889 .remember_stamp
= tcp_v4_remember_stamp
,
1890 .net_header_len
= sizeof(struct iphdr
),
1891 .setsockopt
= ipv6_setsockopt
,
1892 .getsockopt
= ipv6_getsockopt
,
1893 .addr2sockaddr
= inet6_csk_addr2sockaddr
,
1894 .sockaddr_len
= sizeof(struct sockaddr_in6
),
1895 .bind_conflict
= inet6_csk_bind_conflict
,
1896 #ifdef CONFIG_COMPAT
1897 .compat_setsockopt
= compat_ipv6_setsockopt
,
1898 .compat_getsockopt
= compat_ipv6_getsockopt
,
#ifdef CONFIG_TCP_MD5SIG
/* MD5 signature ops for v4-mapped sockets: lookup/hash use the IPv4
 * routines, key management stays with the IPv6 parser. */
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_calc_md5_hash,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
1911 /* NOTE: A lot of things set to zero explicitly by call to
1912 * sk_alloc() so need not be done here.
1914 static int tcp_v6_init_sock(struct sock
*sk
)
1916 struct inet_connection_sock
*icsk
= inet_csk(sk
);
1917 struct tcp_sock
*tp
= tcp_sk(sk
);
1919 skb_queue_head_init(&tp
->out_of_order_queue
);
1920 tcp_init_xmit_timers(sk
);
1921 tcp_prequeue_init(tp
);
1923 icsk
->icsk_rto
= TCP_TIMEOUT_INIT
;
1924 tp
->mdev
= TCP_TIMEOUT_INIT
;
1926 /* So many TCP implementations out there (incorrectly) count the
1927 * initial SYN frame in their delayed-ACK and congestion control
1928 * algorithms that we must have the following bandaid to talk
1929 * efficiently to them. -DaveM
1933 /* See draft-stevens-tcpca-spec-01 for discussion of the
1934 * initialization of these values.
1936 tp
->snd_ssthresh
= 0x7fffffff;
1937 tp
->snd_cwnd_clamp
= ~0;
1938 tp
->mss_cache
= 536;
1940 tp
->reordering
= sysctl_tcp_reordering
;
1942 sk
->sk_state
= TCP_CLOSE
;
1944 icsk
->icsk_af_ops
= &ipv6_specific
;
1945 icsk
->icsk_ca_ops
= &tcp_init_congestion_ops
;
1946 icsk
->icsk_sync_mss
= tcp_sync_mss
;
1947 sk
->sk_write_space
= sk_stream_write_space
;
1948 sock_set_flag(sk
, SOCK_USE_WRITE_QUEUE
);
1950 #ifdef CONFIG_TCP_MD5SIG
1951 tp
->af_specific
= &tcp_sock_ipv6_specific
;
1954 sk
->sk_sndbuf
= sysctl_tcp_wmem
[1];
1955 sk
->sk_rcvbuf
= sysctl_tcp_rmem
[1];
1957 atomic_inc(&tcp_sockets_allocated
);
/*
 * Socket teardown: drop any MD5 keys, run the shared IPv4 TCP destroy
 * path, then release IPv6-specific state.
 */
static int tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif

	tcp_v4_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
1973 #ifdef CONFIG_PROC_FS
1974 /* Proc filesystem TCPv6 sock list dumping. */
1975 static void get_openreq6(struct seq_file
*seq
,
1976 struct sock
*sk
, struct request_sock
*req
, int i
, int uid
)
1978 int ttd
= req
->expires
- jiffies
;
1979 struct in6_addr
*src
= &inet6_rsk(req
)->loc_addr
;
1980 struct in6_addr
*dest
= &inet6_rsk(req
)->rmt_addr
;
1986 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1987 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1989 src
->s6_addr32
[0], src
->s6_addr32
[1],
1990 src
->s6_addr32
[2], src
->s6_addr32
[3],
1991 ntohs(inet_sk(sk
)->sport
),
1992 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
1993 dest
->s6_addr32
[2], dest
->s6_addr32
[3],
1994 ntohs(inet_rsk(req
)->rmt_port
),
1996 0,0, /* could print option size, but that is af dependent. */
1997 1, /* timers active (only the expire timer) */
1998 jiffies_to_clock_t(ttd
),
2001 0, /* non standard timer */
2002 0, /* open_requests have no inode */
2006 static void get_tcp6_sock(struct seq_file
*seq
, struct sock
*sp
, int i
)
2008 struct in6_addr
*dest
, *src
;
2011 unsigned long timer_expires
;
2012 struct inet_sock
*inet
= inet_sk(sp
);
2013 struct tcp_sock
*tp
= tcp_sk(sp
);
2014 const struct inet_connection_sock
*icsk
= inet_csk(sp
);
2015 struct ipv6_pinfo
*np
= inet6_sk(sp
);
2018 src
= &np
->rcv_saddr
;
2019 destp
= ntohs(inet
->dport
);
2020 srcp
= ntohs(inet
->sport
);
2022 if (icsk
->icsk_pending
== ICSK_TIME_RETRANS
) {
2024 timer_expires
= icsk
->icsk_timeout
;
2025 } else if (icsk
->icsk_pending
== ICSK_TIME_PROBE0
) {
2027 timer_expires
= icsk
->icsk_timeout
;
2028 } else if (timer_pending(&sp
->sk_timer
)) {
2030 timer_expires
= sp
->sk_timer
.expires
;
2033 timer_expires
= jiffies
;
2037 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2038 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2040 src
->s6_addr32
[0], src
->s6_addr32
[1],
2041 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
2042 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
2043 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
2045 tp
->write_seq
-tp
->snd_una
,
2046 (sp
->sk_state
== TCP_LISTEN
) ? sp
->sk_ack_backlog
: (tp
->rcv_nxt
- tp
->copied_seq
),
2048 jiffies_to_clock_t(timer_expires
- jiffies
),
2049 icsk
->icsk_retransmits
,
2051 icsk
->icsk_probes_out
,
2053 atomic_read(&sp
->sk_refcnt
), sp
,
2056 (icsk
->icsk_ack
.quick
<< 1 ) | icsk
->icsk_ack
.pingpong
,
2057 tp
->snd_cwnd
, tp
->snd_ssthresh
>=0xFFFF?-1:tp
->snd_ssthresh
2061 static void get_timewait6_sock(struct seq_file
*seq
,
2062 struct inet_timewait_sock
*tw
, int i
)
2064 struct in6_addr
*dest
, *src
;
2066 struct inet6_timewait_sock
*tw6
= inet6_twsk((struct sock
*)tw
);
2067 int ttd
= tw
->tw_ttd
- jiffies
;
2072 dest
= &tw6
->tw_v6_daddr
;
2073 src
= &tw6
->tw_v6_rcv_saddr
;
2074 destp
= ntohs(tw
->tw_dport
);
2075 srcp
= ntohs(tw
->tw_sport
);
2078 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2079 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2081 src
->s6_addr32
[0], src
->s6_addr32
[1],
2082 src
->s6_addr32
[2], src
->s6_addr32
[3], srcp
,
2083 dest
->s6_addr32
[0], dest
->s6_addr32
[1],
2084 dest
->s6_addr32
[2], dest
->s6_addr32
[3], destp
,
2085 tw
->tw_substate
, 0, 0,
2086 3, jiffies_to_clock_t(ttd
), 0, 0, 0, 0,
2087 atomic_read(&tw
->tw_refcnt
), tw
);
2090 static int tcp6_seq_show(struct seq_file
*seq
, void *v
)
2092 struct tcp_iter_state
*st
;
2094 if (v
== SEQ_START_TOKEN
) {
2099 "st tx_queue rx_queue tr tm->when retrnsmt"
2100 " uid timeout inode\n");
2105 switch (st
->state
) {
2106 case TCP_SEQ_STATE_LISTENING
:
2107 case TCP_SEQ_STATE_ESTABLISHED
:
2108 get_tcp6_sock(seq
, v
, st
->num
);
2110 case TCP_SEQ_STATE_OPENREQ
:
2111 get_openreq6(seq
, st
->syn_wait_sk
, v
, st
->num
, st
->uid
);
2113 case TCP_SEQ_STATE_TIME_WAIT
:
2114 get_timewait6_sock(seq
, v
, st
->num
);
2121 static struct tcp_seq_afinfo tcp6_seq_afinfo
= {
2125 .owner
= THIS_MODULE
,
2128 .show
= tcp6_seq_show
,
2132 int tcp6_proc_init(struct net
*net
)
2134 return tcp_proc_register(net
, &tcp6_seq_afinfo
);
2137 void tcp6_proc_exit(struct net
*net
)
2139 tcp_proc_unregister(net
, &tcp6_seq_afinfo
);
2143 struct proto tcpv6_prot
= {
2145 .owner
= THIS_MODULE
,
2147 .connect
= tcp_v6_connect
,
2148 .disconnect
= tcp_disconnect
,
2149 .accept
= inet_csk_accept
,
2151 .init
= tcp_v6_init_sock
,
2152 .destroy
= tcp_v6_destroy_sock
,
2153 .shutdown
= tcp_shutdown
,
2154 .setsockopt
= tcp_setsockopt
,
2155 .getsockopt
= tcp_getsockopt
,
2156 .recvmsg
= tcp_recvmsg
,
2157 .backlog_rcv
= tcp_v6_do_rcv
,
2158 .hash
= tcp_v6_hash
,
2159 .unhash
= inet_unhash
,
2160 .get_port
= inet_csk_get_port
,
2161 .enter_memory_pressure
= tcp_enter_memory_pressure
,
2162 .sockets_allocated
= &tcp_sockets_allocated
,
2163 .memory_allocated
= &tcp_memory_allocated
,
2164 .memory_pressure
= &tcp_memory_pressure
,
2165 .orphan_count
= &tcp_orphan_count
,
2166 .sysctl_mem
= sysctl_tcp_mem
,
2167 .sysctl_wmem
= sysctl_tcp_wmem
,
2168 .sysctl_rmem
= sysctl_tcp_rmem
,
2169 .max_header
= MAX_TCP_HEADER
,
2170 .obj_size
= sizeof(struct tcp6_sock
),
2171 .twsk_prot
= &tcp6_timewait_sock_ops
,
2172 .rsk_prot
= &tcp6_request_sock_ops
,
2173 .h
.hashinfo
= &tcp_hashinfo
,
2174 #ifdef CONFIG_COMPAT
2175 .compat_setsockopt
= compat_tcp_setsockopt
,
2176 .compat_getsockopt
= compat_tcp_getsockopt
,
2180 static struct inet6_protocol tcpv6_protocol
= {
2181 .handler
= tcp_v6_rcv
,
2182 .err_handler
= tcp_v6_err
,
2183 .gso_send_check
= tcp_v6_gso_send_check
,
2184 .gso_segment
= tcp_tso_segment
,
2185 .flags
= INET6_PROTO_NOPOLICY
|INET6_PROTO_FINAL
,
2188 static struct inet_protosw tcpv6_protosw
= {
2189 .type
= SOCK_STREAM
,
2190 .protocol
= IPPROTO_TCP
,
2191 .prot
= &tcpv6_prot
,
2192 .ops
= &inet6_stream_ops
,
2195 .flags
= INET_PROTOSW_PERMANENT
|
2199 static int tcpv6_net_init(struct net
*net
)
2201 return inet_ctl_sock_create(&net
->ipv6
.tcp_sk
, PF_INET6
,
2202 SOCK_RAW
, IPPROTO_TCP
, net
);
2205 static void tcpv6_net_exit(struct net
*net
)
2207 inet_ctl_sock_destroy(net
->ipv6
.tcp_sk
);
2210 static struct pernet_operations tcpv6_net_ops
= {
2211 .init
= tcpv6_net_init
,
2212 .exit
= tcpv6_net_exit
,
2215 int __init
tcpv6_init(void)
2219 ret
= inet6_add_protocol(&tcpv6_protocol
, IPPROTO_TCP
);
2223 /* register inet6 protocol */
2224 ret
= inet6_register_protosw(&tcpv6_protosw
);
2226 goto out_tcpv6_protocol
;
2228 ret
= register_pernet_subsys(&tcpv6_net_ops
);
2230 goto out_tcpv6_protosw
;
2235 inet6_del_protocol(&tcpv6_protocol
, IPPROTO_TCP
);
2237 inet6_unregister_protosw(&tcpv6_protosw
);
2241 void tcpv6_exit(void)
2243 unregister_pernet_subsys(&tcpv6_net_ops
);
2244 inet6_unregister_protosw(&tcpv6_protosw
);
2245 inet6_del_protocol(&tcpv6_protocol
, IPPROTO_TCP
);