/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
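/*
 * Usage sketch (not part of this file): tcp_v6_check() only folds the
 * IPv6 pseudo-header into a running checksum, so a typical caller first
 * sums the TCP header and payload and passes that in as "base", e.g. as
 * tcp_v6_send_synack() does below:
 *
 *	th->check = tcp_v6_check(th, skb->len, &saddr, &daddr,
 *				 csum_partial((char *)th, skb->len, 0));
 */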
static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
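/*
 * Usage sketch (userspace, not part of this file; 192.0.2.1:80 is a
 * placeholder peer): an AF_INET6 TCP socket connected to a v4-mapped
 * address takes the IPV6_ADDR_MAPPED branch above and is handed over
 * to tcp_v4_connect():
 *
 *	int fd = socket(AF_INET6, SOCK_STREAM, 0);
 *	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
 *				   .sin6_port   = htons(80) };
 *	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 *
 * Setting the IPV6_V6ONLY socket option beforehand makes the same
 * connect() fail with -ENETUNREACH via the __ipv6_only_sock() check.
 */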
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for an request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}
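/*
 * Note (usage, not part of this file): which branch above is taken
 * depends on the sysctl_tcp_syncookies toggle, shared with IPv4 and
 * settable at runtime, e.g.:
 *
 *	echo 1 > /proc/sys/net/ipv4/tcp_syncookies
 */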
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
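/*
 * Usage sketch (userspace, not part of this file): tcp_v6_parse_md5_keys()
 * is reached through setsockopt(TCP_MD5SIG).  With a hypothetical peer
 * address and key:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in6 *peer = (struct sockaddr_in6 *)&md5.tcpm_addr;
 *
 *	peer->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "2001:db8::1", &peer->sin6_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that peer, as handled above.
 */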
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
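/*
 * For reference, the block hashed above is the RFC 2460 (section 8.1)
 * pseudo-header; struct tcp6_pseudohdr (net/tcp.h) lays it out as:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;		(TCP length)
 *		__be32		protocol;	(IPPROTO_TCP, incl. padding)
 *	};
 */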
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       genhash ? "failed" : "mismatch",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff<<2,
							 skb->csum));
	}
}
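/*
 * A note on the two branches above: with CHECKSUM_PARTIAL the device
 * finishes the checksum, so only the (inverted) pseudo-header sum is
 * stored, and csum_start/csum_offset tell the NIC where to write the
 * final 16-bit result:
 *
 *	th->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, 0);
 *
 * In the software fallback, the full sum over the TCP header and
 * payload is folded in via csum_partial() before csum_ipv6_magic().
 */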
static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb->dst->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(*th);
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
	else
		key = NULL;

	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->rst = 1;

	if (th->ack) {
		t1->seq = th->ack_seq;
	} else {
		t1->ack = 1;
		t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
				    + skb->len - (th->doff<<2));
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		__be32 *opt = (__be32 *)(t1 + 1);
		opt[0] = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) |
			       TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)&opt[1], key,
				    &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr, t1);
	}
#endif

	buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    sizeof(*t1), IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass a socket to ip6_dst_lookup even if it is for RST.
	 * The underlying function will use it to retrieve the network
	 * namespace.
	 */
	if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len/4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = 1;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	buff->csum = csum_partial((char *)t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	if (!ip6_dst_lookup(ctl_sk, &buff->dst, &fl)) {
		if (xfrm_lookup(&buff->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			return;
		}
	}

	kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}
static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	treq->pktopts = NULL;
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req))
		goto drop;

	if (!want_cookie) {
		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		return 0;
	}

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}

static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup(net, &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest),
			inet6_iif(skb));

	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = get_softnet_dma();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_sk(sk)->sport),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};
int tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
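/*
 * Usage sketch (userspace, not part of this file): the seq_file
 * registered above appears as /proc/net/tcp6 and can be read like any
 * text file, one socket per line in the format built by get_tcp6_sock():
 *
 *	FILE *f = fopen("/proc/net/tcp6", "r");
 *	char line[512];
 *
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);
 *	if (f)
 *		fclose(f);
 */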
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
static struct pernet_operations tcpv6_net_ops = {
	.init = tcpv6_net_init,
	.exit = tcpv6_net_exit,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out_tcpv6_protocol;
}
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}