/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif
static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk);
		local_bh_enable();
	}
}
static __inline__ __sum16 tcp_v6_check(struct tcphdr *th, int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
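
/*
 * A note on tcp_v6_check() above: csum_ipv6_magic() folds the IPv6
 * pseudo-header (source/destination address, payload length and the
 * next-header value) into the running sum passed as 'base'.  A caller
 * wanting the full TCP checksum therefore seeds it with the sum over
 * the TCP header and payload, as tcp_v6_send_synack() does below:
 *
 *	th->check = tcp_v6_check(th, skb->len, saddr, daddr,
 *				 csum_partial((char *)th, skb->len, skb->csum));
 */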
static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
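
/*
 * The initial sequence number above is a keyed hash over the full
 * {daddr, saddr, dport, sport} four-tuple (plus a secret and a clock,
 * see secure_tcpv6_sequence_number()), so distinct connections do not
 * share a predictable ISN space.
 */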
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	if ((err = __xfrm_lookup(&dst, &fl, sk, XFRM_LOOKUP_WAIT)) < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->sport,
							     inet->dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
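
/*
 * Worked example for the mapped-address branch in tcp_v6_connect():
 * connecting to ::ffff:192.0.2.1 is really an IPv4 connection.  The
 * IPv4 address lives in the last 32-bit word of the in6_addr, which is
 * why a sockaddr_in is built from usin->sin6_addr.s6_addr32[3] and the
 * socket is handed to tcp_v4_connect(), with the "ipv6_mapped" af_ops
 * swapped in so all further per-packet work runs on the IPv4 paths.
 */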
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle rthdr case. Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff * skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr * final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(th, skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial((char *)th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts)
		kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}
static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}
static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool() == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
				       (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof (tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}
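
/*
 * Note on the key store used above: it is a flat array rather than a
 * hash table.  alloced6 tracks the array capacity, entries6 the number
 * of live keys, and the array grows one slot at a time via
 * kmalloc()+memmove().  Lookups are a linear scan, which is adequate
 * for the small number of signed peers TCP MD5 (RFC 2385) is used for.
 */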
static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}
static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof (tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}
static void tcp_v6_clear_md5_list (struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}
static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
				  int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}
static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}
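
/*
 * Layout of the block hashed above (struct tcp6_pseudohdr, matching
 * the RFC 2460 upper-layer checksum pseudo-header):
 *
 *	saddr    - 16 bytes, IPv6 source address
 *	daddr    - 16 bytes, IPv6 destination address
 *	len      - 4 bytes, TCP segment length (network byte order)
 *	protocol - 4 bytes, zero-padded next-header value (IPPROTO_TCP)
 *
 * The same block prefixes both the header-only and the full-segment
 * signature calculations below.
 */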
static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for "
			       "(" NIP6_FMT ", %u)->"
			       "(" NIP6_FMT ", %u)\n",
			       genhash ? "failed" : "mismatch",
			       NIP6(ip6h->saddr), ntohs(th->source),
			       NIP6(ip6h->daddr), ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
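
/*
 * Summary of the verdicts above: a segment passes only when sender and
 * receiver agree about signing.  No key configured and no option
 * present passes; a configured key with a missing option, an
 * unexpected option, or a digest mismatch all make the caller
 * (tcp_v6_do_rcv) drop the segment silently, as RFC 2385 requires.
 */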
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_send_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset
};

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
};
#endif
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial((char *)th, th->doff<<2,
							 skb->csum));
	}
}
*skb
)
927 struct ipv6hdr
*ipv6h
;
930 if (!pskb_may_pull(skb
, sizeof(*th
)))
933 ipv6h
= ipv6_hdr(skb
);
937 th
->check
= ~csum_ipv6_magic(&ipv6h
->saddr
, &ipv6h
->daddr
, skb
->len
,
939 skb
->csum_start
= skb_transport_header(skb
) - skb
->head
;
940 skb
->csum_offset
= offsetof(struct tcphdr
, check
);
941 skb
->ip_summed
= CHECKSUM_PARTIAL
;
945 static void tcp_v6_send_response(struct sk_buff
*skb
, u32 seq
, u32 ack
, u32 win
,
946 u32 ts
, struct tcp_md5sig_key
*key
, int rst
)
948 struct tcphdr
*th
= tcp_hdr(skb
), *t1
;
949 struct sk_buff
*buff
;
951 struct net
*net
= dev_net(skb
->dst
->dev
);
952 struct sock
*ctl_sk
= net
->ipv6
.tcp_sk
;
953 unsigned int tot_len
= sizeof(struct tcphdr
);
957 tot_len
+= TCPOLEN_TSTAMP_ALIGNED
;
958 #ifdef CONFIG_TCP_MD5SIG
960 tot_len
+= TCPOLEN_MD5SIG_ALIGNED
;
963 buff
= alloc_skb(MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
,
968 skb_reserve(buff
, MAX_HEADER
+ sizeof(struct ipv6hdr
) + tot_len
);
970 t1
= (struct tcphdr
*) skb_push(buff
, tot_len
);
972 /* Swap the send and the receive. */
973 memset(t1
, 0, sizeof(*t1
));
974 t1
->dest
= th
->source
;
975 t1
->source
= th
->dest
;
976 t1
->doff
= tot_len
/ 4;
977 t1
->seq
= htonl(seq
);
978 t1
->ack_seq
= htonl(ack
);
979 t1
->ack
= !rst
|| !th
->ack
;
981 t1
->window
= htons(win
);
983 topt
= (__be32
*)(t1
+ 1);
986 *topt
++ = htonl((TCPOPT_NOP
<< 24) | (TCPOPT_NOP
<< 16) |
987 (TCPOPT_TIMESTAMP
<< 8) | TCPOLEN_TIMESTAMP
);
988 *topt
++ = htonl(tcp_time_stamp
);
992 #ifdef CONFIG_TCP_MD5SIG
994 *topt
++ = htonl((TCPOPT_NOP
<< 24) | (TCPOPT_NOP
<< 16) |
995 (TCPOPT_MD5SIG
<< 8) | TCPOLEN_MD5SIG
);
996 tcp_v6_md5_hash_hdr((__u8
*)topt
, key
,
997 &ipv6_hdr(skb
)->saddr
,
998 &ipv6_hdr(skb
)->daddr
, t1
);
1002 buff
->csum
= csum_partial((char *)t1
, tot_len
, 0);
1004 memset(&fl
, 0, sizeof(fl
));
1005 ipv6_addr_copy(&fl
.fl6_dst
, &ipv6_hdr(skb
)->saddr
);
1006 ipv6_addr_copy(&fl
.fl6_src
, &ipv6_hdr(skb
)->daddr
);
1008 t1
->check
= csum_ipv6_magic(&fl
.fl6_src
, &fl
.fl6_dst
,
1009 tot_len
, IPPROTO_TCP
,
1012 fl
.proto
= IPPROTO_TCP
;
1013 fl
.oif
= inet6_iif(skb
);
1014 fl
.fl_ip_dport
= t1
->dest
;
1015 fl
.fl_ip_sport
= t1
->source
;
1016 security_skb_classify_flow(skb
, &fl
);
1018 /* Pass a socket to ip6_dst_lookup either it is for RST
1019 * Underlying function will use this to retrieve the network
1022 if (!ip6_dst_lookup(ctl_sk
, &buff
->dst
, &fl
)) {
1023 if (xfrm_lookup(&buff
->dst
, &fl
, NULL
, 0) >= 0) {
1024 ip6_xmit(ctl_sk
, buff
, &fl
, NULL
, 0);
1025 TCP_INC_STATS_BH(net
, TCP_MIB_OUTSEGS
);
1027 TCP_INC_STATS_BH(net
, TCP_MIB_OUTRSTS
);
1035 static void tcp_v6_send_reset(struct sock
*sk
, struct sk_buff
*skb
)
1037 struct tcphdr
*th
= tcp_hdr(skb
);
1038 u32 seq
= 0, ack_seq
= 0;
1039 struct tcp_md5sig_key
*key
= NULL
;
1044 if (!ipv6_unicast_destination(skb
))
1047 #ifdef CONFIG_TCP_MD5SIG
1049 key
= tcp_v6_md5_do_lookup(sk
, &ipv6_hdr(skb
)->daddr
);
1053 seq
= ntohl(th
->ack_seq
);
1055 ack_seq
= ntohl(th
->seq
) + th
->syn
+ th
->fin
+ skb
->len
-
1058 tcp_v6_send_response(skb
, seq
, ack_seq
, 0, 0, key
, 1);
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}
static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}
static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
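
/*
 * Dispatch order in tcp_v6_hnd_req() above: first the SYN_RECV request
 * queue, then the established hash (the peer may retransmit after the
 * full socket already exists), and only then, with syncookies, a bare
 * ACK is checked against the cookie.  Returning the listening socket
 * itself means "nothing matched, continue normal LISTEN processing".
 */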
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_options_received tmp_opt;
	struct tcp_sock *tp = tcp_sk(sk);
	struct request_sock *req = NULL;
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;

	tcp_parse_options(skb, &tmp_opt, 0);

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}

	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req))
		goto drop;

	if (!want_cookie) {
		inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
		return 0;
	}

drop:
	if (req)
		reqsk_free(req);

	return 0; /* don't send reset */
}
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we do one more thing here: reattach optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &inet6_sk(sk)->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(tcp_hdr(skb), skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(tcp_hdr(skb), skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
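
/*
 * Note on the helper above: with CHECKSUM_COMPLETE the device already
 * summed the payload, so verification is a cheap pseudo-header fold.
 * Otherwise the sum is only seeded here and checked later, except for
 * short segments (<= 76 bytes) which are verified immediately, since
 * deferring the fold buys little for them.
 */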
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash (sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket..
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	if (opt_skb)
		kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = get_softnet_dma();
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}
static struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/*
 *	TCP over IPv4 via INET6 API
 */

static struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
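
/*
 * Pairing of the op tables: ipv6_specific/tcp_sock_ipv6_specific drive
 * a native IPv6 socket, while ipv6_mapped/tcp_sock_ipv6_mapped_specific
 * are swapped in by tcp_v6_connect() and tcp_v6_syn_recv_sock() when
 * the peer is a v4-mapped address, so checksum and MD5 processing
 * follow the address family actually on the wire.
 */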
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = 536;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	atomic_inc(&tcp_sockets_allocated);

	return 0;
}
static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0,0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->dport);
	srcp  = ntohs(inet->sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq-tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
		   );
}
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address                         "
			 "remote_address                        "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 "   uid  timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};
static struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.capability	=	-1,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
static int tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
	inet_twsk_purge(net, &tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init = tcpv6_net_init,
	.exit = tcpv6_net_exit,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}