tcp: use limited socket backlog
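
This is net/ipv6/tcp_ipv6.c as of the patch that stops tcp_v6_rcv() from queueing to the socket backlog unconditionally: when the socket is owned by user space, segments now go through sk_add_backlog_limited(), and the segment is dropped once the backlog outgrows the socket's receive budget (see the discard_and_relse path in tcp_v6_rcv() below). A minimal sketch of the helper's idea follows; the real definition lives with the other struct sock helpers in include/net/sock.h, and the exact limit policy shown here is an assumption, not the verbatim implementation:

	/* Sketch only: bound the per-socket backlog by the receive buffer
	 * budget.  The caller holds the socket spinlock; on -ENOBUFS the
	 * caller drops the skb.
	 */
	static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
	{
		/* assumed policy: refuse once queued bytes exceed sk_rcvbuf */
		if (sk->sk_backlog.len + skb->truesize > (unsigned int)sk->sk_rcvbuf)
			return -ENOBUFS;

		__sk_add_backlog(sk, skb);	/* link skb onto the backlog list */
		sk->sk_backlog.len += skb->truesize;
		return 0;
	}
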
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/netdma.h>
#include <net/inet_common.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
static void	tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	return NULL;
}
#endif

static void tcp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != TCP_CLOSE) {
		if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
			tcp_prot.hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(sk, NULL);
		local_bh_enable();
	}
}

static __inline__ __sum16 tcp_v6_check(int len,
				       struct in6_addr *saddr,
				       struct in6_addr *daddr,
				       __wsum base)
{
	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		} else {
			ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
			ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
					       &np->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src,
		       (saddr ? saddr : &np->saddr));
	fl.oif = sk->sk_bound_dev_if;
	fl.mark = sk->sk_mark;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->inet_sport;

	if (np->opt && np->opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	security_sk_classify_flow(sk, &fl);

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(sock_net(sk), &dst, &fl, sk, XFRM_LOOKUP_WAIT);
	if (err < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	if (!tp->write_seq)
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     np->daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			   to handle the rthdr case.  Ignore this complexity
			   for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_TCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.mark = sk->sk_mark;
			fl.fl_ip_dport = inet->inet_dport;
			fl.fl_ip_sport = inet->inet_sport;
			security_skb_classify_flow(skb, &fl);

			if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
				sk->sk_err_soft = -err;
				goto out;
			}

			if ((err = xfrm_lookup(net, &dst, &fl, sk, 0)) < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		/* ICMPs are not backlogged, hence we cannot get
		 * an established socket here.
		 */
		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
			      struct request_values *rvp)
{
	struct inet6_request_sock *treq = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_TCP;
	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = treq->iif;
	fl.mark = sk->sk_mark;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_rsk(req)->loc_port;
	security_req_classify_flow(req, &fl);

	opt = np->opt;
	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;
	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);
	if ((err = xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
		goto done;

	skb = tcp_make_synack(sk, dst, req, rvp);
	if (skb) {
		struct tcphdr *th = tcp_hdr(skb);

		th->check = tcp_v6_check(skb->len,
					 &treq->loc_addr, &treq->rmt_addr,
					 csum_partial(th, skb->len, skb->csum));

		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}

static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
			     struct request_values *rvp)
{
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
	return tcp_v6_send_synack(sk, req, rvp);
}

static inline void syn_flood_warning(struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (sysctl_tcp_syncookies)
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Sending cookies.\n", ntohs(tcp_hdr(skb)->dest));
	else
#endif
		printk(KERN_INFO
		       "TCPv6: Possible SYN flooding on port %d. "
		       "Dropping request.\n", ntohs(tcp_hdr(skb)->dest));
}

static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree_skb(inet6_rsk(req)->pktopts);
}
#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
						   struct in6_addr *addr)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	BUG_ON(tp == NULL);

	if (!tp->md5sig_info || !tp->md5sig_info->entries6)
		return NULL;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
			return &tp->md5sig_info->keys6[i].base;
	}
	return NULL;
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
						struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
}

static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
						      struct request_sock *req)
{
	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
}

static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
			     char *newkey, u8 newkeylen)
{
	/* Add key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp6_md5sig_key *keys;

	key = tcp_v6_md5_do_lookup(sk, peer);
	if (key) {
		/* modify existing entry - just update that one */
		kfree(key->key);
		key->key = newkey;
		key->keylen = newkeylen;
	} else {
		/* reallocate new list if current one is full. */
		if (!tp->md5sig_info) {
			tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
			if (!tp->md5sig_info) {
				kfree(newkey);
				return -ENOMEM;
			}
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		}
		if (tcp_alloc_md5sig_pool(sk) == NULL) {
			kfree(newkey);
			return -ENOMEM;
		}
		if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
			keys = kmalloc((sizeof(tp->md5sig_info->keys6[0]) *
					(tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);

			if (!keys) {
				tcp_free_md5sig_pool();
				kfree(newkey);
				return -ENOMEM;
			}

			if (tp->md5sig_info->entries6)
				memmove(keys, tp->md5sig_info->keys6,
					(sizeof(tp->md5sig_info->keys6[0]) *
					 tp->md5sig_info->entries6));

			kfree(tp->md5sig_info->keys6);
			tp->md5sig_info->keys6 = keys;
			tp->md5sig_info->alloced6++;
		}

		ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
			       peer);
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
		tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;

		tp->md5sig_info->entries6++;
	}
	return 0;
}

static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
			       u8 *newkey, __u8 newkeylen)
{
	return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
				 newkey, newkeylen);
}

static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	for (i = 0; i < tp->md5sig_info->entries6; i++) {
		if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
			/* Free the key */
			kfree(tp->md5sig_info->keys6[i].base.key);
			tp->md5sig_info->entries6--;

			if (tp->md5sig_info->entries6 == 0) {
				kfree(tp->md5sig_info->keys6);
				tp->md5sig_info->keys6 = NULL;
				tp->md5sig_info->alloced6 = 0;
			} else {
				/* shrink the database */
				if (tp->md5sig_info->entries6 != i)
					memmove(&tp->md5sig_info->keys6[i],
						&tp->md5sig_info->keys6[i+1],
						(tp->md5sig_info->entries6 - i)
						* sizeof(tp->md5sig_info->keys6[0]));
			}
			tcp_free_md5sig_pool();
			return 0;
		}
	}
	return -ENOENT;
}

static void tcp_v6_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int i;

	if (tp->md5sig_info->entries6) {
		for (i = 0; i < tp->md5sig_info->entries6; i++)
			kfree(tp->md5sig_info->keys6[i].base.key);
		tp->md5sig_info->entries6 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys6);
	tp->md5sig_info->keys6 = NULL;
	tp->md5sig_info->alloced6 = 0;

	if (tp->md5sig_info->entries4) {
		for (i = 0; i < tp->md5sig_info->entries4; i++)
			kfree(tp->md5sig_info->keys4[i].base.key);
		tp->md5sig_info->entries4 = 0;
		tcp_free_md5sig_pool();
	}

	kfree(tp->md5sig_info->keys4);
	tp->md5sig_info->keys4 = NULL;
	tp->md5sig_info->alloced4 = 0;
}

static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	u8 *newkey;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (!tcp_sk(sk)->md5sig_info)
			return -ENOENT;
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
		return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (!tcp_sk(sk)->md5sig_info) {
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_md5sig_info *p;

		p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		tp->md5sig_info = p;
		sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
	}

	newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
	if (!newkey)
		return -ENOMEM;
	if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
		return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
					 newkey, cmd.tcpm_keylen);
	}
	return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
}

static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
					struct in6_addr *daddr,
					struct in6_addr *saddr, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;

	bp = &hp->md5_blk.ip6;
	/* 1. TCP pseudo-header (RFC2460) */
	ipv6_addr_copy(&bp->saddr, saddr);
	ipv6_addr_copy(&bp->daddr, daddr);
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	sg_init_one(&sg, bp, sizeof(*bp));
	return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
}

static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
			       struct in6_addr *daddr, struct in6_addr *saddr,
			       struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;
	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			       struct sock *sk, struct request_sock *req,
			       struct sk_buff *skb)
{
	struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct hash_desc *desc;
	struct tcphdr *th = tcp_hdr(skb);

	if (sk) {
		saddr = &inet6_sk(sk)->saddr;
		daddr = &inet6_sk(sk)->daddr;
	} else if (req) {
		saddr = &inet6_rsk(req)->loc_addr;
		daddr = &inet6_rsk(req)->rmt_addr;
	} else {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	desc = &hp->md5_desc;

	if (crypto_hash_init(desc))
		goto clear_hash;

	if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

static int tcp_v6_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
{
	__u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return 0;

	if (hash_expected && !hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return 1;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return 1;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		if (net_ratelimit()) {
			printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
			       genhash ? "failed" : "mismatch",
			       &ip6h->saddr, ntohs(th->source),
			       &ip6h->daddr, ntohs(th->dest));
		}
		return 1;
	}
	return 0;
}
#endif
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_v6_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
};
#endif

static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
					    csum_partial(th, th->doff << 2,
							 skb->csum));
	}
}

static int tcp_v6_gso_send_check(struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	ipv6h = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
				     IPPROTO_TCP, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
	return 0;
}

static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb_gro_network_header(skb);

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		/* fall through */
	case CHECKSUM_NONE:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp6_gro_complete(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}
static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
				 u32 ts, struct tcp_md5sig_key *key, int rst)
{
	struct tcphdr *th = tcp_hdr(skb), *t1;
	struct sk_buff *buff;
	struct flowi fl;
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	if (ts)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (buff == NULL)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (ts) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tcp_time_stamp);
		*topt++ = htonl(ts);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	buff->csum = csum_partial(t1, tot_len, 0);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);

	t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
				    tot_len, IPPROTO_TCP,
				    buff->csum);

	fl.proto = IPPROTO_TCP;
	fl.oif = inet6_iif(skb);
	fl.fl_ip_dport = t1->dest;
	fl.fl_ip_sport = t1->source;
	security_skb_classify_flow(skb, &fl);

	/* Pass a socket to ip6_dst_lookup even if it is for a RST;
	 * the underlying function will use it to retrieve the network
	 * namespace.
	 */
	if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
		if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
			skb_dst_set(buff, dst);
			ip6_xmit(ctl_sk, buff, &fl, NULL, 0);
			TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
			if (rst)
				TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(buff);
}
static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;

	if (th->rst)
		return;

	if (!ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	if (sk)
		key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
#endif

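	/* RFC 793 reset generation: if the offending segment carried an
	 * ACK, the RST takes its sequence number from that ACK field;
	 * otherwise we send seq 0 and acknowledge everything the segment
	 * occupied in sequence space.
	 */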
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1);
}
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
			    struct tcp_md5sig_key *key)
{
	tcp_v6_send_response(skb, seq, ack, win, ts, key, 0);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw));

	inet_twsk_put(tw);
}

static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr));
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req, **prev;
	const struct tcphdr *th = tcp_hdr(skb);
	struct sock *nsk;

	/* Find possible connection requests. */
	req = inet6_csk_search_req(sk, &prev, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, inet6_iif(skb));
	if (req)
		return tcp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
			&ipv6_hdr(skb)->saddr, th->source,
			&ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));

	if (nsk) {
		if (nsk->sk_state != TCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

#ifdef CONFIG_SYN_COOKIES
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_extend_values tmp_ext;
	struct tcp_options_received tmp_opt;
	u8 *hash_location;
	struct request_sock *req;
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__u32 isn = TCP_SKB_CB(skb)->when;
#ifdef CONFIG_SYN_COOKIES
	int want_cookie = 0;
#else
#define want_cookie 0
#endif

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
		if (net_ratelimit())
			syn_flood_warning(skb);
#ifdef CONFIG_SYN_COOKIES
		if (sysctl_tcp_syncookies)
			want_cookie = 1;
		else
#endif
		goto drop;
	}

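	/* The accept queue is full.  If we already hold enough "warm"
	 * entries in the SYN queue, drop this request; that is better than
	 * clogging the queue with open requests whose retransmit timeouts
	 * grow exponentially.
	 */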
	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
	if (req == NULL)
		goto drop;

#ifdef CONFIG_TCP_MD5SIG
	tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
#endif

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
	tmp_opt.user_mss = tp->rx_opt.user_mss;
	tcp_parse_options(skb, &tmp_opt, &hash_location, 0);

	if (tmp_opt.cookie_plus > 0 &&
	    tmp_opt.saw_tstamp &&
	    !tp->rx_opt.cookie_out_never &&
	    (sysctl_tcp_cookie_size > 0 ||
	     (tp->cookie_values != NULL &&
	      tp->cookie_values->cookie_desired > 0))) {
		u8 *c;
		u32 *d;
		u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
		int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;

		if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
			goto drop_and_free;

		/* Secret recipe starts with IP addresses */
		d = &ipv6_hdr(skb)->daddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		d = &ipv6_hdr(skb)->saddr.s6_addr32[0];
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;
		*mess++ ^= *d++;

		/* plus variable length Initiator Cookie */
		c = (u8 *)mess;
		while (l-- > 0)
			*c++ ^= *hash_location++;

#ifdef CONFIG_SYN_COOKIES
		want_cookie = 0;	/* not our kind of cookie */
#endif
		tmp_ext.cookie_out_never = 0; /* false */
		tmp_ext.cookie_plus = tmp_opt.cookie_plus;
	} else if (!tp->rx_opt.cookie_in_always) {
		/* redundant indications, but ensure initialization. */
		tmp_ext.cookie_out_never = 1; /* true */
		tmp_ext.cookie_plus = 0;
	} else {
		goto drop_and_free;
	}
	tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;

	if (want_cookie && !tmp_opt.saw_tstamp)
		tcp_clear_options(&tmp_opt);

	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
	tcp_openreq_init(req, &tmp_opt, skb);

	treq = inet6_rsk(req);
	ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
	if (!want_cookie)
		TCP_ECN_create_request(req, tcp_hdr(skb));

	if (want_cookie) {
		isn = cookie_v6_init_sequence(sk, skb, &req->mss);
		req->cookie_ts = tmp_opt.tstamp_ok;
	} else if (!isn) {
		if (ipv6_opt_accepted(sk, skb) ||
		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
			atomic_inc(&skb->users);
			treq->pktopts = skb;
		}
		treq->iif = sk->sk_bound_dev_if;

		/* So that link locals have meaning */
		if (!sk->sk_bound_dev_if &&
		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
			treq->iif = inet6_iif(skb);

		isn = tcp_v6_init_sequence(skb);
	}
	tcp_rsk(req)->snt_isn = isn;

	security_inet_conn_request(sk, skb, req);

	if (tcp_v6_send_synack(sk, req,
			       (struct request_values *)&tmp_ext) ||
	    want_cookie)
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	return 0; /* don't send reset */
}
static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst)
{
	struct inet6_request_sock *treq;
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
	struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

		if (newsk == NULL)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* This is a tricky place.  Until this moment the IPv4 TCP code
		   worked with the IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	treq = inet6_rsk(req);
	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_TCP;
		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
		if (opt && opt->srcrt) {
			struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.mark = sk->sk_mark;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_rsk(req)->loc_port;
		security_req_classify_flow(req, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	__ip6_dst_store(newsk, dst, NULL, NULL);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
	newsk->sk_bound_dev_if = treq->iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->opt = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (treq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
		kfree_skb(treq->pktopts);
		treq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt = NULL;
	newnp->mcast_oif = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/* Clone native IPv6 options from the listening socket (if any).

	   Yes, keeping a reference count would be much more clever,
	   but we do one more thing here: reattach the optmem
	   to newsk.
	 */
	if (opt) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	tcp_mtup_init(newsk);
	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
		if (newkey != NULL)
			tcp_v6_md5_do_add(newsk, &newnp->daddr,
					  newkey, key->keylen);
	}
#endif

	__inet6_hash(newsk, NULL);
	__inet_inherit_port(sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	if (opt && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
				  &ipv6_hdr(skb)->daddr, skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return 0;
		}
	}

	skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
					      &ipv6_hdr(skb)->saddr,
					      &ipv6_hdr(skb)->daddr, 0));

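	/* Short segments are cheap to verify immediately; for longer ones
	 * we keep the partial checksum just computed and let it be
	 * completed later (e.g. when the data is copied to user space).
	 */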
	if (skb->len <= 76) {
		return __skb_checksum_complete(skb);
	}
	return 0;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: the socket is IPv6, an IPv4 packet arrives,
	   goes to the IPv4 receive handler and is backlogged.
	   From the backlog it always comes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not the case with
	   tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard;
#endif

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code where we
	   can do this without affecting IPv4.
	   The rest of the code is protocol independent,
	   and I do not like the idea of uglifying IPv4.

	   Actually, the whole idea behind IPV6_PKTOPTIONS
	   does not look very well thought out. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose a better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		TCP_CHECK_TIMER(sk);
		if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
			goto reset;
		TCP_CHECK_TIMER(sk);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_hnd_req(sk, skb);
		if (!nsk)
			goto discard;

		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just short-circuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	TCP_CHECK_TIMER(sk);
	if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
		goto reset;
	TCP_CHECK_TIMER(sk);
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* What is this, you may ask?

	   1. skb was enqueued by tcp.
	   2. skb is added to the tail of the read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which the user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = inet6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (ipv6_opt_accepted(sk, opt_skb)) {
			skb_set_owner_r(opt_skb, sk);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
static int tcp_v6_rcv(struct sk_buff *skb)
{
	struct tcphdr *th;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr)/4)
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
		goto bad_packet;

	th = tcp_hdr(skb);
	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->when = 0;
	TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(ipv6_hdr(skb));
	TCP_SKB_CB(skb)->sacked = 0;

	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
	if (!sk)
		goto no_tcp_socket;
process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock_nested(sk);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
#ifdef CONFIG_NET_DMA
		struct tcp_sock *tp = tcp_sk(sk);
		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
		if (tp->ucopy.dma_chan)
			ret = tcp_v6_do_rcv(sk, skb);
		else
#endif
		{
			if (!tcp_prequeue(sk, skb))
				ret = tcp_v6_do_rcv(sk, skb);
		}
	} else if (sk_add_backlog_limited(sk, skb)) {
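		/* The socket is owned by user space and the backlog has
		 * already grown past its limit: drop the segment instead of
		 * letting the backlog consume unbounded memory (this is the
		 * point of the limited-backlog change).
		 */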
		bh_unlock_sock(sk);
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

	sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
bad_packet:
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:

	/*
	 *	Discard frame
	 */

	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), inet6_iif(skb));
		if (sk2 != NULL) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			sk = sk2;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		goto no_tcp_socket;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static int tcp_v6_remember_stamp(struct sock *sk)
{
	/* Alas, not yet... */
	return 0;
}

static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v6_remember_stamp,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif

/*
 *	TCP over IPv4 via INET6 API
 */

static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.remember_stamp	   = tcp_v4_remember_stamp,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.bind_conflict	   = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_add	=	tcp_v6_md5_add_func,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v6_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = 2;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;

	sk->sk_state = TCP_CLOSE;

	icsk->icsk_af_ops = &ipv6_specific;
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;
	icsk->icsk_sync_mss = tcp_sync_mss;
	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

#ifdef CONFIG_TCP_MD5SIG
	tp->af_specific = &tcp_sock_ipv6_specific;
#endif

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	percpu_counter_inc(&tcp_sockets_allocated);
	local_bh_enable();

	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list */
	if (tcp_sk(sk)->md5sig_info)
		tcp_v6_clear_md5_list(sk);
#endif
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,
			 struct sock *sk, struct request_sock *req, int i, int uid)
{
	int ttd = req->expires - jiffies;
	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   ntohs(inet_rsk(req)->loc_port),
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->retrans,
		   uid,
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}

static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	struct inet_sock *inet = inet_sk(sp);
	struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   tp->write_seq - tp->snd_una,
		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
		   timer_active,
		   jiffies_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   sock_i_uid(sp),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		   tp->snd_cwnd,
		   tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
		   );
}

static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	struct in6_addr *dest, *src;
	__u16 destp, srcp;
	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
	int ttd = tw->tw_ttd - jiffies;

	if (ttd < 0)
		ttd = 0;

	dest = &tw6->tw_v6_daddr;
	src  = &tw6->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}

static int tcp6_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "  sl  "
			 "local_address "
			 "remote_address "
			 "st tx_queue rx_queue tr tm->when retrnsmt"
			 " uid timeout inode\n");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp6_sock(seq, v, st->num);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait6_sock(seq, v, st->num);
		break;
	}
out:
	return 0;
}

static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.name		= "tcp6",
	.family		= AF_INET6,
	.seq_fops	= {
		.owner		= THIS_MODULE,
	},
	.seq_ops	= {
		.show		= tcp6_seq_show,
	},
};

int __net_init tcp6_proc_init(struct net *net)
{
	return tcp_proc_register(net, &tcp6_seq_afinfo);
}

void tcp6_proc_exit(struct net *net)
{
	tcp_proc_unregister(net, &tcp6_seq_afinfo);
}
#endif
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.backlog_rcv		= tcp_v6_do_rcv,
	.hash			= tcp_v6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
};

static const struct inet6_protocol tcpv6_protocol = {
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.gso_send_check	=	tcp_v6_gso_send_check,
	.gso_segment	=	tcp_tso_segment,
	.gro_receive	=	tcp6_gro_receive,
	.gro_complete	=	tcp6_gro_complete,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};

static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.no_check	=	0,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};

static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}

static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}

static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
}

static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;
out:
	return ret;

	/* Unwind in reverse order of registration. */
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}

void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}