net/ipv6/tcp_ipv6.c
1 /*
2 * TCP over IPv6
3 * Linux INET6 implementation
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
8 * Based on:
9 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
26 #include <linux/bottom_half.h>
27 #include <linux/module.h>
28 #include <linux/errno.h>
29 #include <linux/types.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/jiffies.h>
34 #include <linux/in.h>
35 #include <linux/in6.h>
36 #include <linux/netdevice.h>
37 #include <linux/init.h>
38 #include <linux/jhash.h>
39 #include <linux/ipsec.h>
40 #include <linux/times.h>
41 #include <linux/slab.h>
43 #include <linux/ipv6.h>
44 #include <linux/icmpv6.h>
45 #include <linux/random.h>
47 #include <net/tcp.h>
48 #include <net/ndisc.h>
49 #include <net/inet6_hashtables.h>
50 #include <net/inet6_connection_sock.h>
51 #include <net/ipv6.h>
52 #include <net/transp_v6.h>
53 #include <net/addrconf.h>
54 #include <net/ip6_route.h>
55 #include <net/ip6_checksum.h>
56 #include <net/inet_ecn.h>
57 #include <net/protocol.h>
58 #include <net/xfrm.h>
59 #include <net/snmp.h>
60 #include <net/dsfield.h>
61 #include <net/timewait_sock.h>
62 #include <net/netdma.h>
63 #include <net/inet_common.h>
64 #include <net/secure_seq.h>
66 #include <asm/uaccess.h>
68 #include <linux/proc_fs.h>
69 #include <linux/seq_file.h>
71 #include <linux/crypto.h>
72 #include <linux/scatterlist.h>
74 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb);
75 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
76 struct request_sock *req);
78 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
79 static void __tcp_v6_send_check(struct sk_buff *skb,
80 const struct in6_addr *saddr,
81 const struct in6_addr *daddr);
83 static const struct inet_connection_sock_af_ops ipv6_mapped;
84 static const struct inet_connection_sock_af_ops ipv6_specific;
85 #ifdef CONFIG_TCP_MD5SIG
86 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
87 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
88 #else
89 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
90 const struct in6_addr *addr)
92 return NULL;
94 #endif
96 static void tcp_v6_hash(struct sock *sk)
98 if (sk->sk_state != TCP_CLOSE) {
99 if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
100 tcp_prot.hash(sk);
101 return;
103 local_bh_disable();
104 __inet6_hash(sk, NULL);
105 local_bh_enable();
109 static __inline__ __sum16 tcp_v6_check(int len,
110 const struct in6_addr *saddr,
111 const struct in6_addr *daddr,
112 __wsum base)
114 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
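/*
 * Editorial note: csum_ipv6_magic() folds the RFC 2460 pseudo-header
 * (128-bit source and destination addresses, upper-layer payload
 * length, and next-header value, IPPROTO_TCP here) into the partial
 * checksum 'base' and returns the final folded 16-bit complement.
 * Every TCP/IPv6 checksum path in this file funnels through this
 * helper.
 */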
117 static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
119 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
120 ipv6_hdr(skb)->saddr.s6_addr32,
121 tcp_hdr(skb)->dest,
122 tcp_hdr(skb)->source);
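/*
 * The initial sequence number is derived from a keyed hash over the
 * full 4-tuple (both 128-bit addresses and both ports), plus a clock
 * component, so an off-path attacker cannot predict the ISN of a new
 * connection; see net/core/secure_seq.c for the construction.
 */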
125 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
126 int addr_len)
128 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
129 struct inet_sock *inet = inet_sk(sk);
130 struct inet_connection_sock *icsk = inet_csk(sk);
131 struct ipv6_pinfo *np = inet6_sk(sk);
132 struct tcp_sock *tp = tcp_sk(sk);
133 struct in6_addr *saddr = NULL, *final_p, final;
134 struct rt6_info *rt;
135 struct flowi6 fl6;
136 struct dst_entry *dst;
137 int addr_type;
138 int err;
140 if (addr_len < SIN6_LEN_RFC2133)
141 return -EINVAL;
143 if (usin->sin6_family != AF_INET6)
144 return -EAFNOSUPPORT;
146 memset(&fl6, 0, sizeof(fl6));
148 if (np->sndflow) {
149 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
150 IP6_ECN_flow_init(fl6.flowlabel);
151 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
152 struct ip6_flowlabel *flowlabel;
153 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
154 if (flowlabel == NULL)
155 return -EINVAL;
156 ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
157 fl6_sock_release(flowlabel);
162 * connect() to INADDR_ANY means loopback (BSD'ism).
165 if(ipv6_addr_any(&usin->sin6_addr))
166 usin->sin6_addr.s6_addr[15] = 0x1;
168 addr_type = ipv6_addr_type(&usin->sin6_addr);
170 if(addr_type & IPV6_ADDR_MULTICAST)
171 return -ENETUNREACH;
173 if (addr_type&IPV6_ADDR_LINKLOCAL) {
174 if (addr_len >= sizeof(struct sockaddr_in6) &&
175 usin->sin6_scope_id) {
176 /* If interface is set while binding, indices
177 * must coincide.
179 if (sk->sk_bound_dev_if &&
180 sk->sk_bound_dev_if != usin->sin6_scope_id)
181 return -EINVAL;
183 sk->sk_bound_dev_if = usin->sin6_scope_id;
186 /* Connecting to a link-local address requires an interface */
187 if (!sk->sk_bound_dev_if)
188 return -EINVAL;
191 if (tp->rx_opt.ts_recent_stamp &&
192 !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
193 tp->rx_opt.ts_recent = 0;
194 tp->rx_opt.ts_recent_stamp = 0;
195 tp->write_seq = 0;
198 ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
199 np->flow_label = fl6.flowlabel;
202 * TCP over IPv4
205 if (addr_type == IPV6_ADDR_MAPPED) {
206 u32 exthdrlen = icsk->icsk_ext_hdr_len;
207 struct sockaddr_in sin;
209 SOCK_DEBUG(sk, "connect: ipv4 mapped\n");
211 if (__ipv6_only_sock(sk))
212 return -ENETUNREACH;
214 sin.sin_family = AF_INET;
215 sin.sin_port = usin->sin6_port;
216 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
218 icsk->icsk_af_ops = &ipv6_mapped;
219 sk->sk_backlog_rcv = tcp_v4_do_rcv;
220 #ifdef CONFIG_TCP_MD5SIG
221 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
222 #endif
224 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
226 if (err) {
227 icsk->icsk_ext_hdr_len = exthdrlen;
228 icsk->icsk_af_ops = &ipv6_specific;
229 sk->sk_backlog_rcv = tcp_v6_do_rcv;
230 #ifdef CONFIG_TCP_MD5SIG
231 tp->af_specific = &tcp_sock_ipv6_specific;
232 #endif
233 goto failure;
234 } else {
235 ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
236 ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
237 &np->rcv_saddr);
240 return err;
243 if (!ipv6_addr_any(&np->rcv_saddr))
244 saddr = &np->rcv_saddr;
246 fl6.flowi6_proto = IPPROTO_TCP;
247 ipv6_addr_copy(&fl6.daddr, &np->daddr);
248 ipv6_addr_copy(&fl6.saddr,
249 (saddr ? saddr : &np->saddr));
250 fl6.flowi6_oif = sk->sk_bound_dev_if;
251 fl6.flowi6_mark = sk->sk_mark;
252 fl6.fl6_dport = usin->sin6_port;
253 fl6.fl6_sport = inet->inet_sport;
255 final_p = fl6_update_dst(&fl6, np->opt, &final);
257 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
259 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
260 if (IS_ERR(dst)) {
261 err = PTR_ERR(dst);
262 goto failure;
265 if (saddr == NULL) {
266 saddr = &fl6.saddr;
267 ipv6_addr_copy(&np->rcv_saddr, saddr);
270 /* set the source address */
271 ipv6_addr_copy(&np->saddr, saddr);
272 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
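/*
 * LOOPBACK4_IPV6 is a loopback-range marker (127.0.0.6), not a usable
 * address: it poisons the IPv4 fields of this AF_INET6 socket so that
 * IPv4-side hash lookups can never match a pure IPv6 connection.
 */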
274 sk->sk_gso_type = SKB_GSO_TCPV6;
275 __ip6_dst_store(sk, dst, NULL, NULL);
277 rt = (struct rt6_info *) dst;
278 if (tcp_death_row.sysctl_tw_recycle &&
279 !tp->rx_opt.ts_recent_stamp &&
280 ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
281 struct inet_peer *peer = rt6_get_peer(rt);
282 /*
283 * VJ's idea. We save the last timestamp seen from
284 * the destination in the peer table when entering state
285 * TIME-WAIT, and initialize rx_opt.ts_recent from it
286 * when trying a new connection.
287 */
288 if (peer) {
289 inet_peer_refcheck(peer);
290 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
291 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
292 tp->rx_opt.ts_recent = peer->tcp_ts;
297 icsk->icsk_ext_hdr_len = 0;
298 if (np->opt)
299 icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
300 np->opt->opt_nflen);
302 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
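/*
 * Worked out: IPV6_MIN_MTU (1280) - sizeof(struct ipv6hdr) (40)
 * - sizeof(struct tcphdr) (20) clamps the advertised MSS to 1220
 * bytes, the largest segment guaranteed to fit the minimum IPv6
 * link MTU.
 */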
304 inet->inet_dport = usin->sin6_port;
306 tcp_set_state(sk, TCP_SYN_SENT);
307 err = inet6_hash_connect(&tcp_death_row, sk);
308 if (err)
309 goto late_failure;
311 if (!tp->write_seq)
312 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
313 np->daddr.s6_addr32,
314 inet->inet_sport,
315 inet->inet_dport);
317 err = tcp_connect(sk);
318 if (err)
319 goto late_failure;
321 return 0;
323 late_failure:
324 tcp_set_state(sk, TCP_CLOSE);
325 __sk_dst_reset(sk);
326 failure:
327 inet->inet_dport = 0;
328 sk->sk_route_caps = 0;
329 return err;
332 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
333 u8 type, u8 code, int offset, __be32 info)
335 const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
336 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
337 struct ipv6_pinfo *np;
338 struct sock *sk;
339 int err;
340 struct tcp_sock *tp;
341 __u32 seq;
342 struct net *net = dev_net(skb->dev);
344 sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
345 th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
347 if (sk == NULL) {
348 ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
349 ICMP6_MIB_INERRORS);
350 return;
353 if (sk->sk_state == TCP_TIME_WAIT) {
354 inet_twsk_put(inet_twsk(sk));
355 return;
358 bh_lock_sock(sk);
359 if (sock_owned_by_user(sk))
360 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
362 if (sk->sk_state == TCP_CLOSE)
363 goto out;
365 if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
366 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
367 goto out;
370 tp = tcp_sk(sk);
371 seq = ntohl(th->seq);
372 if (sk->sk_state != TCP_LISTEN &&
373 !between(seq, tp->snd_una, tp->snd_nxt)) {
374 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
375 goto out;
378 np = inet6_sk(sk);
380 if (type == ICMPV6_PKT_TOOBIG) {
381 struct dst_entry *dst;
383 if (sock_owned_by_user(sk))
384 goto out;
385 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
386 goto out;
388 /* icmp should have updated the destination cache entry */
389 dst = __sk_dst_check(sk, np->dst_cookie);
391 if (dst == NULL) {
392 struct inet_sock *inet = inet_sk(sk);
393 struct flowi6 fl6;
395 /* BUGGG_FUTURE: Again, it is not clear how
396 to handle the rthdr case. Ignore this complexity
397 for now.
399 memset(&fl6, 0, sizeof(fl6));
400 fl6.flowi6_proto = IPPROTO_TCP;
401 ipv6_addr_copy(&fl6.daddr, &np->daddr);
402 ipv6_addr_copy(&fl6.saddr, &np->saddr);
403 fl6.flowi6_oif = sk->sk_bound_dev_if;
404 fl6.flowi6_mark = sk->sk_mark;
405 fl6.fl6_dport = inet->inet_dport;
406 fl6.fl6_sport = inet->inet_sport;
407 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
409 dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
410 if (IS_ERR(dst)) {
411 sk->sk_err_soft = -PTR_ERR(dst);
412 goto out;
415 } else
416 dst_hold(dst);
418 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
419 tcp_sync_mss(sk, dst_mtu(dst));
420 tcp_simple_retransmit(sk);
421 } /* else let the usual retransmit timer handle it */
422 dst_release(dst);
423 goto out;
426 icmpv6_err_convert(type, code, &err);
428 /* Might be for a request_sock */
429 switch (sk->sk_state) {
430 struct request_sock *req, **prev;
431 case TCP_LISTEN:
432 if (sock_owned_by_user(sk))
433 goto out;
435 req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
436 &hdr->saddr, inet6_iif(skb));
437 if (!req)
438 goto out;
440 /* ICMPs are not backlogged, hence we cannot get
441 * an established socket here.
443 WARN_ON(req->sk != NULL);
445 if (seq != tcp_rsk(req)->snt_isn) {
446 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
447 goto out;
450 inet_csk_reqsk_queue_drop(sk, req, prev);
451 goto out;
453 case TCP_SYN_SENT:
454 case TCP_SYN_RECV: /* Cannot happen.
455 It can, if SYNs are crossed. --ANK */
456 if (!sock_owned_by_user(sk)) {
457 sk->sk_err = err;
458 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
460 tcp_done(sk);
461 } else
462 sk->sk_err_soft = err;
463 goto out;
466 if (!sock_owned_by_user(sk) && np->recverr) {
467 sk->sk_err = err;
468 sk->sk_error_report(sk);
469 } else
470 sk->sk_err_soft = err;
472 out:
473 bh_unlock_sock(sk);
474 sock_put(sk);
478 static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
479 struct request_values *rvp)
481 struct inet6_request_sock *treq = inet6_rsk(req);
482 struct ipv6_pinfo *np = inet6_sk(sk);
483 struct sk_buff * skb;
484 struct ipv6_txoptions *opt = NULL;
485 struct in6_addr * final_p, final;
486 struct flowi6 fl6;
487 struct dst_entry *dst;
488 int err;
490 memset(&fl6, 0, sizeof(fl6));
491 fl6.flowi6_proto = IPPROTO_TCP;
492 ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
493 ipv6_addr_copy(&fl6.saddr, &treq->loc_addr);
494 fl6.flowlabel = 0;
495 fl6.flowi6_oif = treq->iif;
496 fl6.flowi6_mark = sk->sk_mark;
497 fl6.fl6_dport = inet_rsk(req)->rmt_port;
498 fl6.fl6_sport = inet_rsk(req)->loc_port;
499 security_req_classify_flow(req, flowi6_to_flowi(&fl6));
501 opt = np->opt;
502 final_p = fl6_update_dst(&fl6, opt, &final);
504 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
505 if (IS_ERR(dst)) {
506 err = PTR_ERR(dst);
507 dst = NULL;
508 goto done;
510 skb = tcp_make_synack(sk, dst, req, rvp);
511 err = -ENOMEM;
512 if (skb) {
513 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
515 ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr);
516 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
517 err = net_xmit_eval(err);
520 done:
521 if (opt && opt != np->opt)
522 sock_kfree_s(sk, opt, opt->tot_len);
523 dst_release(dst);
524 return err;
527 static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
528 struct request_values *rvp)
530 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
531 return tcp_v6_send_synack(sk, req, rvp);
534 static void tcp_v6_reqsk_destructor(struct request_sock *req)
536 kfree_skb(inet6_rsk(req)->pktopts);
539 #ifdef CONFIG_TCP_MD5SIG
540 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
541 const struct in6_addr *addr)
543 struct tcp_sock *tp = tcp_sk(sk);
544 int i;
546 BUG_ON(tp == NULL);
548 if (!tp->md5sig_info || !tp->md5sig_info->entries6)
549 return NULL;
551 for (i = 0; i < tp->md5sig_info->entries6; i++) {
552 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, addr))
553 return &tp->md5sig_info->keys6[i].base;
555 return NULL;
558 static struct tcp_md5sig_key *tcp_v6_md5_lookup(struct sock *sk,
559 struct sock *addr_sk)
561 return tcp_v6_md5_do_lookup(sk, &inet6_sk(addr_sk)->daddr);
564 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
565 struct request_sock *req)
567 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
570 static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
571 char *newkey, u8 newkeylen)
573 /* Add key to the list */
574 struct tcp_md5sig_key *key;
575 struct tcp_sock *tp = tcp_sk(sk);
576 struct tcp6_md5sig_key *keys;
578 key = tcp_v6_md5_do_lookup(sk, peer);
579 if (key) {
580 /* modify existing entry - just update that one */
581 kfree(key->key);
582 key->key = newkey;
583 key->keylen = newkeylen;
584 } else {
585 /* reallocate new list if current one is full. */
586 if (!tp->md5sig_info) {
587 tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
588 if (!tp->md5sig_info) {
589 kfree(newkey);
590 return -ENOMEM;
592 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
594 if (tp->md5sig_info->entries6 == 0 &&
595 tcp_alloc_md5sig_pool(sk) == NULL) {
596 kfree(newkey);
597 return -ENOMEM;
599 if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) {
600 keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) *
601 (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
603 if (!keys) {
604 kfree(newkey);
605 if (tp->md5sig_info->entries6 == 0)
606 tcp_free_md5sig_pool();
607 return -ENOMEM;
610 if (tp->md5sig_info->entries6)
611 memmove(keys, tp->md5sig_info->keys6,
612 (sizeof (tp->md5sig_info->keys6[0]) *
613 tp->md5sig_info->entries6));
615 kfree(tp->md5sig_info->keys6);
616 tp->md5sig_info->keys6 = keys;
617 tp->md5sig_info->alloced6++;
620 ipv6_addr_copy(&tp->md5sig_info->keys6[tp->md5sig_info->entries6].addr,
621 peer);
622 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.key = newkey;
623 tp->md5sig_info->keys6[tp->md5sig_info->entries6].base.keylen = newkeylen;
625 tp->md5sig_info->entries6++;
627 return 0;
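/*
 * Note on the storage scheme above: keys6[] grows one slot at a time
 * under GFP_ATOMIC, with alloced6 tracking allocated slots and
 * entries6 the used count. The per-CPU MD5 pool is allocated when the
 * first key is installed and released again when the last one is
 * deleted.
 */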
630 static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
631 u8 *newkey, __u8 newkeylen)
633 return tcp_v6_md5_do_add(sk, &inet6_sk(addr_sk)->daddr,
634 newkey, newkeylen);
637 static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
639 struct tcp_sock *tp = tcp_sk(sk);
640 int i;
642 for (i = 0; i < tp->md5sig_info->entries6; i++) {
643 if (ipv6_addr_equal(&tp->md5sig_info->keys6[i].addr, peer)) {
644 /* Free the key */
645 kfree(tp->md5sig_info->keys6[i].base.key);
646 tp->md5sig_info->entries6--;
648 if (tp->md5sig_info->entries6 == 0) {
649 kfree(tp->md5sig_info->keys6);
650 tp->md5sig_info->keys6 = NULL;
651 tp->md5sig_info->alloced6 = 0;
652 tcp_free_md5sig_pool();
653 } else {
654 /* shrink the database */
655 if (tp->md5sig_info->entries6 != i)
656 memmove(&tp->md5sig_info->keys6[i],
657 &tp->md5sig_info->keys6[i+1],
658 (tp->md5sig_info->entries6 - i)
659 * sizeof (tp->md5sig_info->keys6[0]));
661 return 0;
664 return -ENOENT;
667 static void tcp_v6_clear_md5_list (struct sock *sk)
669 struct tcp_sock *tp = tcp_sk(sk);
670 int i;
672 if (tp->md5sig_info->entries6) {
673 for (i = 0; i < tp->md5sig_info->entries6; i++)
674 kfree(tp->md5sig_info->keys6[i].base.key);
675 tp->md5sig_info->entries6 = 0;
676 tcp_free_md5sig_pool();
679 kfree(tp->md5sig_info->keys6);
680 tp->md5sig_info->keys6 = NULL;
681 tp->md5sig_info->alloced6 = 0;
683 if (tp->md5sig_info->entries4) {
684 for (i = 0; i < tp->md5sig_info->entries4; i++)
685 kfree(tp->md5sig_info->keys4[i].base.key);
686 tp->md5sig_info->entries4 = 0;
687 tcp_free_md5sig_pool();
690 kfree(tp->md5sig_info->keys4);
691 tp->md5sig_info->keys4 = NULL;
692 tp->md5sig_info->alloced4 = 0;
695 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
696 int optlen)
698 struct tcp_md5sig cmd;
699 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
700 u8 *newkey;
702 if (optlen < sizeof(cmd))
703 return -EINVAL;
705 if (copy_from_user(&cmd, optval, sizeof(cmd)))
706 return -EFAULT;
708 if (sin6->sin6_family != AF_INET6)
709 return -EINVAL;
711 if (!cmd.tcpm_keylen) {
712 if (!tcp_sk(sk)->md5sig_info)
713 return -ENOENT;
714 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
715 return tcp_v4_md5_do_del(sk, sin6->sin6_addr.s6_addr32[3]);
716 return tcp_v6_md5_do_del(sk, &sin6->sin6_addr);
719 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
720 return -EINVAL;
722 if (!tcp_sk(sk)->md5sig_info) {
723 struct tcp_sock *tp = tcp_sk(sk);
724 struct tcp_md5sig_info *p;
726 p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
727 if (!p)
728 return -ENOMEM;
730 tp->md5sig_info = p;
731 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
734 newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
735 if (!newkey)
736 return -ENOMEM;
737 if (ipv6_addr_v4mapped(&sin6->sin6_addr)) {
738 return tcp_v4_md5_do_add(sk, sin6->sin6_addr.s6_addr32[3],
739 newkey, cmd.tcpm_keylen);
741 return tcp_v6_md5_do_add(sk, &sin6->sin6_addr, newkey, cmd.tcpm_keylen);
744 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
745 const struct in6_addr *daddr,
746 const struct in6_addr *saddr, int nbytes)
748 struct tcp6_pseudohdr *bp;
749 struct scatterlist sg;
751 bp = &hp->md5_blk.ip6;
752 /* 1. TCP pseudo-header (RFC2460) */
753 ipv6_addr_copy(&bp->saddr, saddr);
754 ipv6_addr_copy(&bp->daddr, daddr);
755 bp->protocol = cpu_to_be32(IPPROTO_TCP);
756 bp->len = cpu_to_be32(nbytes);
758 sg_init_one(&sg, bp, sizeof(*bp));
759 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
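/*
 * For reference, the 40-byte block hashed above mirrors the checksum
 * pseudo-header; its layout (as defined for this era in
 * include/net/tcp.h) is:
 *
 *	struct tcp6_pseudohdr {
 *		struct in6_addr	saddr;
 *		struct in6_addr	daddr;
 *		__be32		len;
 *		__be32		protocol;
 *	};
 *
 * RFC 2385 then signs, in order: this pseudo-header, the TCP header
 * (options excluded, checksum field zeroed), the payload, and finally
 * the key itself.
 */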
762 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
763 const struct in6_addr *daddr, struct in6_addr *saddr,
764 const struct tcphdr *th)
766 struct tcp_md5sig_pool *hp;
767 struct hash_desc *desc;
769 hp = tcp_get_md5sig_pool();
770 if (!hp)
771 goto clear_hash_noput;
772 desc = &hp->md5_desc;
774 if (crypto_hash_init(desc))
775 goto clear_hash;
776 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
777 goto clear_hash;
778 if (tcp_md5_hash_header(hp, th))
779 goto clear_hash;
780 if (tcp_md5_hash_key(hp, key))
781 goto clear_hash;
782 if (crypto_hash_final(desc, md5_hash))
783 goto clear_hash;
785 tcp_put_md5sig_pool();
786 return 0;
788 clear_hash:
789 tcp_put_md5sig_pool();
790 clear_hash_noput:
791 memset(md5_hash, 0, 16);
792 return 1;
795 static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
796 const struct sock *sk,
797 const struct request_sock *req,
798 const struct sk_buff *skb)
800 const struct in6_addr *saddr, *daddr;
801 struct tcp_md5sig_pool *hp;
802 struct hash_desc *desc;
803 const struct tcphdr *th = tcp_hdr(skb);
805 if (sk) {
806 saddr = &inet6_sk(sk)->saddr;
807 daddr = &inet6_sk(sk)->daddr;
808 } else if (req) {
809 saddr = &inet6_rsk(req)->loc_addr;
810 daddr = &inet6_rsk(req)->rmt_addr;
811 } else {
812 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
813 saddr = &ip6h->saddr;
814 daddr = &ip6h->daddr;
817 hp = tcp_get_md5sig_pool();
818 if (!hp)
819 goto clear_hash_noput;
820 desc = &hp->md5_desc;
822 if (crypto_hash_init(desc))
823 goto clear_hash;
825 if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
826 goto clear_hash;
827 if (tcp_md5_hash_header(hp, th))
828 goto clear_hash;
829 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
830 goto clear_hash;
831 if (tcp_md5_hash_key(hp, key))
832 goto clear_hash;
833 if (crypto_hash_final(desc, md5_hash))
834 goto clear_hash;
836 tcp_put_md5sig_pool();
837 return 0;
839 clear_hash:
840 tcp_put_md5sig_pool();
841 clear_hash_noput:
842 memset(md5_hash, 0, 16);
843 return 1;
846 static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
848 const __u8 *hash_location = NULL;
849 struct tcp_md5sig_key *hash_expected;
850 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
851 const struct tcphdr *th = tcp_hdr(skb);
852 int genhash;
853 u8 newhash[16];
855 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
856 hash_location = tcp_parse_md5sig_option(th);
858 /* We've parsed the options - do we have a hash? */
859 if (!hash_expected && !hash_location)
860 return 0;
862 if (hash_expected && !hash_location) {
863 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
864 return 1;
867 if (!hash_expected && hash_location) {
868 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
869 return 1;
872 /* check the signature */
873 genhash = tcp_v6_md5_hash_skb(newhash,
874 hash_expected,
875 NULL, NULL, skb);
877 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
878 if (net_ratelimit()) {
879 printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
880 genhash ? "failed" : "mismatch",
881 &ip6h->saddr, ntohs(th->source),
882 &ip6h->daddr, ntohs(th->dest));
884 return 1;
886 return 0;
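/*
 * Decision table implemented above, per RFC 2385: neither a local key
 * nor an MD5 option -> accept; a key without the option -> drop
 * (TCPMD5NOTFOUND); the option without a key -> drop
 * (TCPMD5UNEXPECTED); both present -> recompute the digest over the
 * segment and drop on any mismatch.
 */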
888 #endif
890 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
891 .family = AF_INET6,
892 .obj_size = sizeof(struct tcp6_request_sock),
893 .rtx_syn_ack = tcp_v6_rtx_synack,
894 .send_ack = tcp_v6_reqsk_send_ack,
895 .destructor = tcp_v6_reqsk_destructor,
896 .send_reset = tcp_v6_send_reset,
897 .syn_ack_timeout = tcp_syn_ack_timeout,
900 #ifdef CONFIG_TCP_MD5SIG
901 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
902 .md5_lookup = tcp_v6_reqsk_md5_lookup,
903 .calc_md5_hash = tcp_v6_md5_hash_skb,
905 #endif
907 static void __tcp_v6_send_check(struct sk_buff *skb,
908 const struct in6_addr *saddr, const struct in6_addr *daddr)
910 struct tcphdr *th = tcp_hdr(skb);
912 if (skb->ip_summed == CHECKSUM_PARTIAL) {
913 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
914 skb->csum_start = skb_transport_header(skb) - skb->head;
915 skb->csum_offset = offsetof(struct tcphdr, check);
916 } else {
917 th->check = tcp_v6_check(skb->len, saddr, daddr,
918 csum_partial(th, th->doff << 2,
919 skb->csum));
923 static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
925 struct ipv6_pinfo *np = inet6_sk(sk);
927 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
930 static int tcp_v6_gso_send_check(struct sk_buff *skb)
932 const struct ipv6hdr *ipv6h;
933 struct tcphdr *th;
935 if (!pskb_may_pull(skb, sizeof(*th)))
936 return -EINVAL;
938 ipv6h = ipv6_hdr(skb);
939 th = tcp_hdr(skb);
941 th->check = 0;
942 skb->ip_summed = CHECKSUM_PARTIAL;
943 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
944 return 0;
947 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
948 struct sk_buff *skb)
950 const struct ipv6hdr *iph = skb_gro_network_header(skb);
952 switch (skb->ip_summed) {
953 case CHECKSUM_COMPLETE:
954 if (!tcp_v6_check(skb_gro_len(skb), &iph->saddr, &iph->daddr,
955 skb->csum)) {
956 skb->ip_summed = CHECKSUM_UNNECESSARY;
957 break;
960 /* fall through */
961 case CHECKSUM_NONE:
962 NAPI_GRO_CB(skb)->flush = 1;
963 return NULL;
966 return tcp_gro_receive(head, skb);
969 static int tcp6_gro_complete(struct sk_buff *skb)
971 const struct ipv6hdr *iph = ipv6_hdr(skb);
972 struct tcphdr *th = tcp_hdr(skb);
974 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
975 &iph->saddr, &iph->daddr, 0);
976 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
978 return tcp_gro_complete(skb);
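/*
 * After GRO has merged a train of segments, th->check is re-seeded
 * with the pseudo-header complement over the merged length and the
 * skb is flagged SKB_GSO_TCPV6, so it can be re-segmented correctly
 * later; tcp_gro_complete() finishes the CHECKSUM_PARTIAL setup.
 */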
981 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
982 u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass)
984 const struct tcphdr *th = tcp_hdr(skb);
985 struct tcphdr *t1;
986 struct sk_buff *buff;
987 struct flowi6 fl6;
988 struct net *net = dev_net(skb_dst(skb)->dev);
989 struct sock *ctl_sk = net->ipv6.tcp_sk;
990 unsigned int tot_len = sizeof(struct tcphdr);
991 struct dst_entry *dst;
992 __be32 *topt;
994 if (ts)
995 tot_len += TCPOLEN_TSTAMP_ALIGNED;
996 #ifdef CONFIG_TCP_MD5SIG
997 if (key)
998 tot_len += TCPOLEN_MD5SIG_ALIGNED;
999 #endif
1001 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
1002 GFP_ATOMIC);
1003 if (buff == NULL)
1004 return;
1006 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
1008 t1 = (struct tcphdr *) skb_push(buff, tot_len);
1009 skb_reset_transport_header(buff);
1011 /* Swap the send and the receive. */
1012 memset(t1, 0, sizeof(*t1));
1013 t1->dest = th->source;
1014 t1->source = th->dest;
1015 t1->doff = tot_len / 4;
1016 t1->seq = htonl(seq);
1017 t1->ack_seq = htonl(ack);
1018 t1->ack = !rst || !th->ack;
1019 t1->rst = rst;
1020 t1->window = htons(win);
1022 topt = (__be32 *)(t1 + 1);
1024 if (ts) {
1025 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1026 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1027 *topt++ = htonl(tcp_time_stamp);
1028 *topt++ = htonl(ts);
1031 #ifdef CONFIG_TCP_MD5SIG
1032 if (key) {
1033 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1034 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
1035 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
1036 &ipv6_hdr(skb)->saddr,
1037 &ipv6_hdr(skb)->daddr, t1);
1039 #endif
1041 memset(&fl6, 0, sizeof(fl6));
1042 ipv6_addr_copy(&fl6.daddr, &ipv6_hdr(skb)->saddr);
1043 ipv6_addr_copy(&fl6.saddr, &ipv6_hdr(skb)->daddr);
1045 buff->ip_summed = CHECKSUM_PARTIAL;
1046 buff->csum = 0;
1048 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
1050 fl6.flowi6_proto = IPPROTO_TCP;
1051 fl6.flowi6_oif = inet6_iif(skb);
1052 fl6.fl6_dport = t1->dest;
1053 fl6.fl6_sport = t1->source;
1054 security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
1056 /* Pass a socket to ip6_dst_lookup even when it is for a RST;
1057 * the underlying function will use it to retrieve the network
1058 * namespace.
1059 */
1060 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false);
1061 if (!IS_ERR(dst)) {
1062 skb_dst_set(buff, dst);
1063 ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
1064 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1065 if (rst)
1066 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
1067 return;
1070 kfree_skb(buff);
1073 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
1075 const struct tcphdr *th = tcp_hdr(skb);
1076 u32 seq = 0, ack_seq = 0;
1077 struct tcp_md5sig_key *key = NULL;
1079 if (th->rst)
1080 return;
1082 if (!ipv6_unicast_destination(skb))
1083 return;
1085 #ifdef CONFIG_TCP_MD5SIG
1086 if (sk)
1087 key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
1088 #endif
1090 if (th->ack)
1091 seq = ntohl(th->ack_seq);
1092 else
1093 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1094 (th->doff << 2);
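/*
 * RFC 793 reset generation: if the provoking segment carried an ACK,
 * the RST claims that acknowledged sequence number and needs no ACK
 * of its own; otherwise the RST sits at SEQ 0 and must acknowledge
 * everything the peer sent, i.e. the payload length plus one sequence
 * unit each for SYN and FIN.
 */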
1096 tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0);
1099 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts,
1100 struct tcp_md5sig_key *key, u8 tclass)
1102 tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass);
1105 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1107 struct inet_timewait_sock *tw = inet_twsk(sk);
1108 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
1110 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
1111 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
1112 tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw),
1113 tw->tw_tclass);
1115 inet_twsk_put(tw);
1118 static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
1119 struct request_sock *req)
1121 tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent,
1122 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0);
1126 static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1128 struct request_sock *req, **prev;
1129 const struct tcphdr *th = tcp_hdr(skb);
1130 struct sock *nsk;
1132 /* Find possible connection requests. */
1133 req = inet6_csk_search_req(sk, &prev, th->source,
1134 &ipv6_hdr(skb)->saddr,
1135 &ipv6_hdr(skb)->daddr, inet6_iif(skb));
1136 if (req)
1137 return tcp_check_req(sk, skb, req, prev);
1139 nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
1140 &ipv6_hdr(skb)->saddr, th->source,
1141 &ipv6_hdr(skb)->daddr, ntohs(th->dest), inet6_iif(skb));
1143 if (nsk) {
1144 if (nsk->sk_state != TCP_TIME_WAIT) {
1145 bh_lock_sock(nsk);
1146 return nsk;
1148 inet_twsk_put(inet_twsk(nsk));
1149 return NULL;
1152 #ifdef CONFIG_SYN_COOKIES
1153 if (!th->syn)
1154 sk = cookie_v6_check(sk, skb);
1155 #endif
1156 return sk;
1159 /* FIXME: this is substantially similar to the ipv4 code.
1160 * Can some kind of merge be done? -- erics
1162 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1164 struct tcp_extend_values tmp_ext;
1165 struct tcp_options_received tmp_opt;
1166 const u8 *hash_location;
1167 struct request_sock *req;
1168 struct inet6_request_sock *treq;
1169 struct ipv6_pinfo *np = inet6_sk(sk);
1170 struct tcp_sock *tp = tcp_sk(sk);
1171 __u32 isn = TCP_SKB_CB(skb)->when;
1172 struct dst_entry *dst = NULL;
1173 int want_cookie = 0;
1175 if (skb->protocol == htons(ETH_P_IP))
1176 return tcp_v4_conn_request(sk, skb);
1178 if (!ipv6_unicast_destination(skb))
1179 goto drop;
1181 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1182 want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
1183 if (!want_cookie)
1184 goto drop;
1187 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1188 goto drop;
1190 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
1191 if (req == NULL)
1192 goto drop;
1194 #ifdef CONFIG_TCP_MD5SIG
1195 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
1196 #endif
1198 tcp_clear_options(&tmp_opt);
1199 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
1200 tmp_opt.user_mss = tp->rx_opt.user_mss;
1201 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1203 if (tmp_opt.cookie_plus > 0 &&
1204 tmp_opt.saw_tstamp &&
1205 !tp->rx_opt.cookie_out_never &&
1206 (sysctl_tcp_cookie_size > 0 ||
1207 (tp->cookie_values != NULL &&
1208 tp->cookie_values->cookie_desired > 0))) {
1209 u8 *c;
1210 u32 *d;
1211 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1212 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1214 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1215 goto drop_and_free;
1217 /* Secret recipe starts with IP addresses */
1218 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1219 *mess++ ^= *d++;
1220 *mess++ ^= *d++;
1221 *mess++ ^= *d++;
1222 *mess++ ^= *d++;
1223 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1224 *mess++ ^= *d++;
1225 *mess++ ^= *d++;
1226 *mess++ ^= *d++;
1227 *mess++ ^= *d++;
1229 /* plus variable length Initiator Cookie */
1230 c = (u8 *)mess;
1231 while (l-- > 0)
1232 *c++ ^= *hash_location++;
1234 want_cookie = 0; /* not our kind of cookie */
1235 tmp_ext.cookie_out_never = 0; /* false */
1236 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1237 } else if (!tp->rx_opt.cookie_in_always) {
1238 /* redundant indications, but ensure initialization. */
1239 tmp_ext.cookie_out_never = 1; /* true */
1240 tmp_ext.cookie_plus = 0;
1241 } else {
1242 goto drop_and_free;
1244 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1246 if (want_cookie && !tmp_opt.saw_tstamp)
1247 tcp_clear_options(&tmp_opt);
1249 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1250 tcp_openreq_init(req, &tmp_opt, skb);
1252 treq = inet6_rsk(req);
1253 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1254 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1255 if (!want_cookie || tmp_opt.tstamp_ok)
1256 TCP_ECN_create_request(req, tcp_hdr(skb));
1258 treq->iif = sk->sk_bound_dev_if;
1260 /* So that link locals have meaning */
1261 if (!sk->sk_bound_dev_if &&
1262 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1263 treq->iif = inet6_iif(skb);
1265 if (!isn) {
1266 struct inet_peer *peer = NULL;
1268 if (ipv6_opt_accepted(sk, skb) ||
1269 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1270 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
1271 atomic_inc(&skb->users);
1272 treq->pktopts = skb;
1275 if (want_cookie) {
1276 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1277 req->cookie_ts = tmp_opt.tstamp_ok;
1278 goto have_isn;
1281 /* VJ's idea. We save last timestamp seen
1282 * from the destination in peer table, when entering
1283 * state TIME-WAIT, and check against it before
1284 * accepting new connection request.
1286 * If "isn" is not zero, this request hit alive
1287 * timewait bucket, so that all the necessary checks
1288 * are made in the function processing timewait state.
1290 if (tmp_opt.saw_tstamp &&
1291 tcp_death_row.sysctl_tw_recycle &&
1292 (dst = inet6_csk_route_req(sk, req)) != NULL &&
1293 (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
1294 ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
1295 &treq->rmt_addr)) {
1296 inet_peer_refcheck(peer);
1297 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1298 (s32)(peer->tcp_ts - req->ts_recent) >
1299 TCP_PAWS_WINDOW) {
1300 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1301 goto drop_and_release;
1304 /* Kill the following clause, if you dislike this way. */
1305 else if (!sysctl_tcp_syncookies &&
1306 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1307 (sysctl_max_syn_backlog >> 2)) &&
1308 (!peer || !peer->tcp_ts_stamp) &&
1309 (!dst || !dst_metric(dst, RTAX_RTT))) {
1310 /* Without syncookies the last quarter of the
1311 * backlog is filled with destinations
1312 * proven to be alive.
1313 * It means that we continue to communicate
1314 * with destinations already remembered
1315 * at the moment of the synflood.
1317 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
1318 &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
1319 goto drop_and_release;
1322 isn = tcp_v6_init_sequence(skb);
1324 have_isn:
1325 tcp_rsk(req)->snt_isn = isn;
1326 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1328 security_inet_conn_request(sk, skb, req);
1330 if (tcp_v6_send_synack(sk, req,
1331 (struct request_values *)&tmp_ext) ||
1332 want_cookie)
1333 goto drop_and_free;
1335 inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1336 return 0;
1338 drop_and_release:
1339 dst_release(dst);
1340 drop_and_free:
1341 reqsk_free(req);
1342 drop:
1343 return 0; /* don't send reset */
1346 static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1347 struct request_sock *req,
1348 struct dst_entry *dst)
1350 struct inet6_request_sock *treq;
1351 struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
1352 struct tcp6_sock *newtcp6sk;
1353 struct inet_sock *newinet;
1354 struct tcp_sock *newtp;
1355 struct sock *newsk;
1356 struct ipv6_txoptions *opt;
1357 #ifdef CONFIG_TCP_MD5SIG
1358 struct tcp_md5sig_key *key;
1359 #endif
1361 if (skb->protocol == htons(ETH_P_IP)) {
1363 * v6 mapped
1366 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);
1368 if (newsk == NULL)
1369 return NULL;
1371 newtcp6sk = (struct tcp6_sock *)newsk;
1372 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1374 newinet = inet_sk(newsk);
1375 newnp = inet6_sk(newsk);
1376 newtp = tcp_sk(newsk);
1378 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1380 ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr);
1382 ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);
1384 ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
1386 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
1387 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
1388 #ifdef CONFIG_TCP_MD5SIG
1389 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1390 #endif
1392 newnp->ipv6_ac_list = NULL;
1393 newnp->ipv6_fl_list = NULL;
1394 newnp->pktoptions = NULL;
1395 newnp->opt = NULL;
1396 newnp->mcast_oif = inet6_iif(skb);
1397 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1400 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1401 * here, tcp_create_openreq_child now does this for us, see the comment in
1402 * that function for the gory details. -acme
1405 /* This is a tricky place. Until this moment the IPv4 tcp
1406 code worked with the IPv6 icsk.icsk_af_ops.
1407 Sync it now.
1409 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
1411 return newsk;
1414 treq = inet6_rsk(req);
1415 opt = np->opt;
1417 if (sk_acceptq_is_full(sk))
1418 goto out_overflow;
1420 if (!dst) {
1421 dst = inet6_csk_route_req(sk, req);
1422 if (!dst)
1423 goto out;
1426 newsk = tcp_create_openreq_child(sk, req, skb);
1427 if (newsk == NULL)
1428 goto out_nonewsk;
1431 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1432 * count here, tcp_create_openreq_child now does this for us, see the
1433 * comment in that function for the gory details. -acme
1436 newsk->sk_gso_type = SKB_GSO_TCPV6;
1437 __ip6_dst_store(newsk, dst, NULL, NULL);
1439 newtcp6sk = (struct tcp6_sock *)newsk;
1440 inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;
1442 newtp = tcp_sk(newsk);
1443 newinet = inet_sk(newsk);
1444 newnp = inet6_sk(newsk);
1446 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1448 ipv6_addr_copy(&newnp->daddr, &treq->rmt_addr);
1449 ipv6_addr_copy(&newnp->saddr, &treq->loc_addr);
1450 ipv6_addr_copy(&newnp->rcv_saddr, &treq->loc_addr);
1451 newsk->sk_bound_dev_if = treq->iif;
1453 /* Now IPv6 options...
1455 First: no IPv4 options.
1457 newinet->inet_opt = NULL;
1458 newnp->ipv6_ac_list = NULL;
1459 newnp->ipv6_fl_list = NULL;
1461 /* Clone RX bits */
1462 newnp->rxopt.all = np->rxopt.all;
1464 /* Clone pktoptions received with SYN */
1465 newnp->pktoptions = NULL;
1466 if (treq->pktopts != NULL) {
1467 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1468 kfree_skb(treq->pktopts);
1469 treq->pktopts = NULL;
1470 if (newnp->pktoptions)
1471 skb_set_owner_r(newnp->pktoptions, newsk);
1473 newnp->opt = NULL;
1474 newnp->mcast_oif = inet6_iif(skb);
1475 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
1477 /* Clone native IPv6 options from listening socket (if any)
1479 Yes, keeping a reference count would be much more clever,
1480 but we do one more thing here: reattach optmem
1481 to newsk.
1483 if (opt) {
1484 newnp->opt = ipv6_dup_options(newsk, opt);
1485 if (opt != np->opt)
1486 sock_kfree_s(sk, opt, opt->tot_len);
1489 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1490 if (newnp->opt)
1491 inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
1492 newnp->opt->opt_flen);
1494 tcp_mtup_init(newsk);
1495 tcp_sync_mss(newsk, dst_mtu(dst));
1496 newtp->advmss = dst_metric_advmss(dst);
1497 tcp_initialize_rcv_mss(newsk);
1498 if (tcp_rsk(req)->snt_synack)
1499 tcp_valid_rtt_meas(newsk,
1500 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1501 newtp->total_retrans = req->retrans;
1503 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1504 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
1506 #ifdef CONFIG_TCP_MD5SIG
1507 /* Copy over the MD5 key from the original socket */
1508 if ((key = tcp_v6_md5_do_lookup(sk, &newnp->daddr)) != NULL) {
1509 /* We're using one, so create a matching key
1510 * on the newsk structure. If we fail to get
1511 * memory, then we end up not copying the key
1512 * across. Shucks.
1514 char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC);
1515 if (newkey != NULL)
1516 tcp_v6_md5_do_add(newsk, &newnp->daddr,
1517 newkey, key->keylen);
1519 #endif
1521 if (__inet_inherit_port(sk, newsk) < 0) {
1522 sock_put(newsk);
1523 goto out;
1525 __inet6_hash(newsk, NULL);
1527 return newsk;
1529 out_overflow:
1530 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1531 out_nonewsk:
1532 if (opt && opt != np->opt)
1533 sock_kfree_s(sk, opt, opt->tot_len);
1534 dst_release(dst);
1535 out:
1536 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1537 return NULL;
1540 static __sum16 tcp_v6_checksum_init(struct sk_buff *skb)
1542 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1543 if (!tcp_v6_check(skb->len, &ipv6_hdr(skb)->saddr,
1544 &ipv6_hdr(skb)->daddr, skb->csum)) {
1545 skb->ip_summed = CHECKSUM_UNNECESSARY;
1546 return 0;
1550 skb->csum = ~csum_unfold(tcp_v6_check(skb->len,
1551 &ipv6_hdr(skb)->saddr,
1552 &ipv6_hdr(skb)->daddr, 0));
1554 if (skb->len <= 76) {
1555 return __skb_checksum_complete(skb);
1557 return 0;
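/*
 * Packets of at most 76 bytes are cheap enough to verify immediately;
 * for anything larger only the pseudo-header complement is seeded
 * into skb->csum here, and full verification is deferred until the
 * payload is walked anyway (e.g. on copy to user space).
 */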
1560 /* The socket must have its spinlock held when we get
1561 * here.
1563 * We have a potential double-lock case here, so even when
1564 * doing backlog processing we use the BH locking scheme.
1565 * This is because we cannot sleep with the original spinlock
1566 * held.
1568 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1570 struct ipv6_pinfo *np = inet6_sk(sk);
1571 struct tcp_sock *tp;
1572 struct sk_buff *opt_skb = NULL;
1574 /* Imagine: socket is IPv6. IPv4 packet arrives,
1575 goes to the IPv4 receive handler and is backlogged.
1576 From the backlog it always goes here. Kerboom...
1577 Fortunately, tcp_rcv_established and rcv_established
1578 handle them correctly, but that is not the case with
1579 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1582 if (skb->protocol == htons(ETH_P_IP))
1583 return tcp_v4_do_rcv(sk, skb);
1585 #ifdef CONFIG_TCP_MD5SIG
1586 if (tcp_v6_inbound_md5_hash (sk, skb))
1587 goto discard;
1588 #endif
1590 if (sk_filter(sk, skb))
1591 goto discard;
1594 * socket locking is here for SMP purposes as backlog rcv
1595 * is currently called with bh processing disabled.
1598 /* Do Stevens' IPV6_PKTOPTIONS.
1600 Yes, guys, it is the only place in our code where we
1601 can make it not affect IPv4.
1602 The rest of the code is protocol independent,
1603 and I do not like the idea of uglifying IPv4.
1605 Actually, the whole idea behind IPV6_PKTOPTIONS
1606 looks not very well thought out. For now we latch the
1607 options received in the last packet enqueued
1608 by tcp. Feel free to propose a better solution.
1609 --ANK (980728)
1611 if (np->rxopt.all)
1612 opt_skb = skb_clone(skb, GFP_ATOMIC);
1614 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1615 sock_rps_save_rxhash(sk, skb);
1616 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1617 goto reset;
1618 if (opt_skb)
1619 goto ipv6_pktoptions;
1620 return 0;
1623 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1624 goto csum_err;
1626 if (sk->sk_state == TCP_LISTEN) {
1627 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1628 if (!nsk)
1629 goto discard;
1632 * Queue it on the new socket if the new socket is active,
1633 * otherwise we just short-circuit this and continue with
1634 * the new socket.
1636 if(nsk != sk) {
1637 sock_rps_save_rxhash(nsk, skb);
1638 if (tcp_child_process(sk, nsk, skb))
1639 goto reset;
1640 if (opt_skb)
1641 __kfree_skb(opt_skb);
1642 return 0;
1644 } else
1645 sock_rps_save_rxhash(sk, skb);
1647 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1648 goto reset;
1649 if (opt_skb)
1650 goto ipv6_pktoptions;
1651 return 0;
1653 reset:
1654 tcp_v6_send_reset(sk, skb);
1655 discard:
1656 if (opt_skb)
1657 __kfree_skb(opt_skb);
1658 kfree_skb(skb);
1659 return 0;
1660 csum_err:
1661 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1662 goto discard;
1665 ipv6_pktoptions:
1666 /* Do you ask, what is it?
1668 1. skb was enqueued by tcp.
1669 2. skb is added to tail of read queue, rather than out of order.
1670 3. socket is not in passive state.
1671 4. Finally, it really contains options, which user wants to receive.
1673 tp = tcp_sk(sk);
1674 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1675 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1676 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
1677 np->mcast_oif = inet6_iif(opt_skb);
1678 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1679 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1680 if (ipv6_opt_accepted(sk, opt_skb)) {
1681 skb_set_owner_r(opt_skb, sk);
1682 opt_skb = xchg(&np->pktoptions, opt_skb);
1683 } else {
1684 __kfree_skb(opt_skb);
1685 opt_skb = xchg(&np->pktoptions, NULL);
1689 kfree_skb(opt_skb);
1690 return 0;
1693 static int tcp_v6_rcv(struct sk_buff *skb)
1695 const struct tcphdr *th;
1696 const struct ipv6hdr *hdr;
1697 struct sock *sk;
1698 int ret;
1699 struct net *net = dev_net(skb->dev);
1701 if (skb->pkt_type != PACKET_HOST)
1702 goto discard_it;
1705 * Count it even if it's bad.
1707 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1709 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1710 goto discard_it;
1712 th = tcp_hdr(skb);
1714 if (th->doff < sizeof(struct tcphdr)/4)
1715 goto bad_packet;
1716 if (!pskb_may_pull(skb, th->doff*4))
1717 goto discard_it;
1719 if (!skb_csum_unnecessary(skb) && tcp_v6_checksum_init(skb))
1720 goto bad_packet;
1722 th = tcp_hdr(skb);
1723 hdr = ipv6_hdr(skb);
1724 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1725 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1726 skb->len - th->doff*4);
1727 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1728 TCP_SKB_CB(skb)->when = 0;
1729 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1730 TCP_SKB_CB(skb)->sacked = 0;
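/*
 * end_seq above accounts for every unit of sequence space the segment
 * consumes: the payload length plus one unit each for the SYN and FIN
 * flags, which occupy sequence numbers of their own.
 */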
1732 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1733 if (!sk)
1734 goto no_tcp_socket;
1736 process:
1737 if (sk->sk_state == TCP_TIME_WAIT)
1738 goto do_time_wait;
1740 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
1741 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1742 goto discard_and_relse;
1745 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1746 goto discard_and_relse;
1748 if (sk_filter(sk, skb))
1749 goto discard_and_relse;
1751 skb->dev = NULL;
1753 bh_lock_sock_nested(sk);
1754 ret = 0;
1755 if (!sock_owned_by_user(sk)) {
1756 #ifdef CONFIG_NET_DMA
1757 struct tcp_sock *tp = tcp_sk(sk);
1758 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1759 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1760 if (tp->ucopy.dma_chan)
1761 ret = tcp_v6_do_rcv(sk, skb);
1762 else
1763 #endif
1765 if (!tcp_prequeue(sk, skb))
1766 ret = tcp_v6_do_rcv(sk, skb);
1768 } else if (unlikely(sk_add_backlog(sk, skb))) {
1769 bh_unlock_sock(sk);
1770 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1771 goto discard_and_relse;
1773 bh_unlock_sock(sk);
1775 sock_put(sk);
1776 return ret ? -1 : 0;
1778 no_tcp_socket:
1779 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1780 goto discard_it;
1782 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1783 bad_packet:
1784 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1785 } else {
1786 tcp_v6_send_reset(NULL, skb);
1789 discard_it:
1792 * Discard frame
1795 kfree_skb(skb);
1796 return 0;
1798 discard_and_relse:
1799 sock_put(sk);
1800 goto discard_it;
1802 do_time_wait:
1803 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1804 inet_twsk_put(inet_twsk(sk));
1805 goto discard_it;
1808 if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1809 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1810 inet_twsk_put(inet_twsk(sk));
1811 goto discard_it;
1814 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1815 case TCP_TW_SYN:
1817 struct sock *sk2;
1819 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
1820 &ipv6_hdr(skb)->daddr,
1821 ntohs(th->dest), inet6_iif(skb));
1822 if (sk2 != NULL) {
1823 struct inet_timewait_sock *tw = inet_twsk(sk);
1824 inet_twsk_deschedule(tw, &tcp_death_row);
1825 inet_twsk_put(tw);
1826 sk = sk2;
1827 goto process;
1829 /* Fall through to ACK */
1831 case TCP_TW_ACK:
1832 tcp_v6_timewait_ack(sk, skb);
1833 break;
1834 case TCP_TW_RST:
1835 goto no_tcp_socket;
1836 case TCP_TW_SUCCESS:;
1838 goto discard_it;
1841 static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
1843 struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
1844 struct ipv6_pinfo *np = inet6_sk(sk);
1845 struct inet_peer *peer;
1847 if (!rt ||
1848 !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
1849 peer = inet_getpeer_v6(&np->daddr, 1);
1850 *release_it = true;
1851 } else {
1852 if (!rt->rt6i_peer)
1853 rt6_bind_peer(rt, 1);
1854 peer = rt->rt6i_peer;
1855 *release_it = false;
1858 return peer;
1861 static void *tcp_v6_tw_get_peer(struct sock *sk)
1863 const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
1864 const struct inet_timewait_sock *tw = inet_twsk(sk);
1866 if (tw->tw_family == AF_INET)
1867 return tcp_v4_tw_get_peer(sk);
1869 return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
1872 static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1873 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1874 .twsk_unique = tcp_twsk_unique,
1875 .twsk_destructor= tcp_twsk_destructor,
1876 .twsk_getpeer = tcp_v6_tw_get_peer,
1879 static const struct inet_connection_sock_af_ops ipv6_specific = {
1880 .queue_xmit = inet6_csk_xmit,
1881 .send_check = tcp_v6_send_check,
1882 .rebuild_header = inet6_sk_rebuild_header,
1883 .conn_request = tcp_v6_conn_request,
1884 .syn_recv_sock = tcp_v6_syn_recv_sock,
1885 .get_peer = tcp_v6_get_peer,
1886 .net_header_len = sizeof(struct ipv6hdr),
1887 .setsockopt = ipv6_setsockopt,
1888 .getsockopt = ipv6_getsockopt,
1889 .addr2sockaddr = inet6_csk_addr2sockaddr,
1890 .sockaddr_len = sizeof(struct sockaddr_in6),
1891 .bind_conflict = inet6_csk_bind_conflict,
1892 #ifdef CONFIG_COMPAT
1893 .compat_setsockopt = compat_ipv6_setsockopt,
1894 .compat_getsockopt = compat_ipv6_getsockopt,
1895 #endif
1898 #ifdef CONFIG_TCP_MD5SIG
1899 static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
1900 .md5_lookup = tcp_v6_md5_lookup,
1901 .calc_md5_hash = tcp_v6_md5_hash_skb,
1902 .md5_add = tcp_v6_md5_add_func,
1903 .md5_parse = tcp_v6_parse_md5_keys,
1905 #endif
1908 * TCP over IPv4 via INET6 API
1911 static const struct inet_connection_sock_af_ops ipv6_mapped = {
1912 .queue_xmit = ip_queue_xmit,
1913 .send_check = tcp_v4_send_check,
1914 .rebuild_header = inet_sk_rebuild_header,
1915 .conn_request = tcp_v6_conn_request,
1916 .syn_recv_sock = tcp_v6_syn_recv_sock,
1917 .get_peer = tcp_v4_get_peer,
1918 .net_header_len = sizeof(struct iphdr),
1919 .setsockopt = ipv6_setsockopt,
1920 .getsockopt = ipv6_getsockopt,
1921 .addr2sockaddr = inet6_csk_addr2sockaddr,
1922 .sockaddr_len = sizeof(struct sockaddr_in6),
1923 .bind_conflict = inet6_csk_bind_conflict,
1924 #ifdef CONFIG_COMPAT
1925 .compat_setsockopt = compat_ipv6_setsockopt,
1926 .compat_getsockopt = compat_ipv6_getsockopt,
1927 #endif
1930 #ifdef CONFIG_TCP_MD5SIG
1931 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1932 .md5_lookup = tcp_v4_md5_lookup,
1933 .calc_md5_hash = tcp_v4_md5_hash_skb,
1934 .md5_add = tcp_v6_md5_add_func,
1935 .md5_parse = tcp_v6_parse_md5_keys,
1937 #endif
1939 /* NOTE: A lot of things are set to zero explicitly by the call to
1940 * sk_alloc(), so they need not be done here.
1942 static int tcp_v6_init_sock(struct sock *sk)
1944 struct inet_connection_sock *icsk = inet_csk(sk);
1945 struct tcp_sock *tp = tcp_sk(sk);
1947 skb_queue_head_init(&tp->out_of_order_queue);
1948 tcp_init_xmit_timers(sk);
1949 tcp_prequeue_init(tp);
1951 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1952 tp->mdev = TCP_TIMEOUT_INIT;
1954 /* So many TCP implementations out there (incorrectly) count the
1955 * initial SYN frame in their delayed-ACK and congestion control
1956 * algorithms that we must have the following bandaid to talk
1957 * efficiently to them. -DaveM
1959 tp->snd_cwnd = 2;
1961 /* See draft-stevens-tcpca-spec-01 for discussion of the
1962 * initialization of these values.
1964 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1965 tp->snd_cwnd_clamp = ~0;
1966 tp->mss_cache = TCP_MSS_DEFAULT;
1968 tp->reordering = sysctl_tcp_reordering;
1970 sk->sk_state = TCP_CLOSE;
1972 icsk->icsk_af_ops = &ipv6_specific;
1973 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1974 icsk->icsk_sync_mss = tcp_sync_mss;
1975 sk->sk_write_space = sk_stream_write_space;
1976 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1978 #ifdef CONFIG_TCP_MD5SIG
1979 tp->af_specific = &tcp_sock_ipv6_specific;
1980 #endif
1982 /* TCP Cookie Transactions */
1983 if (sysctl_tcp_cookie_size > 0) {
1984 /* Default, cookies without s_data_payload. */
1985 tp->cookie_values =
1986 kzalloc(sizeof(*tp->cookie_values),
1987 sk->sk_allocation);
1988 if (tp->cookie_values != NULL)
1989 kref_init(&tp->cookie_values->kref);
1991 /* Presumed zeroed, in order of appearance:
1992 * cookie_in_always, cookie_out_never,
1993 * s_data_constant, s_data_in, s_data_out
1995 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1996 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1998 local_bh_disable();
1999 percpu_counter_inc(&tcp_sockets_allocated);
2000 local_bh_enable();
2002 return 0;
2005 static void tcp_v6_destroy_sock(struct sock *sk)
2007 #ifdef CONFIG_TCP_MD5SIG
2008 /* Clean up the MD5 key list */
2009 if (tcp_sk(sk)->md5sig_info)
2010 tcp_v6_clear_md5_list(sk);
2011 #endif
2012 tcp_v4_destroy_sock(sk);
2013 inet6_destroy_sock(sk);
2016 #ifdef CONFIG_PROC_FS
2017 /* Proc filesystem TCPv6 sock list dumping. */
2018 static void get_openreq6(struct seq_file *seq,
2019 const struct sock *sk, struct request_sock *req, int i, int uid)
2021 int ttd = req->expires - jiffies;
2022 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2023 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
2025 if (ttd < 0)
2026 ttd = 0;
2028 seq_printf(seq,
2029 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2030 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2032 src->s6_addr32[0], src->s6_addr32[1],
2033 src->s6_addr32[2], src->s6_addr32[3],
2034 ntohs(inet_rsk(req)->loc_port),
2035 dest->s6_addr32[0], dest->s6_addr32[1],
2036 dest->s6_addr32[2], dest->s6_addr32[3],
2037 ntohs(inet_rsk(req)->rmt_port),
2038 TCP_SYN_RECV,
2039 0,0, /* could print option size, but that is af dependent. */
2040 1, /* timers active (only the expire timer) */
2041 jiffies_to_clock_t(ttd),
2042 req->retrans,
2043 uid,
2044 0, /* non standard timer */
2045 0, /* open_requests have no inode */
2046 0, req);
2049 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2051 const struct in6_addr *dest, *src;
2052 __u16 destp, srcp;
2053 int timer_active;
2054 unsigned long timer_expires;
2055 const struct inet_sock *inet = inet_sk(sp);
2056 const struct tcp_sock *tp = tcp_sk(sp);
2057 const struct inet_connection_sock *icsk = inet_csk(sp);
2058 const struct ipv6_pinfo *np = inet6_sk(sp);
2060 dest = &np->daddr;
2061 src = &np->rcv_saddr;
2062 destp = ntohs(inet->inet_dport);
2063 srcp = ntohs(inet->inet_sport);
2065 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2066 timer_active = 1;
2067 timer_expires = icsk->icsk_timeout;
2068 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2069 timer_active = 4;
2070 timer_expires = icsk->icsk_timeout;
2071 } else if (timer_pending(&sp->sk_timer)) {
2072 timer_active = 2;
2073 timer_expires = sp->sk_timer.expires;
2074 } else {
2075 timer_active = 0;
2076 timer_expires = jiffies;
2079 seq_printf(seq,
2080 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2081 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
2083 src->s6_addr32[0], src->s6_addr32[1],
2084 src->s6_addr32[2], src->s6_addr32[3], srcp,
2085 dest->s6_addr32[0], dest->s6_addr32[1],
2086 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2087 sp->sk_state,
2088 tp->write_seq-tp->snd_una,
2089 (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
2090 timer_active,
2091 jiffies_to_clock_t(timer_expires - jiffies),
2092 icsk->icsk_retransmits,
2093 sock_i_uid(sp),
2094 icsk->icsk_probes_out,
2095 sock_i_ino(sp),
2096 atomic_read(&sp->sk_refcnt), sp,
2097 jiffies_to_clock_t(icsk->icsk_rto),
2098 jiffies_to_clock_t(icsk->icsk_ack.ato),
2099 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
2100 tp->snd_cwnd,
2101 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
2105 static void get_timewait6_sock(struct seq_file *seq,
2106 struct inet_timewait_sock *tw, int i)
2108 const struct in6_addr *dest, *src;
2109 __u16 destp, srcp;
2110 const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2111 int ttd = tw->tw_ttd - jiffies;
2113 if (ttd < 0)
2114 ttd = 0;
2116 dest = &tw6->tw_v6_daddr;
2117 src = &tw6->tw_v6_rcv_saddr;
2118 destp = ntohs(tw->tw_dport);
2119 srcp = ntohs(tw->tw_sport);
2121 seq_printf(seq,
2122 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2123 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
2125 src->s6_addr32[0], src->s6_addr32[1],
2126 src->s6_addr32[2], src->s6_addr32[3], srcp,
2127 dest->s6_addr32[0], dest->s6_addr32[1],
2128 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2129 tw->tw_substate, 0, 0,
2130 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2131 atomic_read(&tw->tw_refcnt), tw);
2134 static int tcp6_seq_show(struct seq_file *seq, void *v)
2136 struct tcp_iter_state *st;
2138 if (v == SEQ_START_TOKEN) {
2139 seq_puts(seq,
2140 " sl "
2141 "local_address "
2142 "remote_address "
2143 "st tx_queue rx_queue tr tm->when retrnsmt"
2144 " uid timeout inode\n");
2145 goto out;
2147 st = seq->private;
2149 switch (st->state) {
2150 case TCP_SEQ_STATE_LISTENING:
2151 case TCP_SEQ_STATE_ESTABLISHED:
2152 get_tcp6_sock(seq, v, st->num);
2153 break;
2154 case TCP_SEQ_STATE_OPENREQ:
2155 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2156 break;
2157 case TCP_SEQ_STATE_TIME_WAIT:
2158 get_timewait6_sock(seq, v, st->num);
2159 break;
2161 out:
2162 return 0;
2165 static const struct file_operations tcp6_afinfo_seq_fops = {
2166 .owner = THIS_MODULE,
2167 .open = tcp_seq_open,
2168 .read = seq_read,
2169 .llseek = seq_lseek,
2170 .release = seq_release_net
2173 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2174 .name = "tcp6",
2175 .family = AF_INET6,
2176 .seq_fops = &tcp6_afinfo_seq_fops,
2177 .seq_ops = {
2178 .show = tcp6_seq_show,
2182 int __net_init tcp6_proc_init(struct net *net)
2184 return tcp_proc_register(net, &tcp6_seq_afinfo);
2187 void tcp6_proc_exit(struct net *net)
2189 tcp_proc_unregister(net, &tcp6_seq_afinfo);
2191 #endif
2193 struct proto tcpv6_prot = {
2194 .name = "TCPv6",
2195 .owner = THIS_MODULE,
2196 .close = tcp_close,
2197 .connect = tcp_v6_connect,
2198 .disconnect = tcp_disconnect,
2199 .accept = inet_csk_accept,
2200 .ioctl = tcp_ioctl,
2201 .init = tcp_v6_init_sock,
2202 .destroy = tcp_v6_destroy_sock,
2203 .shutdown = tcp_shutdown,
2204 .setsockopt = tcp_setsockopt,
2205 .getsockopt = tcp_getsockopt,
2206 .recvmsg = tcp_recvmsg,
2207 .sendmsg = tcp_sendmsg,
2208 .sendpage = tcp_sendpage,
2209 .backlog_rcv = tcp_v6_do_rcv,
2210 .hash = tcp_v6_hash,
2211 .unhash = inet_unhash,
2212 .get_port = inet_csk_get_port,
2213 .enter_memory_pressure = tcp_enter_memory_pressure,
2214 .sockets_allocated = &tcp_sockets_allocated,
2215 .memory_allocated = &tcp_memory_allocated,
2216 .memory_pressure = &tcp_memory_pressure,
2217 .orphan_count = &tcp_orphan_count,
2218 .sysctl_mem = sysctl_tcp_mem,
2219 .sysctl_wmem = sysctl_tcp_wmem,
2220 .sysctl_rmem = sysctl_tcp_rmem,
2221 .max_header = MAX_TCP_HEADER,
2222 .obj_size = sizeof(struct tcp6_sock),
2223 .slab_flags = SLAB_DESTROY_BY_RCU,
2224 .twsk_prot = &tcp6_timewait_sock_ops,
2225 .rsk_prot = &tcp6_request_sock_ops,
2226 .h.hashinfo = &tcp_hashinfo,
2227 .no_autobind = true,
2228 #ifdef CONFIG_COMPAT
2229 .compat_setsockopt = compat_tcp_setsockopt,
2230 .compat_getsockopt = compat_tcp_getsockopt,
2231 #endif
2234 static const struct inet6_protocol tcpv6_protocol = {
2235 .handler = tcp_v6_rcv,
2236 .err_handler = tcp_v6_err,
2237 .gso_send_check = tcp_v6_gso_send_check,
2238 .gso_segment = tcp_tso_segment,
2239 .gro_receive = tcp6_gro_receive,
2240 .gro_complete = tcp6_gro_complete,
2241 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2244 static struct inet_protosw tcpv6_protosw = {
2245 .type = SOCK_STREAM,
2246 .protocol = IPPROTO_TCP,
2247 .prot = &tcpv6_prot,
2248 .ops = &inet6_stream_ops,
2249 .no_check = 0,
2250 .flags = INET_PROTOSW_PERMANENT |
2251 INET_PROTOSW_ICSK,
2254 static int __net_init tcpv6_net_init(struct net *net)
2256 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2257 SOCK_RAW, IPPROTO_TCP, net);
2260 static void __net_exit tcpv6_net_exit(struct net *net)
2262 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
2265 static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
2267 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
2270 static struct pernet_operations tcpv6_net_ops = {
2271 .init = tcpv6_net_init,
2272 .exit = tcpv6_net_exit,
2273 .exit_batch = tcpv6_net_exit_batch,
2276 int __init tcpv6_init(void)
2278 int ret;
2280 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2281 if (ret)
2282 goto out;
2284 /* register inet6 protocol */
2285 ret = inet6_register_protosw(&tcpv6_protosw);
2286 if (ret)
2287 goto out_tcpv6_protocol;
2289 ret = register_pernet_subsys(&tcpv6_net_ops);
2290 if (ret)
2291 goto out_tcpv6_protosw;
2292 out:
2293 return ret;
2295 out_tcpv6_protosw:
2296 inet6_unregister_protosw(&tcpv6_protosw);
2297 out_tcpv6_protocol:
2298 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
2299 goto out;
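/*
 * The error labels above unwind in strict reverse order of
 * registration: a pernet failure unregisters the protosw and then
 * falls through to remove the protocol handler, while a protosw
 * failure jumps straight to removing the protocol handler.
 */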
2302 void tcpv6_exit(void)
2304 unregister_pernet_subsys(&tcpv6_net_ops);
2305 inet6_unregister_protosw(&tcpv6_protosw);
2306 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);