/*
 *	Linux INET6 implementation
 *
 *	Based on net/dccp6/ipv6.c
 *
 *	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/random.h>
#include <linux/xfrm.h>

#include <net/addrconf.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/inet_sock.h>
#include <net/inet6_connection_sock.h>
#include <net/inet6_hashtables.h>
#include <net/ip6_route.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_checksum.h>

#include "dccp.h"
#include "ipv6.h"
/* Socket used for sending RSTs and ACKs */
static struct socket *dccp_v6_ctl_socket;

static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
static int dccp_v6_get_port(struct sock *sk, unsigned short snum)
{
	return inet_csk_get_port(&dccp_hashinfo, sk, snum,
				 inet6_csk_bind_conflict);
}
static void dccp_v6_hash(struct sock *sk)
{
	if (sk->sk_state != DCCP_CLOSED) {
		if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) {
			dccp_hash(sk);
			return;
		}
		local_bh_disable();
		__inet6_hash(&dccp_hashinfo, sk);
		local_bh_enable();
	}
}
/* add pseudo-header to DCCP checksum stored in skb->csum */
static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
					  struct in6_addr *saddr,
					  struct in6_addr *daddr)
{
	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
}
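
/*
 * Commentary on the checksum split used throughout this file:
 * dccp_csum_outgoing() appears to leave the partial checksum over the DCCP
 * header and covered payload in skb->csum, and dccp_v6_csum_finish() then
 * folds in the IPv6 pseudo-header via csum_ipv6_magic(), e.g.:
 *
 *	dccp_csum_outgoing(skb);
 *	dh->dccph_checksum = dccp_v6_csum_finish(skb, &saddr, &daddr);
 *
 * much as the IPv4 code finishes its checksum with its own pseudo-header.
 */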
static inline void dccp_v6_send_check(struct sock *sk, int unused_value,
				      struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_hdr *dh = dccp_hdr(skb);

	dccp_csum_outgoing(skb);
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
}
static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
						  __be16 sport, __be16 dport)
{
	return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
}

static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
{
	return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					     ipv6_hdr(skb)->saddr.s6_addr32,
					     dccp_hdr(skb)->dccph_dport,
					     dccp_hdr(skb)->dccph_sport);
}
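
/*
 * The initial sequence number chosen for a peer's Request (dreq_iss in
 * dccp_v6_conn_request() below) is derived from the address/port 4-tuple by
 * reusing the TCP helper secure_tcpv6_sequence_number(). For an incoming
 * packet the daddr/dport are our local endpoint and saddr/sport the peer's,
 * hence the swapped argument order in dccp_v6_init_sequence().
 */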
static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
			int type, int code, int offset, __be32 info)
{
	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	__u64 seq;

	sk = inet6_lookup(&dccp_hashinfo, &hdr->daddr, dh->dccph_dport,
			  &hdr->saddr, dh->dccph_sport, inet6_iif(skb));

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst = NULL;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
			goto out;

		/* icmp should have updated the destination cache entry */
		dst = __sk_dst_check(sk, np->dst_cookie);
		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi fl;

			/* BUGGG_FUTURE: Again, it is not clear how
			 * to handle the rthdr case. Ignore this complexity
			 * for now.
			 */
			memset(&fl, 0, sizeof(fl));
			fl.proto = IPPROTO_DCCP;
			ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
			ipv6_addr_copy(&fl.fl6_src, &np->saddr);
			fl.oif = sk->sk_bound_dev_if;
			fl.fl_ip_dport = inet->dport;
			fl.fl_ip_sport = inet->sport;
			security_sk_classify_flow(sk, &fl);

			err = ip6_dst_lookup(sk, &dst, &fl);
			if (err) {
				sk->sk_err_soft = -err;
				goto out;
			}

			err = xfrm_lookup(&dst, &fl, sk, 0);
			if (err < 0) {
				sk->sk_err_soft = -err;
				goto out;
			}
		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			dccp_sync_mss(sk, dst_mtu(dst));
		} /* else let the usual retransmit timer handle it */
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	seq = DCCP_SKB_CB(skb)->dccpd_seq;
	/* Might be for a request_sock */
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case DCCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, dh->dccph_dport,
					   &hdr->daddr, &hdr->saddr,
					   inet6_iif(skb));
		if (req == NULL)
			goto out;

		/*
		 * ICMPs are not backlogged, hence we cannot get an established
		 * socket here.
		 */
		BUG_TRAP(req->sk == NULL);

		if (seq != dccp_rsk(req)->dreq_iss) {
			NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case DCCP_REQUESTING:
	case DCCP_RESPOND:  /* Cannot happen.
			       It can, if SYNs are crossed. --ANK */
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;
			/*
			 * Wake people up to see the error
			 * (see connect in sock.c)
			 */
			sk->sk_error_report(sk);
			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
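
/*
 * dccp_v6_send_response() builds and transmits the Response packet that
 * answers a Request while the connection is still represented by a
 * request_sock. Roughly: set up a flowi for the peer, honour a source
 * routing header from the IPv6 options if present, look up the route and
 * xfrm state, let dccp_make_response() build the skb, finish the checksum
 * with the pseudo-header, and hand the packet to ip6_xmit().
 */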
static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
				 struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	struct ipv6_txoptions *opt = NULL;
	struct in6_addr *final_p = NULL, final;
	struct flowi fl;
	int err = -1;

	memset(&fl, 0, sizeof(fl));
	fl.proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
	ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
	fl.fl6_flowlabel = 0;
	fl.oif = ireq6->iif;
	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
	fl.fl_ip_sport = inet_sk(sk)->sport;
	security_req_classify_flow(req, &fl);

	opt = np->opt;

	if (opt != NULL && opt->srcrt != NULL) {
		const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto done;

	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = xfrm_lookup(&dst, &fl, sk, 0);
	if (err < 0)
		goto done;

	skb = dccp_make_response(sk, dst, req);
	if (skb != NULL) {
		struct dccp_hdr *dh = dccp_hdr(skb);

		dh->dccph_checksum = dccp_v6_csum_finish(skb,
							 &ireq6->loc_addr,
							 &ireq6->rmt_addr);
		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
		err = ip6_xmit(sk, skb, &fl, opt, 0);
		err = net_xmit_eval(err);
	}

done:
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return err;
}
static void dccp_v6_reqsk_destructor(struct request_sock *req)
{
	if (inet6_rsk(req)->pktopts != NULL)
		kfree_skb(inet6_rsk(req)->pktopts);
}
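
/*
 * Send a Reset in reply to a packet that cannot be matched to a usable
 * socket. The Reset is built on the control socket dccp_v6_ctl_socket and
 * routed back along the reversed flow of the received packet; see
 * "8.3.1. Abnormal Termination" in RFC 4340 for the sequence and
 * acknowledgement number rules applied below.
 */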
static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rxskb), *dh;
	struct ipv6hdr *rxip6h;
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb;
	struct flowi fl;
	u64 seqno = 0;

	if (rxdh->dccph_type == DCCP_PKT_RESET)
		return;

	if (!ipv6_unicast_destination(rxskb))
		return;

	skb = alloc_skb(dccp_v6_ctl_socket->sk->sk_prot->max_header,
			GFP_ATOMIC);
	if (skb == NULL)
		return;

	skb_reserve(skb, dccp_v6_ctl_socket->sk->sk_prot->max_header);

	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);

	/* Swap the send and the receive. */
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;
	dh->dccph_x	= 1;

	dccp_hdr_reset(skb)->dccph_reset_code =
				DCCP_SKB_CB(rxskb)->dccpd_reset_code;

	/* See "8.3.1. Abnormal Termination" in RFC 4340 */
	if (DCCP_SKB_CB(rxskb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_set_seqno(&seqno, DCCP_SKB_CB(rxskb)->dccpd_ack_seq + 1);

	dccp_hdr_set_seq(dh, seqno);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), DCCP_SKB_CB(rxskb)->dccpd_seq);

	dccp_csum_outgoing(skb);
	rxip6h = ipv6_hdr(rxskb);
	dh->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr,
						 &rxip6h->daddr);

	memset(&fl, 0, sizeof(fl));
	ipv6_addr_copy(&fl.fl6_dst, &rxip6h->saddr);
	ipv6_addr_copy(&fl.fl6_src, &rxip6h->daddr);

	fl.proto = IPPROTO_DCCP;
	fl.oif = inet6_iif(rxskb);
	fl.fl_ip_dport = dh->dccph_dport;
	fl.fl_ip_sport = dh->dccph_sport;
	security_skb_classify_flow(rxskb, &fl);

	/* sk = NULL, but it is safe for now. RST socket required. */
	if (!ip6_dst_lookup(NULL, &skb->dst, &fl)) {
		if (xfrm_lookup(&skb->dst, &fl, NULL, 0) >= 0) {
			ip6_xmit(dccp_v6_ctl_socket->sk, skb, &fl, NULL, 0);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
			DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
			return;
		}
	}

	kfree_skb(skb);
}
static struct request_sock_ops dccp6_request_sock_ops = {
	.family		= PF_INET6,
	.obj_size	= sizeof(struct dccp6_request_sock),
	.rtx_syn_ack	= dccp_v6_send_response,
	.send_ack	= dccp_reqsk_send_ack,
	.destructor	= dccp_v6_reqsk_destructor,
	.send_reset	= dccp_v6_ctl_send_reset,
};
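
/*
 * Map an incoming packet on a LISTEN socket to either a pending
 * request_sock (continuing the handshake via dccp_check_req()) or an
 * already established/timewait socket found in the hash tables.
 */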
static struct sock *dccp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
	const struct dccp_hdr *dh = dccp_hdr(skb);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct sock *nsk;
	struct request_sock **prev;
	/* Find possible connection requests. */
	struct request_sock *req = inet6_csk_search_req(sk, &prev,
							dh->dccph_sport,
							&iph->saddr,
							&iph->daddr,
							inet6_iif(skb));
	if (req != NULL)
		return dccp_check_req(sk, skb, req, prev);

	nsk = __inet6_lookup_established(&dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
					 inet6_iif(skb));
	if (nsk != NULL) {
		if (nsk->sk_state != DCCP_TIME_WAIT) {
			bh_lock_sock(nsk);
			return nsk;
		}
		inet_twsk_put(inet_twsk(nsk));
		return NULL;
	}

	return sk;
}
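
/*
 * Handle an incoming connection Request on a LISTEN socket: validate the
 * destination and service code, allocate and initialise a request_sock,
 * record the peer's addresses and any IPv6 packet options, pick the initial
 * sequence numbers, and answer with a Response via dccp_v6_send_response().
 */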
static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	struct request_sock *req;
	struct dccp_request_sock *dreq;
	struct inet6_request_sock *ireq6;
	struct ipv6_pinfo *np = inet6_sk(sk);
	const __be32 service = dccp_hdr_request(skb)->dccph_req_service;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	__u8 reset_code = DCCP_RESET_CODE_TOO_BUSY;

	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	if (dccp_bad_service_code(sk, service)) {
		reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
		goto drop;
	}
	/*
	 * There are no SYN attacks on IPv6, yet...
	 */
	if (inet_csk_reqsk_queue_is_full(sk))
		goto drop;

	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
		goto drop;

	req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
	if (req == NULL)
		goto drop;

	if (dccp_parse_options(sk, skb))
		goto drop_and_free;

	dccp_reqsk_init(req, skb);

	if (security_inet_conn_request(sk, skb, req))
		goto drop_and_free;

	ireq6 = inet6_rsk(req);
	ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr);
	ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr);
	ireq6->pktopts = NULL;

	if (ipv6_opt_accepted(sk, skb) ||
	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
	    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
		atomic_inc(&skb->users);
		ireq6->pktopts = skb;
	}
	ireq6->iif = sk->sk_bound_dev_if;

	/* So that link locals have meaning */
	if (!sk->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq6->iif = inet6_iif(skb);

	/*
	 * Step 3: Process LISTEN state
	 *
	 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie
	 *
	 * In fact we defer setting S.GSR, S.SWL, S.SWH to
	 * dccp_create_openreq_child.
	 */
	dreq = dccp_rsk(req);
	dreq->dreq_isr	   = dcb->dccpd_seq;
	dreq->dreq_iss	   = dccp_v6_init_sequence(skb);
	dreq->dreq_service = service;

	if (dccp_v6_send_response(sk, req, NULL))
		goto drop_and_free;

	inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
	return 0;

drop_and_free:
	reqsk_free(req);
drop:
	DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
	dcb->dccpd_reset_code = reset_code;
	return -1;
}
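
/*
 * Create the child socket once the handshake completes. The IPv4-mapped case
 * is delegated to dccp_v4_request_recv_sock() and then switched over to the
 * dccp_ipv6_mapped icsk_af_ops; the native IPv6 case routes towards the peer,
 * clones the request's packet options and the listener's IPv6 options onto
 * the new socket, and hashes it into the established table.
 */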
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet6_request_sock *ireq6 = inet6_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp_sock *newdp;
	struct dccp6_sock *newdp6;
	struct sock *newsk;
	struct ipv6_txoptions *opt;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newdp = dccp_sk(newsk);
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
			      newinet->daddr);

		ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
			      newinet->saddr);

		ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions = NULL;
		newnp->opt	  = NULL;
		newnp->mcast_oif  = inet6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		 * worked with IPv6 icsk.icsk_af_ops.
		 * Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	opt = np->opt;

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p = NULL, final;
		struct flowi fl;

		memset(&fl, 0, sizeof(fl));
		fl.proto = IPPROTO_DCCP;
		ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
		if (opt != NULL && opt->srcrt != NULL) {
			const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;

			ipv6_addr_copy(&final, &fl.fl6_dst);
			ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
			final_p = &final;
		}
		ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
		fl.oif = sk->sk_bound_dev_if;
		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
		fl.fl_ip_sport = inet_sk(sk)->sport;
		security_sk_classify_flow(sk, &fl);

		if (ip6_dst_lookup(sk, &dst, &fl))
			goto out;

		if (final_p)
			ipv6_addr_copy(&fl.fl6_dst, final_p);

		if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newdp = dccp_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	ipv6_addr_copy(&newnp->daddr, &ireq6->rmt_addr);
	ipv6_addr_copy(&newnp->saddr, &ireq6->loc_addr);
	ipv6_addr_copy(&newnp->rcv_saddr, &ireq6->loc_addr);
	newsk->sk_bound_dev_if = ireq6->iif;

	/* Now IPv6 options...
	 *
	 * First: no IPv4 options.
	 */
	newinet->opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq6->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
		kfree_skb(ireq6->pktopts);
		ireq6->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from the listening socket (if any).
	 *
	 * Yes, keeping a reference count would be much more clever, but we do
	 * one more thing here: reattach the optmem to newsk.
	 */
	if (opt != NULL) {
		newnp->opt = ipv6_dup_options(newsk, opt);
		if (opt != np->opt)
			sock_kfree_s(sk, opt, opt->tot_len);
	}

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

	__inet6_hash(&dccp_hashinfo, newsk);
	inet_inherit_port(&dccp_hashinfo, sk, newsk);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
	NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
	if (opt != NULL && opt != np->opt)
		sock_kfree_s(sk, opt, opt->tot_len);
	dst_release(dst);
	return NULL;
}
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	 * goes to IPv4 receive handler and backlogged.
	 * From backlog it always goes here. Kerboom...
	 * Fortunately, dccp_rcv_established and rcv_established
	 * handle them correctly, but it is not the case with
	 * dccp_v6_hnd_req and dccp_v6_ctl_send_reset().   --ANK
	 */
	if (skb->protocol == htons(ETH_P_IP))
		return dccp_v4_do_rcv(sk, skb);

	if (sk_filter(sk, skb))
		goto discard;

	/*
	 * socket locking is here for SMP purposes as backlog rcv is currently
	 * called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.
	 *
	 * Yes, guys, it is the only place in our code where we
	 * may make it not affecting IPv4.
	 * The rest of the code is protocol independent,
	 * and I do not like the idea of uglifying IPv4.
	 *
	 * Actually, the whole idea behind IPV6_PKTOPTIONS does not look
	 * very well thought out. For now we latch the options received
	 * in the last packet, enqueued by tcp. Feel free to propose a
	 * better solution.
	 */

	/*
	 * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below
	 *        (wrt ipv6_pktoptions) and net/ipv6/tcp_ipv6.c for an example.
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, GFP_ATOMIC);

	if (sk->sk_state == DCCP_OPEN) { /* Fast path */
		if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len))
			goto reset;
		if (opt_skb) {
			/* XXX This is where we would goto ipv6_pktoptions. */
			__kfree_skb(opt_skb);
		}
		return 0;
	}

	/*
	 *  Step 3: Process LISTEN state
	 *     If S.state == LISTEN,
	 *	 If P.type == Request or P contains a valid Init Cookie option,
	 *	      (* Must scan the packet's options to check for Init
	 *		 Cookies.  Only Init Cookies are processed here,
	 *		 however; other options are processed in Step 8.  This
	 *		 scan need only be performed if the endpoint uses Init
	 *		 Cookies *)
	 *	      (* Generate a new socket and switch to that socket *)
	 *	      Set S := new socket for this port pair
	 *	      S.state = RESPOND
	 *	      Choose S.ISS (initial seqno) or set from Init Cookies
	 *	      Initialize S.GAR := S.ISS
	 *	      Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
	 *	      Continue with S.state == RESPOND
	 *	      (* A Response packet will be generated in Step 11 *)
	 *	 Otherwise,
	 *	      Generate Reset(No Connection) unless P.type == Reset
	 *	      Drop packet and return
	 *
	 * NOTE: the check for the packet types is done in
	 *	 dccp_rcv_state_process
	 */
	if (sk->sk_state == DCCP_LISTEN) {
		struct sock *nsk = dccp_v6_hnd_req(sk, skb);

		if (nsk == NULL)
			goto discard;
		/*
		 * Queue it on the new socket if the new socket is active,
		 * otherwise we just shortcircuit this and continue with
		 * the new socket.
		 */
		if (nsk != sk) {
			if (dccp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb != NULL)
				__kfree_skb(opt_skb);
			return 0;
		}
	}

	if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len))
		goto reset;
	if (opt_skb) {
		/* XXX This is where we would goto ipv6_pktoptions. */
		__kfree_skb(opt_skb);
	}
	return 0;

reset:
	dccp_v6_ctl_send_reset(sk, skb);
discard:
	if (opt_skb != NULL)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
}
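
/*
 * Main receive routine for DCCPv6 packets handed up by the IPv6 layer:
 * validate the header and checksum, record the sequence numbers in the skb
 * control block, look the flow up in the socket hash tables, and either
 * deliver via sk_receive_skb() or, when no connection exists, answer with
 * Reset(No Connection) as described in the step comments below.
 */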
static int dccp_v6_rcv(struct sk_buff **pskb)
{
	const struct dccp_hdr *dh;
	struct sk_buff *skb = *pskb;
	struct sock *sk;
	int min_cov;

	/* Step 1: Check header basics */

	if (dccp_invalid_packet(skb))
		goto discard_it;

	/* Step 1: If header checksum is incorrect, drop packet and return. */
	if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr)) {
		DCCP_WARN("dropped packet with invalid checksum\n");
		goto discard_it;
	}

	dh = dccp_hdr(skb);

	DCCP_SKB_CB(skb)->dccpd_seq  = dccp_hdr_seq(skb);
	DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type;

	if (dccp_packet_without_ack(skb))
		DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ;
	else
		DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb);

	/* Step 2:
	 *	Look up flow ID in table and get corresponding socket */
	sk = __inet6_lookup(&dccp_hashinfo, &ipv6_hdr(skb)->saddr,
			    dh->dccph_sport,
			    &ipv6_hdr(skb)->daddr, ntohs(dh->dccph_dport),
			    inet6_iif(skb));
	/*
	 * Step 2:
	 *	If no socket ...
	 */
	if (sk == NULL) {
		dccp_pr_debug("failed to look up flow ID in table and "
			      "get corresponding socket\n");
		goto no_dccp_socket;
	}

	/*
	 * Step 2:
	 *	... or S.state == TIMEWAIT,
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (sk->sk_state == DCCP_TIME_WAIT) {
		dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n");
		inet_twsk_put(inet_twsk(sk));
		goto no_dccp_socket;
	}

	/*
	 * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage
	 *	o if MinCsCov = 0, only packets with CsCov = 0 are accepted
	 *	o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov
	 */
	min_cov = dccp_sk(sk)->dccps_pcrlen;
	if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) {
		dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n",
			      dh->dccph_cscov, min_cov);
		/* FIXME: send Data Dropped option (see also dccp_v4_rcv) */
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	return sk_receive_skb(sk, skb, 1) ? -1 : 0;

no_dccp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;
	/*
	 * Step 2:
	 *	If no socket ...
	 *		Generate Reset(No Connection) unless P.type == Reset
	 *		Drop packet and return
	 */
	if (dh->dccph_type != DCCP_PKT_RESET) {
		DCCP_SKB_CB(skb)->dccpd_reset_code =
					DCCP_RESET_CODE_NO_CONNECTION;
		dccp_v6_ctl_send_reset(sk, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sock_put(sk);
	goto discard_it;
}
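
/*
 * Active open: resolve the destination (IPv4-mapped addresses are handed to
 * dccp_v4_connect() together with the dccp_ipv6_mapped icsk_af_ops), obtain
 * a route and source address, pick the initial sequence number and start
 * the handshake via dccp_connect().
 */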
static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			   int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr;
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p = NULL, final;
	struct flowi fl;
	struct dst_entry *dst;
	int addr_type;
	int err;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl, 0, sizeof(fl));

	if (np->sndflow) {
		fl.fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl.fl6_flowlabel);
		if (fl.fl6_flowlabel & IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
			if (flowlabel == NULL)
				return -EINVAL;
			ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
			fl6_sock_release(flowlabel);
		}
	}
	/*
	 * connect() to INADDR_ANY means loopback (BSD'ism).
	 */
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type & IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
	np->flow_label = fl.fl6_flowlabel;

	/*
	 * DCCP over IPv4
	 */
	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		icsk->icsk_af_ops = &dccp_ipv6_mapped;
		sk->sk_backlog_rcv = dccp_v4_do_rcv;

		err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &dccp_ipv6_af_ops;
			sk->sk_backlog_rcv = dccp_v6_do_rcv;
			goto failure;
		} else {
			ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
				      inet->saddr);
			ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
				      inet->rcv_saddr);
		}

		return err;
	}

	if (!ipv6_addr_any(&np->rcv_saddr))
		saddr = &np->rcv_saddr;

	fl.proto = IPPROTO_DCCP;
	ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
	ipv6_addr_copy(&fl.fl6_src, saddr ? saddr : &np->saddr);
	fl.oif = sk->sk_bound_dev_if;
	fl.fl_ip_dport = usin->sin6_port;
	fl.fl_ip_sport = inet->sport;
	security_sk_classify_flow(sk, &fl);

	if (np->opt != NULL && np->opt->srcrt != NULL) {
		const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;

		ipv6_addr_copy(&final, &fl.fl6_dst);
		ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
		final_p = &final;
	}

	err = ip6_dst_lookup(sk, &dst, &fl);
	if (err)
		goto failure;

	if (final_p)
		ipv6_addr_copy(&fl.fl6_dst, final_p);

	err = __xfrm_lookup(&dst, &fl, sk, 1);
	if (err < 0) {
		if (err == -EREMOTE)
			err = ip6_dst_blackhole(sk, &dst, &fl);
		if (err < 0)
			goto failure;
	}

	if (saddr == NULL) {
		saddr = &fl.fl6_src;
		ipv6_addr_copy(&np->rcv_saddr, saddr);
	}

	/* set the source address */
	ipv6_addr_copy(&np->saddr, saddr);
	inet->rcv_saddr = LOOPBACK4_IPV6;

	__ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (np->opt != NULL)
		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
					  np->opt->opt_nflen);

	inet->dport = usin->sin6_port;

	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet6_hash_connect(&dccp_death_row, sk);
	if (err)
		goto late_failure;

	dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32,
						      np->daddr.s6_addr32,
						      inet->sport, inet->dport);
	err = dccp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	dccp_set_state(sk, DCCP_CLOSED);
	__sk_dst_reset(sk);
failure:
	inet->dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
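
/*
 * Native IPv6 operations for AF_INET6 DCCP sockets. The dccp_ipv6_mapped
 * variant below is installed instead when a socket ends up talking to an
 * IPv4 peer through an IPv4-mapped IPv6 address.
 */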
static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = dccp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
/*
 * DCCP over IPv4 via INET6 API
 */
static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = dccp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.conn_request	   = dccp_v6_conn_request,
	.syn_recv_sock	   = dccp_v6_request_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
};
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int dccp_v6_init_sock(struct sock *sk)
{
	static __u8 dccp_v6_ctl_sock_initialized;
	int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized);

	if (err == 0) {
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
	}

	return err;
}
static int dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	return inet6_destroy_sock(sk);
}
static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};
static struct proto dccp_v6_prot = {
	.name		   = "DCCPv6",
	.owner		   = THIS_MODULE,
	.close		   = dccp_close,
	.connect	   = dccp_v6_connect,
	.disconnect	   = dccp_disconnect,
	.ioctl		   = dccp_ioctl,
	.init		   = dccp_v6_init_sock,
	.setsockopt	   = dccp_setsockopt,
	.getsockopt	   = dccp_getsockopt,
	.sendmsg	   = dccp_sendmsg,
	.recvmsg	   = dccp_recvmsg,
	.backlog_rcv	   = dccp_v6_do_rcv,
	.hash		   = dccp_v6_hash,
	.unhash		   = dccp_unhash,
	.accept		   = inet_csk_accept,
	.get_port	   = dccp_v6_get_port,
	.shutdown	   = dccp_shutdown,
	.destroy	   = dccp_v6_destroy_sock,
	.orphan_count	   = &dccp_orphan_count,
	.max_header	   = MAX_DCCP_HEADER,
	.obj_size	   = sizeof(struct dccp6_sock),
	.rsk_prot	   = &dccp6_request_sock_ops,
	.twsk_prot	   = &dccp6_timewait_sock_ops,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_dccp_setsockopt,
	.compat_getsockopt = compat_dccp_getsockopt,
#endif
};
static struct inet6_protocol dccp_v6_protocol = {
	.handler     = dccp_v6_rcv,
	.err_handler = dccp_v6_err,
	.flags	     = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
static struct proto_ops inet6_dccp_ops = {
	.family		   = PF_INET6,
	.owner		   = THIS_MODULE,
	.release	   = inet6_release,
	.bind		   = inet6_bind,
	.connect	   = inet_stream_connect,
	.socketpair	   = sock_no_socketpair,
	.accept		   = inet_accept,
	.getname	   = inet6_getname,
	.poll		   = dccp_poll,
	.ioctl		   = inet6_ioctl,
	.listen		   = inet_dccp_listen,
	.shutdown	   = inet_shutdown,
	.setsockopt	   = sock_common_setsockopt,
	.getsockopt	   = sock_common_getsockopt,
	.sendmsg	   = inet_sendmsg,
	.recvmsg	   = sock_common_recvmsg,
	.mmap		   = sock_no_mmap,
	.sendpage	   = sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_sock_common_setsockopt,
	.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw dccp_v6_protosw = {
	.type		= SOCK_DCCP,
	.protocol	= IPPROTO_DCCP,
	.prot		= &dccp_v6_prot,
	.ops		= &inet6_dccp_ops,
	.flags		= INET_PROTOSW_ICSK,
};
static int __init dccp_v6_init(void)
{
	int err = proto_register(&dccp_v6_prot, 1);

	if (err != 0)
		goto out;

	err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_proto;

	inet6_register_protosw(&dccp_v6_protosw);

	err = inet_csk_ctl_sock_create(&dccp_v6_ctl_socket, PF_INET6,
				       SOCK_DCCP, IPPROTO_DCCP);
	if (err != 0)
		goto out_unregister_protosw;
out:
	return err;
out_unregister_protosw:
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
out_unregister_proto:
	proto_unregister(&dccp_v6_prot);
	goto out;
}
static void __exit dccp_v6_exit(void)
{
	inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP);
	inet6_unregister_protosw(&dccp_v6_protosw);
	proto_unregister(&dccp_v6_prot);
}
module_init(dccp_v6_init);
module_exit(dccp_v6_exit);
/*
 * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP
 * (33) values directly. Also cover the case where the protocol is not
 * specified, i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP.
 */
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-33-type-6");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-0-type-6");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>");
MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");