/*
 *	Extension Header handling for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Andi Kleen		<ak@muc.de>
 *	Alexey Kuznetsov	<kuznet@ms2.inr.ac.ru>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/* Changes:
 *	yoshfuji		: ensure not to overrun while parsing
 *				  tlv options.
 *	Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
 *	YOSHIFUJI Hideaki @USAGI  Register inbound extension header
 *				  handlers as inet6_protocol{}.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/dst.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
#include <net/xfrm.h>
#endif

#include <asm/uaccess.h>
/*
 * Search for a TLV-encoded option of @type inside the extension header
 * that starts at @offset; returns the option's offset, or -1 if it is
 * not found or the header is malformed.
 */
int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
{
	const unsigned char *nh = skb_network_header(skb);
	int packet_len = skb->tail - skb->network_header;
	struct ipv6_opt_hdr *hdr;
	int len;

	if (offset + 2 > packet_len)
		goto bad;
	hdr = (struct ipv6_opt_hdr *)(nh + offset);
	len = ((hdr->hdrlen + 1) << 3);

	if (offset + len > packet_len)
		goto bad;

	offset += 2;
	len -= 2;

	while (len > 0) {
		int opttype = nh[offset];
		int optlen;

		if (opttype == type)
			return offset;

		switch (opttype) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		default:
			optlen = nh[offset + 1] + 2;
			if (optlen > len)
				goto bad;
			break;
		}
		offset += optlen;
		len -= optlen;
	}
	/* not found */
 bad:
	return -1;
}
EXPORT_SYMBOL_GPL(ipv6_find_tlv);
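/*
 * Illustrative sketch (not from this file) of how a caller can use
 * ipv6_find_tlv(): locate the Home Address option inside an already
 * parsed destination options header whose offset is recorded in
 * IP6CB(skb)->dsthao.  The local variable names are made up here.
 *
 *	struct ipv6_destopt_hao *hao = NULL;
 *	struct inet6_skb_parm *opt = IP6CB(skb);
 *	int hao_off;
 *
 *	if (opt->dsthao) {
 *		hao_off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO);
 *		if (hao_off >= 0)
 *			hao = (struct ipv6_destopt_hao *)
 *				(skb_network_header(skb) + hao_off);
 *	}
 */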
/*
 *	Parsing tlv encoded headers.
 *
 *	Parsing function "func" returns true if parsing succeeded
 *	and false if it failed.
 *	It MUST NOT touch skb->h.
 */

struct tlvtype_proc {
	int	type;
	bool	(*func)(struct sk_buff *skb, int offset);
};
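/*
 * Minimal sketch of a conforming handler, for illustration only (the
 * handler name and its "length must be 4" rule are invented for the
 * example): validate the option at @optoff, return true to continue
 * parsing, or free the skb and return false on error.
 *
 *	static bool ipv6_example_tlv(struct sk_buff *skb, int optoff)
 *	{
 *		const unsigned char *nh = skb_network_header(skb);
 *
 *		if (nh[optoff + 1] != 4) {
 *			kfree_skb(skb);
 *			return false;
 *		}
 *		return true;
 *	}
 */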
/*********************
  Generic functions
 *********************/
/* An unknown option is detected, decide what to do */

static bool ip6_tlvopt_unknown(struct sk_buff *skb, int optoff)
{
	switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) {
	case 0: /* ignore */
		return true;

	case 1: /* drop packet */
		break;

	case 3: /* Send ICMP if not a multicast address and drop packet */
		/* Actually, it is redundant check. icmp_send
		   will recheck in any case.
		 */
		if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr))
			break;
		/* fall through */
	case 2: /* send ICMP PARM PROB regardless and drop packet */
		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
		return false;
	}

	kfree_skb(skb);
	return false;
}
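/*
 * Worked example of the action encoding above (RFC 2460, section 4.2):
 * the two high-order bits of the option type select what to do with an
 * unrecognized option.
 *
 *	00 -> skip the option			(e.g. Router Alert, type 0x05)
 *	01 -> silently discard			(the kfree_skb() path)
 *	10 -> always send ICMP Parameter Problem, code 2
 *	11 -> send ICMP only if the destination was not multicast
 *						(e.g. Home Address, type 0xC9)
 */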
/* Parse tlv encoded option header (hop-by-hop or destination) */

static bool ip6_parse_tlv(const struct tlvtype_proc *procs, struct sk_buff *skb)
{
	const struct tlvtype_proc *curr;
	const unsigned char *nh = skb_network_header(skb);
	int off = skb_network_header_len(skb);
	int len = (skb_transport_header(skb)[1] + 1) << 3;
	int padlen = 0;

	if (skb_transport_offset(skb) + len > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;
		int i;

		switch (nh[off]) {
		case IPV6_TLV_PAD1:
			optlen = 1;
			padlen++;
			if (padlen > 7)
				goto bad;
			break;

		case IPV6_TLV_PADN:
			/* RFC 2460 states that the purpose of PadN is
			 * to align the containing header to multiples
			 * of 8. 7 is therefore the highest valid value.
			 * See also RFC 4942, Section 2.1.9.5.
			 */
			padlen += optlen;
			if (padlen > 7)
				goto bad;
			/* RFC 4942 recommends receiving hosts to
			 * actively check PadN payload to contain
			 * only zeroes.
			 */
			for (i = 2; i < optlen; i++) {
				if (nh[off + i] != 0)
					goto bad;
			}
			break;

		default: /* Other TLV code so scan list */
			if (optlen > len)
				goto bad;
			for (curr = procs; curr->type >= 0; curr++) {
				if (curr->type == nh[off]) {
					/* type specific length/alignment
					   checks will be performed in the
					   func(). */
					if (curr->func(skb, off) == false)
						return false;
					break;
				}
			}
			if (curr->type < 0) {
				if (ip6_tlvopt_unknown(skb, off) == 0)
					return false;
			}
			padlen = 0;
			break;
		}
		off += optlen;
		len -= optlen;
	}

	/* This case will not be caught by above check since its padding
	 * length is smaller than 7:
	 * 1 byte NH + 1 byte Length + 6 bytes Padding
	 */
	if ((padlen == 6) && ((off - skb_network_header_len(skb)) == 8))
		goto bad;

	if (len == 0)
		return true;
bad:
	kfree_skb(skb);
	return false;
}
/*****************************
  Destination options header.
 *****************************/
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
static bool ipv6_dest_hao(struct sk_buff *skb, int optoff)
{
	struct ipv6_destopt_hao *hao;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct in6_addr tmp_addr;
	int ret;

	if (opt->dsthao) {
		LIMIT_NETDEBUG(KERN_DEBUG "hao duplicated\n");
		goto discard;
	}
	opt->dsthao = opt->dst1;
	opt->dst1 = 0;

	hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff);

	if (hao->length != 16) {
		LIMIT_NETDEBUG(
			KERN_DEBUG "hao invalid option length = %d\n", hao->length);
		goto discard;
	}

	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
		LIMIT_NETDEBUG(
			KERN_DEBUG "hao is not an unicast addr: %pI6\n", &hao->addr);
		goto discard;
	}

	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
	if (unlikely(ret < 0))
		goto discard;

	if (skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto discard;

		/* update the variables used below to point at the copied skbuff */
		hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) +
						  optoff);
		ipv6h = ipv6_hdr(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	tmp_addr = ipv6h->saddr;
	ipv6h->saddr = hao->addr;
	hao->addr = tmp_addr;

	if (skb->tstamp.tv64 == 0)
		__net_timestamp(skb);

	return true;

 discard:
	kfree_skb(skb);
	return false;
}
#endif
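/*
 * Worked example of the swap above (Mobile IPv6, RFC 3775 route
 * optimization): a mobile node sends from its care-of address and
 * carries its home address in the HAO.  On input the two are
 * exchanged, so the rest of the stack sees the packet as if it had
 * been sent from the home address:
 *
 *	before:  ipv6h->saddr = care-of address,  hao->addr = home address
 *	after:   ipv6h->saddr = home address,     hao->addr = care-of address
 */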
static const struct tlvtype_proc tlvprocdestopt_lst[] = {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	{
		.type	= IPV6_TLV_HAO,
		.func	= ipv6_dest_hao,
	},
#endif
	{ -1, NULL }
};
static int ipv6_destopt_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	__u16 dstbuf;
#endif
	struct dst_entry *dst = skb_dst(skb);

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
				 IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	opt->lastopt = opt->dst1 = skb_network_header_len(skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	dstbuf = opt->dst1;
#endif

	if (ip6_parse_tlv(tlvprocdestopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		opt->nhoff = dstbuf;
#else
		opt->nhoff = opt->dst1;
#endif
		return 1;
	}

	IP6_INC_STATS_BH(dev_net(dst->dev),
			 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
	return -1;
}
/********************************
  Routing header.
 ********************************/
/* called with rcu_read_lock() */
static int ipv6_rthdr_rcv(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr = NULL;
	struct in6_addr daddr;
	struct inet6_dev *idev;
	int n, i;
	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;
	struct net *net = dev_net(skb->dev);
	int accept_source_route = net->ipv6.devconf_all->accept_source_route;

	idev = __in6_dev_get(skb->dev);
	if (idev && accept_source_route > idev->cnf.accept_source_route)
		accept_source_route = idev->cnf.accept_source_route;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

looped_back:
	if (hdr->segments_left == 0) {
		switch (hdr->type) {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
		case IPV6_SRCRT_TYPE_2:
			/* Silently discard type 2 header unless it was
			 * processed by this node itself (i.e. we looped
			 * back above after consuming the final segment).
			 */
			if (!addr) {
				IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
						 IPSTATS_MIB_INADDRERRORS);
				kfree_skb(skb);
				return -1;
			}
			break;
#endif
		default:
			break;
		}

		opt->lastopt = opt->srcrt = skb_network_header_len(skb);
		skb->transport_header += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->dst1 = 0;
		opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb);
		return 1;
	}

	switch (hdr->type) {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	case IPV6_SRCRT_TYPE_2:
		if (accept_source_route < 0)
			goto unknown_rh;
		/* Silently discard invalid RTH type 2 */
		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		goto unknown_rh;
	}

	/*
	 *	This is the routing header forwarding algorithm from
	 *	RFC 2460, page 16.
	 */

	n = hdr->hdrlen >> 1;

	if (hdr->segments_left > n) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((&hdr->segments_left) -
				   skb_network_header(skb)));
		return -1;
	}

	/* We are about to mangle packet header. Be careful!
	   Do not damage packets queued somewhere.
	 */
	if (skb_cloned(skb)) {
		/* the copy is a forwarded packet */
		if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_OUTDISCARDS);
			kfree_skb(skb);
			return -1;
		}
		hdr = (struct ipv6_rt_hdr *)skb_transport_header(skb);
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;

	i = n - --hdr->segments_left;

	rthdr = (struct rt0_hdr *) hdr;
	addr = rthdr->addr;
	addr += i - 1;

	switch (hdr->type) {
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
	case IPV6_SRCRT_TYPE_2:
		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
				     IPPROTO_ROUTING) < 0) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INADDRERRORS);
			kfree_skb(skb);
			return -1;
		}
		break;
#endif
	default:
		break;
	}

	if (ipv6_addr_is_multicast(addr)) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INADDRERRORS);
		kfree_skb(skb);
		return -1;
	}

	daddr = *addr;
	*addr = ipv6_hdr(skb)->daddr;
	ipv6_hdr(skb)->daddr = daddr;

	skb_dst_drop(skb);
	ip6_route_input(skb);
	if (skb_dst(skb)->error) {
		skb_push(skb, skb->data - skb_network_header(skb));
		dst_input(skb);
		return -1;
	}

	if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
		if (ipv6_hdr(skb)->hop_limit <= 1) {
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
				    0);
			kfree_skb(skb);
			return -1;
		}
		ipv6_hdr(skb)->hop_limit--;
		goto looped_back;
	}

	skb_push(skb, skb->data - skb_network_header(skb));
	dst_input(skb);
	return -1;

unknown_rh:
	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
			  (&hdr->type) - skb_network_header(skb));
	return -1;
}
static const struct inet6_protocol rthdr_protocol = {
	.handler	=	ipv6_rthdr_rcv,
	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};

static const struct inet6_protocol destopt_protocol = {
	.handler	=	ipv6_destopt_rcv,
	.flags		=	INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};

static const struct inet6_protocol nodata_protocol = {
	.handler	=	dst_discard,
	.flags		=	INET6_PROTO_NOPOLICY,
};
int __init ipv6_exthdrs_init(void)
{
	int ret;

	ret = inet6_add_protocol(&rthdr_protocol, IPPROTO_ROUTING);
	if (ret)
		goto out;

	ret = inet6_add_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	if (ret)
		goto out_rthdr;

	ret = inet6_add_protocol(&nodata_protocol, IPPROTO_NONE);
	if (ret)
		goto out_destopt;

out:
	return ret;
out_destopt:
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
out_rthdr:
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
	goto out;
}

void ipv6_exthdrs_exit(void)
{
	inet6_del_protocol(&nodata_protocol, IPPROTO_NONE);
	inet6_del_protocol(&destopt_protocol, IPPROTO_DSTOPTS);
	inet6_del_protocol(&rthdr_protocol, IPPROTO_ROUTING);
}
/**********************************
  Hop-by-hop options.
 **********************************/
/*
 * Note: we cannot rely on skb_dst(skb) before we assign it in ip6_route_input().
 */
static inline struct inet6_dev *ipv6_skb_idev(struct sk_buff *skb)
{
	return skb_dst(skb) ? ip6_dst_idev(skb_dst(skb)) : __in6_dev_get(skb->dev);
}

static inline struct net *ipv6_skb_net(struct sk_buff *skb)
{
	return skb_dst(skb) ? dev_net(skb_dst(skb)->dev) : dev_net(skb->dev);
}
/* Router Alert as of RFC 2711 */

static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);

	if (nh[optoff + 1] == 2) {
		IP6CB(skb)->ra = optoff;
		return true;
	}
	LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
		       nh[optoff + 1]);
	kfree_skb(skb);
	return false;
}
/* Jumbo payload */

static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
{
	const unsigned char *nh = skb_network_header(skb);
	struct net *net = ipv6_skb_net(skb);
	u32 pkt_len;

	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
		LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
			       nh[optoff + 1]);
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
	if (pkt_len <= IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff + 2);
		return false;
	}
	if (ipv6_hdr(skb)->payload_len) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
		return false;
	}

	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
				 IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	}

	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
		goto drop;

	return true;

drop:
	kfree_skb(skb);
	return false;
}
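/*
 * Worked example of the option validated above (RFC 2675): a jumbogram
 * carries option type 0xC2, option length 4 and a 32-bit jumbo payload
 * length, while the IPv6 header's payload_len is zero:
 *
 *	+--------+--------+--------+--------+--------+--------+
 *	|  0xC2  |  0x04  |    32-bit jumbo payload length    |
 *	+--------+--------+--------+--------+--------+--------+
 *
 * The carried length must exceed IPV6_MAXPLEN (65535), the option must
 * start at an offset of the form 4n + 2, and the skb is then trimmed to
 * exactly sizeof(struct ipv6hdr) + pkt_len.
 */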
static const struct tlvtype_proc tlvprochopopt_lst[] = {
	{
		.type	= IPV6_TLV_ROUTERALERT,
		.func	= ipv6_hop_ra,
	},
	{
		.type	= IPV6_TLV_JUMBO,
		.func	= ipv6_hop_jumbo,
	},
	{ -1, NULL }
};
int ipv6_parse_hopopts(struct sk_buff *skb)
{
	struct inet6_skb_parm *opt = IP6CB(skb);

	/*
	 * skb_network_header(skb) is equal to skb->data, and
	 * skb_network_header_len(skb) is always equal to
	 * sizeof(struct ipv6hdr) by definition of
	 * hop-by-hop options.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
	    !pskb_may_pull(skb, (sizeof(struct ipv6hdr) +
				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
		kfree_skb(skb);
		return -1;
	}

	opt->hop = sizeof(struct ipv6hdr);
	if (ip6_parse_tlv(tlvprochopopt_lst, skb)) {
		skb->transport_header += (skb_transport_header(skb)[1] + 1) << 3;
		opt = IP6CB(skb);
		opt->nhoff = sizeof(struct ipv6hdr);
		return 1;
	}
	return -1;
}
/*
 *	Creating outbound headers.
 *
 *	"build" functions work when skb is filled from head to tail (datagram)
 *	"push"	functions work when headers are added from tail to head (tcp)
 *
 *	In both cases we assume that the caller reserved enough room
 *	for headers.
 */
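/*
 * Worked example of the push order (illustration, not extra code in
 * this file): for a txoptions block carrying hopopt, dst0opt and srcrt,
 * ipv6_push_nfrag_opts() below pushes the routing header first, then
 * the destination options, then the hop-by-hop options.  Because each
 * push prepends to the skb and rewrites *proto, the resulting wire
 * order is the one recommended by RFC 2460:
 *
 *	IPv6 header | Hop-by-Hop | Destination Options | Routing | ...
 */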
/*
 * Emit a routing header: copy the remaining hops, store the final
 * destination in the last slot and redirect *addr_p to the first hop
 * so the caller can use it as the packet's destination address.
 */
static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
			    struct ipv6_rt_hdr *opt,
			    struct in6_addr **addr_p)
{
	struct rt0_hdr *phdr, *ihdr;
	int hops;

	ihdr = (struct rt0_hdr *) opt;

	phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));

	hops = ihdr->rt_hdr.hdrlen >> 1;

	if (hops > 1)
		memcpy(phdr->addr, ihdr->addr + 1,
		       (hops - 1) * sizeof(struct in6_addr));

	phdr->addr[hops - 1] = **addr_p;
	*addr_p = ihdr->addr;

	phdr->rt_hdr.nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
}
static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
{
	struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));

	memcpy(h, opt, ipv6_optlen(opt));
	h->nexthdr = *proto;
	*proto = type;
}
void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			  u8 *proto,
			  struct in6_addr **daddr)
{
	if (opt->srcrt) {
		ipv6_push_rthdr(skb, proto, opt->srcrt, daddr);
		/*
		 * IPV6_RTHDRDSTOPTS is ignored
		 * unless IPV6_RTHDR is set (RFC3542).
		 */
		if (opt->dst0opt)
			ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	}
	if (opt->hopopt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
}
EXPORT_SYMBOL(ipv6_push_nfrag_opts);
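/*
 * Sketch of the typical caller pattern (assumed, mirroring ip6_xmit()
 * style code; the local names proto and first_hop are illustrative):
 * the next-header byte and the destination pointer are threaded through
 * so the chain of extension headers links up.
 *
 *	u8 proto = sk->sk_protocol;
 *	struct in6_addr *first_hop = &fl6->daddr;
 *
 *	if (opt && opt->opt_nflen)
 *		ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
 *	// proto now names the first extension header pushed, and
 *	// first_hop may have been redirected to the source route's
 *	// first segment by ipv6_push_rthdr().
 */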
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
{
	if (opt->dst1opt)
		ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
}
EXPORT_SYMBOL(ipv6_push_frag_opts);
struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
{
	struct ipv6_txoptions *opt2;

	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
	if (opt2) {
		long dif = (char *)opt2 - (char *)opt;

		memcpy(opt2, opt, opt->tot_len);
		if (opt2->hopopt)
			*((char **)&opt2->hopopt) += dif;
		if (opt2->dst0opt)
			*((char **)&opt2->dst0opt) += dif;
		if (opt2->dst1opt)
			*((char **)&opt2->dst1opt) += dif;
		if (opt2->srcrt)
			*((char **)&opt2->srcrt) += dif;
	}
	return opt2;
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);
static int ipv6_renew_option(void *ohdr,
			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
			     int inherit,
			     struct ipv6_opt_hdr **hdr,
			     char **p)
{
	if (inherit) {
		if (ohdr) {
			memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
			*hdr = (struct ipv6_opt_hdr *)*p;
			*p += CMSG_ALIGN(ipv6_optlen(*hdr));
		}
	} else {
		if (newopt) {
			if (copy_from_user(*p, newopt, newoptlen))
				return -EFAULT;
			*hdr = (struct ipv6_opt_hdr *)*p;
			if (ipv6_optlen(*hdr) > newoptlen)
				return -EINVAL;
			*p += CMSG_ALIGN(newoptlen);
		}
	}
	return 0;
}
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   int newtype,
		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
{
	int tot_len = 0;
	char *p;
	struct ipv6_txoptions *opt2;
	int err;

	if (opt) {
		if (newtype != IPV6_HOPOPTS && opt->hopopt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
		if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
		if (newtype != IPV6_RTHDR && opt->srcrt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
		if (newtype != IPV6_DSTOPTS && opt->dst1opt)
			tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	}

	if (newopt && newoptlen)
		tot_len += CMSG_ALIGN(newoptlen);

	if (!tot_len)
		return NULL;

	tot_len += sizeof(*opt2);
	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
	if (!opt2)
		return ERR_PTR(-ENOBUFS);

	memset(opt2, 0, tot_len);

	opt2->tot_len = tot_len;
	p = (char *)(opt2 + 1);

	err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
				newtype != IPV6_HOPOPTS,
				&opt2->hopopt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDRDSTOPTS,
				&opt2->dst0opt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDR,
				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
	if (err)
		goto out;

	err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
				newtype != IPV6_DSTOPTS,
				&opt2->dst1opt, &p);
	if (err)
		goto out;

	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);

	return opt2;
out:
	sock_kfree_s(sk, opt2, opt2->tot_len);
	return ERR_PTR(err);
}
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
					  struct ipv6_txoptions *opt)
{
	/*
	 * ignore the dest before srcrt unless srcrt is being included.
	 * --yoshfuji
	 */
	if (opt && opt->dst0opt && !opt->srcrt) {
		if (opt_space != opt) {
			memcpy(opt_space, opt, sizeof(*opt_space));
			opt_space->opt_nflen = 0;
			opt = opt_space;
		}
		opt->opt_nflen -= ipv6_optlen(opt->dst0opt);
		opt->dst0opt = NULL;
	}

	return opt;
}
EXPORT_SYMBOL_GPL(ipv6_fixup_options);
/**
 * fl6_update_dst - update flowi destination address with info given
 *                  by srcrt option, if any.
 *
 * @fl6: flowi6 for which daddr is to be updated
 * @opt: struct ipv6_txoptions in which to look for srcrt opt
 * @orig: copy of original daddr address if modified
 *
 * Returns NULL if no txoptions or no srcrt; otherwise returns orig
 * with the initial value of fl6->daddr stored in it.
 */
struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
				const struct ipv6_txoptions *opt,
				struct in6_addr *orig)
{
	if (!opt || !opt->srcrt)
		return NULL;

	*orig = fl6->daddr;
	fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
	return orig;
}
EXPORT_SYMBOL_GPL(fl6_update_dst);
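/*
 * Sketch of the usual caller pattern (assumed, in the style of the TCP
 * and datagram code; the names final and final_p are illustrative):
 * keep a copy of the real final destination while fl6->daddr is
 * redirected to the first source-route hop.
 *
 *	struct in6_addr *final_p, final;
 *
 *	final_p = fl6_update_dst(&fl6, np->opt, &final);
 *	// fl6.daddr now holds the first source-route hop;
 *	// final_p (if non-NULL) still names the real destination and is
 *	// handed to the route lookup as the final destination.
 */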