2 * Extension Header handling for IPv6
3 * Linux INET6 implementation
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Andi Kleen <ak@muc.de>
8 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
10 * $Id: exthdrs.c,v 1.13 2001/06/19 15:58:56 davem Exp $
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
19 * yoshfuji : ensure not to overrun while parsing
21 * Mitsuru KANDA @USAGI and: Remove ipv6_parse_exthdrs().
22 * YOSHIFUJI Hideaki @USAGI Register inbound extension header
23 * handlers as inet6_protocol{}.
26 #include <linux/errno.h>
27 #include <linux/types.h>
28 #include <linux/socket.h>
29 #include <linux/sockios.h>
30 #include <linux/sched.h>
31 #include <linux/net.h>
32 #include <linux/netdevice.h>
33 #include <linux/in6.h>
34 #include <linux/icmpv6.h>
40 #include <net/protocol.h>
41 #include <net/transp_v6.h>
42 #include <net/rawv6.h>
43 #include <net/ndisc.h>
44 #include <net/ip6_route.h>
45 #include <net/addrconf.h>
46 #ifdef CONFIG_IPV6_MIP6
50 #include <asm/uaccess.h>
/*
 * ipv6_find_tlv - search a TLV-encoded extension header for an option.
 * @skb:    packet; skb->nh.raw points at the IPv6 header
 * @offset: byte offset (from skb->nh.raw) of the extension header
 * @type:   TLV option type to look for
 *
 * NOTE(review): this excerpt is truncated — the per-option scan loop,
 * Pad1/PadN handling and the return statements are not visible here;
 * only the bounds checks and field reads remain.
 */
int ipv6_find_tlv(struct sk_buff *skb, int offset, int type)
	/* Bytes from the IPv6 header to the end of the linear data. */
	int packet_len = skb->tail - skb->nh.raw;
	struct ipv6_opt_hdr *hdr;
	/* Need at least the 2-byte (nexthdr, hdrlen) prologue in range. */
	if (offset + 2 > packet_len)
	hdr = (struct ipv6_opt_hdr *)(skb->nh.raw + offset);
	/* hdrlen counts 8-octet units, not including the first 8 octets. */
	len = ((hdr->hdrlen + 1) << 3);
	/* The whole extension header must lie inside the packet. */
	if (offset + len > packet_len)
	/* First byte of a TLV is the option type... */
	int opttype = skb->nh.raw[offset];
	/* ...second byte is the data length; +2 covers the type/len bytes. */
	optlen = skb->nh.raw[offset + 1] + 2;
95 * Parsing tlv encoded headers.
97 * Parsing function "func" returns 1, if parsing succeed
98 * and 0, if it failed.
99 * It MUST NOT touch skb->h.
/*
 * Table entry used to dispatch TLV-encoded option handlers
 * (hop-by-hop or destination options).  Tables are terminated by an
 * entry with a negative type (ip6_parse_tlv scans while type >= 0).
 *
 * The handler returns 1 on success, 0 on failure, and MUST NOT
 * touch skb->h (see the file comment above).
 *
 * NOTE(review): the excerpt had lost the "type" member (referenced
 * later as curr->type) and the closing brace; both restored here.
 */
struct tlvtype_proc {
	int	type;	/* TLV option type, or -1 to end the table */
	int	(*func)(struct sk_buff **skbp, int offset);
};
107 /*********************
109 *********************/
111 /* An unknown option is detected, decide what to do */
/*
 * ip6_tlvopt_unknown - handle a TLV option with no registered parser.
 * Dispatches on the two high bits of the option type (RFC 2460 4.2):
 * 00 skip, 01 drop, 10 always send ICMP parameter problem,
 * 11 send ICMP only if the destination is not multicast.
 * NOTE(review): excerpt truncated — case 0, the drop path and the
 * return statements are not visible here.
 */
static int ip6_tlvopt_unknown(struct sk_buff **skbp, int optoff)
	struct sk_buff *skb = *skbp;
	/* Top two bits of the option type select the action. */
	switch ((skb->nh.raw[optoff] & 0xC0) >> 6) {
	case 1: /* drop packet */
	case 3: /* Send ICMP if not a multicast address and drop packet */
		/* Actually, it is redundant check. icmp_send
		   will recheck in any case. */
		if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr))
	case 2: /* send ICMP PARM PROB regardless and drop packet */
		icmpv6_param_prob(skb, ICMPV6_UNK_OPTION, optoff);
139 /* Parse tlv encoded option header (hop-by-hop or destination) */
/*
 * ip6_parse_tlv - walk a TLV-encoded option header (hop-by-hop or
 * destination options) starting at skb->h.raw, dispatching each option
 * to its handler from @procs; unlisted types fall back to
 * ip6_tlvopt_unknown().  Returns 1 on success, 0 on failure
 * (per the handler contract documented above).
 * NOTE(review): excerpt truncated — the Pad1/PadN cases, the loop
 * framing and the final returns are not visible here.
 */
static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff **skbp)
	struct sk_buff *skb = *skbp;
	struct tlvtype_proc *curr;
	/* Offset of this option header relative to the IPv6 header. */
	int off = skb->h.raw - skb->nh.raw;
	/* hdrlen byte is in 8-octet units, excluding the first 8 octets. */
	int len = ((skb->h.raw[1]+1)<<3);
	/* The whole header must be within the linear part of the skb. */
	if ((skb->h.raw + len) - skb->data > skb_headlen(skb))
	/* Option data length plus the 2 type/length bytes. */
	int optlen = skb->nh.raw[off+1]+2;
	switch (skb->nh.raw[off]) {
	default: /* Other TLV code so scan list */
		for (curr=procs; curr->type >= 0; curr++) {
			if (curr->type == skb->nh.raw[off]) {
				/* type specific length/alignment
				   checks will be performed in the
				   handler's func(). */
				if (curr->func(skbp, off) == 0)
		/* No table entry matched: apply the unknown-option rules. */
		if (curr->type < 0) {
			if (ip6_tlvopt_unknown(skbp, off) == 0)
194 /*****************************
195 Destination options header.
196 *****************************/
#ifdef CONFIG_IPV6_MIP6
/*
 * ipv6_dest_hao - Mobile IPv6 Home Address destination option
 * (RFC 3775).  Validates the option, lets XFRM verify the
 * (home address, care-of address) binding, un-clones the skb if
 * necessary, then swaps the IPv6 source address with the home
 * address carried in the option.
 * NOTE(review): excerpt truncated — the duplicate-HAO guard framing,
 * the error-path discards/returns and part of the skb2 handover are
 * not visible here.
 */
static int ipv6_dest_hao(struct sk_buff **skbp, int optoff)
	struct sk_buff *skb = *skbp;
	struct ipv6_destopt_hao *hao;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->nh.raw;
	struct in6_addr tmp_addr;
	/* A packet may carry at most one home-address option. */
	LIMIT_NETDEBUG(KERN_DEBUG "hao duplicated\n");
	/* Record where this destination-options header sits. */
	opt->dsthao = opt->dst1;
	hao = (struct ipv6_destopt_hao *)(skb->nh.raw + optoff);
	/* HAO payload is exactly one IPv6 address (16 bytes). */
	if (hao->length != 16) {
		KERN_DEBUG "hao invalid option length = %d\n", hao->length);
	/* The home address must be a unicast address. */
	if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) {
		KERN_DEBUG "hao is not an unicast addr: " NIP6_FMT "\n", NIP6(hao->addr));
	/* Let XFRM check the home/care-of address pair for this flow. */
	ret = xfrm6_input_addr(skb, (xfrm_address_t *)&ipv6h->daddr,
			       (xfrm_address_t *)&hao->addr, IPPROTO_DSTOPTS);
	if (unlikely(ret < 0))
	/* About to rewrite the header: take a private copy first. */
	if (skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
		/* update all variable using below by copied skbuff */
		hao = (struct ipv6_destopt_hao *)(skb2->nh.raw + optoff);
		ipv6h = (struct ipv6hdr *)skb2->nh.raw;
	/* Swapping addresses invalidates a complete hardware checksum. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	/* Swap saddr with the home address carried in the option. */
	ipv6_addr_copy(&tmp_addr, &ipv6h->saddr);
	ipv6_addr_copy(&ipv6h->saddr, &hao->addr);
	ipv6_addr_copy(&hao->addr, &tmp_addr);
	/* Timestamp the packet if it has not been stamped yet. */
	if (skb->tstamp.off_sec == 0)
		__net_timestamp(skb);
265 static struct tlvtype_proc tlvprocdestopt_lst
[] = {
266 #ifdef CONFIG_IPV6_MIP6
268 .type
= IPV6_TLV_HAO
,
269 .func
= ipv6_dest_hao
,
/*
 * ipv6_destopt_rcv - inbound handler for the destination options
 * header.  Pulls the full header into the linear area, records its
 * offset in the skb control block, parses its TLVs and steps skb->h
 * past the header on success.
 * NOTE(review): excerpt truncated — the MIP6 dsthao bookkeeping, the
 * success return of the next-header value and the failure
 * kfree_skb/return path are not visible here.
 */
static int ipv6_destopt_rcv(struct sk_buff **skbp)
	struct sk_buff *skb = *skbp;
	struct inet6_skb_parm *opt = IP6CB(skb);
#ifdef CONFIG_IPV6_MIP6
	/* Make sure the fixed 8 bytes, then the full header length
	 * (hdrlen in 8-octet units), are in the linear area. */
	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
	    !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
	/* Offset of this header from the IPv6 header, for cmsg use. */
	opt->lastopt = skb->h.raw - skb->nh.raw;
	opt->dst1 = skb->h.raw - skb->nh.raw;
#ifdef CONFIG_IPV6_MIP6
	if (ip6_parse_tlv(tlvprocdestopt_lst, skbp)) {
		/* Step over the header we just parsed. */
		skb->h.raw += ((skb->h.raw[1]+1)<<3);
#ifdef CONFIG_IPV6_MIP6
		opt->nhoff = opt->dst1;
	/* TLV parsing failed: count it as a header error. */
	IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
311 static struct inet6_protocol destopt_protocol
= {
312 .handler
= ipv6_destopt_rcv
,
313 .flags
= INET6_PROTO_NOPOLICY
| INET6_PROTO_GSO_EXTHDR
,
316 void __init
ipv6_destopt_init(void)
318 if (inet6_add_protocol(&destopt_protocol
, IPPROTO_DSTOPTS
) < 0)
319 printk(KERN_ERR
"ipv6_destopt_init: Could not register protocol\n");
322 /********************************
323 NONE header. No data in packet.
324 ********************************/
/*
 * ipv6_nodata_rcv - handler for the "no next header" value
 * (IPPROTO_NONE): nothing follows, so there is no payload to deliver.
 * NOTE(review): excerpt truncated — the body (presumably freeing the
 * skb and returning) is not visible here.
 */
static int ipv6_nodata_rcv(struct sk_buff **skbp)
	struct sk_buff *skb = *skbp;
334 static struct inet6_protocol nodata_protocol
= {
335 .handler
= ipv6_nodata_rcv
,
336 .flags
= INET6_PROTO_NOPOLICY
,
339 void __init
ipv6_nodata_init(void)
341 if (inet6_add_protocol(&nodata_protocol
, IPPROTO_NONE
) < 0)
342 printk(KERN_ERR
"ipv6_nodata_init: Could not register protocol\n");
345 /********************************
347 ********************************/
/*
 * ipv6_rthdr_rcv - inbound handler for the routing header.
 * Validates the header, then either passes it up (segments_left == 0)
 * or performs one hop of source routing: swaps the next hop from the
 * address vector into daddr, re-routes, and loops/forwards.
 * Handles type 0 (RFC 2460) and, under CONFIG_IPV6_MIP6, type 2
 * (RFC 3775) routing headers.
 * NOTE(review): excerpt heavily truncated — the switch framing,
 * "looped_back:" style labels, drops/returns and several branches are
 * not visible; the surviving fragments are annotated below.
 */
static int ipv6_rthdr_rcv(struct sk_buff **skbp)
	struct sk_buff *skb = *skbp;
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct in6_addr *addr = NULL;
	struct in6_addr daddr;
	struct ipv6_rt_hdr *hdr;
	struct rt0_hdr *rthdr;
	/* Pull the fixed 8 bytes, then the full header (hdrlen in
	 * 8-octet units), into the linear area. */
	if (!pskb_may_pull(skb, (skb->h.raw-skb->data)+8) ||
	    !pskb_may_pull(skb, (skb->h.raw-skb->data)+((skb->h.raw[1]+1)<<3))) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
	hdr = (struct ipv6_rt_hdr *) skb->h.raw;
	/* Source routing is refused for multicast destinations and for
	 * packets not addressed to this host. */
	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr) ||
	    skb->pkt_type != PACKET_HOST) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
	/* No segments left: just record the header and pass it on. */
	if (hdr->segments_left == 0) {
#ifdef CONFIG_IPV6_MIP6
		case IPV6_SRCRT_TYPE_2:
			/* Silently discard type 2 header unless it was
			   ... (comment truncated in this excerpt). */
			IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
		opt->lastopt = skb->h.raw - skb->nh.raw;
		opt->srcrt = skb->h.raw - skb->nh.raw;
		skb->h.raw += (hdr->hdrlen + 1) << 3;
		opt->dst0 = opt->dst1;
		opt->nhoff = (&hdr->nexthdr) - skb->nh.raw;
	case IPV6_SRCRT_TYPE_0:
		/* Type 0 hdrlen must be even (pairs of 8-octet units
		 * make whole 16-byte addresses). */
		if (hdr->hdrlen & 0x01) {
			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw);
#ifdef CONFIG_IPV6_MIP6
	case IPV6_SRCRT_TYPE_2:
		/* Silently discard invalid RTH type 2 */
		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
	/* Unknown routing type: parameter problem on the type field. */
	IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw);
	/* This is the routing header forwarding algorithm from
	 * ... (citation truncated; presumably RFC 2460 — confirm). */
	n = hdr->hdrlen >> 1;
	/* More segments left than addresses present: bad header. */
	if (hdr->segments_left > n) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw);
	/* We are about to mangle packet header. Be careful!
	   Do not damage packets queued somewhere. */
	if (skb_cloned(skb)) {
		struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
		/* the copy is a forwarded packet */
		IP6_INC_STATS_BH(IPSTATS_MIB_OUTDISCARDS);
		hdr = (struct ipv6_rt_hdr *) skb2->h.raw;
	/* Header rewrite invalidates a complete hardware checksum. */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	/* Index of the next address to visit (1-based from the front). */
	i = n - --hdr->segments_left;
	rthdr = (struct rt0_hdr *) hdr;
#ifdef CONFIG_IPV6_MIP6
	case IPV6_SRCRT_TYPE_2:
		/* Type 2: XFRM must approve the (home, care-of) pair... */
		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
				     (xfrm_address_t *)&skb->nh.ipv6h->saddr,
				     IPPROTO_ROUTING) < 0) {
			IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
		/* ...and the address must be one of our home addresses. */
		if (!ipv6_chk_home_addr(addr)) {
			IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
	/* Intermediate hops must not be multicast. */
	if (ipv6_addr_is_multicast(addr)) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INADDRERRORS);
	/* Swap the current daddr with the next address in the vector. */
	ipv6_addr_copy(&daddr, addr);
	ipv6_addr_copy(addr, &skb->nh.ipv6h->daddr);
	ipv6_addr_copy(&skb->nh.ipv6h->daddr, &daddr);
	/* Re-route towards the new destination. */
	dst_release(xchg(&skb->dst, NULL));
	ip6_route_input(skb);
	if (skb->dst->error) {
		skb_push(skb, skb->data - skb->nh.raw);
	/* Route points back at ourselves: loop locally, decrementing
	 * hop limit like a real forwarding hop would. */
	if (skb->dst->dev->flags&IFF_LOOPBACK) {
		if (skb->nh.ipv6h->hop_limit <= 1) {
			IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
			/* NOTE(review): icmpv6_send() argument list is
			 * truncated in this excerpt. */
			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
	skb->nh.ipv6h->hop_limit--;
	/* Reset data to the IPv6 header before re-processing. */
	skb_push(skb, skb->data - skb->nh.raw);
522 static struct inet6_protocol rthdr_protocol
= {
523 .handler
= ipv6_rthdr_rcv
,
524 .flags
= INET6_PROTO_NOPOLICY
| INET6_PROTO_GSO_EXTHDR
,
527 void __init
ipv6_rthdr_init(void)
529 if (inet6_add_protocol(&rthdr_protocol
, IPPROTO_ROUTING
) < 0)
530 printk(KERN_ERR
"ipv6_rthdr_init: Could not register protocol\n");
534 This function inverts received rthdr.
535 NOTE: specs allow to make it automatically only if
536 packet authenticated.
538 I will not discuss it here (though, I am really pissed off at
539 this stupid requirement making rthdr idea useless)
541 Actually, it creates severe problems for us.
542 Embryonic requests has no associated sockets,
543 so that user have no control over it and
544 cannot not only to set reply options, but
545 even to know, that someone wants to connect
548 For now we need to test the engine, so that I created
549 temporary (or permanent) backdoor.
550 If listening socket set IPV6_RTHDR to 2, then we invert header.
/*
 * ipv6_invert_rthdr - build reply tx options whose routing header is
 * the reverse of a received type 0 routing header (see the long
 * rationale comment above this function in the file).
 * Returns a freshly sock_kmalloc()ed ipv6_txoptions, or NULL-ish on
 * the (elided) validation failures.
 * NOTE(review): excerpt truncated — the validation tail, the NULL
 * checks after sock_kmalloc, the loop framing around the address
 * reversal and the return are not visible here.
 */
struct ipv6_txoptions *
ipv6_invert_rthdr(struct sock *sk, struct ipv6_rt_hdr *hdr)
	/*
	   Received rthdr:   [ H1 -> H2 -> ... H_prev ] daddr=ME
	   Inverted result:  [ H_prev -> ... -> H1 ] daddr=sender
	   Note, that IP output engine will rewrite this rthdr
	   by rotating it left by one addr.
	 */
	struct rt0_hdr *rthdr = (struct rt0_hdr *)hdr;
	struct rt0_hdr *irthdr;
	struct ipv6_txoptions *opt;
	int hdrlen = ipv6_optlen(hdr);
	/* Only a fully-consumed type 0 header can be inverted. */
	if (hdr->segments_left ||
	    hdr->type != IPV6_SRCRT_TYPE_0 ||
	/* Number of 16-byte addresses in the vector. */
	n = hdr->hdrlen >> 1;
	opt = sock_kmalloc(sk, sizeof(*opt) + hdrlen, GFP_ATOMIC);
	memset(opt, 0, sizeof(*opt));
	opt->tot_len = sizeof(*opt) + hdrlen;
	/* The inverted rthdr lives immediately after the struct. */
	opt->srcrt = (void*)(opt+1);
	opt->opt_nflen = hdrlen;
	/* Copy the fixed part, then rebuild the rest. */
	memcpy(opt->srcrt, hdr, sizeof(*hdr));
	irthdr = (struct rt0_hdr *)opt->srcrt;
	irthdr->reserved = 0;
	opt->srcrt->segments_left = n;
	/* Reverse the address vector (loop framing elided in excerpt). */
	memcpy(irthdr->addr+i, rthdr->addr+(n-1-i), 16);
EXPORT_SYMBOL_GPL(ipv6_invert_rthdr);
599 /**********************************
601 **********************************/
603 /* Router Alert as of RFC 2711 */
/*
 * ipv6_hop_ra - Router Alert hop-by-hop option (RFC 2711).
 * A valid RA option has a 2-byte value; its offset is recorded in the
 * skb control block for interested protocols (e.g. MLD).
 * NOTE(review): excerpt truncated — the success return and the
 * drop/return path after the length-error message are not visible.
 */
static int ipv6_hop_ra(struct sk_buff **skbp, int optoff)
	struct sk_buff *skb = *skbp;
	/* Option data length must be exactly 2 (RFC 2711). */
	if (skb->nh.raw[optoff+1] == 2) {
		IP6CB(skb)->ra = optoff;
	LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
		       skb->nh.raw[optoff+1]);
/*
 * ipv6_hop_jumbo - Jumbo Payload hop-by-hop option (RFC 2675).
 * Validates length/alignment and the jumbo length value, then trims
 * the skb to the real payload length.
 * NOTE(review): excerpt truncated — the drop/return statements after
 * each error branch and the final success return are not visible.
 */
static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff)
	struct sk_buff *skb = *skbp;
	/* Jumbo option: 4 data bytes, and must sit at offset 2 mod 4
	 * so the 32-bit length is naturally aligned (RFC 2675). */
	if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) {
		LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
			       skb->nh.raw[optoff+1]);
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
	pkt_len = ntohl(*(u32*)(skb->nh.raw+optoff+2));
	/* Jumbo lengths must exceed what payload_len could express. */
	if (pkt_len <= IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
	/* With a jumbo option, the IPv6 payload_len must be zero. */
	if (skb->nh.ipv6h->payload_len) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
	/* Claimed length may not exceed what actually arrived. */
	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
		IP6_INC_STATS_BH(IPSTATS_MIB_INTRUNCATEDPKTS);
	/* Trim trailing padding, keeping the checksum consistent. */
	if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
660 static struct tlvtype_proc tlvprochopopt_lst
[] = {
662 .type
= IPV6_TLV_ROUTERALERT
,
666 .type
= IPV6_TLV_JUMBO
,
667 .func
= ipv6_hop_jumbo
,
/*
 * ipv6_parse_hopopts - parse the hop-by-hop options header, which by
 * definition immediately follows the IPv6 header.
 * NOTE(review): excerpt truncated — the failure kfree_skb/return and
 * the success return of the next-header value are not visible here.
 */
int ipv6_parse_hopopts(struct sk_buff **skbp)
	struct sk_buff *skb = *skbp;
	struct inet6_skb_parm *opt = IP6CB(skb);
	/*
	 * skb->nh.raw is equal to skb->data, and
	 * skb->h.raw - skb->nh.raw is always equal to
	 * sizeof(struct ipv6hdr) by definition of
	 * hop-by-hop options.
	 */
	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + 8) ||
	    !pskb_may_pull(skb, sizeof(struct ipv6hdr) + ((skb->h.raw[1] + 1) << 3))) {
	/* Record the hop-by-hop header offset for cmsg reporting. */
	opt->hop = sizeof(struct ipv6hdr);
	if (ip6_parse_tlv(tlvprochopopt_lst, skbp)) {
		/* Step over the header we just parsed. */
		skb->h.raw += (skb->h.raw[1]+1)<<3;
		opt->nhoff = sizeof(struct ipv6hdr);
700 * Creating outbound headers.
702 * "build" functions work when skb is filled from head to tail (datagram)
703 * "push" functions work when headers are added from tail to head (tcp)
705 * In both cases we assume, that caller reserved enough room
/*
 * ipv6_push_rthdr - prepend a type 0 routing header on output.
 * Shifts the stored address vector left by one: the final destination
 * *addr_p goes into the last vector slot, and the first intermediate
 * hop becomes the new destination returned through *addr_p.
 * @proto is updated to NEXTHDR_ROUTING; the old value becomes the
 * routing header's nexthdr.
 * NOTE(review): excerpt truncated — the function braces and possibly
 * intervening lines are missing; statements appear in original order.
 */
static void ipv6_push_rthdr(struct sk_buff *skb, u8 *proto,
			    struct ipv6_rt_hdr *opt,
			    struct in6_addr **addr_p)
	struct rt0_hdr *phdr, *ihdr;
	ihdr = (struct rt0_hdr *) opt;
	/* Reserve room for the whole header (hdrlen in 8-octet units). */
	phdr = (struct rt0_hdr *) skb_push(skb, (ihdr->rt_hdr.hdrlen + 1) << 3);
	memcpy(phdr, ihdr, sizeof(struct rt0_hdr));
	/* Number of 16-byte addresses in the vector. */
	hops = ihdr->rt_hdr.hdrlen >> 1;
	/* Shift the stored vector left by one slot... */
	memcpy(phdr->addr, ihdr->addr + 1,
	       (hops - 1) * sizeof(struct in6_addr));
	/* ...append the final destination as the last entry... */
	ipv6_addr_copy(phdr->addr + (hops - 1), *addr_p);
	/* ...and route the packet to the first intermediate hop. */
	*addr_p = ihdr->addr;
	/* Chain this header into the next-header sequence. */
	phdr->rt_hdr.nexthdr = *proto;
	*proto = NEXTHDR_ROUTING;
/*
 * ipv6_push_exthdr - prepend a generic options extension header
 * (hop-by-hop or destination options) of next-header value @type.
 * NOTE(review): excerpt truncated — the lines chaining h->nexthdr and
 * updating *proto to @type are not visible here.
 */
static void ipv6_push_exthdr(struct sk_buff *skb, u8 *proto, u8 type, struct ipv6_opt_hdr *opt)
	/* Reserve room and copy the prebuilt header verbatim. */
	struct ipv6_opt_hdr *h = (struct ipv6_opt_hdr *)skb_push(skb, ipv6_optlen(opt));
	memcpy(h, opt, ipv6_optlen(opt));
/*
 * ipv6_push_nfrag_opts - prepend the extension headers that precede a
 * fragment header: routing, dest-opts-before-rthdr, then hop-by-hop
 * (prepended in reverse wire order).
 * NOTE(review): excerpt truncated — the "u8 *proto" parameter line,
 * the guarding if (opt->...) tests and the braces are not visible.
 */
void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
			  struct in6_addr **daddr)
	ipv6_push_rthdr(skb, proto, opt->srcrt, daddr);
	/*
	 * IPV6_RTHDRDSTOPTS is ignored
	 * unless IPV6_RTHDR is set (RFC3542).
	 */
	ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst0opt);
	ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
/*
 * ipv6_push_frag_opts - prepend the extension headers that follow a
 * fragment header (only dest-opts-after-rthdr, i.e. dst1opt).
 * NOTE(review): excerpt truncated — the guarding if (opt->dst1opt)
 * and the function braces are not visible here.
 */
void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
	ipv6_push_exthdr(skb, proto, NEXTHDR_DEST, opt->dst1opt);
/*
 * ipv6_dup_options - deep-copy tx options.  The per-header pointers
 * inside ipv6_txoptions point within the same allocation, so after
 * the flat memcpy each non-NULL pointer is rebased by the distance
 * between the two allocations.
 * NOTE(review): excerpt truncated — the NULL check on sock_kmalloc,
 * the if (opt2->...) guards around each rebase, and the return are
 * not visible here.
 */
struct ipv6_txoptions *
ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
	struct ipv6_txoptions *opt2;
	opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
		/* Byte distance between copy and original: rebase offset. */
		long dif = (char*)opt2 - (char*)opt;
		memcpy(opt2, opt, opt->tot_len);
		/* Rebase the interior header pointers into the copy. */
		*((char**)&opt2->hopopt) += dif;
		*((char**)&opt2->dst0opt) += dif;
		*((char**)&opt2->dst1opt) += dif;
		*((char**)&opt2->srcrt) += dif;
EXPORT_SYMBOL_GPL(ipv6_dup_options);
/*
 * ipv6_renew_option - helper for ipv6_renew_options(): either keeps
 * the old header @ohdr (copied into the new buffer at *p) or installs
 * the user-supplied replacement @newopt, advancing *p by the
 * CMSG-aligned size and publishing the result through *hdr.
 * NOTE(review): excerpt truncated — the selector parameter between
 * newoptlen and hdr, the branch framing and the returns are not
 * visible here.
 */
static int ipv6_renew_option(void *ohdr,
			     struct ipv6_opt_hdr __user *newopt, int newoptlen,
			     struct ipv6_opt_hdr **hdr,
		/* Keep path: copy the existing header into the buffer. */
		memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
		*hdr = (struct ipv6_opt_hdr *)*p;
		*p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr));
		/* Replace path: pull the new header from userspace. */
		if (copy_from_user(*p, newopt, newoptlen))
		*hdr = (struct ipv6_opt_hdr *)*p;
		/* Self-declared length must fit what the user passed. */
		if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen)
		*p += CMSG_ALIGN(newoptlen);
/*
 * ipv6_renew_options - rebuild a tx options block, replacing the one
 * header selected by newtype with @newopt and keeping the rest.
 * Sizes the new allocation, then lays headers out sequentially via
 * ipv6_renew_option(), and recomputes opt_nflen/opt_flen.
 * Returns the new block or ERR_PTR() on failure.
 * NOTE(review): excerpt truncated — the "int newtype" parameter line,
 * the error-goto framing after each ipv6_renew_option() call, the
 * trailing arguments of most of those calls, and the final returns
 * are not visible here.
 */
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
		   struct ipv6_opt_hdr __user *newopt, int newoptlen)
	struct ipv6_txoptions *opt2;
	/* Account for every header we are going to keep... */
	if (newtype != IPV6_HOPOPTS && opt->hopopt)
		tot_len += CMSG_ALIGN(ipv6_optlen(opt->hopopt));
	if (newtype != IPV6_RTHDRDSTOPTS && opt->dst0opt)
		tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst0opt));
	if (newtype != IPV6_RTHDR && opt->srcrt)
		tot_len += CMSG_ALIGN(ipv6_optlen(opt->srcrt));
	if (newtype != IPV6_DSTOPTS && opt->dst1opt)
		tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
	/* ...plus the replacement header, if one was supplied. */
	if (newopt && newoptlen)
		tot_len += CMSG_ALIGN(newoptlen);
	tot_len += sizeof(*opt2);
	opt2 = sock_kmalloc(sk, tot_len, GFP_ATOMIC);
		return ERR_PTR(-ENOBUFS);
	memset(opt2, 0, tot_len);
	opt2->tot_len = tot_len;
	/* Headers are packed immediately after the struct. */
	p = (char *)(opt2 + 1);
	/* Lay out each header in wire order, keeping or replacing. */
	err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
				newtype != IPV6_HOPOPTS,
	err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDRDSTOPTS,
	err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
				newtype != IPV6_RTHDR,
				(struct ipv6_opt_hdr **)&opt2->srcrt, &p);
	err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
				newtype != IPV6_DSTOPTS,
	/* Non-fragmentable length: everything before a fragment header. */
	opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
			  (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
			  (opt2->srcrt ? ipv6_optlen(opt2->srcrt) : 0);
	/* Fragmentable length: only dst1opt follows a fragment header. */
	opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
	/* Error path: release the partially-built block. */
	sock_kfree_s(sk, opt2, opt2->tot_len);
886 struct ipv6_txoptions
*ipv6_fixup_options(struct ipv6_txoptions
*opt_space
,
887 struct ipv6_txoptions
*opt
)
890 * ignore the dest before srcrt unless srcrt is being included.
893 if (opt
&& opt
->dst0opt
&& !opt
->srcrt
) {
894 if (opt_space
!= opt
) {
895 memcpy(opt_space
, opt
, sizeof(*opt_space
));
898 opt
->opt_nflen
-= ipv6_optlen(opt
->dst0opt
);