/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	$Id: ip6_output.c,v 1.25 2000/02/27 19:42:53 davem Exp $
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *				etc.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/route.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>

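/*
 *	Pick an Identification value for a Fragment header.  A single
 *	global counter is used, protected by a spinlock; zero is skipped
 *	so an identification of 0 never appears on the wire.
 */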
static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
{
	static u32 ipv6_fragmentation_id = 1;
	static spinlock_t ip6_id_lock = SPIN_LOCK_UNLOCKED;

	spin_lock_bh(&ip6_id_lock);
	fhdr->identification = ipv6_fragmentation_id;
	if (++ipv6_fragmentation_id == 0)
		ipv6_fragmentation_id = 1;
	spin_unlock_bh(&ip6_id_lock);
}

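/*
 *	Final step of output: prepend the cached hardware header if the
 *	destination has one, otherwise hand the packet to the neighbour
 *	output routine for address resolution.
 */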
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		read_lock_bh(&hh->hh_lock);
		memcpy(skb->data - 16, hh->hh_data, 16);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	kfree_skb(skb);
	return -EINVAL;
}

/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}

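/*
 *	Queue a locally generated packet on its output device.  Multicast
 *	packets are looped back to local listeners (unless the sender turned
 *	off IPV6_MULTICAST_LOOP) before the real copy is passed through the
 *	NF_IP6_POST_ROUTING hook.
 */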
int ip6_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;

	skb->protocol = __constant_htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&skb->nh.ipv6h->daddr)) {
		if (!(dev->flags&IFF_LOOPBACK) &&
		    (skb->sk == NULL || skb->sk->net_pinfo.af_inet6.mc_loop) &&
		    ipv6_chk_mcast_addr(dev, &skb->nh.ipv6h->daddr)) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip6_dev_loopback_xmit);

			if (skb->nh.ipv6h->hop_limit == 0) {
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_INC_STATS(Ip6OutMcastPkts);
	}

	return NF_HOOK(PF_INET6, NF_IP6_POST_ROUTING, skb, NULL, skb->dev,
		       ip6_output_finish);
}

#ifdef CONFIG_NETFILTER
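/*
 *	Netfilter may have rewritten the addresses in the header, so the
 *	original routing decision can be stale: redo the route lookup from
 *	the (possibly mangled) header and attach the new dst to the skb.
 */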
static int route6_me_harder(struct sk_buff *skb)
{
	struct ipv6hdr *iph = skb->nh.ipv6h;
	struct dst_entry *dst;
	struct flowi fl;

	fl.proto = iph->nexthdr;
	fl.fl6_dst = &iph->daddr;
	fl.fl6_src = &iph->saddr;
	fl.oif = skb->sk ? skb->sk->bound_dev_if : 0;
	fl.fl6_flowlabel = 0;
	fl.uli_u.ports.dport = 0;
	fl.uli_u.ports.sport = 0;

	dst = ip6_route_output(skb->sk, &fl);

	if (dst->error) {
		printk(KERN_DEBUG "route6_me_harder: No more route.\n");
		return -EINVAL;
	}

	/* Drop old route. */
	dst_release(skb->dst);

	skb->dst = dst;
	return 0;
}
#endif

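/*
 *	If netfilter marked the packet as altered, re-run the routing
 *	decision before handing the packet to dst->output().
 */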
static inline int ip6_maybe_reroute(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
	if (skb->nfcache & NFC_ALTERED){
		if (route6_me_harder(skb) != 0){
			kfree_skb(skb);
			return -EINVAL;
		}
	}
#endif /* CONFIG_NETFILTER */
	return skb->dst->output(skb);
}

/*
 *	xmit an sk_buff (used by TCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt)
{
	struct ipv6_pinfo *np = sk ? &sk->net_pinfo.af_inet6 : NULL;
	struct in6_addr *first_hop = fl->nl_u.ip6_u.daddr;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8 proto = fl->proto;
	int seg_len = skb->len;
	int hlimit;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + ((dst->dev->hard_header_len + 15)&~15);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL)
				return -ENOBUFS;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	*(u32*)hdr = __constant_htonl(0x60000000) | fl->fl6_flowlabel;
	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ((struct rt6_info*)dst)->rt6i_hoplimit;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, fl->nl_u.ip6_u.saddr);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	if (skb->len <= dst->pmtu) {
		IP6_INC_STATS(Ip6OutRequests);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
			       ip6_maybe_reroute);
	}

	printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst->pmtu, skb->dev);
	kfree_skb(skb);
	return -EMSGSIZE;
}

/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication, but we really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is performance critical for us).
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       struct in6_addr *saddr, struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = __constant_htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	hdr = (struct ipv6hdr *) skb_put(skb, sizeof(struct ipv6hdr));
	skb->nh.ipv6h = hdr;

	*(u32*)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}

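/*
 *	Write the fixed 40-byte IPv6 header at the current tail of the skb
 *	from the flow information and return a pointer to it, so the caller
 *	can chain extension headers through hdr->nexthdr.
 */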
static struct ipv6hdr * ip6_bld_1(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
				  int hlimit, unsigned pktlength)
{
	struct ipv6hdr *hdr;

	skb->nh.raw = skb_put(skb, sizeof(struct ipv6hdr));
	hdr = skb->nh.ipv6h;

	*(u32*)hdr = fl->fl6_flowlabel | htonl(0x60000000);

	hdr->payload_len = htons(pktlength - sizeof(struct ipv6hdr));
	hdr->hop_limit = hlimit;
	hdr->nexthdr = fl->proto;

	ipv6_addr_copy(&hdr->saddr, fl->nl_u.ip6_u.saddr);
	ipv6_addr_copy(&hdr->daddr, fl->nl_u.ip6_u.daddr);
	return hdr;
}

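/*
 *	Append a Fragment header, splice it into the extension header chain
 *	behind *prev_hdr and give it a fresh identification.  Returns a
 *	pointer to the new nexthdr byte so the caller can keep chaining.
 */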
static __inline__ u8 * ipv6_build_fraghdr(struct sk_buff *skb, u8* prev_hdr, unsigned offset)
{
	struct frag_hdr *fhdr;

	fhdr = (struct frag_hdr *) skb_put(skb, sizeof(struct frag_hdr));

	fhdr->nexthdr = *prev_hdr;
	*prev_hdr = NEXTHDR_FRAGMENT;
	prev_hdr = &fhdr->nexthdr;

	fhdr->reserved = 0;
	fhdr->frag_off = htons(offset);
	ipv6_select_ident(skb, fhdr);
	return &fhdr->nexthdr;
}

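/*
 *	Transmit a datagram that does not fit into one packet: build the
 *	unfragmentable part (IPv6 header, per-fragment extension headers
 *	and the Fragment header) once in the last skb, then derive the
 *	remaining fragments from it and send them.
 */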
static int ip6_frag_xmit(struct sock *sk, inet_getfrag_t getfrag,
			 const void *data, struct dst_entry *dst,
			 struct flowi *fl, struct ipv6_txoptions *opt,
			 struct in6_addr *final_dst,
			 int hlimit, int flags, unsigned length, int mtu)
{
	struct ipv6hdr *hdr;
	struct sk_buff *last_skb;
	u8 *prev_hdr;
	int unfrag_len;
	int frag_len;
	int last_len;
	int nfrags;
	int fhdr_dist;
	int frag_off;
	int data_off;
	int err;

	/*
	 *	Fragmentation
	 *
	 *	Extension header order:
	 *	Hop-by-hop -> Dest0 -> Routing -> Fragment -> Auth -> Dest1 -> rest (...)
	 *
	 *	We must build the non-fragmented part that
	 *	will be in every packet... this also means
	 *	that other extension headers (Dest, Auth, etc)
	 *	must be considered in the data to be fragmented
	 */

	unfrag_len = sizeof(struct ipv6hdr) + sizeof(struct frag_hdr);
	last_len = length;

	if (opt) {
		unfrag_len += opt->opt_nflen;
		last_len += opt->opt_flen;
	}

	/*
	 *	Length of fragmented part on every packet but
	 *	the last must be an:
	 *	"integer multiple of 8 octets".
	 */

	frag_len = (mtu - unfrag_len) & ~0x7;

	/* Unfragmentable part exceeds mtu. */
	if (frag_len <= 0) {
		ipv6_local_error(sk, EMSGSIZE, fl, mtu);
		return -EMSGSIZE;
	}

	nfrags = last_len / frag_len;

	/*
	 *	We must send from end to start because of
	 *	UDP/ICMP checksums. We do a funny trick:
	 *	fill the last skb first with the fixed
	 *	header (and its data) and then use it
	 *	to create the following segments and send it
	 *	in the end. If the peer is checking the M_flag
	 *	to trigger the reassembly code then this
	 *	might be a good idea.
	 */

	frag_off = nfrags * frag_len;
	last_len -= frag_off;

	if (last_len == 0) {
		last_len = frag_len;
		frag_off -= frag_len;
		nfrags--;
	}
	data_off = frag_off;

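	/*
	 *	Worked example (assuming no extension headers), with
	 *	mtu = 1500 and length = 3000:
	 *	    unfrag_len = 40 + 8 = 48
	 *	    frag_len   = (1500 - 48) & ~7 = 1448
	 *	    nfrags     = 3000 / 1448 = 2
	 *	    frag_off   = 2896, last_len = 104, data_off = 2896
	 *	i.e. two full-sized fragments plus a 104 byte tail, with
	 *	every fragment offset a multiple of 8 as required.
	 */
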
	/* And it is an implementation problem: for now we assume that
	   all the exthdrs will fit into the first fragment.
	 */
	if (opt) {
		if (frag_len < opt->opt_flen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu);
			return -EMSGSIZE;
		}
		data_off = frag_off - opt->opt_flen;
	}

	if (flags&MSG_PROBE)
		return 0;

	last_skb = sock_alloc_send_skb(sk, unfrag_len + frag_len +
				       dst->dev->hard_header_len + 15,
				       0, flags & MSG_DONTWAIT, &err);

	if (last_skb == NULL)
		return err;

	last_skb->dst = dst_clone(dst);

	skb_reserve(last_skb, (dst->dev->hard_header_len + 15) & ~15);

	hdr = ip6_bld_1(sk, last_skb, fl, hlimit, frag_len+unfrag_len);
	prev_hdr = &hdr->nexthdr;

	if (opt && opt->opt_nflen)
		prev_hdr = ipv6_build_nfrag_opts(last_skb, prev_hdr, opt, final_dst, 0);

	prev_hdr = ipv6_build_fraghdr(last_skb, prev_hdr, frag_off);
	fhdr_dist = prev_hdr - last_skb->data;

	err = getfrag(data, &hdr->saddr, last_skb->tail, data_off, last_len);

	if (!err) {
		while (nfrags--) {
			struct sk_buff *skb;
			struct frag_hdr *fhdr2;

			skb = skb_copy(last_skb, sk->allocation);

			if (skb == NULL) {
				IP6_INC_STATS(Ip6FragFails);
				kfree_skb(last_skb);
				return -ENOMEM;
			}

			frag_off -= frag_len;
			data_off -= frag_len;

			fhdr2 = (struct frag_hdr *) (skb->data + fhdr_dist);

			/* more flag on */
			fhdr2->frag_off = htons(frag_off | 1);

			/* Write fragmentable exthdrs to the first chunk */
			if (nfrags == 0 && opt && opt->opt_flen) {
				ipv6_build_frag_opts(skb, &fhdr2->nexthdr, opt);
				frag_len -= opt->opt_flen;
				data_off = 0;
			}

			err = getfrag(data, &hdr->saddr, skb_put(skb, frag_len),
				      data_off, frag_len);

			if (err) {
				kfree_skb(skb);
				break;
			}

			IP6_INC_STATS(Ip6FragCreates);
			IP6_INC_STATS(Ip6OutRequests);
			err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
				      ip6_maybe_reroute);
			if (err) {
				kfree_skb(last_skb);
				return err;
			}
		}
	}

	if (err) {
		IP6_INC_STATS(Ip6FragFails);
		kfree_skb(last_skb);
		return -EFAULT;
	}

	hdr->payload_len = htons(unfrag_len + last_len - sizeof(struct ipv6hdr));

	/*
	 *	update last_skb to reflect the getfrag we did
	 *	on start.
	 */

	skb_put(last_skb, last_len);

	IP6_INC_STATS(Ip6FragCreates);
	IP6_INC_STATS(Ip6FragOKs);
	IP6_INC_STATS(Ip6OutRequests);
	return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, last_skb, NULL, dst->dev,
		       ip6_maybe_reroute);
}

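/*
 *	Generic datagram transmission (used by UDP and raw sockets):
 *	resolve or revalidate the cached route, pick a source address,
 *	then either build and send a single packet or fall back to
 *	ip6_frag_xmit() when the payload exceeds the path MTU.
 */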
int ip6_build_xmit(struct sock *sk, inet_getfrag_t getfrag, const void *data,
		   struct flowi *fl, unsigned length,
		   struct ipv6_txoptions *opt, int hlimit, int flags)
{
	struct ipv6_pinfo *np = &sk->net_pinfo.af_inet6;
	struct in6_addr *final_dst = NULL;
	struct dst_entry *dst;
	int err = 0;
	unsigned int pktlength, jumbolen, mtu;
	struct in6_addr saddr;

	if (opt && opt->srcrt) {
		struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
		final_dst = fl->fl6_dst;
		fl->fl6_dst = rt0->addr;
	}

	if (!fl->oif && ipv6_addr_is_multicast(fl->nl_u.ip6_u.daddr))
		fl->oif = np->mcast_oif;

	dst = __sk_dst_check(sk, np->dst_cookie);
	if (dst) {
		struct rt6_info *rt = (struct rt6_info*)dst;

		/* Yes, checking route validity in the not connected
		   case is not very simple. Take into account that
		   we do not support routing by source, TOS,
		   or MSG_DONTROUTE		--ANK (980726)

		   1. If the route was a host route, check that the
		      cached destination is current.
		      If it is a network route, we still may
		      check its validity using the saved pointer
		      to the last used address: daddr_cache.
		      We do not want to save the whole address now
		      (because the main consumer of this service
		      is TCP, which does not have this problem),
		      so the last trick works only on connected
		      sockets.
		   2. oif also should be the same.
		 */

		if (((rt->rt6i_dst.plen != 128 ||
		      ipv6_addr_cmp(fl->fl6_dst, &rt->rt6i_dst.addr))
		     && (np->daddr_cache == NULL ||
			 ipv6_addr_cmp(fl->fl6_dst, np->daddr_cache)))
		    || (fl->oif && fl->oif != dst->dev->ifindex)) {
			dst = NULL;
		} else
			dst_clone(dst);
	}

	if (dst == NULL)
		dst = ip6_route_output(sk, fl);

	if (dst->error) {
		IP6_INC_STATS(Ip6OutNoRoutes);
		dst_release(dst);
		return -ENETUNREACH;
	}

	if (fl->fl6_src == NULL) {
		err = ipv6_get_saddr(dst, fl->fl6_dst, &saddr);

		if (err) {
#if IP6_DEBUG >= 2
			printk(KERN_DEBUG "ip6_build_xmit: "
			       "no available source address\n");
#endif
			goto out;
		}
		fl->fl6_src = &saddr;
	}
	pktlength = length;

	if (hlimit < 0) {
		if (ipv6_addr_is_multicast(fl->fl6_dst))
			hlimit = np->mcast_hops;
		else
			hlimit = np->hop_limit;
		if (hlimit < 0)
			hlimit = ((struct rt6_info*)dst)->rt6i_hoplimit;
	}

	jumbolen = 0;

	if (!sk->protinfo.af_inet.hdrincl) {
		pktlength += sizeof(struct ipv6hdr);
		if (opt)
			pktlength += opt->opt_flen + opt->opt_nflen;

		if (pktlength > 0xFFFF + sizeof(struct ipv6hdr)) {
			/* Jumbo datagram.
			   It is assumed that in the hdrincl case the
			   jumbo option is supplied by the user.
			 */
			pktlength += 8;
			jumbolen = pktlength - sizeof(struct ipv6hdr);
		}
	}

	mtu = dst->pmtu;
	if (np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
		else if (np->pmtudisc == IPV6_PMTUDISC_DONT)
			mtu = IPV6_MIN_MTU;
	}

	/* Critical arithmetic overflow check.
	   FIXME: may gcc optimize it out? --ANK (980726)
	 */
	if (pktlength < length) {
		ipv6_local_error(sk, EMSGSIZE, fl, mtu);
		err = -EMSGSIZE;
		goto out;
	}

	if (flags&MSG_CONFIRM)
		dst_confirm(dst);

	if (pktlength <= mtu) {
		struct sk_buff *skb;
		struct ipv6hdr *hdr;
		struct net_device *dev = dst->dev;

		err = 0;
		if (flags&MSG_PROBE)
			goto out;

		skb = sock_alloc_send_skb(sk, pktlength + 15 +
					  dev->hard_header_len, 0,
					  flags & MSG_DONTWAIT, &err);

		if (skb == NULL) {
			IP6_INC_STATS(Ip6OutDiscards);
			goto out;
		}

		skb->dst = dst_clone(dst);

		skb_reserve(skb, (dev->hard_header_len + 15) & ~15);

		hdr = (struct ipv6hdr *) skb->tail;
		skb->nh.ipv6h = hdr;

		if (!sk->protinfo.af_inet.hdrincl) {
			ip6_bld_1(sk, skb, fl, hlimit,
				  jumbolen ? sizeof(struct ipv6hdr) : pktlength);

			if (opt || jumbolen) {
				u8 *prev_hdr = &hdr->nexthdr;
				prev_hdr = ipv6_build_nfrag_opts(skb, prev_hdr, opt, final_dst, jumbolen);
				if (opt && opt->opt_flen)
					ipv6_build_frag_opts(skb, prev_hdr, opt);
			}
		}

		skb_put(skb, length);
		err = getfrag(data, &hdr->saddr,
			      ((char *) hdr) + (pktlength - length),
			      0, length);

		if (!err) {
			IP6_INC_STATS(Ip6OutRequests);
			err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
				      ip6_maybe_reroute);
		} else {
			err = -EFAULT;
			kfree_skb(skb);
		}
	} else {
		if (sk->protinfo.af_inet.hdrincl || jumbolen ||
		    np->pmtudisc == IPV6_PMTUDISC_DO) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu);
			err = -EMSGSIZE;
			goto out;
		}

		err = ip6_frag_xmit(sk, getfrag, data, dst, fl, opt, final_dst, hlimit,
				    flags, length, mtu);
	}

	/*
	 *	cleanup
	 */
out:
	ip6_dst_store(sk, dst, fl->nl_u.ip6_u.daddr == &np->daddr ? &np->daddr : NULL);
	if (err > 0)
		err = np->recverr ? net_xmit_errno(err) : 0;
	return err;
}

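/*
 *	Deliver a packet carrying a Router Alert option to every raw socket
 *	that registered (via IPV6_ROUTER_ALERT) for the given value.
 *	Returns 1 if the packet was consumed by a listener.
 */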
int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2, skb2->len);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb, skb->len);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}

static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return skb->dst->output(skb);
}

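/*
 *	Forward a packet that is not addressed to this host: hand Router
 *	Alert packets to interested sockets, enforce the hop limit, emit
 *	redirects or Packet Too Big errors where appropriate, and finally
 *	pass the packet through the NF_IP6_FORWARD hook.
 */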
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = (struct inet6_skb_parm*)skb->cb;

	if (ipv6_devconf.forwarding == 0 && opt->srcrt == 0)
		goto drop;

	/*
	 *	We do no processing on RA packets: they are pushed to user
	 *	level AS IS, without any warranty that an application will
	 *	be able to interpret them. The reason is that we cannot do
	 *	anything clever here.
	 *
	 *	We are not an end node, so if the packet contains AH/ESP
	 *	we cannot do anything with it either. Defragmentation would
	 *	also be a mistake: RA packets cannot be fragmented, because
	 *	there is no warranty that different fragments will follow
	 *	the same path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr*)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else if (ipv6_addr_type(&hdr->saddr)&(IPV6_ADDR_MULTICAST|IPV6_ADDR_LOOPBACK
						|IPV6_ADDR_LINKLOCAL)) {
		/* This check is security critical. */
		goto drop;
	}

	if (skb->len > dst->pmtu) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst->pmtu, skb->dev);
		IP6_INC_STATS_BH(Ip6InTooBigErrors);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if ((skb = skb_cow(skb, dst->dev->hard_header_len)) == NULL)
		return 0;

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(Ip6OutForwDatagrams);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

drop:
	IP6_INC_STATS_BH(Ip6InAddrErrors);
	kfree_skb(skb);
	return -EINVAL;
}