/*
 *	Linux INET6 implementation
 *
 *	Pedro Roque	<roque@di.fc.ul.pt>
 *
 *	$Id: route.c,v 1.56 2001/10/31 21:55:55 davem Exp $
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 */

#include <linux/capability.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/if_arp.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>

#include <asm/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

/* Set to 3 to get tracing. */
#define RT6_DEBUG 2

#if RT6_DEBUG >= 3
#define RDBG(x) printk x
#define RT6_TRACE(x...) printk(KERN_DEBUG x)
#else
#define RDBG(x)
#define RT6_TRACE(x...) do { ; } while (0)
#endif

#define CLONE_OFFLINK_ROUTE 0

#define RT6_SELECT_F_IFACE      0x1
#define RT6_SELECT_F_REACHABLE  0x2

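/*
 * Editorial note (not part of the original source): rt6_select() below
 * combines these flags.  Callers such as ip6_route_input() pass
 * RT6_SELECT_F_REACHABLE, plus RT6_SELECT_F_IFACE for link-local or
 * multicast destinations, so the first pass only considers routers on
 * the requested interface whose neighbour entry looks reachable; if
 * that pass finds nothing, the lookup is retried with the reachability
 * requirement dropped.
 */
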
static int ip6_rt_max_size = 4096;
static int ip6_rt_gc_min_interval = HZ / 2;
static int ip6_rt_gc_timeout = 60*HZ;
int ip6_rt_gc_interval = 30*HZ;
static int ip6_rt_gc_elasticity = 9;
static int ip6_rt_mtu_expires = 10*60*HZ;
static int ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

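/*
 * Editorial note (not part of the original source): with IPV6_MIN_MTU
 * equal to 1280, the advertised-MSS floor above works out to
 * 1280 - 20 - 40 = 1220 bytes, i.e. the minimum link MTU minus a
 * minimal TCP header (20) and the IPv6 header (40).
 */
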
static struct rt6_info *ip6_rt_copy(struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void             ip6_dst_destroy(struct dst_entry *);
static void             ip6_dst_ifdown(struct dst_entry *,
                                       struct net_device *dev, int how);
static int              ip6_dst_gc(void);

static int              ip6_pkt_discard(struct sk_buff *skb);
static int              ip6_pkt_discard_out(struct sk_buff *skb);
static void             ip6_link_failure(struct sk_buff *skb);
static void             ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);

static struct dst_ops ip6_dst_ops = {
        .family                 = AF_INET6,
        .protocol               = __constant_htons(ETH_P_IPV6),
        .gc                     = ip6_dst_gc,
        .gc_thresh              = 1024,
        .check                  = ip6_dst_check,
        .destroy                = ip6_dst_destroy,
        .ifdown                 = ip6_dst_ifdown,
        .negative_advice        = ip6_negative_advice,
        .link_failure           = ip6_link_failure,
        .update_pmtu            = ip6_rt_update_pmtu,
        .entry_size             = sizeof(struct rt6_info),
};

struct rt6_info ip6_null_entry = {
        .u = {
                .dst = {
                        .__refcnt       = ATOMIC_INIT(1),
                        .__use          = 1,
                        .dev            = &loopback_dev,
                        .obsolete       = -1,
                        .error          = -ENETUNREACH,
                        .metrics        = { [RTAX_HOPLIMIT - 1] = 255, },
                        .input          = ip6_pkt_discard,
                        .output         = ip6_pkt_discard_out,
                        .ops            = &ip6_dst_ops,
                        .path           = (struct dst_entry *)&ip6_null_entry,
                }
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_metric    = ~(u32) 0,
        .rt6i_ref       = ATOMIC_INIT(1),
};

struct fib6_node ip6_routing_table = {
        .leaf           = &ip6_null_entry,
        .fn_flags       = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO,
};

/* Protects all the ip6 fib */

DEFINE_RWLOCK(rt6_lock);

/* allocate dst with ip6_dst_ops */
static __inline__ struct rt6_info *ip6_dst_alloc(void)
{
        return (struct rt6_info *)dst_alloc(&ip6_dst_ops);
}

static void ip6_dst_destroy(struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct inet6_dev *idev = rt->rt6i_idev;

        if (idev != NULL) {
                rt->rt6i_idev = NULL;
                in6_dev_put(idev);
        }
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                           int how)
{
        struct rt6_info *rt = (struct rt6_info *)dst;
        struct inet6_dev *idev = rt->rt6i_idev;

        if (dev != &loopback_dev && idev != NULL && idev->dev == dev) {
                struct inet6_dev *loopback_idev = in6_dev_get(&loopback_dev);
                if (loopback_idev != NULL) {
                        rt->rt6i_idev = loopback_idev;
                        in6_dev_put(idev);
                }
        }
}

static __inline__ int rt6_check_expired(const struct rt6_info *rt)
{
        return (rt->rt6i_flags & RTF_EXPIRES &&
                time_after(jiffies, rt->rt6i_expires));
}

/*
 *	Route lookup. Any rt6_lock is implied.
 */

static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
                                                    int oif,
                                                    int strict)
{
        struct rt6_info *local = NULL;
        struct rt6_info *sprt;

        if (oif) {
                for (sprt = rt; sprt; sprt = sprt->u.next) {
                        struct net_device *dev = sprt->rt6i_dev;
                        if (dev->ifindex == oif)
                                return sprt;
                        if (dev->flags & IFF_LOOPBACK) {
                                if (sprt->rt6i_idev == NULL ||
                                    sprt->rt6i_idev->dev->ifindex != oif) {
                                        if (strict && oif)
                                                continue;
                                        if (local && (!oif ||
                                                      local->rt6i_idev->dev->ifindex == oif))
                                                continue;
                                }
                                local = sprt;
                        }
                }

                if (local)
                        return local;

                if (strict)
                        return &ip6_null_entry;
        }
        return rt;
}

/*
 *	Default Router Selection (RFC 2461 6.3.6)
 */
static int inline rt6_check_dev(struct rt6_info *rt, int oif)
{
        struct net_device *dev = rt->rt6i_dev;
        if (!oif || dev->ifindex == oif)
                return 2;
        if ((dev->flags & IFF_LOOPBACK) &&
            rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
                return 1;
        return 0;
}

static int inline rt6_check_neigh(struct rt6_info *rt)
{
        struct neighbour *neigh = rt->rt6i_nexthop;
        int m = 0;
        if (neigh) {
                read_lock_bh(&neigh->lock);
                if (neigh->nud_state & NUD_VALID)
                        m = 1;
                read_unlock_bh(&neigh->lock);
        }
        return m;
}

static int rt6_score_route(struct rt6_info *rt, int oif,
                           int strict)
{
        int m = rt6_check_dev(rt, oif);
        if (!m && (strict & RT6_SELECT_F_IFACE))
                return -1;
#ifdef CONFIG_IPV6_ROUTER_PREF
        m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
        if (rt6_check_neigh(rt))
                m |= 16;
        else if (strict & RT6_SELECT_F_REACHABLE)
                return -1;
        return m;
}

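/*
 * Editorial note (not part of the original source, and the exact score
 * values are reconstructed): rt6_check_dev() contributes 2 for a route
 * whose device matches the requested oif (1 if only its loopback idev
 * matches), a reachable nexthop adds a large bonus, and -1 marks routes
 * that the strict flags rule out entirely, so a reachable on-interface
 * router always outranks an unreachable one in rt6_select() below.
 */
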
static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
                                   int strict)
{
        struct rt6_info *match = NULL, *last = NULL;
        struct rt6_info *rt, *rt0 = *head;
        u32 metric;
        int mpri = -1;

        RT6_TRACE("%s(head=%p(*head=%p), oif=%d)\n",
                  __FUNCTION__, head, head ? *head : NULL, oif);

        for (rt = rt0, metric = rt0->rt6i_metric;
             rt && rt->rt6i_metric == metric;
             rt = rt->u.next) {
                int m;

                if (rt6_check_expired(rt))
                        continue;

                last = rt;

                m = rt6_score_route(rt, oif, strict);
                if (m < 0)
                        continue;

                if (m > mpri) {
                        match = rt;
                        mpri = m;
                }
        }

        if (!match &&
            (strict & RT6_SELECT_F_REACHABLE) &&
            last && last != rt0) {
                /* no entries matched; do round-robin */
                *head = rt0->u.next;
                rt0->u.next = last->u.next;
                last->u.next = rt0;
        }

        RT6_TRACE("%s() => %p, score=%d\n",
                  __FUNCTION__, match, mpri);

        return (match ? match : &ip6_null_entry);
}

struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
                            int oif, int strict)
{
        struct fib6_node *fn;
        struct rt6_info *rt;

        read_lock_bh(&rt6_lock);
        fn = fib6_lookup(&ip6_routing_table, daddr, saddr);
        rt = rt6_device_match(fn->leaf, oif, strict);
        dst_hold(&rt->u.dst);
        rt->u.dst.__use++;
        read_unlock_bh(&rt6_lock);

        rt->u.dst.lastuse = jiffies;
        if (rt->u.dst.error == 0)
                return rt;
        dst_release(&rt->u.dst);
        return NULL;
}

/* ip6_ins_rt is called with FREE rt6_lock.
   It takes a new route entry; if the addition fails for any reason the
   route is freed.  In any case, if the caller does not hold a reference,
   it may be destroyed.
 */

int ip6_ins_rt(struct rt6_info *rt, struct nlmsghdr *nlh,
               void *_rtattr, struct netlink_skb_parms *req)
{
        int err;

        write_lock_bh(&rt6_lock);
        err = fib6_add(&ip6_routing_table, rt, nlh, _rtattr, req);
        write_unlock_bh(&rt6_lock);

        return err;
}

static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
                                      struct in6_addr *saddr)
{
        struct rt6_info *rt;

        rt = ip6_rt_copy(ort);

        if (rt) {
                if (!(rt->rt6i_flags & RTF_GATEWAY)) {
                        if (rt->rt6i_dst.plen != 128 &&
                            ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
                                rt->rt6i_flags |= RTF_ANYCAST;
                        ipv6_addr_copy(&rt->rt6i_gateway, daddr);
                }

                ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
                rt->rt6i_dst.plen = 128;
                rt->rt6i_flags |= RTF_CACHE;
                rt->u.dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
                if (rt->rt6i_src.plen && saddr) {
                        ipv6_addr_copy(&rt->rt6i_src.addr, saddr);
                        rt->rt6i_src.plen = 128;
                }
#endif

                rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
        }

        return rt;
}

static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
{
        struct rt6_info *rt = ip6_rt_copy(ort);

        if (rt) {
                ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
                rt->rt6i_dst.plen = 128;
                rt->rt6i_flags |= RTF_CACHE;
                if (rt->rt6i_flags & RTF_REJECT)
                        rt->u.dst.error = ort->u.dst.error;
                rt->u.dst.flags |= DST_HOST;
                rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
        }
        return rt;
}

#define BACKTRACK() \
if (rt == &ip6_null_entry) { \
        while ((fn = fn->parent) != NULL) { \
                if (fn->fn_flags & RTN_ROOT) { \
                        goto out; \
                } \
                if (fn->fn_flags & RTN_RTINFO) \
                        goto restart; \
        } \
}

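/*
 * Editorial note (not part of the original source): BACKTRACK() walks
 * back up the fib6 tree when the most specific node only yielded the
 * null entry.  It stops either at the tree root (no route at all, so
 * fall through to the out label) or at the first ancestor that carries
 * routing information, in which case the route selection is retried at
 * that node via the restart label in the two callers below.
 */
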
void ip6_route_input(struct sk_buff *skb)
{
        struct fib6_node *fn;
        struct rt6_info *rt, *nrt;
        int strict;
        int attempts = 3;
        int err;
        int reachable = RT6_SELECT_F_REACHABLE;

        strict = ipv6_addr_type(&skb->nh.ipv6h->daddr) & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL) ? RT6_SELECT_F_IFACE : 0;

relookup:
        read_lock_bh(&rt6_lock);

restart_2:
        fn = fib6_lookup(&ip6_routing_table, &skb->nh.ipv6h->daddr,
                         &skb->nh.ipv6h->saddr);

restart:
        rt = rt6_select(&fn->leaf, skb->dev->ifindex, strict | reachable);
        BACKTRACK();
        if (rt == &ip6_null_entry ||
            rt->rt6i_flags & RTF_CACHE)
                goto out;

        dst_hold(&rt->u.dst);
        read_unlock_bh(&rt6_lock);

        if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, &skb->nh.ipv6h->daddr, &skb->nh.ipv6h->saddr);
        else {
#if CLONE_OFFLINK_ROUTE
                nrt = rt6_alloc_clone(rt, &skb->nh.ipv6h->daddr);
#else
                goto out2;
#endif
        }

        dst_release(&rt->u.dst);
        rt = nrt ? : &ip6_null_entry;

        dst_hold(&rt->u.dst);
        if (nrt) {
                err = ip6_ins_rt(nrt, NULL, NULL, &NETLINK_CB(skb));
                if (!err)
                        goto out2;
        }

        if (--attempts <= 0)
                goto out2;

        /*
         * Race condition! In the gap, when rt6_lock was
         * released someone could insert this route.  Relookup.
         */
        dst_release(&rt->u.dst);
        goto relookup;

out:
        if (reachable) {
                reachable = 0;
                goto restart_2;
        }
        dst_hold(&rt->u.dst);
        read_unlock_bh(&rt6_lock);
out2:
        rt->u.dst.lastuse = jiffies;
        rt->u.dst.__use++;
        skb->dst = (struct dst_entry *) rt;
}

struct dst_entry * ip6_route_output(struct sock *sk, struct flowi *fl)
{
        struct fib6_node *fn;
        struct rt6_info *rt, *nrt;
        int strict;
        int attempts = 3;
        int err;
        int reachable = RT6_SELECT_F_REACHABLE;

        strict = ipv6_addr_type(&fl->fl6_dst) & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL) ? RT6_SELECT_F_IFACE : 0;

relookup:
        read_lock_bh(&rt6_lock);

restart_2:
        fn = fib6_lookup(&ip6_routing_table, &fl->fl6_dst, &fl->fl6_src);

restart:
        rt = rt6_select(&fn->leaf, fl->oif, strict | reachable);
        BACKTRACK();
        if (rt == &ip6_null_entry ||
            rt->rt6i_flags & RTF_CACHE)
                goto out;

        dst_hold(&rt->u.dst);
        read_unlock_bh(&rt6_lock);

        if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
        else {
#if CLONE_OFFLINK_ROUTE
                nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
#else
                goto out2;
#endif
        }

        dst_release(&rt->u.dst);
        rt = nrt ? : &ip6_null_entry;

        dst_hold(&rt->u.dst);
        if (nrt) {
                err = ip6_ins_rt(nrt, NULL, NULL, NULL);
                if (!err)
                        goto out2;
        }

        if (--attempts <= 0)
                goto out2;

        /*
         * Race condition! In the gap, when rt6_lock was
         * released someone could insert this route.  Relookup.
         */
        dst_release(&rt->u.dst);
        goto relookup;

out:
        if (reachable) {
                reachable = 0;
                goto restart_2;
        }
        dst_hold(&rt->u.dst);
        read_unlock_bh(&rt6_lock);
out2:
        rt->u.dst.lastuse = jiffies;
        rt->u.dst.__use++;
        return &rt->u.dst;
}

/*
 *	Destination cache support functions
 */

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
        struct rt6_info *rt;

        rt = (struct rt6_info *) dst;

        if (rt && rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
                return dst;

        return NULL;
}

static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *) dst;

        if (rt) {
                if (rt->rt6i_flags & RTF_CACHE)
                        ip6_del_rt(rt, NULL, NULL, NULL);
                else
                        dst_release(dst);
        }
        return NULL;
}

static void ip6_link_failure(struct sk_buff *skb)
{
        struct rt6_info *rt;

        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0, skb->dev);

        rt = (struct rt6_info *) skb->dst;
        if (rt) {
                if (rt->rt6i_flags & RTF_CACHE) {
                        dst_set_expires(&rt->u.dst, 0);
                        rt->rt6i_flags |= RTF_EXPIRES;
                } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
                        rt->rt6i_node->fn_sernum = -1;
        }
}

static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
        struct rt6_info *rt6 = (struct rt6_info *)dst;

        if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
                rt6->rt6i_flags |= RTF_MODIFIED;
                if (mtu < IPV6_MIN_MTU) {
                        mtu = IPV6_MIN_MTU;
                        dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
                }
                dst->metrics[RTAX_MTU-1] = mtu;
        }
}

/* Protected by rt6_lock.  */
static struct dst_entry *ndisc_dst_gc_list;
static int ipv6_get_mtu(struct net_device *dev);

static inline unsigned int ipv6_advmss(unsigned int mtu)
{
        mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

        if (mtu < ip6_rt_min_advmss)
                mtu = ip6_rt_min_advmss;

        /*
         * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
         * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
         * IPV6_MAXPLEN is also valid and means: "any MSS,
         * rely only on pmtu discovery"
         */
        if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
                mtu = IPV6_MAXPLEN;
        return mtu;
}

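/*
 * Editorial note (not part of the original source): for an Ethernet
 * MTU of 1500 the value advertised above is 1500 - 40 - 20 = 1440;
 * anything that would exceed IPV6_MAXPLEN minus a TCP header is
 * reported as IPV6_MAXPLEN (65535), meaning "no fixed MSS, rely on
 * PMTU discovery".
 */
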
struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
                                  struct neighbour *neigh,
                                  struct in6_addr *addr,
                                  int (*output)(struct sk_buff *))
{
        struct rt6_info *rt;
        struct inet6_dev *idev = in6_dev_get(dev);

        if (unlikely(idev == NULL))
                return NULL;

        rt = ip6_dst_alloc();
        if (unlikely(rt == NULL)) {
                in6_dev_put(idev);
                goto out;
        }

        dev_hold(dev);
        if (neigh)
                neigh_hold(neigh);
        else
                neigh = ndisc_get_neigh(dev, addr);

        rt->rt6i_dev      = dev;
        rt->rt6i_idev     = idev;
        rt->rt6i_nexthop  = neigh;
        atomic_set(&rt->u.dst.__refcnt, 1);
        rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
        rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
        rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
        rt->u.dst.output  = output;

#if 0	/* there's no chance to use these for ndisc */
        rt->u.dst.flags   = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
                                ? DST_HOST
                                : 0;
        ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
        rt->rt6i_dst.plen = 128;
#endif

        write_lock_bh(&rt6_lock);
        rt->u.dst.next = ndisc_dst_gc_list;
        ndisc_dst_gc_list = &rt->u.dst;
        write_unlock_bh(&rt6_lock);

        fib6_force_start_gc();

out:
        return (struct dst_entry *)rt;
}

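/*
 * Editorial note (not part of the original source): dst entries built
 * here never live in the fib6 tree, so they are chained on
 * ndisc_dst_gc_list instead and reclaimed by ndisc_dst_gc() below once
 * their refcount reaches zero; fib6_force_start_gc() makes sure the
 * garbage-collection timer actually fires for them.
 */
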
int ndisc_dst_gc(int *more)
{
        struct dst_entry *dst, *next, **pprev;
        int freed;

        next = NULL;
        freed = 0;

        pprev = &ndisc_dst_gc_list;
        while ((dst = *pprev) != NULL) {
                if (!atomic_read(&dst->__refcnt)) {
                        *pprev = dst->next;
                        dst_free(dst);
                        freed++;
                } else {
                        pprev = &dst->next;
                        (*more)++;
                }
        }

        return freed;
}

static int ip6_dst_gc(void)
{
        static unsigned expire = 30*HZ;
        static unsigned long last_gc;
        unsigned long now = jiffies;

        if (time_after(last_gc + ip6_rt_gc_min_interval, now) &&
            atomic_read(&ip6_dst_ops.entries) <= ip6_rt_max_size)
                goto out;

        expire++;
        fib6_run_gc(expire);
        last_gc = now;
        if (atomic_read(&ip6_dst_ops.entries) < ip6_dst_ops.gc_thresh)
                expire = ip6_rt_gc_timeout>>1;

out:
        expire -= expire>>ip6_rt_gc_elasticity;
        return (atomic_read(&ip6_dst_ops.entries) > ip6_rt_max_size);
}

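/*
 * Editorial note (not part of the original source): each pass shrinks
 * "expire" by expire >> ip6_rt_gc_elasticity (1/512th with the default
 * elasticity of 9), so sustained pressure gradually makes collection
 * more aggressive, while dropping back below gc_thresh resets it to
 * half of ip6_rt_gc_timeout (30 seconds with the 60*HZ default).
 */
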
/* Clean host part of a prefix. Not necessary in radix tree,
   but results in cleaner routing tables.

   Remove it only when all the things will work!
 */

static int ipv6_get_mtu(struct net_device *dev)
{
        int mtu = IPV6_MIN_MTU;
        struct inet6_dev *idev;

        idev = in6_dev_get(dev);
        if (idev) {
                mtu = idev->cnf.mtu6;
                in6_dev_put(idev);
        }
        return mtu;
}

int ipv6_get_hoplimit(struct net_device *dev)
{
        int hoplimit = ipv6_devconf.hop_limit;
        struct inet6_dev *idev;

        idev = in6_dev_get(dev);
        if (idev) {
                hoplimit = idev->cnf.hop_limit;
                in6_dev_put(idev);
        }
        return hoplimit;
}

int ip6_route_add(struct in6_rtmsg *rtmsg, struct nlmsghdr *nlh,
                  void *_rtattr, struct netlink_skb_parms *req)
{
        int err;
        struct rtmsg *r;
        struct rtattr **rta;
        struct rt6_info *rt = NULL;
        struct net_device *dev = NULL;
        struct inet6_dev *idev = NULL;
        int addr_type;

        rta = (struct rtattr **) _rtattr;

        if (rtmsg->rtmsg_dst_len > 128 || rtmsg->rtmsg_src_len > 128)
                return -EINVAL;
#ifndef CONFIG_IPV6_SUBTREES
        if (rtmsg->rtmsg_src_len)
                return -EINVAL;
#endif
        if (rtmsg->rtmsg_ifindex) {
                err = -ENODEV;
                dev = dev_get_by_index(rtmsg->rtmsg_ifindex);
                if (!dev)
                        goto out;
                idev = in6_dev_get(dev);
                if (!idev)
                        goto out;
        }

        if (rtmsg->rtmsg_metric == 0)
                rtmsg->rtmsg_metric = IP6_RT_PRIO_USER;

        rt = ip6_dst_alloc();
        if (rt == NULL) {
                err = -ENOMEM;
                goto out;
        }

        rt->u.dst.obsolete = -1;
        rt->rt6i_expires = jiffies + clock_t_to_jiffies(rtmsg->rtmsg_info);
        if (nlh && (r = NLMSG_DATA(nlh)))
                rt->rt6i_protocol = r->rtm_protocol;
        else
                rt->rt6i_protocol = RTPROT_BOOT;

        addr_type = ipv6_addr_type(&rtmsg->rtmsg_dst);

        if (addr_type & IPV6_ADDR_MULTICAST)
                rt->u.dst.input = ip6_mc_input;
        else
                rt->u.dst.input = ip6_forward;

        rt->u.dst.output = ip6_output;

        ipv6_addr_prefix(&rt->rt6i_dst.addr,
                         &rtmsg->rtmsg_dst, rtmsg->rtmsg_dst_len);
        rt->rt6i_dst.plen = rtmsg->rtmsg_dst_len;
        if (rt->rt6i_dst.plen == 128)
                rt->u.dst.flags = DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
        ipv6_addr_prefix(&rt->rt6i_src.addr,
                         &rtmsg->rtmsg_src, rtmsg->rtmsg_src_len);
        rt->rt6i_src.plen = rtmsg->rtmsg_src_len;
#endif

        rt->rt6i_metric = rtmsg->rtmsg_metric;

        /* We cannot add true routes via loopback here,
           they would result in kernel looping; promote them to reject routes
         */
        if ((rtmsg->rtmsg_flags & RTF_REJECT) ||
            (dev && (dev->flags & IFF_LOOPBACK) && !(addr_type & IPV6_ADDR_LOOPBACK))) {
                /* hold loopback dev/idev if we haven't done so. */
                if (dev != &loopback_dev) {
                        if (dev) {
                                dev_put(dev);
                                in6_dev_put(idev);
                        }
                        dev = &loopback_dev;
                        dev_hold(dev);
                        idev = in6_dev_get(dev);
                        if (!idev) {
                                err = -ENODEV;
                                goto out;
                        }
                }
                rt->u.dst.output = ip6_pkt_discard_out;
                rt->u.dst.input = ip6_pkt_discard;
                rt->u.dst.error = -ENETUNREACH;
                rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
                goto install_route;
        }

        if (rtmsg->rtmsg_flags & RTF_GATEWAY) {
                struct in6_addr *gw_addr;
                int gwa_type;

                gw_addr = &rtmsg->rtmsg_gateway;
                ipv6_addr_copy(&rt->rt6i_gateway, &rtmsg->rtmsg_gateway);
                gwa_type = ipv6_addr_type(gw_addr);

                if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
                        struct rt6_info *grt;

                        /* IPv6 strictly inhibits using not link-local
                           addresses as nexthop address.
                           Otherwise, router will not able to send redirects.
                           It is very good, but in some (rare!) circumstances
                           (SIT, PtP, NBMA NOARP links) it is handy to allow
                           some exceptions. --ANK
                         */
                        err = -EINVAL;
                        if (!(gwa_type & IPV6_ADDR_UNICAST))
                                goto out;

                        grt = rt6_lookup(gw_addr, NULL, rtmsg->rtmsg_ifindex, 1);

                        err = -EHOSTUNREACH;
                        if (grt == NULL)
                                goto out;
                        if (dev) {
                                if (dev != grt->rt6i_dev) {
                                        dst_release(&grt->u.dst);
                                        goto out;
                                }
                        } else {
                                dev = grt->rt6i_dev;
                                idev = grt->rt6i_idev;
                                dev_hold(dev);
                                in6_dev_hold(grt->rt6i_idev);
                        }
                        if (!(grt->rt6i_flags & RTF_GATEWAY))
                                err = 0;
                        dst_release(&grt->u.dst);

                        if (err)
                                goto out;
                }
                err = -EINVAL;
                if (dev == NULL || (dev->flags & IFF_LOOPBACK))
                        goto out;
        }

        err = -ENODEV;
        if (dev == NULL)
                goto out;

        if (rtmsg->rtmsg_flags & (RTF_GATEWAY|RTF_NONEXTHOP)) {
                rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
                if (IS_ERR(rt->rt6i_nexthop)) {
                        err = PTR_ERR(rt->rt6i_nexthop);
                        rt->rt6i_nexthop = NULL;
                        goto out;
                }
        }

        rt->rt6i_flags = rtmsg->rtmsg_flags;

install_route:
        if (rta && rta[RTA_METRICS-1]) {
                int attrlen = RTA_PAYLOAD(rta[RTA_METRICS-1]);
                struct rtattr *attr = RTA_DATA(rta[RTA_METRICS-1]);

                while (RTA_OK(attr, attrlen)) {
                        unsigned flavor = attr->rta_type;
                        if (flavor) {
                                if (flavor > RTAX_MAX) {
                                        err = -EINVAL;
                                        goto out;
                                }
                                rt->u.dst.metrics[flavor-1] =
                                        *(u32 *)RTA_DATA(attr);
                        }
                        attr = RTA_NEXT(attr, attrlen);
                }
        }

        if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
                rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
        if (!rt->u.dst.metrics[RTAX_MTU-1])
                rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
        if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
                rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
        rt->u.dst.dev = dev;
        rt->rt6i_idev = idev;
        return ip6_ins_rt(rt, nlh, _rtattr, req);

out:
        if (dev)
                dev_put(dev);
        if (idev)
                in6_dev_put(idev);
        if (rt)
                dst_free((struct dst_entry *) rt);
        return err;
}

int ip6_del_rt(struct rt6_info *rt, struct nlmsghdr *nlh, void *_rtattr, struct netlink_skb_parms *req)
{
        int err;

        write_lock_bh(&rt6_lock);

        err = fib6_del(rt, nlh, _rtattr, req);
        dst_release(&rt->u.dst);

        write_unlock_bh(&rt6_lock);

        return err;
}

static int ip6_route_del(struct in6_rtmsg *rtmsg, struct nlmsghdr *nlh, void *_rtattr, struct netlink_skb_parms *req)
{
        struct fib6_node *fn;
        struct rt6_info *rt;
        int err = -ESRCH;

        read_lock_bh(&rt6_lock);

        fn = fib6_locate(&ip6_routing_table,
                         &rtmsg->rtmsg_dst, rtmsg->rtmsg_dst_len,
                         &rtmsg->rtmsg_src, rtmsg->rtmsg_src_len);

        if (fn) {
                for (rt = fn->leaf; rt; rt = rt->u.next) {
                        if (rtmsg->rtmsg_ifindex &&
                            (rt->rt6i_dev == NULL ||
                             rt->rt6i_dev->ifindex != rtmsg->rtmsg_ifindex))
                                continue;
                        if (rtmsg->rtmsg_flags & RTF_GATEWAY &&
                            !ipv6_addr_equal(&rtmsg->rtmsg_gateway, &rt->rt6i_gateway))
                                continue;
                        if (rtmsg->rtmsg_metric &&
                            rtmsg->rtmsg_metric != rt->rt6i_metric)
                                continue;
                        dst_hold(&rt->u.dst);
                        read_unlock_bh(&rt6_lock);

                        return ip6_del_rt(rt, nlh, _rtattr, req);
                }
        }
        read_unlock_bh(&rt6_lock);

        return err;
}

void rt6_redirect(struct in6_addr *dest, struct in6_addr *saddr,
                  struct neighbour *neigh, u8 *lladdr, int on_link)
{
        struct rt6_info *rt, *nrt;

        /* Locate old route to this destination. */
        rt = rt6_lookup(dest, NULL, neigh->dev->ifindex, 1);
        if (rt == NULL)
                return;

        if (neigh->dev != rt->rt6i_dev)
                goto out;

        /*
         * Current route is on-link; redirect is always invalid.
         *
         * Seems, previous statement is not true. It could
         * be node, which looks for us as on-link (f.e. proxy ndisc)
         * But then router serving it might decide, that we should
         * know truth 8)8) --ANK (980726).
         */
        if (!(rt->rt6i_flags & RTF_GATEWAY))
                goto out;

        /*
         *	RFC 2461 specifies that redirects should only be
         *	accepted if they come from the nexthop to the target.
         *	Due to the way default routers are chosen, this notion
         *	is a bit fuzzy and one might need to check all default
         *	routers.
         */
        if (!ipv6_addr_equal(saddr, &rt->rt6i_gateway)) {
                if (rt->rt6i_flags & RTF_DEFAULT) {
                        struct rt6_info *rt1;

                        read_lock(&rt6_lock);
                        for (rt1 = ip6_routing_table.leaf; rt1; rt1 = rt1->u.next) {
                                if (ipv6_addr_equal(saddr, &rt1->rt6i_gateway)) {
                                        dst_hold(&rt1->u.dst);
                                        dst_release(&rt->u.dst);
                                        read_unlock(&rt6_lock);
                                        rt = rt1;
                                        goto source_ok;
                                }
                        }
                        read_unlock(&rt6_lock);
                }
                if (net_ratelimit())
                        printk(KERN_DEBUG "rt6_redirect: source isn't a valid nexthop "
                               "for redirect target\n");
                goto out;
        }

source_ok:

        /*
         *	We have finally decided to accept it.
         */

        neigh_update(neigh, lladdr, NUD_STALE,
                     NEIGH_UPDATE_F_WEAK_OVERRIDE|
                     NEIGH_UPDATE_F_OVERRIDE|
                     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
                                     NEIGH_UPDATE_F_ISROUTER)));

        /*
         * Redirect received -> path was valid.
         * Look, redirects are sent only in response to data packets,
         * so that this nexthop apparently is reachable. --ANK
         */
        dst_confirm(&rt->u.dst);

        /* Duplicate redirect: silently ignore. */
        if (neigh == rt->u.dst.neighbour)
                goto out;

        nrt = ip6_rt_copy(rt);
        if (nrt == NULL)
                goto out;

        nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
        if (on_link)
                nrt->rt6i_flags &= ~RTF_GATEWAY;

        ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
        nrt->rt6i_dst.plen = 128;
        nrt->u.dst.flags |= DST_HOST;

        ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr *)neigh->primary_key);
        nrt->rt6i_nexthop = neigh_clone(neigh);
        /* Reset pmtu, it may be better */
        nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
        nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&nrt->u.dst));

        if (ip6_ins_rt(nrt, NULL, NULL, NULL))
                goto out;

        if (rt->rt6i_flags & RTF_CACHE) {
                ip6_del_rt(rt, NULL, NULL, NULL);
                return;
        }

out:
        dst_release(&rt->u.dst);
}

/*
 *	Handle ICMP "packet too big" messages
 *	i.e. Path MTU discovery
 */

void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
                        struct net_device *dev, u32 pmtu)
{
        struct rt6_info *rt, *nrt;
        int allfrag = 0;

        rt = rt6_lookup(daddr, saddr, dev->ifindex, 0);
        if (rt == NULL)
                return;

        if (pmtu >= dst_mtu(&rt->u.dst))
                goto out;

        if (pmtu < IPV6_MIN_MTU) {
                /*
                 * According to RFC2460, PMTU is set to the IPv6 Minimum Link
                 * MTU (1280) and a fragment header should always be included
                 * after a node receiving Too Big message reporting PMTU is
                 * less than the IPv6 Minimum Link MTU.
                 */
                pmtu = IPV6_MIN_MTU;
                allfrag = 1;
        }

        /* New mtu received -> path was valid.
           They are sent only in response to data packets,
           so that this nexthop apparently is reachable. --ANK
         */
        dst_confirm(&rt->u.dst);

        /* Host route. If it is static, it would be better
           not to override it, but add new one, so that
           when cache entry will expire old pmtu
           would return automatically.
         */
        if (rt->rt6i_flags & RTF_CACHE) {
                rt->u.dst.metrics[RTAX_MTU-1] = pmtu;
                if (allfrag)
                        rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
                dst_set_expires(&rt->u.dst, ip6_rt_mtu_expires);
                rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
                goto out;
        }

        /* Network route.
           Two cases are possible:
           1. It is a connected route. Action: COW it.
           2. It is a gatewayed or NONEXTHOP route. Action: clone it.
         */
        if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
                nrt = rt6_alloc_cow(rt, daddr, saddr);
        else
                nrt = rt6_alloc_clone(rt, daddr);

        if (nrt) {
                nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
                if (allfrag)
                        nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;

                /* According to RFC 1981, a PMTU increase should not be
                 * detected within 5 minutes; the recommended timer is 10
                 * minutes.  Here the route expiration time is set to
                 * ip6_rt_mtu_expires, which is 10 minutes by default.
                 * After that the decreased pmtu expires and a PMTU
                 * increase can be detected automatically.
                 */
                dst_set_expires(&nrt->u.dst, ip6_rt_mtu_expires);
                nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;

                ip6_ins_rt(nrt, NULL, NULL, NULL);
        }

out:
        dst_release(&rt->u.dst);
}

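/*
 * Editorial note (not part of the original source): an illustrative
 * timeline, assuming the default ip6_rt_mtu_expires of 10*60*HZ.
 * A Packet Too Big message reporting 1400 bytes against a 1500-byte
 * route installs a /128 clone with RTAX_MTU = 1400 and RTF_EXPIRES
 * set; ten minutes later the clone expires and the path reverts to the
 * larger route MTU, which is how PMTU increases are eventually
 * rediscovered.
 */
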
/*
 *	Misc support functions
 */

static struct rt6_info *ip6_rt_copy(struct rt6_info *ort)
{
        struct rt6_info *rt = ip6_dst_alloc();

        if (rt) {
                rt->u.dst.input = ort->u.dst.input;
                rt->u.dst.output = ort->u.dst.output;

                memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));
                rt->u.dst.dev = ort->u.dst.dev;
                if (rt->u.dst.dev)
                        dev_hold(rt->u.dst.dev);
                rt->rt6i_idev = ort->rt6i_idev;
                if (rt->rt6i_idev)
                        in6_dev_hold(rt->rt6i_idev);
                rt->u.dst.lastuse = jiffies;
                rt->rt6i_expires = 0;

                ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
                rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
                rt->rt6i_metric = 0;

                memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
                memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
        }
        return rt;
}

struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
{
        struct rt6_info *rt;
        struct fib6_node *fn;

        fn = &ip6_routing_table;

        write_lock_bh(&rt6_lock);
        for (rt = fn->leaf; rt; rt = rt->u.next) {
                if (dev == rt->rt6i_dev &&
                    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
                    ipv6_addr_equal(&rt->rt6i_gateway, addr))
                        break;
        }
        if (rt)
                dst_hold(&rt->u.dst);
        write_unlock_bh(&rt6_lock);
        return rt;
}

struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
                                     struct net_device *dev,
                                     unsigned int pref)
{
        struct in6_rtmsg rtmsg;

        memset(&rtmsg, 0, sizeof(struct in6_rtmsg));
        rtmsg.rtmsg_type = RTMSG_NEWROUTE;
        ipv6_addr_copy(&rtmsg.rtmsg_gateway, gwaddr);
        rtmsg.rtmsg_metric = 1024;
        rtmsg.rtmsg_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | RTF_UP | RTF_EXPIRES |
                            RTF_PREF(pref);

        rtmsg.rtmsg_ifindex = dev->ifindex;

        ip6_route_add(&rtmsg, NULL, NULL, NULL);
        return rt6_get_dflt_router(gwaddr, dev);
}

void rt6_purge_dflt_routers(void)
{
        struct rt6_info *rt;

restart:
        read_lock_bh(&rt6_lock);
        for (rt = ip6_routing_table.leaf; rt; rt = rt->u.next) {
                if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
                        dst_hold(&rt->u.dst);

                        read_unlock_bh(&rt6_lock);

                        ip6_del_rt(rt, NULL, NULL, NULL);

                        goto restart;
                }
        }
        read_unlock_bh(&rt6_lock);
}

int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
{
        struct in6_rtmsg rtmsg;
        int err;

        switch (cmd) {
        case SIOCADDRT:         /* Add a route */
        case SIOCDELRT:         /* Delete a route */
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                err = copy_from_user(&rtmsg, arg,
                                     sizeof(struct in6_rtmsg));
                if (err)
                        return -EFAULT;

                rtnl_lock();
                switch (cmd) {
                case SIOCADDRT:
                        err = ip6_route_add(&rtmsg, NULL, NULL, NULL);
                        break;
                case SIOCDELRT:
                        err = ip6_route_del(&rtmsg, NULL, NULL, NULL);
                        break;
                default:
                        err = -EINVAL;
                }
                rtnl_unlock();

                return err;
        }

        return -EINVAL;
}

/*
 *	Drop the packet on the floor
 */

static int ip6_pkt_discard(struct sk_buff *skb)
{
        IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
        icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
        kfree_skb(skb);
        return 0;
}

static int ip6_pkt_discard_out(struct sk_buff *skb)
{
        skb->dev = skb->dst->dev;
        return ip6_pkt_discard(skb);
}

/*
 *	Allocate a dst for local (unicast / anycast) address.
 */

struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
                                    const struct in6_addr *addr,
                                    int anycast)
{
        struct rt6_info *rt = ip6_dst_alloc();

        if (rt == NULL)
                return ERR_PTR(-ENOMEM);

        dev_hold(&loopback_dev);
        in6_dev_hold(idev);

        rt->u.dst.flags = DST_HOST;
        rt->u.dst.input = ip6_input;
        rt->u.dst.output = ip6_output;
        rt->rt6i_dev = &loopback_dev;
        rt->rt6i_idev = idev;
        rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
        rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_mtu(&rt->u.dst));
        rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1;
        rt->u.dst.obsolete = -1;

        rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
        if (anycast)
                rt->rt6i_flags |= RTF_ANYCAST;
        else
                rt->rt6i_flags |= RTF_LOCAL;
        rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
        if (rt->rt6i_nexthop == NULL) {
                dst_free((struct dst_entry *) rt);
                return ERR_PTR(-ENOMEM);
        }

        ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
        rt->rt6i_dst.plen = 128;

        atomic_set(&rt->u.dst.__refcnt, 1);

        return rt;
}

static int fib6_ifdown(struct rt6_info *rt, void *arg)
{
        if (((void *)rt->rt6i_dev == arg || arg == NULL) &&
            rt != &ip6_null_entry) {
                RT6_TRACE("deleted by ifdown %p\n", rt);
                return -1;
        }
        return 0;
}

void rt6_ifdown(struct net_device *dev)
{
        write_lock_bh(&rt6_lock);
        fib6_clean_tree(&ip6_routing_table, fib6_ifdown, 0, dev);
        write_unlock_bh(&rt6_lock);
}

struct rt6_mtu_change_arg
{
        struct net_device *dev;
        unsigned mtu;
};

static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
{
        struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
        struct inet6_dev *idev;

        /* In IPv6 pmtu discovery is not optional,
           so that RTAX_MTU lock cannot disable it.
           We still use this lock to block changes
           caused by addrconf/ndisc.
        */

        idev = __in6_dev_get(arg->dev);
        if (idev == NULL)
                return 0;

        /* For administrative MTU increase, there is no way to discover
           IPv6 PMTU increase, so PMTU increase should be updated here.
           Since RFC 1981 doesn't include administrative MTU increase,
           updating PMTU increase is a MUST. (i.e. jumbo frame)

           If the new MTU is less than the route PMTU, the new MTU will be the
           lowest MTU in the path; update the route PMTU to reflect the PMTU
           decrease.  If the new MTU is greater than the route PMTU, and the
           old MTU is the lowest MTU in the path, update the route PMTU
           to reflect the increase.  In this case, if the other nodes' MTU
           also happens to be the lowest, a Too Big message will trigger
           PMTU discovery again.
         */
        if (rt->rt6i_dev == arg->dev &&
            !dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
            (dst_mtu(&rt->u.dst) > arg->mtu ||
             (dst_mtu(&rt->u.dst) < arg->mtu &&
              dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) {
                rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu;
                rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(arg->mtu);
        }
        return 0;
}

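/*
 * Editorial note (not part of the original source): a concrete reading
 * of the condition above.  If a device MTU drops from 1500 to 1400,
 * every unlocked route on it with dst_mtu 1500 (> 1400) is lowered to
 * 1400.  If the MTU is later raised, only routes whose PMTU still
 * equals the device MTU recorded in idev->cnf.mtu6 are raised with it,
 * so PMTU values learned from a remote bottleneck stay untouched.
 */
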
void rt6_mtu_change(struct net_device *dev, unsigned mtu)
{
        struct rt6_mtu_change_arg arg;

        arg.dev = dev;
        arg.mtu = mtu;
        read_lock_bh(&rt6_lock);
        fib6_clean_tree(&ip6_routing_table, rt6_mtu_change_route, 0, &arg);
        read_unlock_bh(&rt6_lock);
}

static int inet6_rtm_to_rtmsg(struct rtmsg *r, struct rtattr **rta,
                              struct in6_rtmsg *rtmsg)
{
        memset(rtmsg, 0, sizeof(*rtmsg));

        rtmsg->rtmsg_dst_len = r->rtm_dst_len;
        rtmsg->rtmsg_src_len = r->rtm_src_len;
        rtmsg->rtmsg_flags = RTF_UP;
        if (r->rtm_type == RTN_UNREACHABLE)
                rtmsg->rtmsg_flags |= RTF_REJECT;

        if (rta[RTA_GATEWAY-1]) {
                if (rta[RTA_GATEWAY-1]->rta_len != RTA_LENGTH(16))
                        return -EINVAL;
                memcpy(&rtmsg->rtmsg_gateway, RTA_DATA(rta[RTA_GATEWAY-1]), 16);
                rtmsg->rtmsg_flags |= RTF_GATEWAY;
        }
        if (rta[RTA_DST-1]) {
                if (RTA_PAYLOAD(rta[RTA_DST-1]) < ((r->rtm_dst_len+7)>>3))
                        return -EINVAL;
                memcpy(&rtmsg->rtmsg_dst, RTA_DATA(rta[RTA_DST-1]), ((r->rtm_dst_len+7)>>3));
        }
        if (rta[RTA_SRC-1]) {
                if (RTA_PAYLOAD(rta[RTA_SRC-1]) < ((r->rtm_src_len+7)>>3))
                        return -EINVAL;
                memcpy(&rtmsg->rtmsg_src, RTA_DATA(rta[RTA_SRC-1]), ((r->rtm_src_len+7)>>3));
        }
        if (rta[RTA_OIF-1]) {
                if (rta[RTA_OIF-1]->rta_len != RTA_LENGTH(sizeof(int)))
                        return -EINVAL;
                memcpy(&rtmsg->rtmsg_ifindex, RTA_DATA(rta[RTA_OIF-1]), sizeof(int));
        }
        if (rta[RTA_PRIORITY-1]) {
                if (rta[RTA_PRIORITY-1]->rta_len != RTA_LENGTH(4))
                        return -EINVAL;
                memcpy(&rtmsg->rtmsg_metric, RTA_DATA(rta[RTA_PRIORITY-1]), 4);
        }
        return 0;
}

int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct rtmsg *r = NLMSG_DATA(nlh);
        struct in6_rtmsg rtmsg;

        if (inet6_rtm_to_rtmsg(r, arg, &rtmsg))
                return -EINVAL;
        return ip6_route_del(&rtmsg, nlh, arg, &NETLINK_CB(skb));
}

int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct rtmsg *r = NLMSG_DATA(nlh);
        struct in6_rtmsg rtmsg;

        if (inet6_rtm_to_rtmsg(r, arg, &rtmsg))
                return -EINVAL;
        return ip6_route_add(&rtmsg, nlh, arg, &NETLINK_CB(skb));
}

struct rt6_rtnl_dump_arg
{
        struct sk_buff *skb;
        struct netlink_callback *cb;
};

static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
                         struct in6_addr *dst, struct in6_addr *src,
                         int iif, int type, u32 pid, u32 seq,
                         int prefix, unsigned int flags)
{
        struct rtmsg *rtm;
        struct nlmsghdr *nlh;
        unsigned char *b = skb->tail;
        struct rta_cacheinfo ci;

        if (prefix) {	/* user wants prefix routes only */
                if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
                        /* success since this is not a prefix route */
                        return 1;
                }
        }

        nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*rtm), flags);
        rtm = NLMSG_DATA(nlh);
        rtm->rtm_family = AF_INET6;
        rtm->rtm_dst_len = rt->rt6i_dst.plen;
        rtm->rtm_src_len = rt->rt6i_src.plen;
        rtm->rtm_tos = 0;
        rtm->rtm_table = RT_TABLE_MAIN;
        if (rt->rt6i_flags & RTF_REJECT)
                rtm->rtm_type = RTN_UNREACHABLE;
        else if (rt->rt6i_dev && (rt->rt6i_dev->flags & IFF_LOOPBACK))
                rtm->rtm_type = RTN_LOCAL;
        else
                rtm->rtm_type = RTN_UNICAST;
        rtm->rtm_flags = 0;
        rtm->rtm_scope = RT_SCOPE_UNIVERSE;
        rtm->rtm_protocol = rt->rt6i_protocol;
        if (rt->rt6i_flags & RTF_DYNAMIC)
                rtm->rtm_protocol = RTPROT_REDIRECT;
        else if (rt->rt6i_flags & RTF_ADDRCONF)
                rtm->rtm_protocol = RTPROT_KERNEL;
        else if (rt->rt6i_flags & RTF_DEFAULT)
                rtm->rtm_protocol = RTPROT_RA;

        if (rt->rt6i_flags & RTF_CACHE)
                rtm->rtm_flags |= RTM_F_CLONED;

        if (dst) {
                RTA_PUT(skb, RTA_DST, 16, dst);
                rtm->rtm_dst_len = 128;
        } else if (rtm->rtm_dst_len)
                RTA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr);
#ifdef CONFIG_IPV6_SUBTREES
        if (src) {
                RTA_PUT(skb, RTA_SRC, 16, src);
                rtm->rtm_src_len = 128;
        } else if (rtm->rtm_src_len)
                RTA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr);
#endif
        if (iif)
                RTA_PUT(skb, RTA_IIF, 4, &iif);
        else if (dst) {
                struct in6_addr saddr_buf;
                if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0)
                        RTA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
        }
        if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
                goto rtattr_failure;
        if (rt->u.dst.neighbour)
                RTA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key);
        if (rt->u.dst.dev)
                RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->rt6i_dev->ifindex);
        RTA_PUT(skb, RTA_PRIORITY, 4, &rt->rt6i_metric);
        ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
        if (rt->rt6i_expires)
                ci.rta_expires = jiffies_to_clock_t(rt->rt6i_expires - jiffies);
        else
                ci.rta_expires = 0;
        ci.rta_used = rt->u.dst.__use;
        ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
        ci.rta_error = rt->u.dst.error;
        ci.rta_id = 0;
        ci.rta_ts = 0;
        ci.rta_tsage = 0;
        RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
        nlh->nlmsg_len = skb->tail - b;
        return skb->len;

nlmsg_failure:
rtattr_failure:
        skb_trim(skb, b - skb->data);
        return -1;
}

static int rt6_dump_route(struct rt6_info *rt, void *p_arg)
{
        struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
        int prefix;

        if (arg->cb->nlh->nlmsg_len >= NLMSG_LENGTH(sizeof(struct rtmsg))) {
                struct rtmsg *rtm = NLMSG_DATA(arg->cb->nlh);
                prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
        } else
                prefix = 0;

        return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
                             NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
                             prefix, NLM_F_MULTI);
}

static int fib6_dump_node(struct fib6_walker_t *w)
{
        int res;
        struct rt6_info *rt;

        for (rt = w->leaf; rt; rt = rt->u.next) {
                res = rt6_dump_route(rt, w->args);
                if (res < 0) {
                        /* Frame is full, suspend walking */
                        w->leaf = rt;
                        return 1;
                }
                BUG_TRAP(res != 0);
        }
        w->leaf = NULL;
        return 0;
}

static void fib6_dump_end(struct netlink_callback *cb)
{
        struct fib6_walker_t *w = (void *)cb->args[0];

        if (w) {
                cb->args[0] = 0;
                fib6_walker_unlink(w);
                kfree(w);
        }
        cb->done = (void *)cb->args[1];
        cb->args[1] = 0;
}

static int fib6_dump_done(struct netlink_callback *cb)
{
        fib6_dump_end(cb);
        return cb->done ? cb->done(cb) : 0;
}

int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct rt6_rtnl_dump_arg arg;
        struct fib6_walker_t *w;
        int res;

        arg.skb = skb;
        arg.cb = cb;

        w = (void *)cb->args[0];
        if (w == NULL) {
                /* New dump:
                 *
                 * 1. hook callback destructor.
                 */
                cb->args[1] = (long)cb->done;
                cb->done = fib6_dump_done;

                /*
                 * 2. allocate and initialize walker.
                 */
                w = kmalloc(sizeof(*w), GFP_ATOMIC);
                if (w == NULL)
                        return -ENOMEM;
                RT6_TRACE("dump<%p", w);
                memset(w, 0, sizeof(*w));
                w->root = &ip6_routing_table;
                w->func = fib6_dump_node;
                w->args = &arg;
                cb->args[0] = (long)w;
                read_lock_bh(&rt6_lock);
                res = fib6_walk(w);
                read_unlock_bh(&rt6_lock);
        } else {
                w->args = &arg;
                read_lock_bh(&rt6_lock);
                res = fib6_walk_continue(w);
                read_unlock_bh(&rt6_lock);
        }
#if RT6_DEBUG >= 3
        if (res <= 0 && skb->len == 0)
                RT6_TRACE("%p>dump end\n", w);
#endif
        res = res < 0 ? res : skb->len;
        /* res < 0 is an error. (really, impossible)
           res == 0 means that dump is complete, but skb still can contain data.
           res > 0 dump is not complete, but frame is full.
         */
        /* Destroy walker, if dump of this table is complete. */
        if (res <= 0)
                fib6_dump_end(cb);
        return res;
}

int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
        struct rtattr **rta = arg;
        int iif = 0;
        int err = -ENOBUFS;
        struct sk_buff *skb;
        struct flowi fl;
        struct rt6_info *rt;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (skb == NULL)
                goto out;

        /* Reserve room for dummy headers, this skb can pass
           through good chunk of routing engine.
         */
        skb->mac.raw = skb->data;
        skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));

        memset(&fl, 0, sizeof(fl));
        if (rta[RTA_SRC-1])
                ipv6_addr_copy(&fl.fl6_src,
                               (struct in6_addr *)RTA_DATA(rta[RTA_SRC-1]));
        if (rta[RTA_DST-1])
                ipv6_addr_copy(&fl.fl6_dst,
                               (struct in6_addr *)RTA_DATA(rta[RTA_DST-1]));

        if (rta[RTA_IIF-1])
                memcpy(&iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));

        if (iif) {
                struct net_device *dev;
                dev = __dev_get_by_index(iif);
                if (!dev) {
                        err = -ENODEV;
                        goto out_free;
                }
        }

        fl.oif = 0;
        if (rta[RTA_OIF-1])
                memcpy(&fl.oif, RTA_DATA(rta[RTA_OIF-1]), sizeof(int));

        rt = (struct rt6_info *)ip6_route_output(NULL, &fl);

        skb->dst = &rt->u.dst;

        NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
        err = rt6_fill_node(skb, rt,
                            &fl.fl6_dst, &fl.fl6_src,
                            iif,
                            RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
                            nlh->nlmsg_seq, 0, 0);
        if (err < 0) {
                err = -EMSGSIZE;
                goto out_free;
        }

        err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
        if (err > 0)
                err = 0;
out:
        return err;
out_free:
        kfree_skb(skb);
        goto out;
}

void inet6_rt_notify(int event, struct rt6_info *rt, struct nlmsghdr *nlh,
                     struct netlink_skb_parms *req)
{
        struct sk_buff *skb;
        int size = NLMSG_SPACE(sizeof(struct rtmsg) + 256);
        u32 pid = current->pid;
        u32 seq = 0;

        if (req)
                pid = req->pid;
        if (nlh)
                seq = nlh->nlmsg_seq;

        skb = alloc_skb(size, gfp_any());
        if (!skb) {
                netlink_set_err(rtnl, 0, RTNLGRP_IPV6_ROUTE, ENOBUFS);
                return;
        }
        if (rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0) < 0) {
                kfree_skb(skb);
                netlink_set_err(rtnl, 0, RTNLGRP_IPV6_ROUTE, EINVAL);
                return;
        }
        NETLINK_CB(skb).dst_group = RTNLGRP_IPV6_ROUTE;
        netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV6_ROUTE, gfp_any());
}

#ifdef CONFIG_PROC_FS

#define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)

struct rt6_proc_arg
{
        char *buffer;
        int offset;
        int length;
        int skip;
        int len;
};

static int rt6_info_route(struct rt6_info *rt, void *p_arg)
{
        struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg;
        int i;

        if (arg->skip < arg->offset / RT6_INFO_LEN) {
                arg->skip++;
                return 0;
        }

        if (arg->len >= arg->length)
                return 0;

        for (i = 0; i < 16; i++) {
                sprintf(arg->buffer + arg->len, "%02x",
                        rt->rt6i_dst.addr.s6_addr[i]);
                arg->len += 2;
        }
        arg->len += sprintf(arg->buffer + arg->len, " %02x ",
                            rt->rt6i_dst.plen);

#ifdef CONFIG_IPV6_SUBTREES
        for (i = 0; i < 16; i++) {
                sprintf(arg->buffer + arg->len, "%02x",
                        rt->rt6i_src.addr.s6_addr[i]);
                arg->len += 2;
        }
        arg->len += sprintf(arg->buffer + arg->len, " %02x ",
                            rt->rt6i_src.plen);
#else
        sprintf(arg->buffer + arg->len,
                "00000000000000000000000000000000 00 ");
        arg->len += 36;
#endif

        if (rt->rt6i_nexthop) {
                for (i = 0; i < 16; i++) {
                        sprintf(arg->buffer + arg->len, "%02x",
                                rt->rt6i_nexthop->primary_key[i]);
                        arg->len += 2;
                }
        } else {
                sprintf(arg->buffer + arg->len,
                        "00000000000000000000000000000000");
                arg->len += 32;
        }
        arg->len += sprintf(arg->buffer + arg->len,
                            " %08x %08x %08x %08x %8s\n",
                            rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
                            rt->u.dst.__use, rt->rt6i_flags,
                            rt->rt6i_dev ? rt->rt6i_dev->name : "");
        return 0;
}

static int rt6_proc_info(char *buffer, char **start, off_t offset, int length)
{
        struct rt6_proc_arg arg;
        arg.buffer = buffer;
        arg.offset = offset;
        arg.length = length;
        arg.skip = 0;
        arg.len = 0;

        read_lock_bh(&rt6_lock);
        fib6_clean_tree(&ip6_routing_table, rt6_info_route, 0, &arg);
        read_unlock_bh(&rt6_lock);

        *start = buffer;
        if (offset)
                *start += offset % RT6_INFO_LEN;

        arg.len -= offset % RT6_INFO_LEN;

        if (arg.len > length)
                arg.len = length;
        if (arg.len < 0)
                arg.len = 0;

        return arg.len;
}

static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
        seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
                   rt6_stats.fib_nodes, rt6_stats.fib_route_nodes,
                   rt6_stats.fib_rt_alloc, rt6_stats.fib_rt_entries,
                   rt6_stats.fib_rt_cache,
                   atomic_read(&ip6_dst_ops.entries),
                   rt6_stats.fib_discarded_routes);

        return 0;
}

static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
        return single_open(file, rt6_stats_seq_show, NULL);
}

static struct file_operations rt6_stats_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = rt6_stats_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};
#endif	/* CONFIG_PROC_FS */

#ifdef CONFIG_SYSCTL

static int flush_delay;

static
int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file *filp,
                              void __user *buffer, size_t *lenp, loff_t *ppos)
{
        if (write) {
                proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
                fib6_run_gc(flush_delay <= 0 ? ~0UL : (unsigned long)flush_delay);
                return 0;
        } else
                return -EINVAL;
}

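/*
 * Editorial note (not part of the original source): this handler backs
 * /proc/sys/net/ipv6/route/flush.  Writing a positive value hands it to
 * fib6_run_gc() as the aging timeout for an immediate collection pass;
 * zero or a negative value is mapped to ~0UL, for which fib6_run_gc()
 * falls back to its default interval.
 */
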
ctl_table ipv6_route_table[] = {
        {
                .ctl_name       = NET_IPV6_ROUTE_FLUSH,
                .procname       = "flush",
                .data           = &flush_delay,
                .maxlen         = sizeof(int),
                .mode           = 0200,
                .proc_handler   = &ipv6_sysctl_rtcache_flush
        },
        {
                .ctl_name       = NET_IPV6_ROUTE_GC_THRESH,
                .procname       = "gc_thresh",
                .data           = &ip6_dst_ops.gc_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = NET_IPV6_ROUTE_MAX_SIZE,
                .procname       = "max_size",
                .data           = &ip6_rt_max_size,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
        {
                .ctl_name       = NET_IPV6_ROUTE_GC_MIN_INTERVAL,
                .procname       = "gc_min_interval",
                .data           = &ip6_rt_gc_min_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
        {
                .ctl_name       = NET_IPV6_ROUTE_GC_TIMEOUT,
                .procname       = "gc_timeout",
                .data           = &ip6_rt_gc_timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
        {
                .ctl_name       = NET_IPV6_ROUTE_GC_INTERVAL,
                .procname       = "gc_interval",
                .data           = &ip6_rt_gc_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
        {
                .ctl_name       = NET_IPV6_ROUTE_GC_ELASTICITY,
                .procname       = "gc_elasticity",
                .data           = &ip6_rt_gc_elasticity,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
        {
                .ctl_name       = NET_IPV6_ROUTE_MTU_EXPIRES,
                .procname       = "mtu_expires",
                .data           = &ip6_rt_mtu_expires,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
        {
                .ctl_name       = NET_IPV6_ROUTE_MIN_ADVMSS,
                .procname       = "min_adv_mss",
                .data           = &ip6_rt_min_advmss,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
                .strategy       = &sysctl_jiffies,
        },
        {
                .ctl_name       = NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS,
                .procname       = "gc_min_interval_ms",
                .data           = &ip6_rt_gc_min_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_ms_jiffies,
                .strategy       = &sysctl_ms_jiffies,
        },
        { .ctl_name = 0 }
};

#endif	/* CONFIG_SYSCTL */

void __init ip6_route_init(void)
{
        struct proc_dir_entry *p;

        ip6_dst_ops.kmem_cachep = kmem_cache_create("ip6_dst_cache",
                                                    sizeof(struct rt6_info),
                                                    0, SLAB_HWCACHE_ALIGN,
                                                    NULL, NULL);
        if (!ip6_dst_ops.kmem_cachep)
                panic("cannot create ip6_dst_cache");

        fib6_init();
#ifdef CONFIG_PROC_FS
        p = proc_net_create("ipv6_route", 0, rt6_proc_info);
        if (p)
                p->owner = THIS_MODULE;

        proc_net_fops_create("rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
#endif
}

void ip6_route_cleanup(void)
{
#ifdef CONFIG_PROC_FS
        proc_net_remove("ipv6_route");
        proc_net_remove("rt6_stats");
#endif
        rt6_ifdown(NULL);
        fib6_gc_cleanup();
        kmem_cache_destroy(ip6_dst_ops.kmem_cachep);
}
);