/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/ip_fib.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#define RT_FL_TOS(oldflp) \
    ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)
static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int ip_rt_secret_interval __read_mostly	= 10 * 60 * HZ;
static int rt_chain_length_max __read_mostly	= 20;
static void rt_worker_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
/*
 *	Interface to generic destination cache.
 */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static void		 ipv4_dst_ifdown(struct dst_entry *dst,
					 struct net_device *dev, int how);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);
static void rt_emergency_hash_rebuild(struct net *net);
static struct dst_ops ipv4_dst_ops = {
	.protocol =		cpu_to_be16(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
	.entries =		ATOMIC_INIT(0),
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
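/*
 * Added illustrative note (not in the original file): the table above is
 * indexed by the four IPv4 TOS bits, as in the rt_tos2priority() helper
 * from <net/route.h>:
 *
 *	prio = ip_tos2prio[IPTOS_TOS(tos) >> 1];
 *
 * e.g. a TOS of 0x10 (IPTOS_LOWDELAY) indexes slot 8 and maps to
 * TC_PRIO_INTERACTIVE, while the ECN_OR_COST() entries cover the odd
 * slots where the low "cost"/ECN bit is set.
 */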
/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    provided dst->lock to protect the refcnt.
 */
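/*
 * Reader-side sketch (added for illustration; it mirrors the lookup loops
 * later in this file, e.g. ip_route_input()):
 *
 *	rcu_read_lock();
 *	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
 *	     rth = rcu_dereference(rth->u.dst.rt_next)) {
 *		if (keys match && !rt_is_expired(rth)) {
 *			dst_use(&rth->u.dst, jiffies);
 *			break;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * Writers take the per-bucket lock (rt_hash_lock_addr()) before unlinking
 * and defer the actual free to an RCU grace period via rt_free().
 */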
struct rt_hash_bucket {
	struct rtable	*chain;
};
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
 * The size of this table is a power of two and depends on the number of CPUS.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]

static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
				GFP_KERNEL);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif
static struct rt_hash_bucket	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log __read_mostly;
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) \
	(__raw_get_cpu_var(rt_cache_stat).field++)
static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)(__be32)(daddr),
			    (__force u32)(__be32)(saddr),
			    idx, genid)
		& rt_hash_mask;
}
static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}
#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};
static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rt_hash_table[st->bucket].chain)
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference(r->u.dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}
static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = r->u.dst.rt_next;
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rt_hash_table[st->bucket].chain);
		rcu_read_lock_bh();
		r = rt_hash_table[st->bucket].chain;
	}
	return rcu_dereference(r);
}
static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->u.dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}
static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;

	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}
static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}
static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
			   "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			   r->u.dst.dev ? r->u.dst.dev->name : "*",
			   (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
			   r->rt_flags, atomic_read(&r->u.dst.__refcnt),
			   r->u.dst.__use, 0, (unsigned long)r->rt_src,
			   (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
			    (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
			   dst_metric(&r->u.dst, RTAX_WINDOW),
			   (int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
				 dst_metric(&r->u.dst, RTAX_RTTVAR)),
			   r->fl.fl4_tos,
			   r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
			   r->u.dst.hh ? (r->u.dst.hh->hh_output ==
					  dev_queue_xmit) : 0,
			   r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}
static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			    sizeof(struct rt_cache_iter_state));
}
static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}
static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   atomic_read(&ipv4_dst_ops.entries),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,
		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}
static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};
static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}
static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#ifdef CONFIG_NET_CLS_ROUTE
static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
			   int length, int *eof, void *data)
{
	unsigned int i;

	if ((offset & 3) || (length & 3))
		return -EIO;

	if (offset >= sizeof(struct ip_rt_acct) * 256) {
		*eof = 1;
		return 0;
	}

	if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
		length = sizeof(struct ip_rt_acct) * 256 - offset;
		*eof = 1;
	}

	offset /= sizeof(u32);

	if (length > 0) {
		u32 *dst = (u32 *) buffer;

		*start = buffer;
		memset(dst, 0, length);

		for_each_possible_cpu(i) {
			unsigned int j;
			u32 *src;

			src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
			for (j = 0; j < length/4; j++)
				dst[j] += src[j];
		}
	}
	return length;
}
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
				   &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_NET_CLS_ROUTE
	pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
				     ip_rt_acct_read, NULL);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_NET_CLS_ROUTE
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
	remove_proc_entry("rt_acct", net->proc_net);
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};
static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}
static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rth->fl.iif && rth->u.dst.rt_next;
}
static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		rth->u.dst.expires;
}
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->u.dst.__refcnt))
		goto out;

	ret = 1;
	if (rth->u.dst.expires &&
	    time_after_eq(jiffies, rth->u.dst.expires))
		goto out;

	age = jiffies - rth->u.dst.lastuse;
	ret = 0;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}
/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->u.dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (!rt->fl.iif ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}
static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}
static inline bool compare_hash_inputs(const struct flowi *fl1,
				       const struct flowi *fl2)
{
	return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
		(fl1->iif ^ fl2->iif)) == 0);
}
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
		(fl1->mark ^ fl2->mark) |
		(*(u16 *)&fl1->nl_u.ip4_u.tos ^
		 *(u16 *)&fl2->nl_u.ip4_u.tos) |
		(fl1->oif ^ fl2->oif) |
		(fl1->iif ^ fl2->iif)) == 0;
}
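/*
 * Added note (not in the original file): both helpers above fold the whole
 * comparison into a single OR of XORed fields, e.g.
 *
 *	((a1 ^ a2) | (b1 ^ b2) | (c1 ^ c2)) == 0
 *
 * which is zero iff every field matches, with no conditional branches.
 * compare_hash_inputs() checks only the fields that feed rt_hash(), while
 * compare_keys() checks the full flow key (mark, TOS, oif and iif as well).
 */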
static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
}
static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
}
/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to be rescheduled if necessary.
 */
static void rt_do_flush(int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;
	struct rtable * tail;

	for (i = 0; i <= rt_hash_mask; i++) {
		if (process_context && need_resched())
			cond_resched();
		rth = rt_hash_table[i].chain;
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));
#ifdef CONFIG_NET_NS
		{
		struct rtable ** prev, * p;

		rth = rt_hash_table[i].chain;

		/* defer releasing the head of the list after spin_unlock */
		for (tail = rth; tail; tail = tail->u.dst.rt_next)
			if (!rt_is_expired(tail))
				break;
		if (rth != tail)
			rt_hash_table[i].chain = tail;

		/* call rt_free on entries after the tail requiring flush */
		prev = &rt_hash_table[i].chain;
		for (p = *prev; p; p = next) {
			next = p->u.dst.rt_next;
			if (!rt_is_expired(p)) {
				prev = &p->u.dst.rt_next;
			} else {
				*prev = next;
				rt_free(p);
			}
		}
		}
#else
		rth = rt_hash_table[i].chain;
		rt_hash_table[i].chain = NULL;
		tail = NULL;
#endif
		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; rth != tail; rth = next) {
			next = rth->u.dst.rt_next;
			rt_free(rth);
		}
	}
}
/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This to have an estimation of rt_chain_length_max
 *  rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for fractional part, and 29 (or 61) for magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)
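/*
 * Added worked example: with FRACT_BITS = 3 a chain of length 5 contributes
 * 5 * ONE = 40 to "sum" and 1600 to "sum2" in rt_check_expire() below. If a
 * scan ended with avg = 40 and sd = 8 (both in these fixed-point units),
 * the new bound would be
 *
 *	rt_chain_length_max = max(ip_rt_gc_elasticity, (40 + 4*8) >> 3) = 9;
 */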
static void rt_check_expire(void)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth, *aux, **rthp;
	unsigned long samples = 0;
	unsigned long sum = 0, sum2 = 0;
	u64 mult;

	mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask)
		goal = rt_hash_mask + 1;
	for (; goal > 0; goal--) {
		unsigned long tmo = ip_rt_gc_timeout;
		unsigned long length;

		i = (i + 1) & rt_hash_mask;
		rthp = &rt_hash_table[i].chain;

		if (need_resched())
			cond_resched();

		samples++;

		if (*rthp == NULL)
			continue;
		length = 0;
		spin_lock_bh(rt_hash_lock_addr(i));
		while ((rth = *rthp) != NULL) {
			prefetch(rth->u.dst.rt_next);
			if (rt_is_expired(rth)) {
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
				continue;
			}
			if (rth->u.dst.expires) {
				/* Entry is expired even if it is in use */
				if (time_before_eq(jiffies, rth->u.dst.expires)) {
nofree:
					tmo >>= 1;
					rthp = &rth->u.dst.rt_next;
					/*
					 * We only count entries on
					 * a chain with equal hash inputs once
					 * so that entries for different QOS
					 * levels, and other non-hash input
					 * attributes don't unfairly skew
					 * the length computation
					 */
					for (aux = rt_hash_table[i].chain;;) {
						if (aux == rth) {
							length += ONE;
							break;
						}
						if (compare_hash_inputs(&aux->fl, &rth->fl))
							break;
						aux = aux->u.dst.rt_next;
					}
					continue;
				}
			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
				goto nofree;

			/* Cleanup aged off entries. */
			*rthp = rth->u.dst.rt_next;
			rt_free(rth);
		}
		spin_unlock_bh(rt_hash_lock_addr(i));
		sum += length;
		sum2 += length*length;
	}
	if (samples) {
		unsigned long avg = sum / samples;
		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
		rt_chain_length_max = max_t(unsigned long,
					    ip_rt_gc_elasticity,
					    (avg + 4*sd) >> FRACT_BITS);
	}
	rover = i;
}
/*
 * rt_worker_func() is run in process context.
 * we call rt_check_expire() to scan part of the hash table
 */
static void rt_worker_func(struct work_struct *work)
{
	rt_check_expire();
	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}
/*
 * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without giving recent rt_genid.
 * Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(!in_softirq());
}
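/*
 * Added note: invalidation is lazy. Entries whose rt_genid no longer
 * matches the per-namespace generation make rt_is_expired() return true
 * and are reaped as readers and writers walk the chains, e.g.:
 *
 *	if (rt_is_expired(rth)) {
 *		*rthp = rth->u.dst.rt_next;
 *		rt_free(rth);
 *		continue;
 *	}
 */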
/*
 * We change rt_genid and let gc do the cleanup
 */
static void rt_secret_rebuild(unsigned long __net)
{
	struct net *net = (struct net *)__net;
	rt_cache_invalidate(net);
	mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
}
static void rt_secret_rebuild_oneshot(struct net *net)
{
	del_timer_sync(&net->ipv4.rt_secret_timer);
	rt_cache_invalidate(net);
	if (ip_rt_secret_interval) {
		net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval;
		add_timer(&net->ipv4.rt_secret_timer);
	}
}
static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit()) {
		printk(KERN_WARNING "Route hash chain too long!\n");
		printk(KERN_WARNING "Adjust your secret_interval!\n");
	}

	rt_secret_rebuild_oneshot(net);
}
/*
   Short description of GC goals.

   We want to build algorithm, which will keep routing cache
   at some equilibrium point, when number of aged off entries
   is kept approximately equal to newly generated ones.

   Current expiration strength is variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle expires is large enough to keep enough of warm entries,
   and when load increases it reduces to limit cache size.
 */
static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth, **rthp;
	unsigned long now = jiffies;
	int goal;

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	/* Calculate number of entries, which we want to expire now. */
	goal = atomic_read(&ipv4_dst_ops.entries) -
		(ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		}
	} else {
		/* We are in dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = *rthp) != NULL) {
				if (!rt_is_expired(rth) &&
				    !rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->u.dst.rt_next;
					continue;
				}
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop process if:

		   - if expire reduced to zero. Otherwise, expire is halved.
		   - if table is not full.
		   - if we are called from interrupt.
		   - jiffies check is just fallback/debug loop breaker.
		     We will not spin here for long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
		       atomic_read(&ipv4_dst_ops.entries), goal, i);
#endif

		if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
	       atomic_read(&ipv4_dst_ops.entries), goal, rover);
#endif
out:	return 0;
}
static int rt_intern_hash(unsigned hash, struct rtable *rt,
			  struct rtable **rp, struct sk_buff *skb)
{
	struct rtable	*rth, **rthp;
	unsigned long	now;
	struct rtable *cand, **candp;
	u32		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->u.dst.dev))) {
		rt_drop(rt);
		return 0;
	}

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = *rthp) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->u.dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->u.dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->u.dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->u.dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			if (rp)
				*rp = rth;
			else
				skb_dst_set(skb, &rth->u.dst);
			return 0;
		}

		if (!atomic_read(&rth->u.dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->u.dst.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->u.dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max) {
			struct net *net = dev_net(rt->u.dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(dev_net(rt->u.dst.dev))) {
				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
				       rt->u.dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev));
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
		int err = arp_bind_neighbour(&rt->u.dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return err;
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "Neighbour table overflow.\n");
			rt_drop(rt);
			return -ENOBUFS;
		}
	}

	rt->u.dst.rt_next = rt_hash_table[hash].chain;

#if RT_CACHE_DEBUG >= 2
	if (rt->u.dst.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst);
		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
			printk(" . %pI4", &trt->rt_dst);
		printk("\n");
	}
#endif
	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUS.
	 */
	rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));
	if (rp)
		*rp = rt;
	else
		skb_dst_set(skb, &rt->u.dst);
	return 0;
}
void rt_bind_peer(struct rtable *rt, int create)
{
	static DEFINE_SPINLOCK(rt_peer_lock);
	struct inet_peer *peer;

	peer = inet_getpeer(rt->rt_dst, create);

	spin_lock_bh(&rt_peer_lock);
	if (rt->peer == NULL) {
		rt->peer = peer;
		peer = NULL;
	}
	spin_unlock_bh(&rt_peer_lock);
	if (peer)
		inet_putpeer(peer);
}
/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chances to
 * select ID being unique in a reasonable period of time.
 * But broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}
void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If peer is attached to destination, it is never detached,
		   so that we need not to grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable **rthp, *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = *rthp) != NULL) {
		if (aux == rt || rt_is_expired(aux)) {
			*rthp = aux->u.dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->u.dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	int i, k;
	struct in_device *in_dev = in_dev_get(dev);
	struct rtable *rth, **rthp;
	__be32  skeys[2] = { saddr, 0 };
	int  ikeys[2] = { dev->ifindex, 0 };
	struct netevent_redirect netevent;
	struct net *net;

	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
	    || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
	    || ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!rt_caching(net))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	for (i = 0; i < 2; i++) {
		for (k = 0; k < 2; k++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
						rt_genid(net));

			rthp=&rt_hash_table[hash].chain;

			rcu_read_lock();
			while ((rth = rcu_dereference(*rthp)) != NULL) {
				struct rtable *rt;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->fl.oif != ikeys[k] ||
				    rth->fl.iif != 0 ||
				    rt_is_expired(rth) ||
				    !net_eq(dev_net(rth->u.dst.dev), net)) {
					rthp = &rth->u.dst.rt_next;
					continue;
				}

				if (rth->rt_dst != daddr ||
				    rth->rt_src != saddr ||
				    rth->u.dst.error ||
				    rth->rt_gateway != old_gw ||
				    rth->u.dst.dev != dev)
					break;

				dst_hold(&rth->u.dst);
				rcu_read_unlock();

				rt = dst_alloc(&ipv4_dst_ops);
				if (rt == NULL) {
					ip_rt_put(rth);
					in_dev_put(in_dev);
					return;
				}

				/* Copy all the information. */
				*rt = *rth;
				rt->u.dst.__use		= 1;
				atomic_set(&rt->u.dst.__refcnt, 1);
				rt->u.dst.child		= NULL;
				if (rt->u.dst.dev)
					dev_hold(rt->u.dst.dev);
				if (rt->idev)
					in_dev_hold(rt->idev);
				rt->u.dst.obsolete	= 0;
				rt->u.dst.lastuse	= jiffies;
				rt->u.dst.path		= &rt->u.dst;
				rt->u.dst.neighbour	= NULL;
				rt->u.dst.hh		= NULL;
#ifdef CONFIG_XFRM
				rt->u.dst.xfrm		= NULL;
#endif
				rt->rt_genid		= rt_genid(net);
				rt->rt_flags		|= RTCF_REDIRECTED;

				/* Gateway is different ... */
				rt->rt_gateway		= new_gw;

				/* Redirect received -> path was valid */
				dst_confirm(&rth->u.dst);

				if (rt->peer)
					atomic_inc(&rt->peer->refcnt);

				if (arp_bind_neighbour(&rt->u.dst) ||
				    !(rt->u.dst.neighbour->nud_state &
					    NUD_VALID)) {
					if (rt->u.dst.neighbour)
						neigh_event_send(rt->u.dst.neighbour, NULL);
					ip_rt_put(rth);
					rt_drop(rt);
					goto do_next;
				}

				netevent.old = &rth->u.dst;
				netevent.new = &rt->u.dst;
				call_netevent_notifiers(NETEVENT_REDIRECT,
							&netevent);

				rt_del(hash, rth);
				if (!rt_intern_hash(hash, rt, &rt, NULL))
					ip_rt_put(rt);
				goto do_next;
			}
			rcu_read_unlock();
		do_next:
			;
		}
	}
	in_dev_put(in_dev);
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
		       "  Advised path = %pI4 -> %pI4\n",
		       &old_gw, dev->name, &new_gw,
		       &saddr, &daddr);
#endif
	in_dev_put(in_dev);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->u.dst.expires) {
			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
						rt->fl.oif,
						rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
			       &rt->rt_dst, rt->fl.fl4_tos);
#endif
			rt_del(hash, rt);
			ret = NULL;
		}
	}
	return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev = in_dev_get(rt->u.dst.dev);

	if (!in_dev)
		return;

	if (!IN_DEV_TX_REDIRECTS(in_dev))
		goto out;

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
		rt->u.dst.rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set u.dst.rate_last to the last seen redirected packet.
	 */
	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
		rt->u.dst.rate_last = jiffies;
		goto out;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (rt->u.dst.rate_tokens == 0 ||
	    time_after(jiffies,
		       (rt->u.dst.rate_last +
			(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		rt->u.dst.rate_last = jiffies;
		++rt->u.dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (IN_DEV_LOG_MARTIANS(in_dev) &&
		    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
			       &rt->rt_src, rt->rt_iif,
			       &rt->rt_dst, &rt->rt_gateway);
#endif
	}
out:
	in_dev_put(in_dev);
}
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	unsigned long now;
	int code;

	switch (rt->u.dst.error) {
		case EINVAL:
		default:
			goto out;
		case EHOSTUNREACH:
			code = ICMP_HOST_UNREACH;
			break;
		case ENETUNREACH:
			code = ICMP_NET_UNREACH;
			IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
					IPSTATS_MIB_INNOROUTES);
			break;
		case EACCES:
			code = ICMP_PKT_FILTERED;
			break;
	}

	now = jiffies;
	rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
	if (rt->u.dst.rate_tokens > ip_rt_error_burst)
		rt->u.dst.rate_tokens = ip_rt_error_burst;
	rt->u.dst.rate_last = now;
	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
		rt->u.dst.rate_tokens -= ip_rt_error_cost;
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
	}

out:	kfree_skb(skb);
	return 0;
}
/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static inline unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}
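/*
 * Added example: guess_mtu() returns the first plateau strictly below the
 * old MTU, so old_mtu = 1500 yields 1492 and old_mtu = 600 yields 576;
 * values at or below the last plateau fall through to the 68-byte
 * IPv4 minimum.
 */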
unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
				 unsigned short new_mtu,
				 struct net_device *dev)
{
	int i, k;
	unsigned short old_mtu = ntohs(iph->tot_len);
	struct rtable *rth;
	int  ikeys[2] = { dev->ifindex, 0 };
	__be32  skeys[2] = { iph->saddr, 0, };
	__be32  daddr = iph->daddr;
	unsigned short est_mtu = 0;

	if (ipv4_config.no_pmtu_disc)
		return 0;

	for (k = 0; k < 2; k++) {
		for (i = 0; i < 2; i++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
						rt_genid(net));

			rcu_read_lock();
			for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
			     rth = rcu_dereference(rth->u.dst.rt_next)) {
				unsigned short mtu = new_mtu;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->rt_dst != daddr ||
				    rth->rt_src != iph->saddr ||
				    rth->fl.oif != ikeys[k] ||
				    rth->fl.iif != 0 ||
				    dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
				    !net_eq(dev_net(rth->u.dst.dev), net) ||
				    rt_is_expired(rth))
					continue;

				if (new_mtu < 68 || new_mtu >= old_mtu) {

					/* BSD 4.2 compatibility hack :-( */
					if (mtu == 0 &&
					    old_mtu >= dst_mtu(&rth->u.dst) &&
					    old_mtu >= 68 + (iph->ihl << 2))
						old_mtu -= iph->ihl << 2;

					mtu = guess_mtu(old_mtu);
				}
				if (mtu <= dst_mtu(&rth->u.dst)) {
					if (mtu < dst_mtu(&rth->u.dst)) {
						dst_confirm(&rth->u.dst);
						if (mtu < ip_rt_min_pmtu) {
							mtu = ip_rt_min_pmtu;
							rth->u.dst.metrics[RTAX_LOCK-1] |=
								(1 << RTAX_MTU);
						}
						rth->u.dst.metrics[RTAX_MTU-1] = mtu;
						dst_set_expires(&rth->u.dst,
								ip_rt_mtu_expires);
					}
					est_mtu = mtu;
				}
			}
			rcu_read_unlock();
		}
	}
	return est_mtu ? : new_mtu;
}
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	if (dst_mtu(dst) > mtu && mtu >= 68 &&
	    !(dst_metric_locked(dst, RTAX_MTU))) {
		if (mtu < ip_rt_min_pmtu) {
			mtu = ip_rt_min_pmtu;
			dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
		}
		dst->metrics[RTAX_MTU-1] = mtu;
		dst_set_expires(dst, ip_rt_mtu_expires);
		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
	}
}
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;
	struct in_device *idev = rt->idev;

	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}

	if (idev) {
		rt->idev = NULL;
		in_dev_put(idev);
	}
}
static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
	struct rtable *rt = (struct rtable *) dst;
	struct in_device *idev = rt->idev;
	if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
		struct in_device *loopback_idev =
			in_dev_get(dev_net(dev)->loopback_dev);
		if (loopback_idev) {
			rt->idev = loopback_idev;
			in_dev_put(idev);
		}
	}
}
*skb
)
1706 icmp_send(skb
, ICMP_DEST_UNREACH
, ICMP_HOST_UNREACH
, 0);
1708 rt
= skb_rtable(skb
);
1710 dst_set_expires(&rt
->u
.dst
, 0);
static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
	       &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
	       skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}
/*
   We do not cache source address of outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it is out of the fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	__be32 src;
	struct fib_result res;

	if (rt->fl.iif == 0)
		src = rt->rt_src;
	else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
		src = FIB_RES_PREFSRC(res);
		fib_res_put(&res);
	} else
		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
				       RT_SCOPE_UNIVERSE);
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->u.dst.tclassid & 0xFFFF))
		rt->u.dst.tclassid |= tag & 0xFFFF;
	if (!(rt->u.dst.tclassid & 0xFFFF0000))
		rt->u.dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
	struct fib_info *fi = res->fi;

	if (fi) {
		if (FIB_RES_GW(*res) &&
		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		memcpy(rt->u.dst.metrics, fi->fib_metrics,
		       sizeof(rt->u.dst.metrics));
		if (fi->fib_mtu == 0) {
			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
			if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
			    rt->rt_gateway != rt->rt_dst &&
			    rt->u.dst.dev->mtu > 576)
				rt->u.dst.metrics[RTAX_MTU-1] = 576;
		}
#ifdef CONFIG_NET_CLS_ROUTE
		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	} else
		rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;

	if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
	if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
							 ip_rt_min_advmss);
	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;

#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
	rt->rt_type = res->type;
}
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	unsigned hash;
	struct rtable *rth;
	__be32 spec_dst;
	struct in_device *in_dev = in_dev_get(dev);
	u32 itag = 0;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	} else if (fib_validate_source(saddr, 0, tos, 0,
				       dev, &spec_dst, &itag) < 0)
		goto e_inval;

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->u.dst.output = ip_rt_bug;

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->u.dst.tclassid = itag;
#endif
	rth->rt_iif	=
	rth->fl.iif	= dev->ifindex;
	rth->u.dst.dev	= init_net.loopback_dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->fl.oif	= 0;
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst= spec_dst;
	rth->rt_genid	= rt_genid(dev_net(dev));
	rth->rt_flags	= RTCF_MULTICAST;
	rth->rt_type	= RTN_MULTICAST;
	if (our) {
		rth->u.dst.input= ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->u.dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	in_dev_put(in_dev);
	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
	return rt_intern_hash(hash, rth, NULL, skb);

e_nobufs:
	in_dev_put(in_dev);
	return -ENOBUFS;

e_inval:
	in_dev_put(in_dev);
	return -EINVAL;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
		       &daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			int i;
			const unsigned char *p = skb_mac_header(skb);
			printk(KERN_WARNING "ll header: ");
			for (i = 0; i < dev->hard_header_len; i++, p++) {
				printk("%02x", *p);
				if (i < (dev->hard_header_len - 1))
					printk(":");
			}
			printk("\n");
		}
	}
#endif
}
static int __mkroute_input(struct sk_buff *skb,
			   struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos,
			   struct rtable **result)
{
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned flags = 0;
	__be32 spec_dst;
	u32 itag;

	/* get a working reference to the output device */
	out_dev = in_dev_get(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		if (net_ratelimit())
			printk(KERN_CRIT "Bug in ip_route_input" \
			       "_slow(). Please, report\n");
		return -EINVAL;
	}

	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, &spec_dst, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		err = -EINVAL;
		goto cleanup;
	}

	if (err)
		flags |= RTCF_DIRECTSRC;

	if (out_dev == in_dev && err &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		flags |= RTCF_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 */
		if (out_dev == in_dev) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;
	if (IN_DEV_CONF_GET(out_dev, NOXFRM))
		rth->u.dst.flags |= DST_NOXFRM;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
	rth->rt_gateway	= daddr;
	rth->rt_iif	=
	rth->fl.iif	= in_dev->dev->ifindex;
	rth->u.dst.dev	= (out_dev)->dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->fl.oif	= 0;
	rth->rt_spec_dst= spec_dst;

	rth->u.dst.input = ip_forward;
	rth->u.dst.output = ip_output;
	rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));

	rt_set_nexthop(rth, res, itag);

	rth->rt_flags = flags;

	*result = rth;
	err = 0;
 cleanup:
	/* release the working reference to the output device */
	in_dev_put(out_dev);
	return err;
}
*skb
,
2007 struct fib_result
*res
,
2008 const struct flowi
*fl
,
2009 struct in_device
*in_dev
,
2010 __be32 daddr
, __be32 saddr
, u32 tos
)
2012 struct rtable
* rth
= NULL
;
2016 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2017 if (res
->fi
&& res
->fi
->fib_nhs
> 1 && fl
->oif
== 0)
2018 fib_select_multipath(fl
, res
);
2021 /* create a routing cache entry */
2022 err
= __mkroute_input(skb
, res
, in_dev
, daddr
, saddr
, tos
, &rth
);
2026 /* put it into the cache */
2027 hash
= rt_hash(daddr
, saddr
, fl
->iif
,
2028 rt_genid(dev_net(rth
->u
.dst
.dev
)));
2029 return rt_intern_hash(hash
, rth
, NULL
, skb
);
/*
 *	NOTE. We drop all the packets that have local source
 *	addresses, because every properly looped back packet
 *	must have correct destination already attached by output routine.
 *
 *	Such approach solves two big problems:
 *	1. Not simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with 100% of guarantee.
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = in_dev_get(dev);
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = daddr,
					.saddr = saddr,
					.tos = tos,
					.scope = RT_SCOPE_UNIVERSE,
				      } },
			    .mark = skb->mark,
			    .iif = dev->ifindex };
	unsigned	flags = 0;
	u32		itag = 0;
	struct rtable * rth;
	unsigned	hash;
	__be32		spec_dst;
	int		err = -EINVAL;
	int		free_res = 0;
	struct net    * net = dev_net(dev);

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	   by fib_lookup.
	 */

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr))
		goto martian_source;

	if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I even do not know to fix it or not. Waiting for complains :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_lbcast(daddr) || ipv4_is_zeronet(daddr) ||
	    ipv4_is_loopback(daddr))
		goto martian_destination;

	/*
	 *	Now we are ready to route packet.
	 */
	if ((err = fib_lookup(net, &fl, &res)) != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			goto e_hostunreach;
		goto no_route;
	}
	free_res = 1;

	RT_CACHE_STAT_INC(in_slow_tot);

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		int result;
		result = fib_validate_source(saddr, daddr, tos,
					     net->loopback_dev->ifindex,
					     dev, &spec_dst, &itag);
		if (result < 0)
			goto martian_source;
		if (result)
			flags |= RTCF_DIRECTSRC;
		spec_dst = daddr;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev))
		goto e_hostunreach;
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
done:
	in_dev_put(in_dev);
	if (free_res)
		fib_res_put(&res);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr))
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	else {
		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
					  &itag);
		if (err < 0)
			goto martian_source;
		if (err)
			flags |= RTCF_DIRECTSRC;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->u.dst.output= ip_rt_bug;
	rth->rt_genid = rt_genid(net);

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->u.dst.tclassid = itag;
#endif
	rth->rt_iif	=
	rth->fl.iif	= dev->ifindex;
	rth->u.dst.dev	= net->loopback_dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst= spec_dst;
	rth->u.dst.input= ip_local_deliver;
	rth->rt_flags	= flags|RTCF_LOCAL;
	if (res.type == RTN_UNREACHABLE) {
		rth->u.dst.input= ip_error;
		rth->u.dst.error= -err;
		rth->rt_flags	&= ~RTCF_LOCAL;
	}
	rth->rt_type	= res.type;
	hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
	err = rt_intern_hash(hash, rth, NULL, skb);
	goto done;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
	res.type = RTN_UNREACHABLE;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
		       &daddr, &saddr, dev->name);
#endif

e_hostunreach:
	err = -EHOSTUNREACH;
	goto done;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto e_inval;
}
int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		   u8 tos, struct net_device *dev)
{
	struct rtable * rth;
	unsigned	hash;
	int iif = dev->ifindex;
	struct net *net;

	net = dev_net(dev);

	if (!rt_caching(net))
		goto skip_cache;

	tos &= IPTOS_RT_MASK;
	hash = rt_hash(daddr, saddr, iif, rt_genid(net));

	rcu_read_lock();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (((rth->fl.fl4_dst ^ daddr) |
		     (rth->fl.fl4_src ^ saddr) |
		     (rth->fl.iif ^ iif) |
		     rth->fl.oif |
		     (rth->fl.fl4_tos ^ tos)) == 0 &&
		    rth->fl.mark == skb->mark &&
		    net_eq(dev_net(rth->u.dst.dev), net) &&
		    !rt_is_expired(rth)) {
			dst_use(&rth->u.dst, jiffies);
			RT_CACHE_STAT_INC(in_hit);
			rcu_read_unlock();
			skb_dst_set(skb, &rth->u.dst);
			return 0;
		}
		RT_CACHE_STAT_INC(in_hlist_search);
	}
	rcu_read_unlock();

skip_cache:
	/* Multicast recognition logic is moved from route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As result the host on multicasting
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all the world. Now we try to get rid of them.
	   Really, provided software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   comparing with route cache reject entries.
	   Note, that multicast routers are not affected, because
	   route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev;

		rcu_read_lock();
		if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
			int our = ip_check_mc(in_dev, daddr, saddr,
					      ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
			    || (!ipv4_is_local_multicast(daddr) &&
				IN_DEV_MFORWARD(in_dev))
#endif
			    ) {
				rcu_read_unlock();
				return ip_route_input_mc(skb, daddr, saddr,
							 tos, dev, our);
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	return ip_route_input_slow(skb, daddr, saddr, tos, dev);
}
static int __mkroute_output(struct rtable **result,
			    struct fib_result *res,
			    const struct flowi *fl,
			    const struct flowi *oldflp,
			    struct net_device *dev_out,
			    unsigned flags)
{
	struct rtable *rth;
	struct in_device *in_dev;
	u32 tos = RT_FL_TOS(oldflp);
	int err = 0;

	if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
		return -EINVAL;

	if (fl->fl4_dst == htonl(0xFFFFFFFF))
		res->type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl->fl4_dst))
		res->type = RTN_MULTICAST;
	else if (ipv4_is_lbcast(fl->fl4_dst) || ipv4_is_zeronet(fl->fl4_dst))
		return -EINVAL;

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	/* get work reference to inet device */
	in_dev = in_dev_get(dev_out);
	if (!in_dev)
		return -EINVAL;

	if (res->type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		if (res->fi) {
			fib_info_put(res->fi);
			res->fi = NULL;
		}
	} else if (res->type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST|RTCF_LOCAL;
		if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
				 oldflp->proto))
			flags &= ~RTCF_LOCAL;
		/* If multicast route do not exist use
		   default one, but do not gateway in this case.
		   Yes, it is hack.
		 */
		if (res->fi && res->prefixlen < 4) {
			fib_info_put(res->fi);
			res->fi = NULL;
		}
	}

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOXFRM))
		rth->u.dst.flags |= DST_NOXFRM;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;

	rth->fl.fl4_dst	= oldflp->fl4_dst;
	rth->fl.fl4_tos	= tos;
	rth->fl.fl4_src	= oldflp->fl4_src;
	rth->fl.oif	= oldflp->oif;
	rth->fl.mark    = oldflp->mark;
	rth->rt_dst	= fl->fl4_dst;
	rth->rt_src	= fl->fl4_src;
	rth->rt_iif	= oldflp->oif ? : dev_out->ifindex;
	/* get references to the devices that are to be hold by the routing
	   cache entry */
	rth->u.dst.dev	= dev_out;
	dev_hold(dev_out);
	rth->idev	= in_dev_get(dev_out);
	rth->rt_gateway = fl->fl4_dst;
	rth->rt_spec_dst= fl->fl4_src;

	rth->u.dst.output=ip_output;
	rth->rt_genid = rt_genid(dev_net(dev_out));

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL) {
		rth->u.dst.input = ip_local_deliver;
		rth->rt_spec_dst = fl->fl4_dst;
	}
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		rth->rt_spec_dst = fl->fl4_src;
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->u.dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (res->type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(oldflp->fl4_dst)) {
				rth->u.dst.input = ip_mr_input;
				rth->u.dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, res, 0);

	rth->rt_flags = flags;

	*result = rth;
 cleanup:
	/* release work reference to inet device */
	in_dev_put(in_dev);
	return err;
}
static int ip_mkroute_output(struct rtable **rp,
			     struct fib_result *res,
			     const struct flowi *fl,
			     const struct flowi *oldflp,
			     struct net_device *dev_out,
			     unsigned flags)
{
	struct rtable *rth = NULL;
	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
	unsigned hash;
	if (err == 0) {
		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
			       rt_genid(dev_net(dev_out)));
		err = rt_intern_hash(hash, rth, rp, NULL);
	}

	return err;
}
/*
 * Major route resolver routine.
 */

static int ip_route_output_slow(struct net *net, struct rtable **rp,
				const struct flowi *oldflp)
{
	u32 tos	= RT_FL_TOS(oldflp);
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = oldflp->fl4_dst,
					.saddr = oldflp->fl4_src,
					.tos = tos & IPTOS_RT_MASK,
					.scope = ((tos & RTO_ONLINK) ?
						  RT_SCOPE_LINK :
						  RT_SCOPE_UNIVERSE),
				      } },
			    .mark = oldflp->mark,
			    .iif = net->loopback_dev->ifindex,
			    .oif = oldflp->oif };
	struct fib_result res;
	unsigned flags = 0;
	struct net_device *dev_out = NULL;
	int free_res = 0;
	int err;

	res.fi		= NULL;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r		= NULL;
#endif

	if (oldflp->fl4_src) {
		err = -EINVAL;
		if (ipv4_is_multicast(oldflp->fl4_src) ||
		    ipv4_is_lbcast(oldflp->fl4_src) ||
		    ipv4_is_zeronet(oldflp->fl4_src))
			goto out;

		/* I removed the check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return the wrong iface,
		      if saddr is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (oldflp->oif == 0 &&
		    (ipv4_is_multicast(oldflp->fl4_dst) ||
		     oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = ip_dev_find(net, oldflp->fl4_src);
			if (dev_out == NULL)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind a socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are
			   broken, because we are not allowed to build a
			   multicast path with a loopback source addr (look,
			   the routing cache cannot know that ttl is zero,
			   so that the packet will not leave this host and
			   the route is valid).
			   Luckily, this hack is a good workaround.
			 */

			fl.oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = ip_dev_find(net, oldflp->fl4_src);
			if (dev_out == NULL)
				goto out;
			dev_put(dev_out);
			dev_out = NULL;
		}
	}

	if (oldflp->oif) {
		dev_out = dev_get_by_index(net, oldflp->oif);
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (__in_dev_get_rtnl(dev_out) == NULL) {
			dev_put(dev_out);
			goto out;	/* Wrong error code */
		}

		if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
		    oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
			if (!fl.fl4_src)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl.fl4_src) {
			if (ipv4_is_multicast(oldflp->fl4_dst))
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      fl.fl4_scope);
			else if (!oldflp->fl4_dst)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl.fl4_dst) {
		fl.fl4_dst = fl.fl4_src;
		if (!fl.fl4_dst)
			fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
		if (dev_out)
			dev_put(dev_out);
		dev_out = net->loopback_dev;
		dev_hold(dev_out);
		fl.oif = net->loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, &fl, &res)) {
		res.fi = NULL;
		if (oldflp->oif) {
			/* Apparently, routing tables are wrong. Assume
			   that the destination is on link.

			   WHY? DW.
			   Because we are allowed to send to an iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if the destination is gatewayed,
			   rather than direct. Moreover, if MSG_DONTROUTE
			   is set, we send the packet, ignoring both
			   routing tables and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl.fl4_src == 0)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		if (dev_out)
			dev_put(dev_out);
		err = -ENETUNREACH;
		goto out;
	}
	free_res = 1;

	if (res.type == RTN_LOCAL) {
		if (!fl.fl4_src)
			fl.fl4_src = fl.fl4_dst;
		if (dev_out)
			dev_put(dev_out);
		dev_out = net->loopback_dev;
		dev_hold(dev_out);
		fl.oif = dev_out->ifindex;
		if (res.fi)
			fib_info_put(res.fi);
		res.fi = NULL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		fib_select_multipath(&fl, &res);
	else
#endif
	if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
		fib_select_default(net, &fl, &res);

	if (!fl.fl4_src)
		fl.fl4_src = FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = FIB_RES_DEV(res);
	dev_hold(dev_out);
	fl.oif = dev_out->ifindex;

make_route:
	err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);

	if (free_res)
		fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:	return err;
}
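/*
 * __ip_route_output_key() is the fast path for output routes: probe a
 * single hash chain under rcu_read_lock_bh() and fall back to
 * ip_route_output_slow() on a miss. Illustrative caller (a sketch,
 * not code from this file; "dip" is only an example variable):
 *
 *	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dip } } };
 *	struct rtable *rt;
 *	int err = __ip_route_output_key(net, &rt, &fl);
 *	if (!err)
 *		ip_rt_put(rt);	release the reference when done
 */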
int __ip_route_output_key(struct net *net, struct rtable **rp,
			  const struct flowi *flp)
{
	unsigned hash;
	struct rtable *rth;

	if (!rt_caching(net))
		goto slow_output;

	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));

	rcu_read_lock_bh();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
		rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (rth->fl.fl4_dst == flp->fl4_dst &&
		    rth->fl.fl4_src == flp->fl4_src &&
		    rth->fl.iif == 0 &&
		    rth->fl.oif == flp->oif &&
		    rth->fl.mark == flp->mark &&
		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
		    net_eq(dev_net(rth->u.dst.dev), net) &&
		    !rt_is_expired(rth)) {
			dst_use(&rth->u.dst, jiffies);
			RT_CACHE_STAT_INC(out_hit);
			rcu_read_unlock_bh();
			*rp = rth;
			return 0;
		}
		RT_CACHE_STAT_INC(out_hlist_search);
	}
	rcu_read_unlock_bh();

slow_output:
	return ip_route_output_slow(net, rp, flp);
}

EXPORT_SYMBOL_GPL(__ip_route_output_key);
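/*
 * Blackhole routes: when __xfrm_lookup() returns -EREMOTE (an IPsec
 * state that is not yet resolved and that the caller will not wait
 * for), the resolved route is copied into a dst whose input/output
 * handlers silently discard packets, so the caller still holds a
 * valid but non-transmitting route.
 */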
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			=	AF_INET,
	.protocol		=	cpu_to_be16(ETH_P_IP),
	.destroy		=	ipv4_dst_destroy,
	.check			=	ipv4_dst_check,
	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
	.entries		=	ATOMIC_INIT(0),
};

static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
{
	struct rtable *ort = *rp;
	struct rtable *rt = (struct rtable *)
		dst_alloc(&ipv4_dst_blackhole_ops);

	if (rt) {
		struct dst_entry *new = &rt->u.dst;

		atomic_set(&new->__refcnt, 1);
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard;
		memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));

		new->dev = ort->u.dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->fl = ort->fl;

		rt->idev = ort->idev;
		if (rt->idev)
			in_dev_hold(rt->idev);
		rt->rt_genid = rt_genid(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_dst = ort->rt_dst;
		rt->rt_src = ort->rt_src;
		rt->rt_iif = ort->rt_iif;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_spec_dst = ort->rt_spec_dst;
		rt->peer = ort->peer;
		if (rt->peer)
			atomic_inc(&rt->peer->refcnt);

		dst_free(new);
	}

	dst_release(&(*rp)->u.dst);
	*rp = rt;
	return (rt ? 0 : -ENOMEM);
}
int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
			 struct sock *sk, int flags)
{
	int err;

	if ((err = __ip_route_output_key(net, rp, flp)) != 0)
		return err;

	if (flp->proto) {
		if (!flp->fl4_src)
			flp->fl4_src = (*rp)->rt_src;
		if (!flp->fl4_dst)
			flp->fl4_dst = (*rp)->rt_dst;
		err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
				    flags ? XFRM_LOOKUP_WAIT : 0);
		if (err == -EREMOTE)
			err = ipv4_dst_blackhole(net, rp, flp);

		return err;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(ip_route_output_flow);

int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
{
	return ip_route_output_flow(net, rp, flp, NULL, 0);
}
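/*
 * rt_fill_info() serializes one cached route as an RTM_NEWROUTE
 * netlink message: the rtmsg header, RTA_* attributes (dst, src, oif,
 * gateway, prefsrc, metrics) and the cache info (id, timestamps,
 * expiry, error).
 */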
static int rt_fill_info(struct net *net,
			struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= rt->fl.fl4_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);

	if (rt->fl.fl4_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
	}
	if (rt->u.dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
#ifdef CONFIG_NET_CLS_ROUTE
	if (rt->u.dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
#endif
	if (rt->fl.iif)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->fl.fl4_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto nla_put_failure;

	error = rt->u.dst.error;
	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
	if (rt->peer) {
		id = rt->peer->ip_id_count;
		if (rt->peer->tcp_ts_stamp) {
			ts = rt->peer->tcp_ts;
			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
		}
	}

	if (rt->fl.iif) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb, r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
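/*
 * inet_rtm_getroute() services RTM_GETROUTE (the handler behind
 * "ip route get"): fabricate a dummy skb, resolve the route either as
 * if the packet had arrived on the given input interface (RTA_IIF) or
 * as locally generated output, then answer via rt_fill_info() and
 * rtnl_unicast().
 */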
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->u.dst.error)
			err = -rt->u.dst.error;
	} else {
		struct flowi fl = {
			.nl_u = {
				.ip4_u = {
					.daddr = dst,
					.saddr = src,
					.tos = rtm->rtm_tos,
				},
			},
			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
		};
		err = ip_route_output_key(net, &rt, &fl);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->u.dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
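/*
 * ip_rt_dump() walks every cache bucket for a netlink dump; cb->args[0]
 * and cb->args[1] record the bucket and chain position so the dump can
 * resume where it stopped when the skb fills up.
 */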
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;
	struct net *net;

	net = sock_net(skb->sk);

	s_h = cb->args[0];
	if (s_h < 0)
		s_h = 0;
	s_idx = idx = cb->args[1];
	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
		if (!rt_hash_table[h].chain)
			continue;
		rcu_read_lock_bh();
		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
			if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
				continue;
			if (rt_is_expired(rt))
				continue;
			skb_dst_set(skb, dst_clone(&rt->u.dst));
			if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				skb_dst_drop(skb);
				rcu_read_unlock_bh();
				goto done;
			}
			skb_dst_drop(skb);
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev), 0);
}
#ifdef CONFIG_SYSCTL
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
					struct file *filp, void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	if (write) {
		int flush_delay;
		ctl_table ctl;
		struct net *net;

		memcpy(&ctl, __ctl, sizeof(ctl));
		ctl.data = &flush_delay;
		proc_dointvec(&ctl, write, filp, buffer, lenp, ppos);

		net = (struct net *)__ctl->extra1;
		rt_cache_flush(net, flush_delay);
		return 0;
	}

	return -EINVAL;
}

static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
						void __user *oldval,
						size_t __user *oldlenp,
						void __user *newval,
						size_t newlen)
{
	int delay;
	struct net *net;

	if (newlen != sizeof(int))
		return -EINVAL;
	if (get_user(delay, (int __user *)newval))
		return -EFAULT;
	net = (struct net *)table->extra1;
	rt_cache_flush(net, delay);
	return 0;
}
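/*
 * The cache hash is keyed with a secret that is renewed every
 * ip_rt_secret_interval jiffies to frustrate hash-collision attacks.
 * rt_secret_reschedule() re-arms every namespace's rebuild timer after
 * the interval has been changed through the sysctl handlers below.
 */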
static void rt_secret_reschedule(int old)
{
	struct net *net;
	int new = ip_rt_secret_interval;
	int diff = new - old;

	if (!diff)
		return;

	rtnl_lock();
	for_each_net(net) {
		int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);

		if (!new)
			continue;

		if (deleted) {
			long time = net->ipv4.rt_secret_timer.expires - jiffies;

			if (time <= 0 || (time += diff) <= 0)
				time = 0;

			net->ipv4.rt_secret_timer.expires = time;
		} else
			net->ipv4.rt_secret_timer.expires = new;

		net->ipv4.rt_secret_timer.expires += jiffies;
		add_timer(&net->ipv4.rt_secret_timer);
	}
	rtnl_unlock();
}

static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
					  struct file *filp,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int old = ip_rt_secret_interval;
	int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos);

	rt_secret_reschedule(old);

	return ret;
}

static int ipv4_sysctl_rt_secret_interval_strategy(ctl_table *table,
						   void __user *oldval,
						   size_t __user *oldlenp,
						   void __user *newval,
						   size_t newlen)
{
	int old = ip_rt_secret_interval;
	int ret = sysctl_jiffies(table, oldval, oldlenp, newval, newlen);

	rt_secret_reschedule(old);

	return ret;
}
static ctl_table ipv4_route_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MAX_SIZE,
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */

		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL,
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
		.strategy	= sysctl_ms_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_TIMEOUT,
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_INTERVAL,
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_LOAD,
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_NUMBER,
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_SILENCE,
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_COST,
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_BURST,
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_ELASTICITY,
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MTU_EXPIRES,
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
		.strategy	= sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_PMTU,
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_ADVMSS,
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_SECRET_INTERVAL,
		.procname	= "secret_interval",
		.data		= &ip_rt_secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= ipv4_sysctl_rt_secret_interval,
		.strategy	= ipv4_sysctl_rt_secret_interval_strategy,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table empty[1];

static struct ctl_table ipv4_skeleton[] =
{
	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE,
	  .mode = 0555, .child = ipv4_route_table},
	{ .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
	  .mode = 0555, .child = empty},
	{ }
};

static __net_initdata struct ctl_path ipv4_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
	{ },
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
		.strategy	= ipv4_sysctl_rtcache_flush_strategy,
	},
	{ .ctl_name = 0 },
};

static __net_initdata struct ctl_path ipv4_route_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
	{ },
};
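/*
 * Per-namespace sysctl registration: every non-initial namespace gets
 * its own copy of the flush table so that extra1 can point back at the
 * owning struct net; init_net registers the static table directly.
 */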
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (net != &init_net) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr =
		register_net_sysctl_table(net, ipv4_route_path, tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
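/*
 * Seed the per-namespace route generation id and arm the secret
 * rebuild timer; the num_physpages/jiffies mix is only a cheap
 * boot-time randomness source, not a cryptographic one.
 */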
static __net_init int rt_secret_timer_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid,
			(int) ((num_physpages ^ (num_physpages>>8)) ^
			(jiffies ^ (jiffies >> 7))));

	net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
	net->ipv4.rt_secret_timer.data = (unsigned long)net;
	init_timer_deferrable(&net->ipv4.rt_secret_timer);

	if (ip_rt_secret_interval) {
		net->ipv4.rt_secret_timer.expires =
			jiffies + net_random() % ip_rt_secret_interval +
			ip_rt_secret_interval;
		add_timer(&net->ipv4.rt_secret_timer);
	}
	return 0;
}

static __net_exit void rt_secret_timer_exit(struct net *net)
{
	del_timer_sync(&net->ipv4.rt_secret_timer);
}

static __net_initdata struct pernet_operations rt_secret_timer_ops = {
	.init = rt_secret_timer_init,
	.exit = rt_secret_timer_exit,
};
#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */

static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);
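/*
 * ip_rt_init() wires the subsystem together at boot: the dst slab
 * cache, the route cache hash (sized from rhash_entries= or available
 * memory), GC thresholds, the periodic expiry worker, per-namespace
 * secret timers, /proc files, the RTM_GETROUTE handler and the sysctl
 * tables.
 */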
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_NET_CLS_ROUTE
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct),
				    __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(num_physpages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					rhash_entries ? 0 : 512 * 1024);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;

	devinet_init();
	ip_fib_init();

	/* All the timers started at system startup tend
	   to synchronize. Perturb it a bit.
	 */
	schedule_delayed_work(&expires_work,
		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);

	if (register_pernet_subsys(&rt_secret_timer_ops))
		printk(KERN_ERR "Unable to setup rt_secret_timer\n");

	if (ip_rt_proc_init())
		printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init(ip_rt_max_size);
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	return rc;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_sysctl_paths(ipv4_path, ipv4_skeleton);
}
#endif

EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);