/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Version:	$Id: route.c,v 1.103 2002/01/12 07:44:09 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window clamping
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/ip_mp_alg.h>
#include <linux/sysctl.h>
#define RT_FL_TOS(oldflp) \
    ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_min_delay		= 2 * HZ;
static int ip_rt_max_delay		= 10 * HZ;
static int ip_rt_max_size;
static int ip_rt_gc_timeout		= RT_GC_TIMEOUT;
static int ip_rt_gc_interval		= 60 * HZ;
static int ip_rt_gc_min_interval	= HZ / 2;
static int ip_rt_redirect_number	= 9;
static int ip_rt_redirect_load		= HZ / 50;
static int ip_rt_redirect_silence	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost		= HZ;
static int ip_rt_error_burst		= 5 * HZ;
static int ip_rt_gc_elasticity		= 8;
static int ip_rt_mtu_expires		= 10 * 60 * HZ;
static int ip_rt_min_pmtu		= 512 + 20 + 20;
static int ip_rt_min_advmss		= 256;
static int ip_rt_secret_interval	= 10 * 60 * HZ;
static unsigned long rt_deadline;

#define RTprint(a...)	printk(KERN_DEBUG a)

static struct timer_list rt_flush_timer;
static struct timer_list rt_periodic_timer;
static struct timer_list rt_secret_timer;
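/*
 * Note: the ip_rt_* values above are the route cache tunables; in this file
 * they are normally exported through sysctl (/proc/sys/net/ipv4/route/...),
 * so the compiled-in values are only the boot-time defaults.
 */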
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static void		 ipv4_dst_ifdown(struct dst_entry *dst,
					 struct net_device *dev, int how);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(void);

static struct dst_ops ipv4_dst_ops = {
	.protocol =		__constant_htons(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.entry_size =		sizeof(struct rtable),
};

#define ECN_OR_COST(class)	TC_PRIO_##class

__u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
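/*
 * Note: ip_tos2prio[] maps the four IPv4 TOS bits to a packet scheduler
 * priority band; callers index it through rt_tos2priority() (see
 * include/net/route.h).
 */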
/* The locking scheme is rather straight forward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    lock held.
 */

struct rt_hash_bucket {
	struct rtable	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
 * The size of this table is a power of two and depends on the number of CPUS.
 */
#if NR_CPUS >= 32
#define RT_HASH_LOCK_SZ	4096
#elif NR_CPUS >= 16
#define RT_HASH_LOCK_SZ	2048
#elif NR_CPUS >= 8
#define RT_HASH_LOCK_SZ	1024
#elif NR_CPUS >= 4
#define RT_HASH_LOCK_SZ	512
#else
#define RT_HASH_LOCK_SZ	256
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
# define rt_hash_lock_init()	{ \
		int i; \
		rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ, GFP_KERNEL); \
		if (!rt_hash_locks) panic("IP: failed to allocate rt_hash_locks\n"); \
		for (i = 0; i < RT_HASH_LOCK_SZ; i++) \
			spin_lock_init(&rt_hash_locks[i]); \
		}
#else
# define rt_hash_lock_addr(slot) NULL
# define rt_hash_lock_init()
#endif
static struct rt_hash_bucket	*rt_hash_table;
static unsigned			rt_hash_mask;
static int			rt_hash_log;
static unsigned int		rt_hash_rnd;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) \
	(per_cpu(rt_cache_stat, raw_smp_processor_id()).field++)

static int rt_intern_hash(unsigned hash, struct rtable *rth,
				struct rtable **res);

static unsigned int rt_hash_code(u32 daddr, u32 saddr)
{
	return (jhash_2words(daddr, saddr, rt_hash_rnd)
		& rt_hash_mask);
}
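/*
 * Note: rt_hash_rnd is a random secret folded into the jhash above so that
 * remote hosts cannot deliberately generate hash collisions; it is re-keyed
 * whenever the cache is flushed (see rt_run_flush() and the rt_secret_timer).
 */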
#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	int bucket;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rtable *r = NULL;
	struct rt_cache_iter_state *st = seq->private;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		rcu_read_lock_bh();
		r = rt_hash_table[st->bucket].chain;
		if (r)
			break;
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
{
	struct rt_cache_iter_state *st = rcu_dereference(seq->private);

	r = r->u.rt_next;
	while (!r) {
		rcu_read_unlock_bh();
		if (--st->bucket < 0)
			break;
		rcu_read_lock_bh();
		r = rt_hash_table[st->bucket].chain;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? rt_cache_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r = NULL;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		char temp[256];

		sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
			      "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X",
			r->u.dst.dev ? r->u.dst.dev->name : "*",
			(unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
			r->rt_flags, atomic_read(&r->u.dst.__refcnt),
			r->u.dst.__use, 0, (unsigned long)r->rt_src,
			(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
			     (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
			dst_metric(&r->u.dst, RTAX_WINDOW),
			(int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
			      dst_metric(&r->u.dst, RTAX_RTTVAR)),
			r->fl.fl4_tos,
			r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
			r->u.dst.hh ? (r->u.dst.hh->hh_output ==
				       dev_queue_xmit) : 0,
			r->rt_spec_dst);
		seq_printf(seq, "%-127s\n", temp);
	}
	return 0;
}
static struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	rc = seq_open(file, &rt_cache_seq_ops);
	if (rc)
		goto out_kfree;
	seq          = file->private_data;
	seq->private = s;
	memset(s, 0, sizeof(*s));
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

static struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.release = seq_release_private,
};

static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   atomic_read(&ipv4_dst_ops.entries),
	return 0;
}

static struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.release = seq_release,
};

#endif /* CONFIG_PROC_FS */
static __inline__ void rt_free(struct rtable *rt)
{
	multipath_remove(rt);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static __inline__ void rt_drop(struct rtable *rt)
{
	multipath_remove(rt);
	ip_rt_put(rt);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static __inline__ int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rth->fl.iif && rth->u.rt_next;
}

static __inline__ int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		rth->u.dst.expires;
}
static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;

	if (atomic_read(&rth->u.dst.__refcnt))
		return 0;

	if (rth->u.dst.expires &&
	    time_after_eq(jiffies, rth->u.dst.expires))
		return 1;

	age = jiffies - rth->u.dst.lastuse;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		return 0;
	return 1;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->u.dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (!rt->fl.iif ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}
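/*
 * Note: rt_intern_hash() uses rt_score() to pick an eviction candidate when a
 * hash chain grows too long: the lowest-scoring unreferenced entry in the
 * chain is dropped, so "valuable" and output/unicast routes survive longest.
 */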
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u, sizeof(fl1->nl_u.ip4_u)) == 0 &&
	       fl1->oif     == fl2->oif &&
	       fl1->iif     == fl2->iif;
}
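/*
 * Note: fl4_dst, fl4_src, fl4_tos (and, when CONFIG_IP_ROUTE_FWMARK is set,
 * the firewall mark) all live inside nl_u.ip4_u, so the memcmp above compares
 * the whole IPv4 flow key at once; only oif and iif need separate checks.
 */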
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
						struct rtable *expentry,
						int *removed_count)
{
	int passedexpired = 0;
	struct rtable **nextstep = NULL;
	struct rtable **rthp = chain_head;
	struct rtable *rth;

	while ((rth = *rthp) != NULL) {
		if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
		    compare_keys(&(*rthp)->fl, &expentry->fl)) {
			if (*rthp == expentry) {
				*rthp = rth->u.rt_next;
				continue;
			} else {
				*rthp = rth->u.rt_next;
				rt_free(rth);
			}
		} else {
			if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
			    passedexpired && !nextstep)
				nextstep = &rth->u.rt_next;

			rthp = &rth->u.rt_next;
		}
	}

	return nextstep;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
/* This runs via a timer and thus is always in BH context. */
static void rt_check_expire(unsigned long dummy)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth, **rthp;
	unsigned long now = jiffies;
	u64 mult;

	mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask) goal = rt_hash_mask + 1;
	for (; goal > 0; goal--) {
		unsigned long tmo = ip_rt_gc_timeout;

		i = (i + 1) & rt_hash_mask;
		rthp = &rt_hash_table[i].chain;

		spin_lock(rt_hash_lock_addr(i));
		while ((rth = *rthp) != NULL) {
			if (rth->u.dst.expires) {
				/* Entry is expired even if it is in use */
				if (time_before_eq(now, rth->u.dst.expires)) {
					tmo >>= 1;
					rthp = &rth->u.rt_next;
					continue;
				}
			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
				tmo >>= 1;
				rthp = &rth->u.rt_next;
				continue;
			}

			/* Cleanup aged off entries. */
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
			/* remove all related balanced entries if necessary */
			if (rth->u.dst.flags & DST_BALANCED) {
				rthp = rt_remove_balanced_route(
					&rt_hash_table[i].chain,
					rth, NULL);
				if (!rthp)
					break;
			} else {
				*rthp = rth->u.rt_next;
				rt_free(rth);
			}
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
			*rthp = rth->u.rt_next;
			rt_free(rth);
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
		}
		spin_unlock(rt_hash_lock_addr(i));

		/* Fallback loop breaker. */
		if (time_after(jiffies, now))
			break;
	}
	rover = i;
	mod_timer(&rt_periodic_timer, jiffies + ip_rt_gc_interval);
}
/* This can run from both BH and non-BH contexts, the latter
 * in the case of a forced flush event.
 */
static void rt_run_flush(unsigned long dummy)
{
	int i;
	struct rtable *rth, *next;

	get_random_bytes(&rt_hash_rnd, 4);

	for (i = rt_hash_mask; i >= 0; i--) {
		spin_lock_bh(rt_hash_lock_addr(i));
		rth = rt_hash_table[i].chain;
		if (rth)
			rt_hash_table[i].chain = NULL;
		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; rth; rth = next) {
			next = rth->u.rt_next;
			rt_free(rth);
		}
	}
}

static DEFINE_SPINLOCK(rt_flush_lock);
void rt_cache_flush(int delay)
{
	unsigned long now = jiffies;
	int user_mode = !in_softirq();

	if (delay < 0)
		delay = ip_rt_min_delay;

	/* flush existing multipath state*/
	multipath_flush();

	spin_lock_bh(&rt_flush_lock);

	if (del_timer(&rt_flush_timer) && delay > 0 && rt_deadline) {
		long tmo = (long)(rt_deadline - now);

		/* If flush timer is already running
		   and flush request is not immediate (delay > 0):

		   if deadline is not achieved, prolongate timer to "delay",
		   otherwise fire it at deadline time.
		 */

		if (user_mode && tmo < ip_rt_max_delay-ip_rt_min_delay)
			tmo = 0;

		if (delay > tmo)
			delay = tmo;
	}

	if (delay <= 0) {
		spin_unlock_bh(&rt_flush_lock);
		rt_run_flush(0);
		return;
	}

	if (rt_deadline == 0)
		rt_deadline = now + ip_rt_max_delay;

	mod_timer(&rt_flush_timer, now+delay);
	spin_unlock_bh(&rt_flush_lock);
}
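/*
 * Note: a negative delay means "use the default", a zero delay flushes
 * immediately; the timer arithmetic above batches repeated requests so that a
 * delayed flush fires no earlier than ip_rt_min_delay and no later than
 * ip_rt_max_delay after the first pending request.
 */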
static void rt_secret_rebuild(unsigned long dummy)
{
	unsigned long now = jiffies;

	rt_cache_flush(0);
	mod_timer(&rt_secret_timer, now + ip_rt_secret_interval);
}

/*
   Short description of GC goals.

   We want to build algorithm, which will keep routing cache
   at some equilibrium point, when number of aged off entries
   is kept approximately equal to newly generated ones.

   Current expiration strength is variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle expires is large enough to keep enough of warm entries,
   and when load increases it reduces to limit cache size.
 */
static int rt_garbage_collect(void)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth, **rthp;
	unsigned long now = jiffies;
	int goal;

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	/* Calculate number of entries, which we want to expire now. */
	goal = atomic_read(&ipv4_dst_ops.entries) -
		(ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal / 2, rt_hash_mask + 1);
			goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		}
	} else {
		/* We are in dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal / 2, rt_hash_mask + 1);
		equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = *rthp) != NULL) {
				if (!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->u.rt_next;
					continue;
				}
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
				/* remove all related balanced entries
				 * if necessary
				 */
				if (rth->u.dst.flags & DST_BALANCED) {
					int r;

					rthp = rt_remove_balanced_route(
						&rt_hash_table[k].chain,
						rth,
						&r);
					goal -= r;
					if (!rthp)
						break;
				} else {
					*rthp = rth->u.rt_next;
					rt_free(rth);
					goal--;
				}
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
				*rthp = rth->u.rt_next;
				rt_free(rth);
				goal--;
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop process if:

		   - if expire reduced to zero. Otherwise, expire is halved.
		   - if table is not full.
		   - if we are called from interrupt.
		   - jiffies check is just fallback/debug loop breaker.
		     We will not spin here for long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
				atomic_read(&ipv4_dst_ops.entries), goal, i);
#endif

		if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
			atomic_read(&ipv4_dst_ops.entries), goal, rover);
#endif
out:	return 0;
}
static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
{
	struct rtable	*rth, **rthp;
	unsigned long	now = jiffies;
	struct rtable	*cand, **candp;
	u32		min_score;
	int		chain_length;
	int attempts = !in_softirq();

	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;

	rthp = &rt_hash_table[hash].chain;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = *rthp) != NULL) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
		if (!(rth->u.dst.flags & DST_BALANCED) &&
		    compare_keys(&rth->fl, &rt->fl)) {
#else
		if (compare_keys(&rth->fl, &rt->fl)) {
#endif
			/* Put it first */
			*rthp = rth->u.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->u.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_hold(&rth->u.dst);
			rth->u.dst.lastuse = now;
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			*rp = rth;
			return 0;
		}

		if (!atomic_read(&rth->u.dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->u.rt_next;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->u.rt_next;
			rt_free(cand);
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
		int err = arp_bind_neighbour(&rt->u.dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return err;
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect();
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "Neighbour table overflow.\n");
			rt_drop(rt);
			return -ENOBUFS;
		}
	}

	rt->u.rt_next = rt_hash_table[hash].chain;
#if RT_CACHE_DEBUG >= 2
	if (rt->u.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
		       NIPQUAD(rt->rt_dst));
		for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
			printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
		printk("\n");
	}
#endif
	rt_hash_table[hash].chain = rt;
	spin_unlock_bh(rt_hash_lock_addr(hash));
	*rp = rt;
	return 0;
}
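/*
 * Note: rt_intern_hash() is the only writer of a hash chain.  While walking
 * the chain under the bucket spinlock it remembers the lowest-scoring
 * unreferenced entry ("cand"); if the chain turns out longer than
 * ip_rt_gc_elasticity, that candidate is evicted to keep chains short.
 */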
void rt_bind_peer(struct rtable *rt, int create)
{
	static DEFINE_SPINLOCK(rt_peer_lock);
	struct inet_peer *peer;

	peer = inet_getpeer(rt->rt_dst, create);

	spin_lock_bh(&rt_peer_lock);
	if (rt->peer == NULL) {
		rt->peer = peer;
		peer = NULL;
	}
	spin_unlock_bh(&rt_peer_lock);
	if (peer)
		inet_putpeer(peer);
}
/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chances to
 * select ID being unique in a reasonable period of time.
 * But broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id(ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If peer is attached to destination, it is never detached,
		   so that we need not to grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
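/*
 * Note: when a long-lived inet_peer exists for the destination, IP IDs come
 * from the peer's counter via inet_getid(); the secure_ip_id() fallback above
 * is only used when no peer could be allocated.
 */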
static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable **rthp;

	spin_lock_bh(rt_hash_lock_addr(hash));
	for (rthp = &rt_hash_table[hash].chain; *rthp;
	     rthp = &(*rthp)->u.rt_next)
		if (*rthp == rt) {
			*rthp = rt->u.rt_next;
			rt_free(rt);
			break;
		}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}
void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
		    u32 saddr, struct net_device *dev)
{
	int i, k;
	struct in_device *in_dev = in_dev_get(dev);
	struct rtable *rth, **rthp;
	u32 skeys[2] = { saddr, 0 };
	int ikeys[2] = { dev->ifindex, 0 };

	if (!in_dev)
		return;

	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
	    || MULTICAST(new_gw) || BADCLASS(new_gw) || ZERONET(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	for (i = 0; i < 2; i++) {
		for (k = 0; k < 2; k++) {
			unsigned hash = rt_hash_code(daddr,
						     skeys[i] ^ (ikeys[k] << 5));

			rthp = &rt_hash_table[hash].chain;

			rcu_read_lock();
			while ((rth = rcu_dereference(*rthp)) != NULL) {
				struct rtable *rt;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->fl.oif != ikeys[k] ||
				    rth->fl.iif != 0) {
					rthp = &rth->u.rt_next;
					continue;
				}

				if (rth->rt_dst != daddr ||
				    rth->rt_src != saddr ||
				    rth->rt_gateway != old_gw ||
				    rth->u.dst.dev != dev)
					break;

				dst_hold(&rth->u.dst);
				rcu_read_unlock();

				rt = dst_alloc(&ipv4_dst_ops);
				if (rt == NULL) {
					ip_rt_put(rth);
					in_dev_put(in_dev);
					return;
				}

				/* Copy all the information. */
				*rt = *rth;
				INIT_RCU_HEAD(&rt->u.dst.rcu_head);
				rt->u.dst.__use		= 1;
				atomic_set(&rt->u.dst.__refcnt, 1);
				rt->u.dst.child		= NULL;
				if (rt->u.dst.dev)
					dev_hold(rt->u.dst.dev);
				if (rt->idev)
					in_dev_hold(rt->idev);
				rt->u.dst.obsolete	= 0;
				rt->u.dst.lastuse	= jiffies;
				rt->u.dst.path		= &rt->u.dst;
				rt->u.dst.neighbour	= NULL;
				rt->u.dst.hh		= NULL;
				rt->u.dst.xfrm		= NULL;

				rt->rt_flags		|= RTCF_REDIRECTED;

				/* Gateway is different ... */
				rt->rt_gateway		= new_gw;

				/* Redirect received -> path was valid */
				dst_confirm(&rth->u.dst);

				if (rt->peer)
					atomic_inc(&rt->peer->refcnt);

				if (arp_bind_neighbour(&rt->u.dst) ||
				    !(rt->u.dst.neighbour->nud_state &
					    NUD_VALID)) {
					if (rt->u.dst.neighbour)
						neigh_event_send(rt->u.dst.neighbour, NULL);
					ip_rt_put(rth);
					rt_drop(rt);
					goto do_next;
				}

				rt_del(hash, rth);
				if (!rt_intern_hash(hash, rt, &rt))
					ip_rt_put(rt);
				goto do_next;
			}
			rcu_read_unlock();
		do_next:
			;
		}
	}
	in_dev_put(in_dev);
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
			"%u.%u.%u.%u ignored.\n"
			"  Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
		       NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
		       NIPQUAD(saddr), NIPQUAD(daddr));
#endif
	in_dev_put(in_dev);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->u.dst.expires) {
			unsigned hash = rt_hash_code(rt->fl.fl4_dst,
						     rt->fl.fl4_src ^
							(rt->fl.oif << 5));
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ip_rt_advice: redirect to "
					  "%u.%u.%u.%u/%02x dropped\n",
				NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
#endif
			rt_del(hash, rt);
			ret = NULL;
		}
	}
	return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable *)skb->dst;
	struct in_device *in_dev = in_dev_get(rt->u.dst.dev);

	if (!in_dev)
		return;

	if (!IN_DEV_TX_REDIRECTS(in_dev))
		goto out;

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
		rt->u.dst.rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set u.dst.rate_last to the last seen redirected packet.
	 */
	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
		rt->u.dst.rate_last = jiffies;
		goto out;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (time_after(jiffies,
		       (rt->u.dst.rate_last +
			(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		rt->u.dst.rate_last = jiffies;
		++rt->u.dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (IN_DEV_LOG_MARTIANS(in_dev) &&
		    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host %u.%u.%u.%u/if%d ignores "
				"redirects for %u.%u.%u.%u to %u.%u.%u.%u.\n",
				NIPQUAD(rt->rt_src), rt->rt_iif,
				NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
#endif
	}
out:
	in_dev_put(in_dev);
}
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = (struct rtable *)skb->dst;
	unsigned long now;
	int code;

	switch (rt->u.dst.error) {
		case EHOSTUNREACH:
			code = ICMP_HOST_UNREACH;
			break;
		case ENETUNREACH:
			code = ICMP_NET_UNREACH;
			break;
		case EACCES:
			code = ICMP_PKT_FILTERED;
			break;
	}

	now = jiffies;
	rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
	if (rt->u.dst.rate_tokens > ip_rt_error_burst)
		rt->u.dst.rate_tokens = ip_rt_error_burst;
	rt->u.dst.rate_last = now;
	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
		rt->u.dst.rate_tokens -= ip_rt_error_cost;
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
	}

out:	kfree_skb(skb);
	return 0;
}
/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];
	return 68;
}
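/*
 * Note: the plateau values above follow the table suggested by RFC 1191
 * ("Path MTU Discovery"); guess_mtu() falls back to the next smaller
 * well-known MTU when an ICMP Fragmentation Needed message carries no usable
 * next-hop MTU.
 */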
unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
{
	int i;
	unsigned short old_mtu = ntohs(iph->tot_len);
	struct rtable *rth;
	u32 skeys[2] = { iph->saddr, 0, };
	u32 daddr = iph->daddr;
	unsigned short est_mtu = 0;

	if (ipv4_config.no_pmtu_disc)
		return 0;

	for (i = 0; i < 2; i++) {
		unsigned hash = rt_hash_code(daddr, skeys[i]);

		rcu_read_lock();
		for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
		     rth = rcu_dereference(rth->u.rt_next)) {
			if (rth->fl.fl4_dst == daddr &&
			    rth->fl.fl4_src == skeys[i] &&
			    rth->rt_dst == daddr &&
			    rth->rt_src == iph->saddr &&
			    rth->fl.iif == 0 &&
			    !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
				unsigned short mtu = new_mtu;

				if (new_mtu < 68 || new_mtu >= old_mtu) {

					/* BSD 4.2 compatibility hack :-( */
					if (mtu == 0 &&
					    old_mtu >= rth->u.dst.metrics[RTAX_MTU-1] &&
					    old_mtu >= 68 + (iph->ihl << 2))
						old_mtu -= iph->ihl << 2;

					mtu = guess_mtu(old_mtu);
				}
				if (mtu <= rth->u.dst.metrics[RTAX_MTU-1]) {
					if (mtu < rth->u.dst.metrics[RTAX_MTU-1]) {
						dst_confirm(&rth->u.dst);
						if (mtu < ip_rt_min_pmtu) {
							mtu = ip_rt_min_pmtu;
							rth->u.dst.metrics[RTAX_LOCK-1] |=
								(1 << RTAX_MTU);
						}
						rth->u.dst.metrics[RTAX_MTU-1] = mtu;
						dst_set_expires(&rth->u.dst,
							ip_rt_mtu_expires);
					}
					est_mtu = mtu;
				}
			}
		}
		rcu_read_unlock();
	}
	return est_mtu ? : new_mtu;
}
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= 68 &&
	    !(dst_metric_locked(dst, RTAX_MTU))) {
		if (mtu < ip_rt_min_pmtu) {
			mtu = ip_rt_min_pmtu;
			dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
		}
		dst->metrics[RTAX_MTU-1] = mtu;
		dst_set_expires(dst, ip_rt_mtu_expires);
	}
}
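/*
 * Note: both PMTU update paths clamp the learned MTU to ip_rt_min_pmtu and,
 * when clamping, lock RTAX_MTU so that bogus "frag needed" storms cannot
 * drive the path MTU below a usable minimum; the new value expires after
 * ip_rt_mtu_expires.
 */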
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;
	struct in_device *idev = rt->idev;

	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}

	if (idev) {
		rt->idev = NULL;
		in_dev_put(idev);
	}
}

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
	struct rtable *rt = (struct rtable *) dst;
	struct in_device *idev = rt->idev;
	if (dev != &loopback_dev && idev && idev->dev == dev) {
		struct in_device *loopback_idev = in_dev_get(&loopback_dev);
		if (loopback_idev) {
			rt->idev = loopback_idev;
			in_dev_put(idev);
		}
	}
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = (struct rtable *) skb->dst;
	if (rt)
		dst_set_expires(&rt->u.dst, 0);
}
static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: %u.%u.%u.%u -> %u.%u.%u.%u, %s\n",
		NIPQUAD(skb->nh.iph->saddr), NIPQUAD(skb->nh.iph->daddr),
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}
/*
   We do not cache source address of outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it is out of the fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	u32 src;
	struct fib_result res;

	if (rt->fl.iif == 0)
		src = rt->rt_src;
	else if (fib_lookup(&rt->fl, &res) == 0) {
		src = FIB_RES_PREFSRC(res);
		fib_res_put(&res);
	} else
		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->u.dst.tclassid & 0xFFFF))
		rt->u.dst.tclassid |= tag & 0xFFFF;
	if (!(rt->u.dst.tclassid & 0xFFFF0000))
		rt->u.dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
	struct fib_info *fi = res->fi;

	if (fi) {
		if (FIB_RES_GW(*res) &&
		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		memcpy(rt->u.dst.metrics, fi->fib_metrics,
		       sizeof(rt->u.dst.metrics));
		if (fi->fib_mtu == 0) {
			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
			if (rt->u.dst.metrics[RTAX_LOCK-1] & (1 << RTAX_MTU) &&
			    rt->rt_gateway != rt->rt_dst &&
			    rt->u.dst.dev->mtu > 576)
				rt->u.dst.metrics[RTAX_MTU-1] = 576;
		}
#ifdef CONFIG_NET_CLS_ROUTE
		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	} else
		rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;

	if (rt->u.dst.metrics[RTAX_HOPLIMIT-1] == 0)
		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
	if (rt->u.dst.metrics[RTAX_MTU-1] > IP_MAX_MTU)
		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
	if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
							 ip_rt_min_advmss);
	if (rt->u.dst.metrics[RTAX_ADVMSS-1] > 65535 - 40)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;

#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
	rt->rt_type = res->type;
}
static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	unsigned hash;
	struct rtable *rth;
	u32 spec_dst;
	u32 itag = 0;
	struct in_device *in_dev = in_dev_get(dev);

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ZERONET(saddr)) {
		if (!LOCAL_MCAST(daddr))
			goto e_inval;
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	} else if (fib_validate_source(saddr, 0, tos, 0,
					dev, &spec_dst, &itag) < 0)
		goto e_inval;

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->u.dst.output = ip_rt_bug;

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags = DST_HOST;
	if (in_dev->cnf.no_policy)
		rth->u.dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	rth->fl.fl4_fwmark = skb->nfmark;
#endif
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->u.dst.tclassid = itag;
#endif
	rth->fl.iif	= dev->ifindex;
	rth->u.dst.dev	= &loopback_dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst = spec_dst;
	rth->rt_type	= RTN_MULTICAST;
	rth->rt_flags	= RTCF_MULTICAST;
	if (our) {
		rth->u.dst.input = ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->u.dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	in_dev_put(in_dev);
	hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5));
	return rt_intern_hash(hash, rth, (struct rtable **) &skb->dst);

e_nobufs:
	in_dev_put(in_dev);
	return -ENOBUFS;

e_inval:
	in_dev_put(in_dev);
	return -EINVAL;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     u32 daddr,
				     u32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		printk(KERN_WARNING "martian source %u.%u.%u.%u from "
			"%u.%u.%u.%u, on dev %s\n",
			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
		if (dev->hard_header_len && skb->mac.raw) {
			int i;
			unsigned char *p = skb->mac.raw;
			printk(KERN_WARNING "ll header: ");
			for (i = 0; i < dev->hard_header_len; i++, p++) {
				printk("%02x", *p);
				if (i < (dev->hard_header_len - 1))
					printk(":");
			}
			printk("\n");
		}
	}
#endif
}
static inline int __mkroute_input(struct sk_buff *skb,
				  struct fib_result *res,
				  struct in_device *in_dev,
				  u32 daddr, u32 saddr, u32 tos,
				  struct rtable **result)
{
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	unsigned flags = 0;
	u32 spec_dst, itag;

	/* get a working reference to the output device */
	out_dev = in_dev_get(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		if (net_ratelimit())
			printk(KERN_CRIT "Bug in ip_route_input" \
			       "_slow(). Please, report\n");
		return -EINVAL;
	}

	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, &spec_dst, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		err = -EINVAL;
		goto cleanup;
	}

	if (err)
		flags |= RTCF_DIRECTSRC;

	if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		flags |= RTCF_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 */
		if (out_dev == in_dev && !(flags & RTCF_DNAT)) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags = DST_HOST;
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	if (res->fi->fib_nhs > 1)
		rth->u.dst.flags |= DST_BALANCED;
#endif
	if (in_dev->cnf.no_policy)
		rth->u.dst.flags |= DST_NOPOLICY;
	if (in_dev->cnf.no_xfrm)
		rth->u.dst.flags |= DST_NOXFRM;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	rth->fl.fl4_fwmark = skb->nfmark;
#endif
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
	rth->rt_gateway	= daddr;
	rth->fl.iif	= in_dev->dev->ifindex;
	rth->u.dst.dev	= (out_dev)->dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->rt_spec_dst = spec_dst;

	rth->u.dst.input = ip_forward;
	rth->u.dst.output = ip_output;

	rt_set_nexthop(rth, res, itag);

	rth->rt_flags = flags;

	*result = rth;
	err = 0;
cleanup:
	/* release the working reference to the output device */
	in_dev_put(out_dev);
	return err;
}
static inline int ip_mkroute_input_def(struct sk_buff *skb,
				       struct fib_result *res,
				       const struct flowi *fl,
				       struct in_device *in_dev,
				       u32 daddr, u32 saddr, u32 tos)
{
	struct rtable *rth = NULL;
	int err;
	unsigned hash;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
		fib_select_multipath(fl, res);
#endif

	/* create a routing cache entry */
	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
	if (err)
		return err;

	/* put it into the cache */
	hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
	return rt_intern_hash(hash, rth, (struct rtable **)&skb->dst);
}
static inline int ip_mkroute_input(struct sk_buff *skb,
				   struct fib_result *res,
				   const struct flowi *fl,
				   struct in_device *in_dev,
				   u32 daddr, u32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	struct rtable *rth = NULL, *rtres;
	unsigned char hop, hopcount;
	int err = -EINVAL;
	unsigned int hash;

	if (res->fi)
		hopcount = res->fi->fib_nhs;
	else
		hopcount = 1;

	/* distinguish between multipath and singlepath */
	if (hopcount < 2)
		return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
					    saddr, tos);

	/* add all alternatives to the routing cache */
	for (hop = 0; hop < hopcount; hop++) {
		res->nh_sel = hop;

		/* put reference to previous result */
		if (hop)
			ip_rt_put(rtres);

		/* create a routing cache entry */
		err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos,
				      &rth);
		if (err)
			return err;

		/* put it into the cache */
		hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
		err = rt_intern_hash(hash, rth, &rtres);
		if (err)
			return err;

		/* forward hop information to multipath impl. */
		multipath_set_nhinfo(rth,
				     FIB_RES_NETWORK(*res),
				     FIB_RES_NETMASK(*res),
				     res->prefixlen,
				     &FIB_RES_NH(*res));
	}
	skb->dst = &rtres->u.dst;
	return err;
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
	return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos);
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
}
/*
 *	NOTE. We drop all the packets that have local source
 *	addresses, because every properly looped back packet
 *	must have correct destination already attached by output routine.
 *
 *	Such approach solves two big problems:
 *	1. Not simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with 100% of guarantee.
 */

static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = in_dev_get(dev);
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = daddr,
					.saddr = saddr,
					.tos = tos,
					.scope = RT_SCOPE_UNIVERSE,
#ifdef CONFIG_IP_ROUTE_FWMARK
					.fwmark = skb->nfmark
#endif
				      } },
			    .iif = dev->ifindex };
	unsigned	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	unsigned	hash;
	u32		spec_dst;
	int		err = -EINVAL;

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	   by fib_lookup.
	 */
	if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
		goto martian_source;

	if (daddr == 0xFFFFFFFF || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I even do not know to fix it or not. Waiting for complains :-)
	 */
	if (ZERONET(saddr))
		goto martian_source;

	if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
		goto martian_destination;

	/*
	 *	Now we are ready to route packet.
	 */
	if ((err = fib_lookup(&fl, &res)) != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			goto e_hostunreach;
		goto no_route;
	}

	RT_CACHE_STAT_INC(in_slow_tot);

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		int result;
		result = fib_validate_source(saddr, daddr, tos,
					     loopback_dev.ifindex,
					     dev, &spec_dst, &itag);
		if (result < 0)
			goto martian_source;
		if (result)
			flags |= RTCF_DIRECTSRC;
		spec_dst = daddr;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev))
		goto e_hostunreach;
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
	if (err == -ENOBUFS)
		goto e_nobufs;
	goto done;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ZERONET(saddr))
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	else {
		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
					  &itag);
		if (err < 0)
			goto martian_source;
		if (err)
			flags |= RTCF_DIRECTSRC;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->u.dst.output = ip_rt_bug;

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags = DST_HOST;
	if (in_dev->cnf.no_policy)
		rth->u.dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	rth->fl.fl4_fwmark = skb->nfmark;
#endif
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->u.dst.tclassid = itag;
#endif
	rth->fl.iif	= dev->ifindex;
	rth->u.dst.dev	= &loopback_dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst = spec_dst;
	rth->u.dst.input = ip_local_deliver;
	rth->rt_flags	= flags|RTCF_LOCAL;
	if (res.type == RTN_UNREACHABLE) {
		rth->u.dst.input = ip_error;
		rth->u.dst.error = -err;
		rth->rt_flags	&= ~RTCF_LOCAL;
	}
	rth->rt_type	= res.type;
	hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5));
	err = rt_intern_hash(hash, rth, (struct rtable **)&skb->dst);
	goto done;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
	res.type = RTN_UNREACHABLE;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_WARNING "martian destination %u.%u.%u.%u from "
			"%u.%u.%u.%u, dev %s\n",
			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
#endif

e_hostunreach:
	err = -EHOSTUNREACH;
	goto done;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto e_inval;

done:
	in_dev_put(in_dev);
out:	return err;
}
int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
		   u8 tos, struct net_device *dev)
{
	struct rtable *rth;
	unsigned hash;
	int iif = dev->ifindex;

	tos &= IPTOS_RT_MASK;
	hash = rt_hash_code(daddr, saddr ^ (iif << 5));

	rcu_read_lock();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.rt_next)) {
		if (rth->fl.fl4_dst == daddr &&
		    rth->fl.fl4_src == saddr &&
		    rth->fl.iif == iif &&
		    rth->fl.oif == 0 &&
#ifdef CONFIG_IP_ROUTE_FWMARK
		    rth->fl.fl4_fwmark == skb->nfmark &&
#endif
		    rth->fl.fl4_tos == tos) {
			rth->u.dst.lastuse = jiffies;
			dst_hold(&rth->u.dst);
			RT_CACHE_STAT_INC(in_hit);
			rcu_read_unlock();
			skb->dst = (struct dst_entry *)rth;
			return 0;
		}
		RT_CACHE_STAT_INC(in_hlist_search);
	}
	rcu_read_unlock();

	/* Multicast recognition logic is moved from route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As result the host on multicasting
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all the world. Now we try to get rid of them.
	   Really, provided software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   comparing with route cache reject entries.
	   Note, that multicast routers are not affected, because
	   route cache entry is created eventually.
	 */
	if (MULTICAST(daddr)) {
		struct in_device *in_dev;

		rcu_read_lock();
		if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
			int our = ip_check_mc(in_dev, daddr, saddr,
					      skb->nh.iph->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
			    || (!LOCAL_MCAST(daddr) && IN_DEV_MFORWARD(in_dev))
#endif
			    ) {
				rcu_read_unlock();
				return ip_route_input_mc(skb, daddr, saddr,
							 tos, dev, our);
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	return ip_route_input_slow(skb, daddr, saddr, tos, dev);
}
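/*
 * Note: ip_route_input() is the input fast path: a hash lookup keyed on
 * (daddr, saddr, iif, TOS and, when configured, the firewall mark) under
 * rcu_read_lock().  Only on a miss does it fall back to
 * ip_route_input_slow(), which consults the FIB and inserts a fresh entry
 * into the cache.
 */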
static inline int __mkroute_output(struct rtable **result,
				   struct fib_result *res,
				   const struct flowi *fl,
				   const struct flowi *oldflp,
				   struct net_device *dev_out,
				   unsigned flags)
{
	struct rtable *rth;
	struct in_device *in_dev;
	u32 tos = RT_FL_TOS(oldflp);
	int err = 0;

	if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
		return -EINVAL;

	if (fl->fl4_dst == 0xFFFFFFFF)
		res->type = RTN_BROADCAST;
	else if (MULTICAST(fl->fl4_dst))
		res->type = RTN_MULTICAST;
	else if (BADCLASS(fl->fl4_dst) || ZERONET(fl->fl4_dst))
		return -EINVAL;

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	/* get work reference to inet device */
	in_dev = in_dev_get(dev_out);
	if (!in_dev)
		return -EINVAL;

	if (res->type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		if (res->fi) {
			fib_info_put(res->fi);
			res->fi = NULL;
		}
	} else if (res->type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST|RTCF_LOCAL;
		if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
				 oldflp->proto))
			flags &= ~RTCF_LOCAL;
		/* If multicast route do not exist use
		   default one, but do not gateway in this case.
		   Yes, it is hack.
		 */
		if (res->fi && res->prefixlen < 4) {
			fib_info_put(res->fi);
			res->fi = NULL;
		}
	}

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags = DST_HOST;
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	if (res->fi) {
		rth->rt_multipath_alg = res->fi->fib_mp_alg;
		if (res->fi->fib_nhs > 1)
			rth->u.dst.flags |= DST_BALANCED;
	}
#endif
	if (in_dev->cnf.no_xfrm)
		rth->u.dst.flags |= DST_NOXFRM;
	if (in_dev->cnf.no_policy)
		rth->u.dst.flags |= DST_NOPOLICY;

	rth->fl.fl4_dst	= oldflp->fl4_dst;
	rth->fl.fl4_tos	= tos;
	rth->fl.fl4_src	= oldflp->fl4_src;
	rth->fl.oif	= oldflp->oif;
#ifdef CONFIG_IP_ROUTE_FWMARK
	rth->fl.fl4_fwmark = oldflp->fl4_fwmark;
#endif
	rth->rt_dst	= fl->fl4_dst;
	rth->rt_src	= fl->fl4_src;
	rth->rt_iif	= oldflp->oif ? : dev_out->ifindex;
	/* get references to the devices that are to be hold by the routing
	   cache entry */
	rth->u.dst.dev	= dev_out;
	dev_hold(dev_out);
	rth->idev	= in_dev_get(dev_out);
	rth->rt_gateway = fl->fl4_dst;
	rth->rt_spec_dst = fl->fl4_src;

	rth->u.dst.output = ip_output;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL) {
		rth->u.dst.input = ip_local_deliver;
		rth->rt_spec_dst = fl->fl4_dst;
	}
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		rth->rt_spec_dst = fl->fl4_src;
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->u.dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (res->type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !LOCAL_MCAST(oldflp->fl4_dst)) {
				rth->u.dst.input = ip_mr_input;
				rth->u.dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, res, 0);

	rth->rt_flags = flags;

	*result = rth;
cleanup:
	/* release work reference to inet device */
	in_dev_put(in_dev);

	return err;
}
static inline int ip_mkroute_output_def(struct rtable **rp,
					struct fib_result *res,
					const struct flowi *fl,
					const struct flowi *oldflp,
					struct net_device *dev_out,
					unsigned flags)
{
	struct rtable *rth = NULL;
	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
	unsigned hash;

	if (err == 0) {
		hash = rt_hash_code(oldflp->fl4_dst,
				    oldflp->fl4_src ^ (oldflp->oif << 5));
		err = rt_intern_hash(hash, rth, rp);
	}

	return err;
}
static inline int ip_mkroute_output(struct rtable **rp,
				    struct fib_result *res,
				    const struct flowi *fl,
				    const struct flowi *oldflp,
				    struct net_device *dev_out,
				    unsigned flags)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	unsigned char hop;
	unsigned hash;
	int err = -EINVAL;
	struct rtable *rth = NULL;

	if (res->fi && res->fi->fib_nhs > 1) {
		unsigned char hopcount = res->fi->fib_nhs;

		for (hop = 0; hop < hopcount; hop++) {
			struct net_device *dev2nexthop;

			res->nh_sel = hop;

			/* hold a work reference to the output device */
			dev2nexthop = FIB_RES_DEV(*res);
			dev_hold(dev2nexthop);

			/* put reference to previous result */
			if (hop)
				ip_rt_put(*rp);

			err = __mkroute_output(&rth, res, fl, oldflp,
					       dev2nexthop, flags);
			if (err != 0)
				goto cleanup;

			hash = rt_hash_code(oldflp->fl4_dst,
					    oldflp->fl4_src ^
					    (oldflp->oif << 5));
			err = rt_intern_hash(hash, rth, rp);

			/* forward hop information to multipath impl. */
			multipath_set_nhinfo(rth,
					     FIB_RES_NETWORK(*res),
					     FIB_RES_NETMASK(*res),
					     res->prefixlen,
					     &FIB_RES_NH(*res));
		cleanup:
			/* release work reference to output device */
			dev_put(dev2nexthop);

			if (err != 0)
				return err;
		}
		return err;
	} else {
		return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out,
					     flags);
	}
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
	return ip_mkroute_output_def(rp, res, fl, oldflp, dev_out, flags);
#endif
}
2355 * Major route resolver routine.
2358 static int ip_route_output_slow(struct rtable
**rp
, const struct flowi
*oldflp
)
2360 u32 tos
= RT_FL_TOS(oldflp
);
2361 struct flowi fl
= { .nl_u
= { .ip4_u
=
2362 { .daddr
= oldflp
->fl4_dst
,
2363 .saddr
= oldflp
->fl4_src
,
2364 .tos
= tos
& IPTOS_RT_MASK
,
2365 .scope
= ((tos
& RTO_ONLINK
) ?
2368 #ifdef CONFIG_IP_ROUTE_FWMARK
2369 .fwmark
= oldflp
->fl4_fwmark
2372 .iif
= loopback_dev
.ifindex
,
2373 .oif
= oldflp
->oif
};
2374 struct fib_result res
;
2376 struct net_device
*dev_out
= NULL
;
2382 #ifdef CONFIG_IP_MULTIPLE_TABLES
2386 if (oldflp
->fl4_src
) {
2388 if (MULTICAST(oldflp
->fl4_src
) ||
2389 BADCLASS(oldflp
->fl4_src
) ||
2390 ZERONET(oldflp
->fl4_src
))
2393 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2394 dev_out
= ip_dev_find(oldflp
->fl4_src
);
2395 if (dev_out
== NULL
)
2398 /* I removed check for oif == dev_out->oif here.
2399 It was wrong for two reasons:
2400 1. ip_dev_find(saddr) can return wrong iface, if saddr is
2401 assigned to multiple interfaces.
2402 2. Moreover, we are allowed to send packets with saddr
2403 of another iface. --ANK
2406 if (oldflp
->oif
== 0
2407 && (MULTICAST(oldflp
->fl4_dst
) || oldflp
->fl4_dst
== 0xFFFFFFFF)) {
2408 /* Special hack: user can direct multicasts
2409 and limited broadcast via necessary interface
2410 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2411 This hack is not just for fun, it allows
2412 vic,vat and friends to work.
2413 They bind socket to loopback, set ttl to zero
2414 and expect that it will work.
2415 From the viewpoint of routing cache they are broken,
2416 because we are not allowed to build multicast path
2417 with loopback source addr (look, routing cache
2418 cannot know, that ttl is zero, so that packet
2419 will not leave this host and route is valid).
2420 Luckily, this hack is good workaround.
2423 fl
.oif
= dev_out
->ifindex
;
2433 dev_out
= dev_get_by_index(oldflp
->oif
);
2435 if (dev_out
== NULL
)
2438 /* RACE: Check return value of inet_select_addr instead. */
2439 if (__in_dev_get_rtnl(dev_out
) == NULL
) {
2441 goto out
; /* Wrong error code */
2444 if (LOCAL_MCAST(oldflp
->fl4_dst
) || oldflp
->fl4_dst
== 0xFFFFFFFF) {
2446 fl
.fl4_src
= inet_select_addr(dev_out
, 0,
2451 if (MULTICAST(oldflp
->fl4_dst
))
2452 fl
.fl4_src
= inet_select_addr(dev_out
, 0,
2454 else if (!oldflp
->fl4_dst
)
2455 fl
.fl4_src
= inet_select_addr(dev_out
, 0,
2461 fl
.fl4_dst
= fl
.fl4_src
;
2463 fl
.fl4_dst
= fl
.fl4_src
= htonl(INADDR_LOOPBACK
);
2466 dev_out
= &loopback_dev
;
2468 fl
.oif
= loopback_dev
.ifindex
;
2469 res
.type
= RTN_LOCAL
;
2470 flags
|= RTCF_LOCAL
;
2474 if (fib_lookup(&fl
, &res
)) {
2477 /* Apparently, routing tables are wrong. Assume,
2478 that the destination is on link.
2481 Because we are allowed to send to iface
2482 even if it has NO routes and NO assigned
2483 addresses. When oif is specified, routing
2484 tables are looked up with only one purpose:
2485 to catch if destination is gatewayed, rather than
2486 direct. Moreover, if MSG_DONTROUTE is set,
2487 we send packet, ignoring both routing tables
2488 and ifaddr state. --ANK
2491 We could make it even if oif is unknown,
2492 likely IPv6, but we do not.
2495 if (fl.fl4_src == 0)
2496 fl.fl4_src = inet_select_addr(dev_out, 0,
2498 res.type = RTN_UNICAST;
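/* The fallback above mirrors what a socket can request explicitly; as an
 * illustrative userspace parallel (not from this file):
 *
 *     send(fd, buf, len, MSG_DONTROUTE);
 * or
 *     int on = 1;
 *     setsockopt(fd, SOL_SOCKET, SO_DONTROUTE, &on, sizeof(on));
 *
 * Here the same effect is implicit: fib_lookup() failed but an output
 * interface was named, so the destination is treated as directly reachable
 * (RTN_UNICAST) on that interface.
 */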
2508 if (res.type == RTN_LOCAL) {
2510 fl.fl4_src = fl.fl4_dst;
2513 dev_out = &loopback_dev;
2515 fl.oif = dev_out->ifindex;
2517 fib_info_put(res.fi);
2519 flags |= RTCF_LOCAL;
2523 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2524 if (res.fi->fib_nhs > 1 && fl.oif == 0)
2525 fib_select_multipath(&fl, &res);
2528 if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
2529 fib_select_default(&fl, &res);
2532 fl.fl4_src = FIB_RES_PREFSRC(res);
2536 dev_out = FIB_RES_DEV(res);
2538 fl.oif = dev_out->ifindex;
2542 err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
2552 int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
2557 hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5));
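/* Only daddr, saddr and oif are folded into the hash; tos and (when
 * configured) fwmark are left out of it, so the chain walk below still
 * compares them before declaring a cache hit.
 */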
2560 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2561 rth = rcu_dereference(rth->u.rt_next)) {
2562 if (rth->fl.fl4_dst == flp->fl4_dst &&
2563 rth->fl.fl4_src == flp->fl4_src &&
2565 rth->fl.oif == flp->oif &&
2566 #ifdef CONFIG_IP_ROUTE_FWMARK
2567 rth->fl.fl4_fwmark == flp->fl4_fwmark &&
2569 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2570 (IPTOS_RT_MASK | RTO_ONLINK))) {
2572 /* check for multipath routes and choose one if
2575 if (multipath_select_route(flp, rth, rp)) {
2576 dst_hold(&(*rp)->u.dst);
2577 RT_CACHE_STAT_INC(out_hit);
2578 rcu_read_unlock_bh();
2582 rth->u.dst.lastuse = jiffies;
2583 dst_hold(&rth->u.dst);
2585 RT_CACHE_STAT_INC(out_hit);
2586 rcu_read_unlock_bh();
2590 RT_CACHE_STAT_INC(out_hlist_search);
2592 rcu_read_unlock_bh();
2594 return ip_route_output_slow(rp, flp);
2597 EXPORT_SYMBOL_GPL(__ip_route_output_key);
2599 int ip_route_output_flow(struct rtable **rp, struct flowi *flp, struct sock *sk, int flags)
2603 if ((err = __ip_route_output_key(rp, flp)) != 0)
2608 flp->fl4_src = (*rp)->rt_src;
2610 flp->fl4_dst = (*rp)->rt_dst;
2611 return xfrm_lookup((struct dst_entry **)rp, flp, sk, flags);
2617 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2619 int ip_route_output_key(struct rtable **rp, struct flowi *flp)
2621 return ip_route_output_flow(rp, flp, NULL, 0);
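/* Typical in-kernel use of the wrappers above (a minimal sketch; daddr and
 * tos stand in for whatever the caller already has):
 *
 *     struct flowi fl = { .oif = 0,
 *                         .nl_u = { .ip4_u = { .daddr = daddr,
 *                                              .saddr = 0,
 *                                              .tos   = RT_TOS(tos) } } };
 *     struct rtable *rt;
 *
 *     if (ip_route_output_key(&rt, &fl))
 *             return;                 (no route, bail out)
 *     ... use rt->rt_src, rt->u.dst ...
 *     ip_rt_put(rt);
 */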
2624 static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
2625 int nowait, unsigned int flags)
2627 struct rtable *rt = (struct rtable *)skb->dst;
2629 struct nlmsghdr *nlh;
2630 unsigned char *b = skb->tail;
2631 struct rta_cacheinfo ci;
2632 #ifdef CONFIG_IP_MROUTE
2633 struct rtattr *eptr;
2635 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
2636 r = NLMSG_DATA(nlh);
2637 r->rtm_family = AF_INET;
2638 r->rtm_dst_len = 32;
2640 r->rtm_tos = rt->fl.fl4_tos;
2641 r->rtm_table = RT_TABLE_MAIN;
2642 r->rtm_type = rt->rt_type;
2643 r->rtm_scope = RT_SCOPE_UNIVERSE;
2644 r->rtm_protocol = RTPROT_UNSPEC;
2645 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2646 if (rt->rt_flags & RTCF_NOTIFY)
2647 r->rtm_flags |= RTM_F_NOTIFY;
2648 RTA_PUT(skb, RTA_DST, 4, &rt->rt_dst);
2649 if (rt->fl.fl4_src) {
2650 r->rtm_src_len = 32;
2651 RTA_PUT(skb, RTA_SRC, 4, &rt->fl.fl4_src);
2654 RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
2655 #ifdef CONFIG_NET_CLS_ROUTE
2656 if (rt->u.dst.tclassid)
2657 RTA_PUT(skb, RTA_FLOW, 4, &rt->u.dst.tclassid);
2659 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
2660 if (rt->rt_multipath_alg != IP_MP_ALG_NONE) {
2661 __u32 alg = rt->rt_multipath_alg;
2663 RTA_PUT(skb, RTA_MP_ALGO, 4, &alg);
2667 RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_spec_dst);
2668 else if (rt->rt_src != rt->fl.fl4_src)
2669 RTA_PUT(skb, RTA_PREFSRC, 4, &rt->rt_src);
2670 if (rt->rt_dst != rt->rt_gateway)
2671 RTA_PUT(skb, RTA_GATEWAY, 4, &rt->rt_gateway);
2672 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
2673 goto rtattr_failure;
2674 ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
2675 ci.rta_used = rt->u.dst.__use;
2676 ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
2677 if (rt->u.dst.expires)
2678 ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
2681 ci.rta_error = rt->u.dst.error;
2682 ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
2684 ci.rta_id = rt->peer->ip_id_count;
2685 if (rt->peer->tcp_ts_stamp) {
2686 ci.rta_ts = rt->peer->tcp_ts;
2687 ci.rta_tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
2690 #ifdef CONFIG_IP_MROUTE
2691 eptr = (struct rtattr *)skb->tail;
2693 RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
2695 #ifdef CONFIG_IP_MROUTE
2696 u32 dst = rt->rt_dst;
2698 if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
2699 ipv4_devconf.mc_forwarding) {
2700 int err = ipmr_get_route(skb, r, nowait);
2707 if (err == -EMSGSIZE)
2709 ((struct rta_cacheinfo *)RTA_DATA(eptr))->rta_error = err;
2714 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
2717 nlh->nlmsg_len = skb->tail - b;
2722 skb_trim(skb, b - skb->data);
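/* The attribute stream assembled above (RTA_DST, RTA_SRC, RTA_OIF,
 * RTA_GATEWAY, RTA_PREFSRC, the metrics and RTA_CACHEINFO) is what
 * userspace decodes when it displays a cached route; roughly, and with
 * made-up addresses:
 *
 *     $ ip route get 10.0.0.1
 *     10.0.0.1 via 192.168.1.1 dev eth0  src 192.168.1.10
 *         cache  mtu 1500 advmss 1460
 */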
2726 int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
2728 struct rtattr **rta = arg;
2729 struct rtmsg *rtm = NLMSG_DATA(nlh);
2730 struct rtable *rt = NULL;
2735 struct sk_buff *skb;
2737 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2741 /* Reserve room for dummy headers, this skb can pass
2742 through good chunk of routing engine.
2744 skb->mac.raw = skb->nh.raw = skb->data;
2746 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2747 skb->nh.iph->protocol = IPPROTO_ICMP;
2748 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2750 if (rta[RTA_SRC - 1])
2751 memcpy(&src, RTA_DATA(rta[RTA_SRC - 1]), 4);
2752 if (rta[RTA_DST - 1])
2753 memcpy(&dst, RTA_DATA(rta[RTA_DST - 1]), 4);
2754 if (rta[RTA_IIF - 1])
2755 memcpy(&iif, RTA_DATA(rta[RTA_IIF - 1]), sizeof(int));
2758 struct net_device *dev = __dev_get_by_index(iif);
2762 skb->protocol = htons(ETH_P_IP);
2765 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2767 rt = (struct rtable *)skb->dst;
2768 if (!err && rt->u.dst.error)
2769 err = -rt->u.dst.error;
2771 struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dst,
2773 .tos = rtm->rtm_tos } } };
2775 if (rta[RTA_OIF - 1])
2776 memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
2778 err = ip_route_output_key(&rt, &fl);
2783 skb->dst = &rt->u.dst;
2784 if (rtm->rtm_flags & RTM_F_NOTIFY)
2785 rt->rt_flags |= RTCF_NOTIFY;
2787 NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;
2789 err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
2790 RTM_NEWROUTE, 0, 0);
2798 err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
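/* The handler above is the kernel side of an RTM_GETROUTE request: with
 * RTA_IIF the query is resolved as if the packet had arrived on that
 * interface (ip_route_input), otherwise as locally generated output
 * (ip_route_output_key), and the resulting entry is returned via
 * rt_fill_info(). Illustrative triggers from userspace:
 *
 *     $ ip route get 10.1.2.3
 *     $ ip route get 10.1.2.3 from 10.0.0.1 iif eth1
 */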
2808 int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
2815 s_idx = idx = cb->args[1];
2816 for (h = 0; h <= rt_hash_mask; h++) {
2817 if (h < s_h) continue;
2821 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
2822 rt = rcu_dereference(rt->u.rt_next), idx++) {
2825 skb->dst = dst_clone(&rt->u.dst);
2826 if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
2827 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
2828 1, NLM_F_MULTI) <= 0) {
2829 dst_release(xchg(&skb->dst, NULL));
2830 rcu_read_unlock_bh();
2833 dst_release(xchg(&skb->dst, NULL));
2835 rcu_read_unlock_bh();
2844 void ip_rt_multicast_event(struct in_device *in_dev)
2849 #ifdef CONFIG_SYSCTL
2850 static int flush_delay;
2852 static int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write,
2853 struct file *filp, void __user *buffer,
2854 size_t *lenp, loff_t *ppos)
2857 proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
2858 rt_cache_flush(flush_delay);
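/* Writing to /proc/sys/net/ipv4/route/flush ends up here: proc_dointvec()
 * parses the written value into flush_delay and rt_cache_flush() schedules
 * a flush of the routing cache accordingly. Illustrative usage:
 *
 *     # echo 0 > /proc/sys/net/ipv4/route/flush
 */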
2865 static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
2868 void __user *oldval,
2869 size_t __user *oldlenp,
2870 void __user *newval,
2875 if (newlen != sizeof(int))
2877 if (get_user(delay, (int __user *)newval))
2879 rt_cache_flush(delay);
2883 ctl_table ipv4_route_table[] = {
2885 .ctl_name = NET_IPV4_ROUTE_FLUSH,
2886 .procname = "flush",
2887 .data = &flush_delay,
2888 .maxlen = sizeof(int),
2890 .proc_handler = &ipv4_sysctl_rtcache_flush,
2891 .strategy = &ipv4_sysctl_rtcache_flush_strategy,
2894 .ctl_name = NET_IPV4_ROUTE_MIN_DELAY,
2895 .procname = "min_delay",
2896 .data = &ip_rt_min_delay,
2897 .maxlen = sizeof(int),
2899 .proc_handler = &proc_dointvec_jiffies,
2900 .strategy = &sysctl_jiffies,
2903 .ctl_name = NET_IPV4_ROUTE_MAX_DELAY,
2904 .procname = "max_delay",
2905 .data = &ip_rt_max_delay,
2906 .maxlen = sizeof(int),
2908 .proc_handler = &proc_dointvec_jiffies,
2909 .strategy = &sysctl_jiffies,
2912 .ctl_name = NET_IPV4_ROUTE_GC_THRESH,
2913 .procname = "gc_thresh",
2914 .data = &ipv4_dst_ops.gc_thresh,
2915 .maxlen = sizeof(int),
2917 .proc_handler = &proc_dointvec,
2920 .ctl_name = NET_IPV4_ROUTE_MAX_SIZE,
2921 .procname = "max_size",
2922 .data = &ip_rt_max_size,
2923 .maxlen = sizeof(int),
2925 .proc_handler = &proc_dointvec,
2928 /* Deprecated. Use gc_min_interval_ms */
2930 .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL,
2931 .procname = "gc_min_interval",
2932 .data = &ip_rt_gc_min_interval,
2933 .maxlen = sizeof(int),
2935 .proc_handler = &proc_dointvec_jiffies,
2936 .strategy = &sysctl_jiffies,
2939 .ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
2940 .procname = "gc_min_interval_ms",
2941 .data = &ip_rt_gc_min_interval,
2942 .maxlen = sizeof(int),
2944 .proc_handler = &proc_dointvec_ms_jiffies,
2945 .strategy = &sysctl_ms_jiffies,
2948 .ctl_name = NET_IPV4_ROUTE_GC_TIMEOUT,
2949 .procname = "gc_timeout",
2950 .data = &ip_rt_gc_timeout,
2951 .maxlen = sizeof(int),
2953 .proc_handler = &proc_dointvec_jiffies,
2954 .strategy = &sysctl_jiffies,
2957 .ctl_name = NET_IPV4_ROUTE_GC_INTERVAL,
2958 .procname = "gc_interval",
2959 .data = &ip_rt_gc_interval,
2960 .maxlen = sizeof(int),
2962 .proc_handler = &proc_dointvec_jiffies,
2963 .strategy = &sysctl_jiffies,
2966 .ctl_name = NET_IPV4_ROUTE_REDIRECT_LOAD,
2967 .procname = "redirect_load",
2968 .data = &ip_rt_redirect_load,
2969 .maxlen = sizeof(int),
2971 .proc_handler = &proc_dointvec,
2974 .ctl_name = NET_IPV4_ROUTE_REDIRECT_NUMBER,
2975 .procname = "redirect_number",
2976 .data = &ip_rt_redirect_number,
2977 .maxlen = sizeof(int),
2979 .proc_handler = &proc_dointvec,
2982 .ctl_name = NET_IPV4_ROUTE_REDIRECT_SILENCE,
2983 .procname = "redirect_silence",
2984 .data = &ip_rt_redirect_silence,
2985 .maxlen = sizeof(int),
2987 .proc_handler = &proc_dointvec,
2990 .ctl_name = NET_IPV4_ROUTE_ERROR_COST,
2991 .procname = "error_cost",
2992 .data = &ip_rt_error_cost,
2993 .maxlen = sizeof(int),
2995 .proc_handler = &proc_dointvec,
2998 .ctl_name = NET_IPV4_ROUTE_ERROR_BURST,
2999 .procname = "error_burst",
3000 .data = &ip_rt_error_burst,
3001 .maxlen = sizeof(int),
3003 .proc_handler = &proc_dointvec,
3006 .ctl_name = NET_IPV4_ROUTE_GC_ELASTICITY,
3007 .procname = "gc_elasticity",
3008 .data = &ip_rt_gc_elasticity,
3009 .maxlen = sizeof(int),
3011 .proc_handler = &proc_dointvec,
3014 .ctl_name = NET_IPV4_ROUTE_MTU_EXPIRES,
3015 .procname = "mtu_expires",
3016 .data = &ip_rt_mtu_expires,
3017 .maxlen = sizeof(int),
3019 .proc_handler = &proc_dointvec_jiffies,
3020 .strategy = &sysctl_jiffies,
3023 .ctl_name = NET_IPV4_ROUTE_MIN_PMTU,
3024 .procname = "min_pmtu",
3025 .data = &ip_rt_min_pmtu,
3026 .maxlen = sizeof(int),
3028 .proc_handler = &proc_dointvec,
3031 .ctl_name = NET_IPV4_ROUTE_MIN_ADVMSS,
3032 .procname = "min_adv_mss",
3033 .data = &ip_rt_min_advmss,
3034 .maxlen = sizeof(int),
3036 .proc_handler = &proc_dointvec,
3039 .ctl_name = NET_IPV4_ROUTE_SECRET_INTERVAL,
3040 .procname = "secret_interval",
3041 .data = &ip_rt_secret_interval,
3042 .maxlen = sizeof(int),
3044 .proc_handler = &proc_dointvec_jiffies,
3045 .strategy = &sysctl_jiffies,
3051 #ifdef CONFIG_NET_CLS_ROUTE
3052 struct ip_rt_acct *ip_rt_acct;
3054 /* This code sucks. But you should have seen it before! --RR */
3056 /* IP route accounting ptr for this logical cpu number. */
3057 #define IP_RT_ACCT_CPU(i) (ip_rt_acct + i * 256)
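/* ip_rt_acct is one contiguous allocation holding a 256-slot accounting
 * table per CPU, indexed by routing realm; IP_RT_ACCT_CPU(i) points at the
 * start of CPU i's slice, and ip_rt_acct_read() below folds the per-CPU
 * slices together for /proc/net/rt_acct.
 */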
3059 #ifdef CONFIG_PROC_FS
3060 static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
3061 int length, int *eof, void *data)
3065 if ((offset & 3) || (length & 3))
3068 if (offset >= sizeof(struct ip_rt_acct) * 256) {
3073 if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
3074 length = sizeof(struct ip_rt_acct) * 256 - offset;
3078 offset /= sizeof(u32);
3081 u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset;
3082 u32 *dst = (u32 *) buffer;
3084 /* Copy first cpu. */
3086 memcpy(dst, src, length);
3088 /* Add the other cpus in, one int at a time */
3089 for_each_possible_cpu(i) {
3092 src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset;
3094 for (j = 0; j < length/4; j++)
3100 #endif /* CONFIG_PROC_FS */
3101 #endif /* CONFIG_NET_CLS_ROUTE */
3103 static __initdata unsigned long rhash_entries;
3104 static int __init set_rhash_entries(char *str)
3108 rhash_entries = simple_strtoul(str, &str, 0);
3111 __setup("rhash_entries=", set_rhash_entries);
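/* The route cache hash can be sized explicitly at boot instead of being
 * derived from available memory, e.g. by appending "rhash_entries=65536"
 * (an illustrative value) to the kernel command line; the value is handed
 * to alloc_large_system_hash() in ip_rt_init() below.
 */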
3113 int __init ip_rt_init(void)
3117 rt_hash_rnd = (int) ((num_physpages ^ (num_physpages>>8)) ^
3118 (jiffies ^ (jiffies >> 7)));
3120 #ifdef CONFIG_NET_CLS_ROUTE
3124 (PAGE_SIZE << order) < 256 * sizeof(struct ip_rt_acct) * NR_CPUS; order++)
3126 ip_rt_acct = (struct ip_rt_acct *)__get_free_pages(GFP_KERNEL, order);
3128 panic("IP: failed to allocate ip_rt_acct\n");
3129 memset(ip_rt_acct, 0, PAGE_SIZE << order);
3133 ipv4_dst_ops.kmem_cachep = kmem_cache_create("ip_dst_cache",
3134 sizeof(struct rtable),
3135 0, SLAB_HWCACHE_ALIGN,
3138 if (!ipv4_dst_ops.kmem_cachep)
3139 panic("IP: failed to allocate ip_dst_cache\n");
3141 rt_hash_table = (struct rt_hash_bucket *)
3142 alloc_large_system_hash("IP route cache",
3143 sizeof(struct rt_hash_bucket),
3145 (num_physpages >= 128 * 1024) ?
3151 memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
3152 rt_hash_lock_init();
3154 ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
3155 ip_rt_max_size = (rt_hash_mask + 1) * 16;
3160 init_timer(&rt_flush_timer);
3161 rt_flush_timer.function = rt_run_flush;
3162 init_timer(&rt_periodic_timer);
3163 rt_periodic_timer.function = rt_check_expire;
3164 init_timer(&rt_secret_timer);
3165 rt_secret_timer.function = rt_secret_rebuild;
3167 /* All the timers, started at system startup tend
3168 to synchronize. Perturb it a bit.
3170 rt_periodic_timer.expires = jiffies + net_random() % ip_rt_gc_interval +
3172 add_timer(&rt_periodic_timer);
3174 rt_secret_timer.expires = jiffies + net_random() % ip_rt_secret_interval +
3175 ip_rt_secret_interval;
3176 add_timer(&rt_secret_timer);
3178 #ifdef CONFIG_PROC_FS
3180 struct proc_dir_entry *rtstat_pde = NULL; /* keep gcc happy */
3181 if (!proc_net_fops_create("rt_cache", S_IRUGO, &rt_cache_seq_fops) ||
3182 !(rtstat_pde = create_proc_entry("rt_cache", S_IRUGO,
3186 rtstat_pde->proc_fops = &rt_cpu_seq_fops;
3188 #ifdef CONFIG_NET_CLS_ROUTE
3189 create_proc_read_entry("rt_acct", 0, proc_net
, ip_rt_acct_read
, NULL
);
3199 EXPORT_SYMBOL(__ip_select_ident
);
3200 EXPORT_SYMBOL(ip_route_input
);
3201 EXPORT_SYMBOL(ip_route_output_key
);