net: ipv4: fix RCU races on dst refcounts
net/ipv4/route.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *		Miquel van Smoorenburg	:	BSD API fixes.
 *		Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *		Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *		Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *		Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *		Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *		Robert Olsson	:	Added rt_cache statistics
 *		Arnaldo C. Melo	:	Convert proc stuff to seq_file
 *		Eric Dumazet	:	hashed spinlocks and rt_check_expire() fixes.
 *		Ilia Sotnikov	:	Ignore TOS on PMTUD and Redirect
 *		Ilia Sotnikov	:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>

#define RT_FL_TOS(oldflp4) \
    ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                              struct sk_buff *skb, u32 mtu);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
                           struct sk_buff *skb);

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                            int how)
{
}

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
    WARN_ON(1);
    return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
                                           struct sk_buff *skb,
                                           const void *daddr);
static struct dst_ops ipv4_dst_ops = {
    .family          = AF_INET,
    .protocol        = cpu_to_be16(ETH_P_IP),
    .check           = ipv4_dst_check,
    .default_advmss  = ipv4_default_advmss,
    .mtu             = ipv4_mtu,
    .cow_metrics     = ipv4_cow_metrics,
    .ifdown          = ipv4_dst_ifdown,
    .negative_advice = ipv4_negative_advice,
    .link_failure    = ipv4_link_failure,
    .update_pmtu     = ip_rt_update_pmtu,
    .redirect        = ip_do_redirect,
    .local_out       = __ip_local_out,
    .neigh_lookup    = ipv4_neigh_lookup,
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
    TC_PRIO_BESTEFFORT,
    ECN_OR_COST(BESTEFFORT),
    TC_PRIO_BESTEFFORT,
    ECN_OR_COST(BESTEFFORT),
    TC_PRIO_BULK,
    ECN_OR_COST(BULK),
    TC_PRIO_BULK,
    ECN_OR_COST(BULK),
    TC_PRIO_INTERACTIVE,
    ECN_OR_COST(INTERACTIVE),
    TC_PRIO_INTERACTIVE,
    ECN_OR_COST(INTERACTIVE),
    TC_PRIO_INTERACTIVE_BULK,
    ECN_OR_COST(INTERACTIVE_BULK),
    TC_PRIO_INTERACTIVE_BULK,
    ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
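/* Note on indexing (based on rt_tos2priority() in include/net/route.h in
 * this tree): callers index the table with the TOS byte shifted down one
 * bit, i.e. ip_tos2prio[IPTOS_TOS(tos) >> 1], so each adjacent pair of
 * entries covers one TOS value with and without the low (ECN-overlapping)
 * bit set; ECN_OR_COST() marks the latter of each pair.
 */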
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)

static inline int rt_genid(struct net *net)
{
    return atomic_read(&net->ipv4.rt_genid);
}
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
    if (*pos)
        return NULL;
    return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    ++*pos;
    return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
    if (v == SEQ_START_TOKEN)
        seq_printf(seq, "%-127s\n",
                   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
                   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
                   "HHUptod\tSpecDst");
    return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
    .start = rt_cache_seq_start,
    .next  = rt_cache_seq_next,
    .stop  = rt_cache_seq_stop,
    .show  = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
    return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
    .owner   = THIS_MODULE,
    .open    = rt_cache_seq_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = seq_release,
};

static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
    int cpu;

    if (*pos == 0)
        return SEQ_START_TOKEN;

    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
        if (!cpu_possible(cpu))
            continue;
        *pos = cpu+1;
        return &per_cpu(rt_cache_stat, cpu);
    }
    return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    int cpu;

    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
        if (!cpu_possible(cpu))
            continue;
        *pos = cpu+1;
        return &per_cpu(rt_cache_stat, cpu);
    }
    return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
    struct rt_cache_stat *st = v;

    if (v == SEQ_START_TOKEN) {
        seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
        return 0;
    }

    seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
               " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
               dst_entries_get_slow(&ipv4_dst_ops),
               st->in_hit,
               st->in_slow_tot,
               st->in_slow_mc,
               st->in_no_route,
               st->in_brd,
               st->in_martian_dst,
               st->in_martian_src,

               st->out_hit,
               st->out_slow_tot,
               st->out_slow_mc,

               st->gc_total,
               st->gc_ignored,
               st->gc_goal_miss,
               st->gc_dst_overflow,
               st->in_hlist_search,
               st->out_hlist_search
        );
    return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
    .start = rt_cpu_seq_start,
    .next  = rt_cpu_seq_next,
    .stop  = rt_cpu_seq_stop,
    .show  = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
    return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
    .owner   = THIS_MODULE,
    .open    = rt_cpu_seq_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
    struct ip_rt_acct *dst, *src;
    unsigned int i, j;

    dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
    if (!dst)
        return -ENOMEM;

    for_each_possible_cpu(i) {
        src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
        for (j = 0; j < 256; j++) {
            dst[j].o_bytes   += src[j].o_bytes;
            dst[j].o_packets += src[j].o_packets;
            dst[j].i_bytes   += src[j].i_bytes;
            dst[j].i_packets += src[j].i_packets;
        }
    }

    seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
    kfree(dst);
    return 0;
}

static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
    return single_open(file, rt_acct_proc_show, NULL);
}

static const struct file_operations rt_acct_proc_fops = {
    .owner   = THIS_MODULE,
    .open    = rt_acct_proc_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = single_release,
};
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
    struct proc_dir_entry *pde;

    pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
                               &rt_cache_seq_fops);
    if (!pde)
        goto err1;

    pde = proc_create("rt_cache", S_IRUGO,
                      net->proc_net_stat, &rt_cpu_seq_fops);
    if (!pde)
        goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
    pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
    if (!pde)
        goto err3;
#endif
    return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
    remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
    remove_proc_entry("rt_cache", net->proc_net);
err1:
    return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
    remove_proc_entry("rt_cache", net->proc_net_stat);
    remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
    remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
    .init = ip_rt_do_proc_init,
    .exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
    return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
    return 0;
}
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
{
    return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}

/*
 * Perturbation of rt_genid by a small quantity [1..256].
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without reusing a recent rt_genid.
 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
    unsigned char shuffle;

    get_random_bytes(&shuffle, sizeof(shuffle));
    atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}

/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
    rt_cache_invalidate(net);
}
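/* Every cached route records the generation it was created in (rt_genid,
 * stamped at allocation below); rt_is_expired() compares that against the
 * current counter, so bumping net->ipv4.rt_genid invalidates every cached
 * dst at once without walking any list.
 */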
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
                                           struct sk_buff *skb,
                                           const void *daddr)
{
    struct net_device *dev = dst->dev;
    const __be32 *pkey = daddr;
    const struct rtable *rt;
    struct neighbour *n;

    rt = (const struct rtable *) dst;
    if (rt->rt_gateway)
        pkey = (const __be32 *) &rt->rt_gateway;
    else if (skb)
        pkey = &ip_hdr(skb)->daddr;

    n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
    if (n)
        return n;
    return neigh_create(&arp_tbl, pkey, dev);
}
/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we can still generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID that is unique over a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
    static DEFINE_SPINLOCK(ip_fb_id_lock);
    static u32 ip_fallback_id;
    u32 salt;

    spin_lock_bh(&ip_fb_id_lock);
    salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
    iph->id = htons(salt & 0xFFFF);
    ip_fallback_id = salt;
    spin_unlock_bh(&ip_fb_id_lock);
}

void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
    struct net *net = dev_net(dst->dev);
    struct inet_peer *peer;

    peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
    if (peer) {
        iph->id = htons(inet_getid(peer, more));
        inet_putpeer(peer);
        return;
    }

    ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);
static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
                             const struct iphdr *iph,
                             int oif, u8 tos,
                             u8 prot, u32 mark, int flow_flags)
{
    if (sk) {
        const struct inet_sock *inet = inet_sk(sk);

        oif = sk->sk_bound_dev_if;
        mark = sk->sk_mark;
        tos = RT_CONN_FLAGS(sk);
        prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
    }
    flowi4_init_output(fl4, oif, mark, tos,
                       RT_SCOPE_UNIVERSE, prot,
                       flow_flags,
                       iph->daddr, iph->saddr, 0, 0);
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
                               const struct sock *sk)
{
    const struct iphdr *iph = ip_hdr(skb);
    int oif = skb->dev->ifindex;
    u8 tos = RT_TOS(iph->tos);
    u8 prot = iph->protocol;
    u32 mark = skb->mark;

    __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
    const struct inet_sock *inet = inet_sk(sk);
    const struct ip_options_rcu *inet_opt;
    __be32 daddr = inet->inet_daddr;

    rcu_read_lock();
    inet_opt = rcu_dereference(inet->inet_opt);
    if (inet_opt && inet_opt->opt.srr)
        daddr = inet_opt->opt.faddr;
    flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
                       RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                       inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
                       inet_sk_flowi_flags(sk),
                       daddr, inet->inet_saddr, 0, 0);
    rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
                                 const struct sk_buff *skb)
{
    if (skb)
        build_skb_flow_key(fl4, skb, sk);
    else
        build_sk_flow_key(fl4, sk);
}
static DEFINE_SEQLOCK(fnhe_seqlock);

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
    struct fib_nh_exception *fnhe, *oldest;

    oldest = rcu_dereference(hash->chain);
    for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
         fnhe = rcu_dereference(fnhe->fnhe_next)) {
        if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
            oldest = fnhe;
    }
    return oldest;
}

static inline u32 fnhe_hashfun(__be32 daddr)
{
    u32 hval;

    hval = (__force u32) daddr;
    hval ^= (hval >> 11) ^ (hval >> 22);

    return hval & (FNHE_HASH_SIZE - 1);
}
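/* The xor-fold above only mixes the address bits cheaply; the final mask
 * relies on FNHE_HASH_SIZE being a power of two, so that
 * hval & (FNHE_HASH_SIZE - 1) is an inexpensive modulo yielding the
 * bucket index.
 */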
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
                                  u32 pmtu, unsigned long expires)
{
    struct fnhe_hash_bucket *hash;
    struct fib_nh_exception *fnhe;
    int depth;
    u32 hval = fnhe_hashfun(daddr);

    write_seqlock_bh(&fnhe_seqlock);

    hash = nh->nh_exceptions;
    if (!hash) {
        hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
        if (!hash)
            goto out_unlock;
        nh->nh_exceptions = hash;
    }

    hash += hval;

    depth = 0;
    for (fnhe = rcu_dereference(hash->chain); fnhe;
         fnhe = rcu_dereference(fnhe->fnhe_next)) {
        if (fnhe->fnhe_daddr == daddr)
            break;
        depth++;
    }

    if (fnhe) {
        if (gw)
            fnhe->fnhe_gw = gw;
        if (pmtu) {
            fnhe->fnhe_pmtu = pmtu;
            fnhe->fnhe_expires = expires;
        }
    } else {
        if (depth > FNHE_RECLAIM_DEPTH)
            fnhe = fnhe_oldest(hash);
        else {
            fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
            if (!fnhe)
                goto out_unlock;

            fnhe->fnhe_next = hash->chain;
            rcu_assign_pointer(hash->chain, fnhe);
        }
        fnhe->fnhe_daddr = daddr;
        fnhe->fnhe_gw = gw;
        fnhe->fnhe_pmtu = pmtu;
        fnhe->fnhe_expires = expires;
    }

    fnhe->fnhe_stamp = jiffies;

out_unlock:
    write_sequnlock_bh(&fnhe_seqlock);
    return;
}
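/* Writers above publish under fnhe_seqlock; the lockless reader is
 * rt_bind_exception() below, which retries its read_seqbegin()/
 * read_seqretry() loop until it observes a consistent
 * daddr/gw/pmtu/expires tuple, so a route never binds a half-updated
 * exception.
 */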
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
                             bool kill_route)
{
    __be32 new_gw = icmp_hdr(skb)->un.gateway;
    __be32 old_gw = ip_hdr(skb)->saddr;
    struct net_device *dev = skb->dev;
    struct in_device *in_dev;
    struct fib_result res;
    struct neighbour *n;
    struct net *net;

    switch (icmp_hdr(skb)->code & 7) {
    case ICMP_REDIR_NET:
    case ICMP_REDIR_NETTOS:
    case ICMP_REDIR_HOST:
    case ICMP_REDIR_HOSTTOS:
        break;

    default:
        return;
    }

    if (rt->rt_gateway != old_gw)
        return;

    in_dev = __in_dev_get_rcu(dev);
    if (!in_dev)
        return;

    net = dev_net(dev);
    if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
        ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
        ipv4_is_zeronet(new_gw))
        goto reject_redirect;

    if (!IN_DEV_SHARED_MEDIA(in_dev)) {
        if (!inet_addr_onlink(in_dev, new_gw, old_gw))
            goto reject_redirect;
        if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
            goto reject_redirect;
    } else {
        if (inet_addr_type(net, new_gw) != RTN_UNICAST)
            goto reject_redirect;
    }

    n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
    if (n) {
        if (!(n->nud_state & NUD_VALID)) {
            neigh_event_send(n, NULL);
        } else {
            if (fib_lookup(net, fl4, &res) == 0) {
                struct fib_nh *nh = &FIB_RES_NH(res);

                update_or_create_fnhe(nh, fl4->daddr, new_gw,
                                      0, 0);
            }
            if (kill_route)
                rt->dst.obsolete = DST_OBSOLETE_KILL;
            call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
        }
        neigh_release(n);
    }
    return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
    if (IN_DEV_LOG_MARTIANS(in_dev)) {
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        __be32 daddr = iph->daddr;
        __be32 saddr = iph->saddr;

        net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
                             "  Advised path = %pI4 -> %pI4\n",
                             &old_gw, dev->name, &new_gw,
                             &saddr, &daddr);
    }
#endif
    ;
}
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
    struct rtable *rt;
    struct flowi4 fl4;

    rt = (struct rtable *) dst;

    ip_rt_build_flow_key(&fl4, sk, skb);
    __ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
    struct rtable *rt = (struct rtable *)dst;
    struct dst_entry *ret = dst;

    if (rt) {
        if (dst->obsolete > 0) {
            ip_rt_put(rt);
            ret = NULL;
        } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
                   rt->dst.expires) {
            ip_rt_put(rt);
            ret = NULL;
        }
    }
    return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
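/* Worked example with the defaults above (ip_rt_redirect_load = HZ/50,
 * ip_rt_redirect_number = 9): the k-th redirect is allowed once jiffies
 * passes rate_last + (HZ/50 << rate_tokens), i.e. roughly 20ms, 40ms,
 * 80ms, ... ~5s for k = 0..8, after which we stay silent until the peer
 * has triggered no redirects for ip_rt_redirect_silence
 * ((HZ/50) << 10, about 20s) and rate_tokens is reset below.
 */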
void ip_rt_send_redirect(struct sk_buff *skb)
{
    struct rtable *rt = skb_rtable(skb);
    struct in_device *in_dev;
    struct inet_peer *peer;
    struct net *net;
    int log_martians;

    rcu_read_lock();
    in_dev = __in_dev_get_rcu(rt->dst.dev);
    if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
        rcu_read_unlock();
        return;
    }
    log_martians = IN_DEV_LOG_MARTIANS(in_dev);
    rcu_read_unlock();

    net = dev_net(rt->dst.dev);
    peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
    if (!peer) {
        icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
        return;
    }

    /* No redirected packets during ip_rt_redirect_silence;
     * reset the algorithm.
     */
    if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
        peer->rate_tokens = 0;

    /* Too many ignored redirects; do not send anything,
     * set dst.rate_last to the last seen redirected packet.
     */
    if (peer->rate_tokens >= ip_rt_redirect_number) {
        peer->rate_last = jiffies;
        goto out_put_peer;
    }

    /* Check for load limit; set rate_last to the latest sent
     * redirect.
     */
    if (peer->rate_tokens == 0 ||
        time_after(jiffies,
                   (peer->rate_last +
                    (ip_rt_redirect_load << peer->rate_tokens)))) {
        icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
        peer->rate_last = jiffies;
        ++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
        if (log_martians &&
            peer->rate_tokens == ip_rt_redirect_number)
            net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
                                 &ip_hdr(skb)->saddr, inet_iif(skb),
                                 &ip_hdr(skb)->daddr, &rt->rt_gateway);
#endif
    }
out_put_peer:
    inet_putpeer(peer);
}
static int ip_error(struct sk_buff *skb)
{
    struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
    struct rtable *rt = skb_rtable(skb);
    struct inet_peer *peer;
    unsigned long now;
    struct net *net;
    bool send;
    int code;

    net = dev_net(rt->dst.dev);
    if (!IN_DEV_FORWARD(in_dev)) {
        switch (rt->dst.error) {
        case EHOSTUNREACH:
            IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
            break;

        case ENETUNREACH:
            IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
            break;
        }
        goto out;
    }

    switch (rt->dst.error) {
    case EINVAL:
    default:
        goto out;
    case EHOSTUNREACH:
        code = ICMP_HOST_UNREACH;
        break;
    case ENETUNREACH:
        code = ICMP_NET_UNREACH;
        IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
        break;
    case EACCES:
        code = ICMP_PKT_FILTERED;
        break;
    }

    peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);

    send = true;
    if (peer) {
        now = jiffies;
        peer->rate_tokens += now - peer->rate_last;
        if (peer->rate_tokens > ip_rt_error_burst)
            peer->rate_tokens = ip_rt_error_burst;
        peer->rate_last = now;
        if (peer->rate_tokens >= ip_rt_error_cost)
            peer->rate_tokens -= ip_rt_error_cost;
        else
            send = false;
        inet_putpeer(peer);
    }
    if (send)
        icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:    kfree_skb(skb);
    return 0;
}
static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
    struct fib_result res;

    if (mtu < ip_rt_min_pmtu)
        mtu = ip_rt_min_pmtu;

    if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
        struct fib_nh *nh = &FIB_RES_NH(res);

        update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
                              jiffies + ip_rt_mtu_expires);
    }
    return mtu;
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                              struct sk_buff *skb, u32 mtu)
{
    struct rtable *rt = (struct rtable *) dst;
    struct flowi4 fl4;

    ip_rt_build_flow_key(&fl4, sk, skb);
    mtu = __ip_rt_update_pmtu(rt, &fl4, mtu);

    if (!rt->rt_pmtu) {
        dst->obsolete = DST_OBSOLETE_KILL;
    } else {
        rt->rt_pmtu = mtu;
        dst_set_expires(&rt->dst, ip_rt_mtu_expires);
    }
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
                      int oif, u32 mark, u8 protocol, int flow_flags)
{
    const struct iphdr *iph = (const struct iphdr *) skb->data;
    struct flowi4 fl4;
    struct rtable *rt;

    __build_flow_key(&fl4, NULL, iph, oif,
                     RT_TOS(iph->tos), protocol, mark, flow_flags);
    rt = __ip_route_output_key(net, &fl4);
    if (!IS_ERR(rt)) {
        __ip_rt_update_pmtu(rt, &fl4, mtu);
        ip_rt_put(rt);
    }
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
    const struct iphdr *iph = (const struct iphdr *) skb->data;
    struct flowi4 fl4;
    struct rtable *rt;

    __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
    rt = __ip_route_output_key(sock_net(sk), &fl4);
    if (!IS_ERR(rt)) {
        __ip_rt_update_pmtu(rt, &fl4, mtu);
        ip_rt_put(rt);
    }
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
                   int oif, u32 mark, u8 protocol, int flow_flags)
{
    const struct iphdr *iph = (const struct iphdr *) skb->data;
    struct flowi4 fl4;
    struct rtable *rt;

    __build_flow_key(&fl4, NULL, iph, oif,
                     RT_TOS(iph->tos), protocol, mark, flow_flags);
    rt = __ip_route_output_key(net, &fl4);
    if (!IS_ERR(rt)) {
        __ip_do_redirect(rt, skb, &fl4, false);
        ip_rt_put(rt);
    }
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
    const struct iphdr *iph = (const struct iphdr *) skb->data;
    struct flowi4 fl4;
    struct rtable *rt;

    __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
    rt = __ip_route_output_key(sock_net(sk), &fl4);
    if (!IS_ERR(rt)) {
        __ip_do_redirect(rt, skb, &fl4, false);
        ip_rt_put(rt);
    }
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
    struct rtable *rt = (struct rtable *) dst;

    /* All IPV4 dsts are created with ->obsolete set to the value
     * DST_OBSOLETE_FORCE_CHK which forces validation calls down
     * into this function always.
     *
     * When a PMTU/redirect information update invalidates a
     * route, this is indicated by setting obsolete to
     * DST_OBSOLETE_KILL.
     */
    if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
        return NULL;
    return dst;
}

static void ipv4_link_failure(struct sk_buff *skb)
{
    struct rtable *rt;

    icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

    rt = skb_rtable(skb);
    if (rt)
        dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct sk_buff *skb)
{
    pr_debug("%s: %pI4 -> %pI4, %s\n",
             __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
             skb->dev ? skb->dev->name : "?");
    kfree_skb(skb);
    WARN_ON(1);
    return 0;
}
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be unaligned
 * in IP options!
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
    __be32 src;

    if (rt_is_output_route(rt))
        src = ip_hdr(skb)->saddr;
    else {
        struct fib_result res;
        struct flowi4 fl4;
        struct iphdr *iph;

        iph = ip_hdr(skb);

        memset(&fl4, 0, sizeof(fl4));
        fl4.daddr = iph->daddr;
        fl4.saddr = iph->saddr;
        fl4.flowi4_tos = RT_TOS(iph->tos);
        fl4.flowi4_oif = rt->dst.dev->ifindex;
        fl4.flowi4_iif = skb->dev->ifindex;
        fl4.flowi4_mark = skb->mark;

        rcu_read_lock();
        if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
            src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
        else
            src = inet_select_addr(rt->dst.dev,
                                   rt_nexthop(rt, iph->daddr),
                                   RT_SCOPE_UNIVERSE);
        rcu_read_unlock();
    }
    memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
    if (!(rt->dst.tclassid & 0xFFFF))
        rt->dst.tclassid |= tag & 0xFFFF;
    if (!(rt->dst.tclassid & 0xFFFF0000))
        rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
    unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);

    if (advmss == 0) {
        advmss = max_t(unsigned int, dst->dev->mtu - 40,
                       ip_rt_min_advmss);
        if (advmss > 65535 - 40)
            advmss = 65535 - 40;
    }
    return advmss;
}

static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
    const struct rtable *rt = (const struct rtable *) dst;
    unsigned int mtu = rt->rt_pmtu;

    if (mtu && time_after_eq(jiffies, rt->dst.expires))
        mtu = 0;

    if (!mtu)
        mtu = dst_metric_raw(dst, RTAX_MTU);

    if (mtu && rt_is_output_route(rt))
        return mtu;

    mtu = dst->dev->mtu;

    if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
        if (rt->rt_gateway && mtu > 576)
            mtu = 576;
    }

    if (mtu > IP_MAX_MTU)
        mtu = IP_MAX_MTU;

    return mtu;
}
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
    struct fnhe_hash_bucket *hash = nh->nh_exceptions;
    struct fib_nh_exception *fnhe;
    u32 hval;

    if (!hash)
        return NULL;

    hval = fnhe_hashfun(daddr);

    for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
         fnhe = rcu_dereference(fnhe->fnhe_next)) {
        if (fnhe->fnhe_daddr == daddr)
            return fnhe;
    }
    return NULL;
}
static void rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
                              __be32 daddr)
{
    __be32 fnhe_daddr, gw;
    unsigned long expires;
    unsigned int seq;
    u32 pmtu;

restart:
    seq = read_seqbegin(&fnhe_seqlock);
    fnhe_daddr = fnhe->fnhe_daddr;
    gw = fnhe->fnhe_gw;
    pmtu = fnhe->fnhe_pmtu;
    expires = fnhe->fnhe_expires;
    if (read_seqretry(&fnhe_seqlock, seq))
        goto restart;

    if (daddr != fnhe_daddr)
        return;

    if (pmtu) {
        unsigned long diff = expires - jiffies;

        if (time_before(jiffies, expires)) {
            rt->rt_pmtu = pmtu;
            dst_set_expires(&rt->dst, diff);
        }
    }
    if (gw) {
        rt->rt_flags |= RTCF_REDIRECTED;
        rt->rt_gateway = gw;
    }
    fnhe->fnhe_stamp = jiffies;
}
static void rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
    struct rtable *orig, *prev, **p = &nh->nh_rth_output;

    if (rt_is_input_route(rt))
        p = &nh->nh_rth_input;

    orig = *p;

    rt->dst.flags |= DST_RCU_FREE;
    dst_hold(&rt->dst);
    prev = cmpxchg(p, orig, rt);
    if (prev == orig) {
        if (orig)
            dst_release(&orig->dst);
    } else {
        dst_release(&rt->dst);
    }
}
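/* Cached routes are used without taking a reference (see the
 * skb_dst_set_noref() callers below), so the cache slot itself must own
 * one: dst_hold() above pins the new entry, the cmpxchg publishes it
 * atomically, and the displaced entry is only released here, after it can
 * no longer be found.  The DST_RCU_FREE flag is assumed (from its name
 * and this commit's title) to defer the actual free past an RCU grace
 * period, so lockless readers still traversing the old dst stay safe.
 */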
static bool rt_cache_valid(const struct rtable *rt)
{
    return rt &&
           rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
           !rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
                           const struct fib_result *res,
                           struct fib_nh_exception *fnhe,
                           struct fib_info *fi, u16 type, u32 itag)
{
    if (fi) {
        struct fib_nh *nh = &FIB_RES_NH(*res);

        if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK)
            rt->rt_gateway = nh->nh_gw;
        if (unlikely(fnhe))
            rt_bind_exception(rt, fnhe, daddr);
        dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
        rt->dst.tclassid = nh->nh_tclassid;
#endif
        if (!(rt->dst.flags & DST_NOCACHE))
            rt_cache_route(nh, rt);
    }

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
    set_class_tag(rt, res->tclassid);
#endif
    set_class_tag(rt, itag);
#endif
}
static struct rtable *rt_dst_alloc(struct net_device *dev,
                                   bool nopolicy, bool noxfrm, bool will_cache)
{
    return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
                     (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
                     (nopolicy ? DST_NOPOLICY : 0) |
                     (noxfrm ? DST_NOXFRM : 0));
}
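/* will_cache decides the flags: a route that will be cached must carry
 * neither DST_HOST nor DST_NOCACHE, so that rt_set_nexthop() (which skips
 * rt_cache_route() when DST_NOCACHE is set) may stash it in the nexthop
 * and share it across flows; an uncached route lives only as long as its
 * skb reference.
 */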
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                             u8 tos, struct net_device *dev, int our)
{
    struct rtable *rth;
    struct in_device *in_dev = __in_dev_get_rcu(dev);
    u32 itag = 0;
    int err;

    /* Primary sanity checks. */

    if (in_dev == NULL)
        return -EINVAL;

    if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
        skb->protocol != htons(ETH_P_IP))
        goto e_inval;

    if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
        if (ipv4_is_loopback(saddr))
            goto e_inval;

    if (ipv4_is_zeronet(saddr)) {
        if (!ipv4_is_local_multicast(daddr))
            goto e_inval;
    } else {
        err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
                                  in_dev, &itag);
        if (err < 0)
            goto e_err;
    }
    rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
                       IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
    if (!rth)
        goto e_nobufs;

#ifdef CONFIG_IP_ROUTE_CLASSID
    rth->dst.tclassid = itag;
#endif
    rth->dst.output = ip_rt_bug;

    rth->rt_genid = rt_genid(dev_net(dev));
    rth->rt_flags = RTCF_MULTICAST;
    rth->rt_type = RTN_MULTICAST;
    rth->rt_is_input = 1;
    rth->rt_iif = 0;
    rth->rt_pmtu = 0;
    rth->rt_gateway = 0;
    if (our) {
        rth->dst.input = ip_local_deliver;
        rth->rt_flags |= RTCF_LOCAL;
    }

#ifdef CONFIG_IP_MROUTE
    if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
        rth->dst.input = ip_mr_input;
#endif
    RT_CACHE_STAT_INC(in_slow_mc);

    skb_dst_set(skb, &rth->dst);
    return 0;

e_nobufs:
    return -ENOBUFS;
e_inval:
    return -EINVAL;
e_err:
    return err;
}
static void ip_handle_martian_source(struct net_device *dev,
                                     struct in_device *in_dev,
                                     struct sk_buff *skb,
                                     __be32 daddr,
                                     __be32 saddr)
{
    RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
    if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
        /*
         * RFC1812 recommendation: if the source is martian,
         * the only hint is the MAC header.
         */
        pr_warn("martian source %pI4 from %pI4, on dev %s\n",
                &daddr, &saddr, dev->name);
        if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
            print_hex_dump(KERN_WARNING, "ll header: ",
                           DUMP_PREFIX_OFFSET, 16, 1,
                           skb_mac_header(skb),
                           dev->hard_header_len, true);
        }
    }
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
                           const struct fib_result *res,
                           struct in_device *in_dev,
                           __be32 daddr, __be32 saddr, u32 tos)
{
    struct rtable *rth;
    int err;
    struct in_device *out_dev;
    unsigned int flags = 0;
    bool do_cache;
    u32 itag;

    /* get a working reference to the output device */
    out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
    if (out_dev == NULL) {
        net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
        return -EINVAL;
    }

    err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
                              in_dev->dev, in_dev, &itag);
    if (err < 0) {
        ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
                                 saddr);

        goto cleanup;
    }

    if (out_dev == in_dev && err &&
        (IN_DEV_SHARED_MEDIA(out_dev) ||
         inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
        flags |= RTCF_DOREDIRECT;

    if (skb->protocol != htons(ETH_P_IP)) {
        /* Not IP (i.e. ARP).  Do not create a route if it is
         * invalid for proxy arp.  DNAT routes are always valid.
         *
         * The proxy arp feature has been extended to allow ARP
         * replies back out the same interface, to support
         * Private VLAN switch technologies.  See arp.c.
         */
        if (out_dev == in_dev &&
            IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
            err = -EINVAL;
            goto cleanup;
        }
    }

    do_cache = false;
    if (res->fi) {
        if (!itag) {
            rth = FIB_RES_NH(*res).nh_rth_input;
            if (rt_cache_valid(rth)) {
                skb_dst_set_noref(skb, &rth->dst);
                goto out;
            }
            do_cache = true;
        }
    }

    rth = rt_dst_alloc(out_dev->dev,
                       IN_DEV_CONF_GET(in_dev, NOPOLICY),
                       IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
    if (!rth) {
        err = -ENOBUFS;
        goto cleanup;
    }

    rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
    rth->rt_flags = flags;
    rth->rt_type = res->type;
    rth->rt_is_input = 1;
    rth->rt_iif = 0;
    rth->rt_pmtu = 0;
    rth->rt_gateway = 0;

    rth->dst.input = ip_forward;
    rth->dst.output = ip_output;

    rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag);
    skb_dst_set(skb, &rth->dst);
out:
    err = 0;
cleanup:
    return err;
}
static int ip_mkroute_input(struct sk_buff *skb,
                            struct fib_result *res,
                            const struct flowi4 *fl4,
                            struct in_device *in_dev,
                            __be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
    if (res->fi && res->fi->fib_nhs > 1)
        fib_select_multipath(res);
#endif

    /* create a routing cache entry */
    return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
 * NOTE. We drop all packets that have a local source
 * address, because every properly looped back packet
 * must have the correct destination already attached by the output routine.
 *
 * Such an approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 * called with rcu_read_lock()
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                               u8 tos, struct net_device *dev)
{
    struct fib_result res;
    struct in_device *in_dev = __in_dev_get_rcu(dev);
    struct flowi4 fl4;
    unsigned int flags = 0;
    u32 itag = 0;
    struct rtable *rth;
    int err = -EINVAL;
    struct net *net = dev_net(dev);
    bool do_cache;

    /* IP on this device is disabled. */

    if (!in_dev)
        goto out;

    /* Check for the most weird martians, which can not be detected
     * by fib_lookup.
     */

    if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
        goto martian_source;

    res.fi = NULL;
    if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
        goto brd_input;

    /* Accept zero addresses only to limited broadcast;
     * I do not even know whether to fix it or not.  Waiting for complaints :-)
     */
    if (ipv4_is_zeronet(saddr))
        goto martian_source;

    if (ipv4_is_zeronet(daddr))
        goto martian_destination;

    if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
        if (ipv4_is_loopback(daddr))
            goto martian_destination;

        if (ipv4_is_loopback(saddr))
            goto martian_source;
    }

    /*
     *	Now we are ready to route the packet.
     */
    fl4.flowi4_oif = 0;
    fl4.flowi4_iif = dev->ifindex;
    fl4.flowi4_mark = skb->mark;
    fl4.flowi4_tos = tos;
    fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
    fl4.daddr = daddr;
    fl4.saddr = saddr;
    err = fib_lookup(net, &fl4, &res);
    if (err != 0)
        goto no_route;

    RT_CACHE_STAT_INC(in_slow_tot);

    if (res.type == RTN_BROADCAST)
        goto brd_input;

    if (res.type == RTN_LOCAL) {
        err = fib_validate_source(skb, saddr, daddr, tos,
                                  net->loopback_dev->ifindex,
                                  dev, in_dev, &itag);
        if (err < 0)
            goto martian_source_keep_err;
        goto local_input;
    }

    if (!IN_DEV_FORWARD(in_dev))
        goto no_route;
    if (res.type != RTN_UNICAST)
        goto martian_destination;

    err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
out:    return err;

brd_input:
    if (skb->protocol != htons(ETH_P_IP))
        goto e_inval;

    if (!ipv4_is_zeronet(saddr)) {
        err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
                                  in_dev, &itag);
        if (err < 0)
            goto martian_source_keep_err;
    }
    flags |= RTCF_BROADCAST;
    res.type = RTN_BROADCAST;
    RT_CACHE_STAT_INC(in_brd);

local_input:
    do_cache = false;
    if (res.fi) {
        if (!itag) {
            rth = FIB_RES_NH(res).nh_rth_input;
            if (rt_cache_valid(rth)) {
                skb_dst_set_noref(skb, &rth->dst);
                err = 0;
                goto out;
            }
            do_cache = true;
        }
    }

    rth = rt_dst_alloc(net->loopback_dev,
                       IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
    if (!rth)
        goto e_nobufs;

    rth->dst.input = ip_local_deliver;
    rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
    rth->dst.tclassid = itag;
#endif

    rth->rt_genid = rt_genid(net);
    rth->rt_flags = flags|RTCF_LOCAL;
    rth->rt_type = res.type;
    rth->rt_is_input = 1;
    rth->rt_iif = 0;
    rth->rt_pmtu = 0;
    rth->rt_gateway = 0;
    if (res.type == RTN_UNREACHABLE) {
        rth->dst.input = ip_error;
        rth->dst.error = -err;
        rth->rt_flags &= ~RTCF_LOCAL;
    }
    if (do_cache)
        rt_cache_route(&FIB_RES_NH(res), rth);
    skb_dst_set(skb, &rth->dst);
    err = 0;
    goto out;

no_route:
    RT_CACHE_STAT_INC(in_no_route);
    res.type = RTN_UNREACHABLE;
    if (err == -ESRCH)
        err = -ENETUNREACH;
    goto local_input;

    /*
     *	Do not cache martian addresses: they should be logged (RFC1812)
     */
martian_destination:
    RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
    if (IN_DEV_LOG_MARTIANS(in_dev))
        net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
                             &daddr, &saddr, dev->name);
#endif

e_inval:
    err = -EINVAL;
    goto out;

e_nobufs:
    err = -ENOBUFS;
    goto out;

martian_source:
    err = -EINVAL;
martian_source_keep_err:
    ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
    goto out;
}
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                         u8 tos, struct net_device *dev)
{
    int res;

    rcu_read_lock();

    /* Multicast recognition logic is moved from the route cache to here.
     * The problem was that too many Ethernet cards have broken/missing
     * hardware multicast filters :-(  As a result the host on a multicasting
     * network acquires a lot of useless route cache entries, sort of
     * SDR messages from all the world.  Now we try to get rid of them.
     * Really, provided the software IP multicast filter is organized
     * reasonably (at least, hashed), it does not result in a slowdown
     * compared with route cache reject entries.
     * Note that multicast routers are not affected, because
     * a route cache entry is created eventually.
     */
    if (ipv4_is_multicast(daddr)) {
        struct in_device *in_dev = __in_dev_get_rcu(dev);

        if (in_dev) {
            int our = ip_check_mc_rcu(in_dev, daddr, saddr,
                                      ip_hdr(skb)->protocol);
            if (our
#ifdef CONFIG_IP_MROUTE
                ||
                (!ipv4_is_local_multicast(daddr) &&
                 IN_DEV_MFORWARD(in_dev))
#endif
               ) {
                int res = ip_route_input_mc(skb, daddr, saddr,
                                            tos, dev, our);
                rcu_read_unlock();
                return res;
            }
        }
        rcu_read_unlock();
        return -EINVAL;
    }
    res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
    rcu_read_unlock();
    return res;
}
EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
                                       const struct flowi4 *fl4, int orig_oif,
                                       struct net_device *dev_out,
                                       unsigned int flags)
{
    struct fib_info *fi = res->fi;
    struct fib_nh_exception *fnhe;
    struct in_device *in_dev;
    u16 type = res->type;
    struct rtable *rth;

    in_dev = __in_dev_get_rcu(dev_out);
    if (!in_dev)
        return ERR_PTR(-EINVAL);

    if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
        if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
            return ERR_PTR(-EINVAL);

    if (ipv4_is_lbcast(fl4->daddr))
        type = RTN_BROADCAST;
    else if (ipv4_is_multicast(fl4->daddr))
        type = RTN_MULTICAST;
    else if (ipv4_is_zeronet(fl4->daddr))
        return ERR_PTR(-EINVAL);

    if (dev_out->flags & IFF_LOOPBACK)
        flags |= RTCF_LOCAL;

    if (type == RTN_BROADCAST) {
        flags |= RTCF_BROADCAST | RTCF_LOCAL;
        fi = NULL;
    } else if (type == RTN_MULTICAST) {
        flags |= RTCF_MULTICAST | RTCF_LOCAL;
        if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
                             fl4->flowi4_proto))
            flags &= ~RTCF_LOCAL;
        /* If the multicast route does not exist, use
         * the default one, but do not gateway in this case.
         * Yes, it is a hack.
         */
        if (fi && res->prefixlen < 4)
            fi = NULL;
    }

    fnhe = NULL;
    if (fi) {
        fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr);
        if (!fnhe) {
            rth = FIB_RES_NH(*res).nh_rth_output;
            if (rt_cache_valid(rth)) {
                dst_hold(&rth->dst);
                return rth;
            }
        }
    }
    rth = rt_dst_alloc(dev_out,
                       IN_DEV_CONF_GET(in_dev, NOPOLICY),
                       IN_DEV_CONF_GET(in_dev, NOXFRM),
                       fi && !fnhe);
    if (!rth)
        return ERR_PTR(-ENOBUFS);

    rth->dst.output = ip_output;

    rth->rt_genid = rt_genid(dev_net(dev_out));
    rth->rt_flags = flags;
    rth->rt_type = type;
    rth->rt_is_input = 0;
    rth->rt_iif = orig_oif ? : 0;
    rth->rt_pmtu = 0;
    rth->rt_gateway = 0;

    RT_CACHE_STAT_INC(out_slow_tot);

    if (flags & RTCF_LOCAL)
        rth->dst.input = ip_local_deliver;
    if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
        if (flags & RTCF_LOCAL &&
            !(dev_out->flags & IFF_LOOPBACK)) {
            rth->dst.output = ip_mc_output;
            RT_CACHE_STAT_INC(out_slow_mc);
        }
#ifdef CONFIG_IP_MROUTE
        if (type == RTN_MULTICAST) {
            if (IN_DEV_MFORWARD(in_dev) &&
                !ipv4_is_local_multicast(fl4->daddr)) {
                rth->dst.input = ip_mr_input;
                rth->dst.output = ip_mc_output;
            }
        }
#endif
    }

    rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);

    return rth;
}
/*
 * Major route resolver routine.
 */

struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
{
    struct net_device *dev_out = NULL;
    __u8 tos = RT_FL_TOS(fl4);
    unsigned int flags = 0;
    struct fib_result res;
    struct rtable *rth;
    int orig_oif;

    res.tclassid = 0;
    res.fi = NULL;
    res.table = NULL;

    orig_oif = fl4->flowi4_oif;

    fl4->flowi4_iif = net->loopback_dev->ifindex;
    fl4->flowi4_tos = tos & IPTOS_RT_MASK;
    fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
                         RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

    rcu_read_lock();
    if (fl4->saddr) {
        rth = ERR_PTR(-EINVAL);
        if (ipv4_is_multicast(fl4->saddr) ||
            ipv4_is_lbcast(fl4->saddr) ||
            ipv4_is_zeronet(fl4->saddr))
            goto out;

        /* I removed the check for oif == dev_out->oif here.
         * It was wrong for two reasons:
         * 1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
         *    is assigned to multiple interfaces.
         * 2. Moreover, we are allowed to send packets with saddr
         *    of another iface. --ANK
         */

        if (fl4->flowi4_oif == 0 &&
            (ipv4_is_multicast(fl4->daddr) ||
             ipv4_is_lbcast(fl4->daddr))) {
            /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
            dev_out = __ip_dev_find(net, fl4->saddr, false);
            if (dev_out == NULL)
                goto out;

            /* Special hack: user can direct multicasts
             * and limited broadcast via the necessary interface
             * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
             * This hack is not just for fun, it allows
             * vic, vat and friends to work.
             * They bind socket to loopback, set ttl to zero
             * and expect that it will work.
             * From the viewpoint of the routing cache they are broken,
             * because we are not allowed to build a multicast path
             * with loopback source addr (look, the routing cache
             * cannot know that ttl is zero, so that the packet
             * will not leave this host and the route is valid).
             * Luckily, this hack is a good workaround.
             */

            fl4->flowi4_oif = dev_out->ifindex;
            goto make_route;
        }

        if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
            /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
            if (!__ip_dev_find(net, fl4->saddr, false))
                goto out;
        }
    }


    if (fl4->flowi4_oif) {
        dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
        rth = ERR_PTR(-ENODEV);
        if (dev_out == NULL)
            goto out;

        /* RACE: Check return value of inet_select_addr instead. */
        if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
            rth = ERR_PTR(-ENETUNREACH);
            goto out;
        }
        if (ipv4_is_local_multicast(fl4->daddr) ||
            ipv4_is_lbcast(fl4->daddr)) {
            if (!fl4->saddr)
                fl4->saddr = inet_select_addr(dev_out, 0,
                                              RT_SCOPE_LINK);
            goto make_route;
        }
        if (fl4->saddr) {
            if (ipv4_is_multicast(fl4->daddr))
                fl4->saddr = inet_select_addr(dev_out, 0,
                                              fl4->flowi4_scope);
            else if (!fl4->daddr)
                fl4->saddr = inet_select_addr(dev_out, 0,
                                              RT_SCOPE_HOST);
        }
    }

    if (!fl4->daddr) {
        fl4->daddr = fl4->saddr;
        if (!fl4->daddr)
            fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
        dev_out = net->loopback_dev;
        fl4->flowi4_oif = net->loopback_dev->ifindex;
        res.type = RTN_LOCAL;
        flags |= RTCF_LOCAL;
        goto make_route;
    }

    if (fib_lookup(net, fl4, &res)) {
        res.fi = NULL;
        res.table = NULL;
        if (fl4->flowi4_oif) {
            /* Apparently, routing tables are wrong. Assume
             * that the destination is on link.
             *
             * WHY? DW.
             * Because we are allowed to send to an iface
             * even if it has NO routes and NO assigned
             * addresses. When oif is specified, routing
             * tables are looked up with only one purpose:
             * to catch if the destination is gatewayed, rather than
             * direct. Moreover, if MSG_DONTROUTE is set,
             * we send the packet, ignoring both routing tables
             * and ifaddr state. --ANK
             *
             * We could make it even if oif is unknown,
             * likely IPv6, but we do not.
             */

            if (fl4->saddr == 0)
                fl4->saddr = inet_select_addr(dev_out, 0,
                                              RT_SCOPE_LINK);
            res.type = RTN_UNICAST;
            goto make_route;
        }
        rth = ERR_PTR(-ENETUNREACH);
        goto out;
    }

    if (res.type == RTN_LOCAL) {
        if (!fl4->saddr) {
            if (res.fi->fib_prefsrc)
                fl4->saddr = res.fi->fib_prefsrc;
            else
                fl4->saddr = fl4->daddr;
        }
        dev_out = net->loopback_dev;
        fl4->flowi4_oif = dev_out->ifindex;
        res.fi = NULL;
        flags |= RTCF_LOCAL;
        goto make_route;
    }

#ifdef CONFIG_IP_ROUTE_MULTIPATH
    if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
        fib_select_multipath(&res);
    else
#endif
    if (!res.prefixlen &&
        res.table->tb_num_default > 1 &&
        res.type == RTN_UNICAST && !fl4->flowi4_oif)
        fib_select_default(&res);

    if (!fl4->saddr)
        fl4->saddr = FIB_RES_PREFSRC(net, res);

    dev_out = FIB_RES_DEV(res);
    fl4->flowi4_oif = dev_out->ifindex;


make_route:
    rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);

out:
    rcu_read_unlock();
    return rth;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
    return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
    unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

    return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                          struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
                                       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
                                          unsigned long old)
{
    return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
    .family         = AF_INET,
    .protocol       = cpu_to_be16(ETH_P_IP),
    .check          = ipv4_blackhole_dst_check,
    .mtu            = ipv4_blackhole_mtu,
    .default_advmss = ipv4_default_advmss,
    .update_pmtu    = ipv4_rt_blackhole_update_pmtu,
    .redirect       = ipv4_rt_blackhole_redirect,
    .cow_metrics    = ipv4_rt_blackhole_cow_metrics,
    .neigh_lookup   = ipv4_neigh_lookup,
};

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
    struct rtable *ort = (struct rtable *) dst_orig;
    struct rtable *rt;

    rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
    if (rt) {
        struct dst_entry *new = &rt->dst;

        new->__use = 1;
        new->input = dst_discard;
        new->output = dst_discard;

        new->dev = ort->dst.dev;
        if (new->dev)
            dev_hold(new->dev);

        rt->rt_is_input = ort->rt_is_input;
        rt->rt_iif = ort->rt_iif;
        rt->rt_pmtu = ort->rt_pmtu;

        rt->rt_genid = rt_genid(net);
        rt->rt_flags = ort->rt_flags;
        rt->rt_type = ort->rt_type;
        rt->rt_gateway = ort->rt_gateway;

        dst_free(new);
    }

    dst_release(dst_orig);

    return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
                                    struct sock *sk)
{
    struct rtable *rt = __ip_route_output_key(net, flp4);

    if (IS_ERR(rt))
        return rt;

    if (flp4->flowi4_proto)
        rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
                                           flowi4_to_flowi(flp4),
                                           sk, 0);

    return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
                        struct flowi4 *fl4, struct sk_buff *skb, u32 pid,
                        u32 seq, int event, int nowait, unsigned int flags)
{
    struct rtable *rt = skb_rtable(skb);
    struct rtmsg *r;
    struct nlmsghdr *nlh;
    unsigned long expires = 0;
    u32 error;
    u32 metrics[RTAX_MAX];

    nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
    if (nlh == NULL)
        return -EMSGSIZE;

    r = nlmsg_data(nlh);
    r->rtm_family = AF_INET;
    r->rtm_dst_len = 32;
    r->rtm_src_len = 0;
    r->rtm_tos = fl4->flowi4_tos;
    r->rtm_table = RT_TABLE_MAIN;
    if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
        goto nla_put_failure;
    r->rtm_type = rt->rt_type;
    r->rtm_scope = RT_SCOPE_UNIVERSE;
    r->rtm_protocol = RTPROT_UNSPEC;
    r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
    if (rt->rt_flags & RTCF_NOTIFY)
        r->rtm_flags |= RTM_F_NOTIFY;

    if (nla_put_be32(skb, RTA_DST, dst))
        goto nla_put_failure;
    if (src) {
        r->rtm_src_len = 32;
        if (nla_put_be32(skb, RTA_SRC, src))
            goto nla_put_failure;
    }
    if (rt->dst.dev &&
        nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
        goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
    if (rt->dst.tclassid &&
        nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
        goto nla_put_failure;
#endif
    if (!rt_is_input_route(rt) &&
        fl4->saddr != src) {
        if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
            goto nla_put_failure;
    }
    if (rt->rt_gateway &&
        nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
        goto nla_put_failure;

    memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
    if (rt->rt_pmtu)
        metrics[RTAX_MTU - 1] = rt->rt_pmtu;
    if (rtnetlink_put_metrics(skb, metrics) < 0)
        goto nla_put_failure;

    if (fl4->flowi4_mark &&
        nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark))
        goto nla_put_failure;

    error = rt->dst.error;
    expires = rt->dst.expires;
    if (expires) {
        if (time_before(jiffies, expires))
            expires -= jiffies;
        else
            expires = 0;
    }

    if (rt_is_input_route(rt)) {
        if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
            goto nla_put_failure;
    }

    if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
        goto nla_put_failure;

    return nlmsg_end(skb, nlh);

nla_put_failure:
    nlmsg_cancel(skb, nlh);
    return -EMSGSIZE;
}
/* Netlink RTM_GETROUTE handler: resolve a single route (input path when
 * RTA_IIF is given, output path otherwise) and unicast the result back.
 */
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers; this skb can pass through a good
	 * chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol = htons(ETH_P_IP);
		skb->dev = dev;
		skb->mark = mark;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		rt = ip_route_output_key(net, &fl4);

		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, dst, src, &fl4, skb,
			   NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
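/* Userspace-side sketch (illustrative only, hence not compiled): the
 * minimal RTM_GETROUTE request that inet_rtm_getroute() above answers.
 * The function name, 'fd' (assumed to be a bound NETLINK_ROUTE socket),
 * and 'dst_addr' are invented for this example; userspace headers and
 * error handling are omitted.
 */
#if 0
static void example_getroute_request(int fd, __be32 dst_addr)
{
	struct {
		struct nlmsghdr	nlh;
		struct rtmsg	rtm;
		char		buf[RTA_SPACE(4)];
	} req = {
		.nlh.nlmsg_len	 = NLMSG_LENGTH(sizeof(struct rtmsg)),
		.nlh.nlmsg_type	 = RTM_GETROUTE,
		.nlh.nlmsg_flags = NLM_F_REQUEST,
		.rtm.rtm_family	 = AF_INET,
	};
	struct rtattr *rta = (struct rtattr *)
		((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));

	rta->rta_type = RTA_DST;	/* parsed above via rtm_ipv4_policy */
	rta->rta_len = RTA_LENGTH(4);
	memcpy(RTA_DATA(rta), &dst_addr, 4);
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_LENGTH(4);

	send(fd, &req, req.nlh.nlmsg_len, 0);	/* reply: RTM_NEWROUTE */
}
#endif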
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* Nothing to walk: with the routing cache gone there are no cached
	 * entries to dump, so report the skb unchanged.
	 */
	return skb->len;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev), 0);
}
#ifdef CONFIG_SYSCTL
/* Handler for /proc/sys/net/ipv4/route/flush: the integer written by
 * userspace becomes the flush delay handed to rt_cache_flush().
 */
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	if (write) {
		int flush_delay;
		ctl_table ctl;
		struct net *net;

		memcpy(&ctl, __ctl, sizeof(ctl));
		ctl.data = &flush_delay;
		proc_dointvec(&ctl, write, buffer, lenp, ppos);

		net = (struct net *)__ctl->extra1;
		rt_cache_flush(net, flush_delay);
		return 0;
	}

	return -EINVAL;
}
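/* Userspace reaches the handler above with, for example,
 *	sysctl -w net.ipv4.route.flush=1
 * or an equivalent write to /proc/sys/net/ipv4/route/flush; the written
 * integer is the delay passed to rt_cache_flush().  Reads fail, matching
 * the write-only (0200) mode of the table entry below.
 */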
static ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
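/* Every entry above surfaces as a file under /proc/sys/net/ipv4/route/
 * (for example .../gc_thresh or .../min_pmtu).  The proc_dointvec_jiffies
 * and proc_dointvec_ms_jiffies handlers convert between seconds or
 * milliseconds in userspace and jiffies in the kernel.
 */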
static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}
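/* Pattern note: every namespace other than init_net gets its own copy of
 * the flush table, so tbl[0].extra1 can carry that namespace back into
 * ipv4_sysctl_rtcache_flush(); the copy is freed again in
 * sysctl_route_net_exit() below.
 */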
static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
static __net_init int rt_genid_init(struct net *net)
{
	get_random_bytes(&net->ipv4.rt_genid,
			 sizeof(net->ipv4.rt_genid));
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};
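/* Note (added commentary): both generation counters start at random values.
 * rt_genid gates the validity of every cached dst in the namespace:
 * rt_is_expired() compares a route's snapshot against the current per-net
 * value, so a flush only needs to move rt_genid to invalidate all of them
 * at once.
 */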
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init = ipv4_inetpeer_init,
	.exit = ipv4_inetpeer_exit,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init(ip_rt_max_size);
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return rc;
}
#ifdef CONFIG_SYSCTL
/* We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif