/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

static LIST_HEAD(rules_ops);
static DEFINE_SPINLOCK(rules_mod_lock);

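/*
 * Install a rule while the ops are still private to their owner, i.e.
 * before fib_rules_register() has made them visible to lookups (hence no
 * locking below).  As a sketch of typical use, modelled on how the IPv4
 * code seeds its local/main/default rules (exact call sites may differ):
 *
 *	fib_default_rule_add(&fib4_rules_ops, 0, RT_TABLE_LOCAL,
 *			     FIB_RULE_PERMANENT);
 */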
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

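/*
 * Look up the ops for a protocol family under RCU.  On success a module
 * reference is held on ops->owner; the caller must drop it again with
 * rules_ops_put().
 */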
static struct fib_rules_ops *lookup_rules_ops(int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache();
}

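/*
 * Register a protocol family's rule operations.  rule_size must cover at
 * least the generic struct fib_rule, and the five callbacks checked below
 * are mandatory.  A minimal initializer might look like the sketch below
 * (hypothetical "example" family and helpers; a real user is e.g. IPv4's
 * fib4_rules_ops):
 *
 *	static struct fib_rules_ops example_rules_ops = {
 *		.family		= AF_EXAMPLE,
 *		.rule_size	= sizeof(struct example_rule),
 *		.addr_size	= sizeof(u32),
 *		.action		= example_action,
 *		.match		= example_match,
 *		.configure	= example_configure,
 *		.compare	= example_compare,
 *		.fill		= example_fill,
 *		.policy		= example_policy,
 *		.rules_list	= LIST_HEAD_INIT(example_rules_ops.rules_list),
 *		.owner		= THIS_MODULE,
 *	};
 */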
int fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &rules_ops);
	err = 0;
errout:
	spin_unlock(&rules_mod_lock);

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_register);

static void cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}

int fib_rules_unregister(struct fib_rules_ops *ops)
{
	int err = 0;
	struct fib_rules_ops *o;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list) {
		if (o == ops) {
			list_del_rcu(&o->list);
			cleanup_ops(ops);
			goto out;
		}
	}

	err = -ENOENT;
out:
	spin_unlock(&rules_mod_lock);

	synchronize_rcu();

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->ifindex && (rule->ifindex != fl->iif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

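/*
 * Walk the rules in ascending preference order under RCU, following
 * resolved goto targets, until a matching rule's ->action() callback
 * returns something other than -EAGAIN.  On success the matched rule is
 * returned in arg->rule with a reference held (the caller must drop it
 * with fib_rule_put()); -ESRCH means no rule matched.
 */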
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			fib_rule_get(rule);
			arg->rule = rule;
			goto out;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}

EXPORT_SYMBOL_GPL(fib_rules_lookup);

static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

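/*
 * RTM_NEWRULE handler: validate the header and attributes, allocate and
 * configure the new rule, resolve goto targets in both directions, and
 * insert the rule into the list sorted by preference before notifying
 * listeners.
 */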
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IFNAME]) {
		struct net_device *dev;

		rule->ifindex = -1;
		nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->ifname);
		if (dev)
			rule->ifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!rule->pref && ops->default_pref)
		rule->pref = ops->default_pref();

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				rule->ctarget = r;
				break;
			}
		}

		if (rule->ctarget == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, nlh, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(r->ctarget != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

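/*
 * RTM_DELRULE handler: delete the first rule matching every selector the
 * request supplied (action, table, priority, ifname, fwmark/mask and the
 * family-specific compare).  Rules marked FIB_RULE_PERMANENT cannot be
 * deleted; goto rules pointing at the victim are unresolved again.
 */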
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IFNAME] &&
		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check if this rule is a target of any goto rules. If so,
		 * disable them. As this operation is eventually very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (tmp->ctarget == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		synchronize_rcu();
		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

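/*
 * Encode a rule into a netlink message.  The NLA_PUT_* macros jump to
 * nla_put_failure when the skb runs out of tailroom, in which case the
 * partially built message is cancelled and -EMSGSIZE returned.
 */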
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->ifname[0]) {
		NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);

		if (rule->ifindex == -1)
			frh->flags |= FIB_RULE_DEV_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, nlh, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

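/*
 * Multicast an RTM_NEWRULE/RTM_DELRULE notification to ops->nlgroup,
 * reusing the request's sequence number so the originating socket can
 * correlate the notification with its request.
 */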
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->ifindex == -1 &&
		    strcmp(dev->name, rule->ifname) == 0)
			rule->ifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list)
		if (rule->ifindex == dev->ifindex)
			rule->ifindex = -1;
}

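/*
 * Netdevice notifier: rules are bound to interfaces by name, so keep the
 * cached ifindex in sync as devices with a matching name register and
 * unregister.
 */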
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct fib_rules_ops *ops;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	ASSERT_RTNL();
	rcu_read_lock();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	rcu_read_unlock();

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

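/*
 * Wire up the rtnetlink handlers.  These back userspace rule management,
 * e.g. "ip rule add fwmark 1 table 100" arrives here as an RTM_NEWRULE
 * request (illustrative command line).
 */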
static int __init fib_rules_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	return register_netdevice_notifier(&fib_rules_notifier);
}

subsys_initcall(fib_rules_init);