/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
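
/*
 * rules_ops holds one fib_rules_ops entry per address family.
 * Additions and removals are serialized by rules_mod_lock, while
 * lookups walk the list under RCU.
 */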
static LIST_HEAD(rules_ops);
static DEFINE_SPINLOCK(rules_mod_lock);
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
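
/*
 * Look up the fib_rules_ops registered for an address family. On
 * success a reference on the owning module is taken, which the caller
 * releases via rules_ops_put().
 */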
static struct fib_rules_ops *lookup_rules_ops(int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}
static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache();
}
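
/*
 * Register the rules operations for an address family. rule_size must
 * cover at least the common struct fib_rule header, the match,
 * configure, compare, fill and action callbacks are mandatory, and
 * only one fib_rules_ops may be registered per family. A minimal
 * sketch of a caller (hypothetical names, for illustration only):
 *
 *	static struct fib_rules_ops dummy_rules_ops = {
 *		.family		= AF_UNSPEC,
 *		.rule_size	= sizeof(struct fib_rule),
 *		.match		= dummy_match,
 *		.configure	= dummy_configure,
 *		.compare	= dummy_compare,
 *		.fill		= dummy_fill,
 *		.action		= dummy_action,
 *		.rules_list	= LIST_HEAD_INIT(dummy_rules_ops.rules_list),
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = fib_rules_register(&dummy_rules_ops);
 */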
int fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &rules_ops);
	err = 0;
errout:
	spin_unlock(&rules_mod_lock);

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_register);
void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}
EXPORT_SYMBOL_GPL(fib_rules_cleanup_ops);
int fib_rules_unregister(struct fib_rules_ops *ops)
{
	int err = 0;
	struct fib_rules_ops *o;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list) {
		if (o == ops) {
			list_del_rcu(&o->list);
			fib_rules_cleanup_ops(ops);
			goto out;
		}
	}

	err = -ENOENT;
out:
	spin_unlock(&rules_mod_lock);

	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
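
/*
 * Generic match: interface index and fwmark (under mark_mask) are
 * checked here, then the family-specific match callback runs. A set
 * FIB_RULE_INVERT flag negates the final result ("not" rules).
 */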
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->ifindex && (rule->ifindex != fl->iif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
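
/*
 * Walk the rule list under RCU in priority order. FR_ACT_GOTO rules
 * jump to their resolved target, FR_ACT_NOP rules are skipped, and
 * any other action is handed to the family's action callback. An
 * -EAGAIN result continues with the next rule; otherwise the matched
 * rule is returned in arg->rule with a reference held.
 */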
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			fib_rule_get(rule);
			arg->rule = rule;
			goto out;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
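
/*
 * Sanity check a rule message: if a source or destination prefix
 * length is given, the corresponding attribute must be present, sized
 * to the family's address size, and the prefix must fit into it.
 */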
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
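
/*
 * RTM_NEWRULE handler: parses the netlink attributes, builds a new
 * rule, resolves goto targets in both directions and inserts the rule
 * into the list sorted by preference.
 */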
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (net != &init_net)
		return -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;
	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}
	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IFNAME]) {
		struct net_device *dev;

		rule->ifindex = -1;
		nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->ifname);
		if (dev)
			rule->ifindex = dev->ifindex;
	}
	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);
	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!rule->pref && ops->default_pref)
		rule->pref = ops->default_pref();

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				rule->ctarget = r;
				break;
			}
		}

		if (rule->ctarget == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;
	err = ops->configure(rule, skb, nlh, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(r->ctarget != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;
errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
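
/*
 * RTM_DELRULE handler: finds the first rule matching all supplied
 * attributes, unlinks it and invalidates any goto rules pointing at
 * it before dropping the reference.
 */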
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (net != &init_net)
		return -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;
	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}
	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IFNAME] &&
		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;
		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check if this deleted rule is a target of any goto
		 * rule. If so, disable those rules. As this operation
		 * is potentially expensive, it is only performed if
		 * goto rules have actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (tmp->ctarget == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}
		synchronize_rcu();
		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
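
/*
 * Worst-case notification size: the generic header and attributes
 * plus whatever the family reports via its nlmsg_payload callback.
 */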
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->ifname[0]) {
		NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);

		if (rule->ifindex == -1)
			frh->flags |= FIB_RULE_DEV_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, nlh, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
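
/*
 * Dump the rules of a single family; cb->args[1] carries the index to
 * resume from across successive dump callbacks.
 */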
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rules_ops *ops;
	int idx = 0, family;

	if (net != &init_net)
		return -EINVAL;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
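
/*
 * Broadcast an RTM_NEWRULE/RTM_DELRULE event to the family's netlink
 * group; allocation or fill failures are reported through the socket
 * error mechanism instead.
 */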
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_notify(skb, &init_net, pid, ops->nlgroup, nlh, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, ops->nlgroup, err);
}
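
/*
 * Rules are bound to interfaces by name; the netdevice notifier below
 * keeps the cached ifindex in sync as devices come and go, using -1
 * to mark a rule whose device is currently absent.
 */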
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->ifindex == -1 &&
		    strcmp(dev->name, rule->ifname) == 0)
			rule->ifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list)
		if (rule->ifindex == dev->ifindex)
			rule->ifindex = -1;
}
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct fib_rules_ops *ops;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	ASSERT_RTNL();
	rcu_read_lock();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	rcu_read_unlock();

	return NOTIFY_DONE;
}
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __init fib_rules_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	return register_netdevice_notifier(&fib_rules_notifier);
}

subsys_initcall(fib_rules_init);