/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = hold_net(ops->fro_net);

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called.
	 */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
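
/*
 * Illustrative sketch (not part of this file): an address family seeds its
 * default rules from its own init path, before the ops are reachable via
 * netlink, which is why fib_default_rule_add() can append without locking.
 * The calls below mirror the IPv4 convention; a hypothetical family would
 * substitute its own ops pointer, preferences and table ids:
 *
 *	err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
 *	if (err < 0)
 *		return err;
 *	err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
 *	if (err < 0)
 *		return err;
 *	err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
 *	if (err < 0)
 *		return err;
 */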
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);

			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);
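
/*
 * Worked example (illustrative): fib_default_rule_pref() suggests a
 * preference one below that of the second rule in the list.  With the IPv4
 * defaults sketched above (local at 0, main at 0x7FFE, default at 0x7FFF),
 * the second entry is the main rule, so a rule added without FRA_PRIORITY
 * gets pref 0x7FFD and is inserted just in front of the main rule.
 */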
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}
static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	hold_net(net);
	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);
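
/*
 * Illustrative sketch (hypothetical names, loosely modelled on the IPv4
 * user of this API): a protocol registers per-namespace rule handling by
 * passing a template.  __fib_rules_register() above insists on rule_size
 * being at least sizeof(struct fib_rule) and on the match, configure,
 * compare, fill and action callbacks being set.
 *
 *	static const struct fib_rules_ops __net_initdata foo_rules_ops_template = {
 *		.family		= AF_INET,
 *		.rule_size	= sizeof(struct foo_rule),
 *		.addr_size	= sizeof(u32),
 *		.action		= foo_rule_action,
 *		.match		= foo_rule_match,
 *		.configure	= foo_rule_configure,
 *		.compare	= foo_rule_compare,
 *		.fill		= foo_rule_fill,
 *		.default_pref	= fib_default_rule_pref,
 *		.nlgroup	= RTNLGRP_IPV4_RULE,
 *		.policy		= foo_rule_policy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ops = fib_rules_register(&foo_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 */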
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}
static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
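
/*
 * Worked example (illustrative) for the fwmark test above: the rule matches
 * when (rule->mark ^ fl->flowi_mark) & rule->mark_mask is zero.  A rule with
 * mark 0x1 and mask 0xFFFFFFFF therefore only matches packets whose fwmark
 * is exactly 1, while the same mark with mask 0xFF matches any fwmark whose
 * low byte is 0x01 (0x101, 0xAB01, ...), ignoring the upper bits.
 */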
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
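
/*
 * Illustrative sketch of a caller (hypothetical, modelled loosely on the
 * IPv4 wrapper around this function): the protocol passes its own ops, a
 * flow describing the packet and a fib_lookup_arg whose result pointer the
 * ->action() callback fills in when a rule selects a table.
 *
 *	struct fib_lookup_arg arg = {
 *		.result	= res,
 *		.flags	= FIB_LOOKUP_NOREF,
 *	};
 *
 *	err = fib_rules_lookup(net->ipv4.rules_ops, flowi4_to_flowi(flp), 0, &arg);
 */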
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
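
/*
 * Example (illustrative): for a family with an addr_size of 4 bytes (IPv4),
 * the checks above accept FRA_SRC/FRA_DST only when the prefix length in
 * the header is at most 32 (addr_size * 8) and the attribute payload is
 * exactly 4 bytes; anything else is rejected with -EINVAL.
 */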
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);
	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;
	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);
	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;
	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}
		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check if this rule is a target to any of them. If so,
		 * disable them. As this operation is eventually very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);

		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);

		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}
static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}
static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};
static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);