/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = hold_net(ops->fro_net);

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
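/* Usage sketch (not part of this file): a protocol typically seeds its
 * rule list with defaults at init time, modelled on IPv4's
 * fib_default_rules_init():
 *
 *	err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
 *	if (err < 0)
 *		return err;
 *	err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
 *	if (err < 0)
 *		return err;
 *
 * The tables and preferences shown are the IPv4 choices, not
 * requirements imposed here.
 */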
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);
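/* fib_default_rule_pref() above suggests a preference one below that of
 * the second rule in the list, i.e. a slot directly behind the head rule,
 * so rules added without an explicit FRA_PRIORITY stack up behind it;
 * with fewer than two rules (or a zero second preference) it falls back
 * to 0. */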
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
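/* Look up the ops for an address family under RCU and pin the owning
 * module; the reference is dropped again via rules_ops_put(). */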
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}
static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}
static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	hold_net(net);
	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}
struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);
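/* Registration sketch for a hypothetical family (all "x_" names are
 * placeholders, mirroring the template pattern real families use in
 * their own files):
 *
 *	static const struct fib_rules_ops x_rules_ops_template = {
 *		.family		= AF_X,
 *		.rule_size	= sizeof(struct x_fib_rule),
 *		.addr_size	= sizeof(u32),
 *		.action		= x_rule_action,
 *		.match		= x_rule_match,
 *		.configure	= x_rule_configure,
 *		.compare	= x_rule_compare,
 *		.fill		= x_rule_fill,
 *		.default_pref	= fib_default_rule_pref,
 *		.policy		= x_rule_policy,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	ops = fib_rules_register(&x_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 * __fib_rules_register() rejects a template lacking any of the mandatory
 * callbacks checked above.
 */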
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}
static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}
void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
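/* The fwmark check below uses XOR-then-mask: (a ^ b) & mask is non-zero
 * exactly when a and b differ in a bit covered by mask. E.g. rule->mark
 * 0x12 against fl->mark 0x10 with mark_mask 0x0f gives 0x02 & 0x0f != 0,
 * a mismatch. */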
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->iif) &&
	    !(fl->flags & FLOWI_FLAG_MATCH_ANY_IIF))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->oif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
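/* Walk the rule list under RCU and return via arg->rule the first rule
 * whose action yields anything other than -EAGAIN. FR_ACT_GOTO jumps
 * forward to its resolved target, FR_ACT_NOP is skipped. Unless the
 * caller passed FIB_LOOKUP_NOREF, the matched rule is returned with a
 * reference held and must be released with fib_rule_put(). */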
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
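/* frh->src_len and frh->dst_len are prefix lengths in bits, while the
 * FRA_SRC/FRA_DST attributes carry full addresses of ops->addr_size
 * bytes -- hence the "* 8" bound below. */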
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
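/* RTM_NEWRULE handler: parse and validate the request, allocate and
 * configure the new rule, insert it into the list ordered by preference,
 * resolve any goto rules that were waiting for this preference, and
 * notify listeners. */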
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(rtnl_dereference(r->ctarget) != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
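/* RTM_DELRULE handler: find the first rule matching every attribute
 * present in the request, unlink it, invalidate any goto rules that
 * targeted it, and notify listeners. Rules marked FIB_RULE_PERMANENT
 * cannot be deleted. */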
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check if this rule is a target to any of them. If so,
		 * disable them. As this operation is eventually very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
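/* Upper bound on the size of a rule notification; if the estimate is
 * ever too small, fib_nl_fill_rule() returns -EMSGSIZE and
 * notify_rule_change() flags it as a bug. */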
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_dereference_raw(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname);

		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname);

		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
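/* Dump state lives in the netlink callback: cb->args[0] indexes the ops
 * (address family) being dumped, cb->args[1] the rule within that ops,
 * so an interrupted dump resumes where it left off. */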
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	rtnl_set_sk_err(net, ops->nlgroup, err);
}
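/* Rules reference devices by name. The two helpers below (re)bind or
 * clear the cached ifindex as devices register and unregister, so a rule
 * survives its device disappearing and coming back. */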
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}
static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}
static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};
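/* Module init: wire up the rtnetlink message handlers, per-netns state
 * and the device notifier; on failure, unwind in reverse order. */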
static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}
subsys_initcall(fib_rules_init);