/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
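/*
 * Usage sketch (illustrative, not part of this file): a protocol is
 * expected to seed its default rules from its init path.  The
 * preferences and table ids below mirror what IPv4 uses for its
 * local/main/default rules, quoted here as an assumption:
 *
 *	err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
 *	if (err == 0)
 *		err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
 *	if (err == 0)
 *		err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
 */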
static void notify_rule_change(struct net *net, int event,
			       struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}
static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache();
}
int fib_rules_register(struct net *net, struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_register);
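/*
 * Registration sketch for a hypothetical "myproto" family; every
 * identifier named here is an assumption, but the set of mandatory
 * fields follows directly from the checks above:
 *
 *	static struct fib_rules_ops myproto_rules_ops = {
 *		.family		= AF_MYPROTO,
 *		.rule_size	= sizeof(struct myproto_rule),
 *		.addr_size	= sizeof(u32),
 *		.action		= myproto_rule_action,
 *		.match		= myproto_rule_match,
 *		.configure	= myproto_rule_configure,
 *		.compare	= myproto_rule_compare,
 *		.fill		= myproto_rule_fill,
 *		.policy		= myproto_rule_policy,
 *		.rules_list	= LIST_HEAD_INIT(myproto_rules_ops.rules_list),
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	err = fib_rules_register(net, &myproto_rules_ops);
 */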
void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}
EXPORT_SYMBOL_GPL(fib_rules_cleanup_ops);
int fib_rules_unregister(struct net *net, struct fib_rules_ops *ops)
{
	int err = 0;
	struct fib_rules_ops *o;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list) {
		if (o == ops) {
			list_del_rcu(&o->list);
			fib_rules_cleanup_ops(ops);
			goto out;
		}
	}

	err = -ENOENT;
out:
	spin_unlock(&net->rules_mod_lock);

	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->ifindex && (rule->ifindex != fl->iif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
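/*
 * A worked example of the mark test above: (rule->mark ^ fl->mark) is
 * non-zero exactly in the bits where packet mark and rule mark differ,
 * so masking with rule->mark_mask rejects the packet only when they
 * differ inside the masked bits.  With rule->mark = 0x10 and
 * rule->mark_mask = 0xff, fl->mark values 0x10 and 0x1310 both match
 * (the low byte is 0x10 in each) while 0x11 does not.
 */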
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			fib_rule_get(rule);
			arg->rule = rule;
			goto out;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
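/*
 * Typical caller pattern (an illustrative sketch, not taken from this
 * file): a protocol hands in its flow key and receives the matched
 * rule through the lookup argument.  arg->rule is reference-counted
 * by fib_rules_lookup() above, so the caller must drop it; the
 * .result field shown here is an assumption about the caller's setup.
 *
 *	struct fib_lookup_arg arg = { .result = &res };
 *
 *	err = fib_rules_lookup(ops, flp, 0, &arg);
 *	if (err == 0) {
 *		... act on arg.rule ...
 *		fib_rule_put(arg.rule);
 *	}
 */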
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IFNAME]) {
		struct net_device *dev;

		rule->ifindex = -1;
		nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->ifname);
		if (dev)
			rule->ifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!rule->pref && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				rule->ctarget = r;
				break;
			}
		}

		if (rule->ctarget == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, nlh, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(r->ctarget != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	notify_rule_change(net, RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
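/*
 * For orientation, the FRA_FWMARK/FRA_FWMASK handling above
 * corresponds to userspace requests like the following (iproute2
 * syntax; the mask form assumes a build with fwmark mask support):
 *
 *	ip rule add fwmark 0x10 lookup 100
 *		(no FRA_FWMASK sent, mask defaults to 0xFFFFFFFF)
 *	ip rule add fwmark 0x10/0xff lookup 100
 *		(explicit FRA_FWMASK of 0xff)
 */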
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IFNAME] &&
		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check if this rule is a target to any of the goto rules.
		 * If so, disable them. As this operation is potentially very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (tmp->ctarget == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		synchronize_rcu();
		notify_rule_change(net, RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
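/*
 * Worked example of the fixed part above, assuming 4-byte netlink
 * attribute headers, 4-byte alignment and IFNAMSIZ == 16:
 * NLMSG_ALIGN(sizeof(struct fib_rule_hdr)) is 12 bytes, the IFNAME
 * attribute rounds to 20, and each of the four u32 attributes takes
 * 8, so the preallocated payload is 12 + 20 + 4 * 8 = 64 bytes plus
 * whatever ops->nlmsg_payload() adds.
 */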
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->ifname[0]) {
		NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);

		if (rule->ifindex == -1)
			frh->flags |= FIB_RULE_DEV_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, nlh, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
static void notify_rule_change(struct net *net, int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	err = rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->ifindex == -1 &&
		    strcmp(dev->name, rule->ifname) == 0)
			rule->ifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list)
		if (rule->ifindex == dev->ifindex)
			rule->ifindex = -1;
}
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev->nd_net;
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
static int fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}
static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};
static int __init fib_rules_init(void)
{
	int err;

	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail;

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_netdevice_notifier(&fib_rules_notifier);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);