/*
 * net/core/fib_rules.c	Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>
static LIST_HEAD(rules_ops);
static DEFINE_SPINLOCK(rules_mod_lock);
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;

	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);
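/*
 * Look up the ops for an address family on the RCU-protected rules_ops
 * list and take a reference on the owning module before returning it.
 */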
static struct fib_rules_ops *lookup_rules_ops(int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}
static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}
static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache();
}
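/*
 * Register a per-family ruleset. The ops must provide at least the match,
 * configure, compare, fill and action callbacks, and rule_size must be
 * large enough to hold a struct fib_rule.
 */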
int fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	list_add_tail_rcu(&ops->list, &rules_ops);
	err = 0;
errout:
	spin_unlock(&rules_mod_lock);

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_register);
static void cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		fib_rule_put(rule);
	}
}
int fib_rules_unregister(struct fib_rules_ops *ops)
{
	int err = 0;
	struct fib_rules_ops *o;

	spin_lock(&rules_mod_lock);
	list_for_each_entry(o, &rules_ops, list) {
		if (o == ops) {
			list_del_rcu(&o->list);
			cleanup_ops(ops);
			goto out;
		}
	}

	err = -ENOENT;
out:
	spin_unlock(&rules_mod_lock);

	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);
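/*
 * Generic part of rule matching: incoming interface and fwmark/mask are
 * checked here, everything else is delegated to the family's ->match().
 * FIB_RULE_INVERT negates the result.
 */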
static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->ifindex && (rule->ifindex != fl->iif))
		goto out;

	if ((rule->mark ^ fl->mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}
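/*
 * Walk the rule list under RCU in priority order. Goto rules jump to their
 * resolved target, NOP rules are skipped, and the first rule whose action
 * does not return -EAGAIN terminates the lookup.
 */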
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (err != -EAGAIN) {
			fib_rule_get(rule);
			arg->rule = rule;
			goto out;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);
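/*
 * Sanity-check the source/destination selectors: the prefix lengths must
 * fit the family's address size and the FRA_SRC/FRA_DST attributes must be
 * exactly ops->addr_size bytes long.
 */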
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}
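/*
 * RTM_NEWRULE handler: parse the netlink request, allocate and configure a
 * new rule, resolve or record goto targets, and insert the rule into the
 * list in ascending priority order before notifying listeners.
 */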
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = skb->sk->sk_net;
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IFNAME]) {
		struct net_device *dev;

		rule->ifindex = -1;
		nla_strlcpy(rule->ifname, tb[FRA_IFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->ifname);
		if (dev)
			rule->ifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);

	if (!rule->pref && ops->default_pref)
		rule->pref = ops->default_pref();

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				rule->ctarget = r;
				break;
			}
		}

		if (rule->ctarget == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, nlh, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref) {
				BUG_ON(r->ctarget != NULL);
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).pid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}
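/*
 * RTM_DELRULE handler: find the first rule matching all given selectors,
 * unlink it under RCU, invalidate any goto rules pointing at it, and send
 * a deletion notification.
 */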
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh->table && (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IFNAME] &&
		    nla_strcmp(tb[FRA_IFNAME], rule->ifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO)
			ops->nr_goto_rules--;

		/*
		 * Check if this rule is a target to any of the other rules.
		 * If so, disable them. As this operation is potentially very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (tmp->ctarget == rule) {
					rcu_assign_pointer(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		synchronize_rcu();
		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).pid);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}
static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}
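/*
 * Fill a netlink message for a single rule. The NLA_PUT* macros jump to
 * nla_put_failure when the skb runs out of room, in which case the message
 * is cancelled and -EMSGSIZE is returned.
 */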
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->table = rule->table;
	NLA_PUT_U32(skb, FRA_TABLE, rule->table);
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO && rule->ctarget == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->ifname[0]) {
		NLA_PUT_STRING(skb, FRA_IFNAME, rule->ifname);

		if (rule->ifindex == -1)
			frh->flags |= FIB_RULE_DEV_DETACHED;
	}

	if (rule->pref)
		NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref);

	if (rule->mark)
		NLA_PUT_U32(skb, FRA_FWMARK, rule->mark);

	if (rule->mark_mask || rule->mark)
		NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask);

	if (rule->target)
		NLA_PUT_U32(skb, FRA_GOTO, rule->target);

	if (ops->fill(rule, skb, nlh, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
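/* Dump the rules of one family, resuming at cb->args[1] on a partial dump. */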
static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}

	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
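/*
 * Multicast a RTM_NEWRULE/RTM_DELRULE notification to the family's netlink
 * group; a failed fill here indicates a bad size estimate in
 * fib_rule_nlmsg_size().
 */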
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(ops->nlgroup, err);
}
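/*
 * Rules may reference interfaces by name before the device exists. The
 * netdevice notifier below (re)binds rule->ifindex when a matching device
 * registers and detaches it again on unregister.
 */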
static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->ifindex == -1 &&
		    strcmp(dev->name, rule->ifname) == 0)
			rule->ifindex = dev->ifindex;
	}
}
static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list)
		if (rule->ifindex == dev->ifindex)
			rule->ifindex = -1;
}
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = ptr;
	struct fib_rules_ops *ops;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	ASSERT_RTNL();
	rcu_read_lock();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	rcu_read_unlock();

	return NOTIFY_DONE;
}
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
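/*
 * Hook up the RTM_{NEW,DEL,GET}RULE rtnetlink handlers and the netdevice
 * notifier at subsystem init time.
 */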
static int __init fib_rules_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule);

	return register_netdevice_notifier(&fib_rules_notifier);
}

subsys_initcall(fib_rules_init);