/*
 * net/sched/act_api.c	Packet action API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>

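/* RCU callback used to free a tcf_common entry once readers are done:
 * release the per-CPU stats first, then the entry itself.
 */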
static void free_tcf(struct rcu_head *head)
{
	struct tcf_common *p = container_of(head, struct tcf_common, tcfc_rcu);

	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_qstats);
	kfree(p);
}

static void tcf_hash_destroy(struct tc_action *a)
{
	struct tcf_common *p = a->priv;
	struct tcf_hashinfo *hinfo = a->ops->hinfo;

	spin_lock_bh(&hinfo->lock);
	hlist_del(&p->tcfc_head);
	spin_unlock_bh(&hinfo->lock);
	gen_kill_estimator(&p->tcfc_bstats,
			   &p->tcfc_rate_est);
	/*
	 * gen_estimator est_timer() might access p->tcfc_lock
	 * or bstats, wait a RCU grace period before freeing p
	 */
	call_rcu(&p->tcfc_rcu, free_tcf);
}

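/* Drop a bind and/or ordinary reference on an action.  Once both counts
 * reach zero the action is cleaned up and removed from its hash table.
 * Returns 1 if the action was destroyed, 0 if it is still referenced,
 * or -EPERM when @strict is set and the action is still bound.
 */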
int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
{
	struct tcf_common *p = a->priv;
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfc_bindcnt--;
		else if (strict && p->tcfc_bindcnt > 0)
			return -EPERM;

		p->tcfc_refcnt--;
		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
			if (a->ops->cleanup)
				a->ops->cleanup(a, bind);
			tcf_hash_destroy(a);
			ret = 1;
		}
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_hash_release);

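/* Dump walker for RTM_GETACTION: iterate the per-kind hash table,
 * skipping the cb->args[0] entries already dumped, and emit at most
 * TCA_ACT_MAX_PRIO actions per pass.  Returns the number dumped.
 */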
static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct hlist_head *head;
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	spin_lock_bh(&hinfo->lock);

	s_i = cb->args[0];

	for (i = 0; i < (hinfo->hmask + 1); i++) {
		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];

		hlist_for_each_entry_rcu(p, head, tcfc_head) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;

			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nlmsg_trim(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	spin_unlock_bh(&hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

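/* Flush walker for RTM_DELACTION: release every action of this kind and
 * report how many were deleted via the TCA_FCNT attribute.  A still-bound
 * entry aborts the flush with an error.
 */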
static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct hlist_head *head;
	struct hlist_node *n;
	struct tcf_common *p;
	struct nlattr *nest;
	int i = 0, n_i = 0;
	int ret = -EINVAL;

	nest = nla_nest_start(skb, a->order);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	for (i = 0; i < (hinfo->hmask + 1); i++) {
		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
		hlist_for_each_entry_safe(p, n, head, tcfc_head) {
			a->priv = p;
			ret = __tcf_hash_release(a, false, true);
			if (ret == ACT_P_DELETED) {
				module_put(a->ops->owner);
				n_i++;
			} else if (ret < 0)
				goto nla_put_failure;
		}
	}
	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
			      int type, struct tc_action *a)
{
	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a);
	} else {
		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}

static struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = NULL;
	struct hlist_head *head;

	spin_lock_bh(&hinfo->lock);
	head = &hinfo->htab[tcf_hash(index, hinfo->hmask)];
	hlist_for_each_entry_rcu(p, head, tcfc_head)
		if (p->tcfc_index == index)
			break;
	spin_unlock_bh(&hinfo->lock);

	return p;
}

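/* Pick the next unused action index for this hash table, wrapping around
 * and skipping 0 (a zero index means "allocate one" in tcf_hash_create()).
 */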
u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo)
{
	u32 val = hinfo->index;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_hash_lookup(val, hinfo));

	hinfo->index = val;
	return val;
}
EXPORT_SYMBOL(tcf_hash_new_index);

int tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = tcf_hash_lookup(index, hinfo);

	if (p) {
		a->priv = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_search);

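/* Look up an existing action by index.  If found, take a reference
 * (and a bind reference when @bind is set), attach it to @a and
 * return 1; otherwise return 0 so the caller can create a new one.
 */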
int tcf_hash_check(u32 index, struct tc_action *a, int bind)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = NULL;
	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
		if (bind)
			p->tcfc_bindcnt++;
		p->tcfc_refcnt++;
		a->priv = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_check);

void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
{
	struct tcf_common *pc = a->priv;
	if (est)
		gen_kill_estimator(&pc->tcfc_bstats,
				   &pc->tcfc_rate_est);
	call_rcu(&pc->tcfc_rcu, free_tcf);
}
EXPORT_SYMBOL(tcf_hash_cleanup);

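/* Allocate and initialise a new action instance of @size bytes (the
 * per-action private struct starts with tcf_common).  Optionally sets up
 * per-CPU stats and a rate estimator.  The entry is not visible until
 * the caller runs tcf_hash_insert().
 */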
int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
		    int size, int bind, bool cpustats)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = kzalloc(size, GFP_KERNEL);
	int err = -ENOMEM;

	if (unlikely(!p))
		return -ENOMEM;
	p->tcfc_refcnt = 1;
	if (bind)
		p->tcfc_bindcnt = 1;

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats) {
err1:
			kfree(p);
			return err;
		}
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats) {
err2:
			free_percpu(p->cpu_bstats);
			goto err1;
		}
	}
	spin_lock_init(&p->tcfc_lock);
	INIT_HLIST_NODE(&p->tcfc_head);
	p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
	p->tcfc_tm.install = jiffies;
	p->tcfc_tm.lastuse = jiffies;
	if (est) {
		err = gen_new_estimator(&p->tcfc_bstats, p->cpu_bstats,
					&p->tcfc_rate_est,
					&p->tcfc_lock, est);
		if (err) {
			free_percpu(p->cpu_qstats);
			goto err2;
		}
	}

	a->priv = (void *) p;
	return 0;
}
EXPORT_SYMBOL(tcf_hash_create);

void tcf_hash_insert(struct tc_action *a)
{
	struct tcf_common *p = a->priv;
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);

	spin_lock_bh(&hinfo->lock);
	hlist_add_head(&p->tcfc_head, &hinfo->htab[h]);
	spin_unlock_bh(&hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

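/* Register an action kind.  The act, dump and init callbacks are
 * mandatory; lookup and walk fall back to the generic hash-table
 * helpers.  Each kind gets its own tcf_hashinfo sized by @mask, and a
 * duplicate type or kind is rejected with -EEXIST.
 */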
int tcf_register_action(struct tc_action_ops *act, unsigned int mask)
{
	struct tc_action_ops *a;
	int err;

	/* Must supply act, dump and init */
	if (!act->act || !act->dump || !act->init)
		return -EINVAL;

	/* Supply defaults */
	if (!act->lookup)
		act->lookup = tcf_hash_search;
	if (!act->walk)
		act->walk = tcf_generic_walker;

	act->hinfo = kmalloc(sizeof(struct tcf_hashinfo), GFP_KERNEL);
	if (!act->hinfo)
		return -ENOMEM;
	err = tcf_hashinfo_init(act->hinfo, mask);
	if (err) {
		kfree(act->hinfo);
		return err;
	}

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			tcf_hashinfo_destroy(act->hinfo);
			kfree(act->hinfo);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);
	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			tcf_hashinfo_destroy(act->hinfo);
			kfree(act->hinfo);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

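/* Run a list of actions on @skb in order.  TC_ACT_REPEAT re-runs the
 * same action, TC_ACT_PIPE moves on to the next one, and any other
 * verdict ends the walk and is returned to the classifier.
 */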
int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
		    struct tcf_result *res)
{
	const struct tc_action *a;
	int ret = -1;

	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		ret = TC_ACT_OK;
		goto exec_done;
	}
	list_for_each_entry(a, actions, list) {
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */
		if (ret != TC_ACT_PIPE)
			goto exec_done;
	}
exec_done:
	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct list_head *actions, int bind)
{
	struct tc_action *a, *tmp;
	int ret = 0;

	list_for_each_entry_safe(a, tmp, actions, list) {
		ret = __tcf_hash_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(a->ops->owner);
		else if (ret < 0)
			return ret;
		list_del(&a->list);
		kfree(a);
	}
	return ret;
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int
tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	list_for_each_entry(a, actions, list) {
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}

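/* Create (or look up) a single action from its netlink attributes.
 * The kind is resolved via tc_lookup_action_n(), auto-loading the
 * act_<kind> module if necessary; because that drops the RTNL lock,
 * -EAGAIN is returned so the caller replays the whole request.
 */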
struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
				    struct nlattr *est, char *name, int ovr,
				    int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	err = -ENOMEM;
	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (a == NULL)
		goto err_mod;

	a->ops = a_o;
	INIT_LIST_HEAD(&a->list);
	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
	else
		err = a_o->init(net, nla, est, a, ovr, bind);
	if (err < 0)
		goto err_free;

	/* module count goes up only when brand new policy is created
	 * if it exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	return a;

err_free:
	kfree(a);
err_mod:
	module_put(a_o->owner);
err_out:
	return ERR_PTR(err);
}

int tcf_action_init(struct net *net, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr,
		    int bind, struct list_head *actions)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, actions);
	}
	return 0;

err:
	tcf_action_destroy(actions, bind);
	return err;
}

int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;
	struct tcf_common *p = a->priv;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (a->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
				TCA_STATS, TCA_XSTATS, &p->tcfc_lock, &d);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfc_lock, &d);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, p->cpu_bstats, &p->tcfc_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
				     &p->tcfc_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfc_qstats,
				  p->tcfc_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int
tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq,
	     u16 flags, int event, int bind, int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct list_head *actions, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *create_a(int i)
{
	struct tc_action *act;

	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (act == NULL) {
		pr_debug("create_a: failed to alloc!\n");
		return NULL;
	}
	act->order = i;
	INIT_LIST_HEAD(&act->list);
	return act;
}

static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -ENOMEM;
	a = create_a(0);
	if (a == NULL)
		goto err_out;

	err = -EINVAL;
	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (a->ops == NULL) /* could happen in batch of actions */
		goto err_free;
	err = -ENOENT;
	if (a->ops->lookup(a, index) == 0)
		goto err_mod;

	module_put(a->ops->owner);
	return a;

err_mod:
	module_put(a->ops->owner);
err_free:
	kfree(a);
err_out:
	return ERR_PTR(err);
}

static void cleanup_a(struct list_head *actions)
{
	struct tc_action *a, *tmp;

	list_for_each_entry_safe(a, tmp, actions, list) {
		list_del(&a->list);
		kfree(a);
	}
}

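/* Handle RTM_DELACTION with NLM_F_ROOT: flush every action of the
 * requested kind using the kind's walk() callback and notify
 * listeners on RTNLGRP_TC.
 */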
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	struct tc_action a;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		pr_debug("tca_action_flush: failed skb alloc\n");
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	memset(&a, 0, sizeof(struct tc_action));
	INIT_LIST_HEAD(&a.list);
	a.ops = tc_lookup_action(kind);
	if (a.ops == NULL) /*some idjot trying to flush unknown action */
		goto err_out;

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a);
	if (err < 0)
		goto out_module_put;
	if (err == 0)
		goto noflush_out;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a.ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

out_module_put:
	module_put(a.ops->owner);
err_out:
noflush_out:
	kfree_skb(skb);
	return err;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_destroy(actions, 0);
	if (ret < 0) {
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	LIST_HEAD(actions);

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(net, tb[1], n, portid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(tb[i], n, portid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, &actions);
	}

	if (event == RTM_GETACTION)
		ret = act_get_notify(net, portid, n, &actions, event);
	else { /* delete */
		ret = tcf_del_notify(net, n, &actions, portid);
		if (ret)
			goto err;
		return ret;
	}
err:
	cleanup_a(&actions);
	return ret;
}

static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int
tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	       u32 portid, int ovr)
{
	int ret = 0;
	LIST_HEAD(actions);

	ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
	if (ret)
		goto done;

	/* dump then free all the actions after update; inserted policy
	 * stays intact
	 */
	ret = tcf_add_notify(net, n, &actions, portid);
	cleanup_a(&actions);
done:
	return ret;
}

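/* Top-level rtnetlink handler for RTM_NEWACTION/DELACTION/GETACTION.
 * Adds are replayed on -EAGAIN (module auto-load dropped the RTNL
 * lock); deletes and gets are dispatched through tca_action_gd().
 */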
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ACT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		pr_notice("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume that all other flags imply
		 * "create only if it doesn't exist".  Note that
		 * CREATE | EXCL implies that already, but since we
		 * want to avoid ambiguity (e.g. when flags are zero)
		 * we just set it explicitly here.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}

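/* Dig the TCA_ACT_KIND attribute of the first action out of a dump
 * request so tc_dump_action() knows which table to walk.
 */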
static struct nlattr *
find_dump_kind(const struct nlmsghdr *n)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *nla[TCAA_MAX + 1];
	struct nlattr *kind;

	if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
		return NULL;
	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
		      nla_len(tb[1]), NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}

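/* Netlink dump callback: walk the hash table of the requested kind
 * and stream the actions back under a TCA_ACT_TAB nest.
 */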
static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tc_action a;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *kind = find_dump_kind(cb->nlh);

	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	memset(&a, 0, sizeof(struct tc_action));
	a.ops = a_o;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
	} else
		nla_nest_cancel(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      NULL);

	return 0;
}

subsys_initcall(tc_action_init);