/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);

/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(cls_mod_lock);

/* Find classifier type by string name */

static const struct tcf_proto_ops *tcf_proto_lookup_ops(const char *kind)
{
	const struct tcf_proto_ops *t, *res = NULL;

	if (kind) {
		read_lock(&cls_mod_lock);
		list_for_each_entry(t, &tcf_proto_base, head) {
			if (strcmp(kind, t->kind) == 0) {
				if (try_module_get(t->owner))
					res = t;
				break;
			}
		}
		read_unlock(&cls_mod_lock);
	}
	return res;
}

/* Register(unregister) new classifier type */

int register_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -EEXIST;

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head)
		if (!strcmp(ops->kind, t->kind))
			goto out;

	list_add_tail(&ops->head, &tcf_proto_base);
	rc = 0;
out:
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(register_tcf_proto_ops);

static struct workqueue_struct *tc_filter_wq;

int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
	struct tcf_proto_ops *t;
	int rc = -ENOENT;

	/* Wait for outstanding call_rcu()s, if any, from a
	 * tcf_proto_ops's destroy() handler.
	 */
	rcu_barrier();
	flush_workqueue(tc_filter_wq);

	write_lock(&cls_mod_lock);
	list_for_each_entry(t, &tcf_proto_base, head) {
		if (t == ops) {
			list_del(&t->head);
			rc = 0;
			break;
		}
	}
	write_unlock(&cls_mod_lock);
	return rc;
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);

bool tcf_queue_work(struct work_struct *work)
{
	return queue_work(tc_filter_wq, work);
}
EXPORT_SYMBOL(tcf_queue_work);

/* Select new prio value from the range, managed by kernel. */

static inline u32 tcf_auto_prio(struct tcf_proto *tp)
{
	u32 first = TC_H_MAKE(0xC0000000U, 0U);

	if (tp)
		first = tp->prio - 1;

	return TC_H_MAJ(first);
}

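/* Allocate and initialize a classifier (tcf_proto) of the given kind.
 * If the kind is not known, the matching cls_* module is loaded with RTNL
 * dropped and -EAGAIN is returned so the caller replays the request.
 */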
static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
					  u32 prio, struct tcf_chain *chain,
					  struct netlink_ext_ack *extack)
{
	struct tcf_proto *tp;
	int err;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOBUFS);

	err = -ENOENT;
	tp->ops = tcf_proto_lookup_ops(kind);
	if (!tp->ops) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("cls_%s", kind);
		rtnl_lock();
		tp->ops = tcf_proto_lookup_ops(kind);
		/* We dropped the RTNL semaphore in order to perform
		 * the module load. So, even if we succeeded in loading
		 * the module we have to replay the request. We indicate
		 * this using -EAGAIN.
		 */
		if (tp->ops) {
			module_put(tp->ops->owner);
			err = -EAGAIN;
		} else {
			NL_SET_ERR_MSG(extack, "TC classifier not found");
			err = -ENOENT;
		}
		goto errout;
#endif
	}
	tp->classify = tp->ops->classify;
	tp->protocol = protocol;
	tp->prio = prio;
	tp->chain = chain;

	err = tp->ops->init(tp);
	if (err) {
		module_put(tp->ops->owner);
		goto errout;
	}
	return tp;

errout:
	kfree(tp);
	return ERR_PTR(err);
}

static void tcf_proto_destroy(struct tcf_proto *tp,
			      struct netlink_ext_ack *extack)
{
	tp->ops->destroy(tp, extack);
	module_put(tp->ops->owner);
	kfree_rcu(tp, rcu);
}

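/* Each item on a chain's filter_chain_list carries one "head change"
 * callback; it is invoked whenever the first classifier of the chain is
 * replaced, so block users can update their cached tcf_proto pointer.
 */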
struct tcf_filter_chain_list_item {
	struct list_head list;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};

static struct tcf_chain *tcf_chain_create(struct tcf_block *block,
					  u32 chain_index)
{
	struct tcf_chain *chain;

	chain = kzalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return NULL;
	INIT_LIST_HEAD(&chain->filter_chain_list);
	list_add_tail(&chain->list, &block->chain_list);
	chain->block = block;
	chain->index = chain_index;
	chain->refcnt = 1;
	return chain;
}

static void tcf_chain_head_change_item(struct tcf_filter_chain_list_item *item,
				       struct tcf_proto *tp_head)
{
	if (item->chain_head_change)
		item->chain_head_change(tp_head, item->chain_head_change_priv);
}

static void tcf_chain_head_change(struct tcf_chain *chain,
				  struct tcf_proto *tp_head)
{
	struct tcf_filter_chain_list_item *item;

	list_for_each_entry(item, &chain->filter_chain_list, list)
		tcf_chain_head_change_item(item, tp_head);
}

static void tcf_chain_flush(struct tcf_chain *chain)
{
	struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);

	tcf_chain_head_change(chain, NULL);
	while (tp) {
		RCU_INIT_POINTER(chain->filter_chain, tp->next);
		tcf_proto_destroy(tp, NULL);
		tp = rtnl_dereference(chain->filter_chain);
		tcf_chain_put(chain);
	}
}

static void tcf_chain_destroy(struct tcf_chain *chain)
{
	struct tcf_block *block = chain->block;

	list_del(&chain->list);
	kfree(chain);
	if (list_empty(&block->chain_list))
		kfree(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
{
	++chain->refcnt;
}

struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create)
{
	struct tcf_chain *chain;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (chain->index == chain_index) {
			tcf_chain_hold(chain);
			return chain;
		}
	}

	return create ? tcf_chain_create(block, chain_index) : NULL;
}
EXPORT_SYMBOL(tcf_chain_get);

void tcf_chain_put(struct tcf_chain *chain)
{
	if (--chain->refcnt == 0)
		tcf_chain_destroy(chain);
}
EXPORT_SYMBOL(tcf_chain_put);

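/* Hardware offload binding: the block is announced to the device via
 * ndo_setup_tc(TC_SETUP_BLOCK). nooffloaddevcnt counts bound devices that
 * cannot offload, so offloaded filters and such devices are not mixed on
 * a shared block.
 */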
static bool tcf_block_offload_in_use(struct tcf_block *block)
{
	return block->offloadcnt;
}

static int tcf_block_offload_cmd(struct tcf_block *block,
				 struct net_device *dev,
				 struct tcf_block_ext_info *ei,
				 enum tc_block_command command)
{
	struct tc_block_offload bo = {};

	bo.command = command;
	bo.binder_type = ei->binder_type;
	bo.block = block;
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
}

static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
				  struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_inc;

	/* If tc offload feature is disabled and the block we try to bind
	 * to already has some offloaded filters, forbid to bind.
	 */
	if (!tc_can_offload(dev) && tcf_block_offload_in_use(block))
		return -EOPNOTSUPP;

	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_BIND);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_inc;
	return err;

no_offload_dev_inc:
	if (tcf_block_offload_in_use(block))
		return -EOPNOTSUPP;
	block->nooffloaddevcnt++;
	return 0;
}

static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
				     struct tcf_block_ext_info *ei)
{
	struct net_device *dev = q->dev_queue->dev;
	int err;

	if (!dev->netdev_ops->ndo_setup_tc)
		goto no_offload_dev_dec;
	err = tcf_block_offload_cmd(block, dev, ei, TC_BLOCK_UNBIND);
	if (err == -EOPNOTSUPP)
		goto no_offload_dev_dec;
	return;

no_offload_dev_dec:
	WARN_ON(block->nooffloaddevcnt-- == 0);
}

static int
tcf_chain_head_change_cb_add(struct tcf_chain *chain,
			     struct tcf_block_ext_info *ei,
			     struct netlink_ext_ack *extack)
{
	struct tcf_filter_chain_list_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		NL_SET_ERR_MSG(extack, "Memory allocation for head change callback item failed");
		return -ENOMEM;
	}
	item->chain_head_change = ei->chain_head_change;
	item->chain_head_change_priv = ei->chain_head_change_priv;
	if (chain->filter_chain)
		tcf_chain_head_change_item(item, chain->filter_chain);
	list_add(&item->list, &chain->filter_chain_list);
	return 0;
}

static void
tcf_chain_head_change_cb_del(struct tcf_chain *chain,
			     struct tcf_block_ext_info *ei)
{
	struct tcf_filter_chain_list_item *item;

	list_for_each_entry(item, &chain->filter_chain_list, list) {
		if ((!ei->chain_head_change && !ei->chain_head_change_priv) ||
		    (item->chain_head_change == ei->chain_head_change &&
		     item->chain_head_change_priv == ei->chain_head_change_priv)) {
			tcf_chain_head_change_item(item, NULL);
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

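/* Shared blocks live in a per-netns IDR keyed by block index, so qdiscs in
 * the same namespace can look them up by index and attach to them.
 */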
struct tcf_net {
	struct idr idr;
};

static unsigned int tcf_net_id;

static int tcf_block_insert(struct tcf_block *block, struct net *net,
			    struct netlink_ext_ack *extack)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_alloc_u32(&tn->idr, block, &block->index, block->index,
			     GFP_KERNEL);
}

static void tcf_block_remove(struct tcf_block *block, struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_remove(&tn->idr, block->index);
}

static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
					  u32 block_index,
					  struct netlink_ext_ack *extack)
{
	struct tcf_block *block;
	struct tcf_chain *chain;
	int err;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block) {
		NL_SET_ERR_MSG(extack, "Memory allocation for block failed");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&block->chain_list);
	INIT_LIST_HEAD(&block->cb_list);
	INIT_LIST_HEAD(&block->owner_list);

	/* Create chain 0 by default, it has to be always present. */
	chain = tcf_chain_create(block, 0);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Failed to create new tcf chain");
		err = -ENOMEM;
		goto err_chain_create;
	}
	block->refcnt = 1;
	block->net = net;
	block->index = block_index;

	/* Don't store q pointer for blocks which are shared */
	if (!tcf_block_shared(block))
		block->q = q;
	return block;

err_chain_create:
	kfree(block);
	return ERR_PTR(err);
}

static struct tcf_block *tcf_block_lookup(struct net *net, u32 block_index)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	return idr_find(&tn->idr, block_index);
}

static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
{
	return list_first_entry(&block->chain_list, struct tcf_chain, list);
}

struct tcf_block_owner_item {
	struct list_head list;
	struct Qdisc *q;
	enum tcf_block_binder_type binder_type;
};

static void
tcf_block_owner_netif_keep_dst(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	if (block->keep_dst &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		netif_keep_dst(qdisc_dev(q));
}

void tcf_block_netif_keep_dst(struct tcf_block *block)
{
	struct tcf_block_owner_item *item;

	block->keep_dst = true;
	list_for_each_entry(item, &block->owner_list, list)
		tcf_block_owner_netif_keep_dst(block, item->q,
					       item->binder_type);
}
EXPORT_SYMBOL(tcf_block_netif_keep_dst);

static int tcf_block_owner_add(struct tcf_block *block,
			       struct Qdisc *q,
			       enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	item = kmalloc(sizeof(*item), GFP_KERNEL);
	if (!item)
		return -ENOMEM;
	item->q = q;
	item->binder_type = binder_type;
	list_add(&item->list, &block->owner_list);
	return 0;
}

static void tcf_block_owner_del(struct tcf_block *block,
				struct Qdisc *q,
				enum tcf_block_binder_type binder_type)
{
	struct tcf_block_owner_item *item;

	list_for_each_entry(item, &block->owner_list, list) {
		if (item->q == q && item->binder_type == binder_type) {
			list_del(&item->list);
			kfree(item);
			return;
		}
	}
	WARN_ON(1);
}

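/* Attach a qdisc to a block: look up or create the block, record the qdisc
 * as an owner, register the chain 0 head change callback and bind the block
 * to the device for offload.
 */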
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	struct net *net = qdisc_net(q);
	struct tcf_block *block = NULL;
	bool created = false;
	int err;

	if (ei->block_index) {
		/* block_index not 0 means the shared block is requested */
		block = tcf_block_lookup(net, ei->block_index);
		if (block)
			block->refcnt++;
	}

	if (!block) {
		block = tcf_block_create(net, q, ei->block_index, extack);
		if (IS_ERR(block))
			return PTR_ERR(block);
		created = true;
		if (tcf_block_shared(block)) {
			err = tcf_block_insert(block, net, extack);
			if (err)
				goto err_block_insert;
		}
	}

	err = tcf_block_owner_add(block, q, ei->binder_type);
	if (err)
		goto err_block_owner_add;

	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);

	err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block),
					   ei, extack);
	if (err)
		goto err_chain_head_change_cb_add;

	err = tcf_block_offload_bind(block, q, ei);
	if (err)
		goto err_block_offload_bind;

	*p_block = block;
	return 0;

err_block_offload_bind:
	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
err_chain_head_change_cb_add:
	tcf_block_owner_del(block, q, ei->binder_type);
err_block_owner_add:
	if (created) {
		if (tcf_block_shared(block))
			tcf_block_remove(block, net);
err_block_insert:
		kfree(tcf_block_chain_zero(block));
		kfree(block);
	} else {
		block->refcnt--;
	}
	return err;
}
EXPORT_SYMBOL(tcf_block_get_ext);

static void tcf_chain_head_change_dflt(struct tcf_proto *tp_head, void *priv)
{
	struct tcf_proto __rcu **p_filter_chain = priv;

	rcu_assign_pointer(*p_filter_chain, tp_head);
}

int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	struct tcf_block_ext_info ei = {
		.chain_head_change = tcf_chain_head_change_dflt,
		.chain_head_change_priv = p_filter_chain,
	};

	WARN_ON(!p_filter_chain);
	return tcf_block_get_ext(p_block, q, &ei, extack);
}
EXPORT_SYMBOL(tcf_block_get);

/* XXX: Standalone actions are not allowed to jump to any chain, and bound
 * actions should be all removed after flushing.
 */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	struct tcf_chain *chain, *tmp;

	if (!block)
		return;
	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
	tcf_block_owner_del(block, q, ei->binder_type);

	if (--block->refcnt == 0) {
		if (tcf_block_shared(block))
			tcf_block_remove(block, block->net);

		/* Hold a refcnt for all chains, so that they don't disappear
		 * while we are iterating.
		 */
		list_for_each_entry(chain, &block->chain_list, list)
			tcf_chain_hold(chain);

		list_for_each_entry(chain, &block->chain_list, list)
			tcf_chain_flush(chain);
	}

	tcf_block_offload_unbind(block, q, ei);

	if (block->refcnt == 0) {
		/* At this point, all the chains should have refcnt >= 1. */
		list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
			tcf_chain_put(chain);

		/* Finally, put chain 0 and allow block to be freed. */
		tcf_chain_put(tcf_block_chain_zero(block));
	}
}
EXPORT_SYMBOL(tcf_block_put_ext);

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}

EXPORT_SYMBOL(tcf_block_put);

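/* Block callbacks are registered by drivers that want to see every filter
 * added to the block, typically to program it into hardware.
 */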
struct tcf_block_cb {
	struct list_head list;
	tc_setup_cb_t *cb;
	void *cb_ident;
	void *cb_priv;
	unsigned int refcnt;
};

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(tcf_block_cb_priv);

struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{	struct tcf_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list)
		if (block_cb->cb == cb && block_cb->cb_ident == cb_ident)
			return block_cb;
	return NULL;
}
EXPORT_SYMBOL(tcf_block_cb_lookup);

void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(tcf_block_cb_incref);

unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(tcf_block_cb_decref);

struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	struct tcf_block_cb *block_cb;

	/* At this point, playback of previous block cb calls is not supported,
	 * so forbid to register to block which already has some offloaded
	 * filters present.
	 */
	if (tcf_block_offload_in_use(block))
		return ERR_PTR(-EOPNOTSUPP);

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);
	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	list_add(&block_cb->list, &block->cb_list);
	return block_cb;
}
EXPORT_SYMBOL(__tcf_block_cb_register);

int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	struct tcf_block_cb *block_cb;

	block_cb = __tcf_block_cb_register(block, cb, cb_ident, cb_priv);
	return IS_ERR(block_cb) ? PTR_ERR(block_cb) : 0;
}
EXPORT_SYMBOL(tcf_block_cb_register);

void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
	list_del(&block_cb->list);
	kfree(block_cb);
}
EXPORT_SYMBOL(__tcf_block_cb_unregister);

void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
	struct tcf_block_cb *block_cb;

	block_cb = tcf_block_cb_lookup(block, cb, cb_ident);
	if (!block_cb)
		return;
	__tcf_block_cb_unregister(block_cb);
}
EXPORT_SYMBOL(tcf_block_cb_unregister);

static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type,
			     void *type_data, bool err_stop)
{
	struct tcf_block_cb *block_cb;
	int ok_count = 0;
	int err;

	/* Make sure all netdevs sharing this block are offload-capable. */
	if (block->nooffloaddevcnt && err_stop)
		return -EOPNOTSUPP;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		err = block_cb->cb(type, type_data, block_cb->cb_priv);
		if (err) {
			if (err_stop)
				return err;
		} else {
			ok_count++;
		}
	}
	return ok_count;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *orig_tp = tp;
	const struct tcf_proto *first_tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode)) {
			first_tp = orig_tp;
			goto reset;
		} else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) {
			first_tp = res->goto_tp;
			goto reset;
		}
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%u: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->chain->block->index,
				       tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = first_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tcf_classify);

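/* chain_info remembers the insertion point found by tcf_chain_tp_find():
 * pprev is the link to update and next the following proto, so insert and
 * remove can splice a tcf_proto in or out of the chain.
 */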
struct tcf_chain_info {
	struct tcf_proto __rcu **pprev;
	struct tcf_proto __rcu *next;
};

static struct tcf_proto *tcf_chain_tp_prev(struct tcf_chain_info *chain_info)
{
	return rtnl_dereference(*chain_info->pprev);
}

static void tcf_chain_tp_insert(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	if (*chain_info->pprev == chain->filter_chain)
		tcf_chain_head_change(chain, tp);
	RCU_INIT_POINTER(tp->next, tcf_chain_tp_prev(chain_info));
	rcu_assign_pointer(*chain_info->pprev, tp);
	tcf_chain_hold(chain);
}

static void tcf_chain_tp_remove(struct tcf_chain *chain,
				struct tcf_chain_info *chain_info,
				struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(chain_info->next);

	if (tp == chain->filter_chain)
		tcf_chain_head_change(chain, next);
	RCU_INIT_POINTER(*chain_info->pprev, next);
	tcf_chain_put(chain);
}

static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
					   struct tcf_chain_info *chain_info,
					   u32 protocol, u32 prio,
					   bool prio_allocate)
{
	struct tcf_proto **pprev;
	struct tcf_proto *tp;

	/* Check the chain for existence of proto-tcf with this priority */
	for (pprev = &chain->filter_chain;
	     (tp = rtnl_dereference(*pprev)); pprev = &tp->next) {
		if (tp->prio >= prio) {
			if (tp->prio == prio) {
				if (prio_allocate ||
				    (tp->protocol != protocol && protocol))
					return ERR_PTR(-EINVAL);
			} else {
				tp = NULL;
			}
			break;
		}
	}
	chain_info->pprev = pprev;
	chain_info->next = tp ? tp->next : NULL;
	return tp;
}

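/* Build a netlink message describing one filter, used for both
 * notifications and dumps; the classifier's own ->dump() callback fills in
 * the type-specific attributes.
 */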
static int tcf_fill_node(struct net *net, struct sk_buff *skb,
			 struct tcf_proto *tp, struct tcf_block *block,
			 struct Qdisc *q, u32 parent, void *fh,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	if (q) {
		tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
		tcm->tcm_parent = parent;
	} else {
		tcm->tcm_ifindex = TCM_IFINDEX_MAGIC_BLOCK;
		tcm->tcm_block_index = block->index;
	}
	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
	if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_CHAIN, tp->chain->index))
		goto nla_put_failure;
	if (!fh) {
		tcm->tcm_handle = 0;
	} else {
		if (tp->ops->dump && tp->ops->dump(net, tp, fh, skb, tcm) < 0)
			goto nla_put_failure;
	}
	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tfilter_notify(struct net *net, struct sk_buff *oskb,
			  struct nlmsghdr *n, struct tcf_proto *tp,
			  struct tcf_block *block, struct Qdisc *q,
			  u32 parent, void *fh, int event, bool unicast)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, event) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
			      struct nlmsghdr *n, struct tcf_proto *tp,
			      struct tcf_block *block, struct Qdisc *q,
			      u32 parent, void *fh, bool unicast, bool *last,
			      struct netlink_ext_ack *extack)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
			  n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER) <= 0) {
		NL_SET_ERR_MSG(extack, "Failed to build del event notification");
		kfree_skb(skb);
		return -EINVAL;
	}

	err = tp->ops->delete(tp, fh, last, extack);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	if (unicast)
		return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err < 0)
		NL_SET_ERR_MSG(extack, "Failed to send filter delete notification");
	return err;
}

static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
				 struct tcf_block *block, struct Qdisc *q,
				 u32 parent, struct nlmsghdr *n,
				 struct tcf_chain *chain, int event)
{
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next))
		tfilter_notify(net, oskb, n, tp, block,
			       q, parent, 0, event, false);
}

/* Add/change/delete/get a filter node */

static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct tcmsg *t;
	u32 protocol;
	u32 prio;
	bool prio_allocate;
	u32 parent;
	u32 chain_index;
	struct Qdisc *q = NULL;
	struct tcf_chain_info chain_info;
	struct tcf_chain *chain = NULL;
	struct tcf_block *block;
	struct tcf_proto *tp;
	unsigned long cl;
	void *fh;
	int err;
	int tp_created;

	if ((n->nlmsg_type != RTM_GETTFILTER) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	tp_created = 0;

	err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	t = nlmsg_data(n);
	protocol = TC_H_MIN(t->tcm_info);
	prio = TC_H_MAJ(t->tcm_info);
	prio_allocate = false;
	parent = t->tcm_parent;
	cl = 0;

	if (prio == 0) {
		switch (n->nlmsg_type) {
		case RTM_DELTFILTER:
			if (protocol || t->tcm_handle || tca[TCA_KIND]) {
				NL_SET_ERR_MSG(extack, "Cannot flush filters with protocol, handle or kind set");
				return -ENOENT;
			}
			break;
		case RTM_NEWTFILTER:
			/* If no priority is provided by the user,
			 * we allocate one.
			 */
			if (n->nlmsg_flags & NLM_F_CREATE) {
				prio = TC_H_MAKE(0x80000000U, 0U);
				prio_allocate = true;
				break;
			}
			/* fall-through */
		default:
			NL_SET_ERR_MSG(extack, "Invalid filter command with priority of zero");
			return -ENOENT;
		}
	}

	/* Find head of filter chain. */

	if (t->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_lookup(net, t->tcm_block_index);
		if (!block) {
			NL_SET_ERR_MSG(extack, "Block of given index was not found");
			err = -EINVAL;
			goto errout;
		}
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;

		/* Find link */
		dev = __dev_get_by_index(net, t->tcm_ifindex);
		if (!dev)
			return -ENODEV;

		/* Find qdisc */
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
			if (!q) {
				NL_SET_ERR_MSG(extack, "Parent Qdisc doesn't exists");
				return -EINVAL;
			}
		}

		/* Is it classful? */
		cops = q->ops->cl_ops;
		if (!cops) {
			NL_SET_ERR_MSG(extack, "Qdisc not classful");
			return -EINVAL;
		}

		if (!cops->tcf_block) {
			NL_SET_ERR_MSG(extack, "Class doesn't support blocks");
			return -EOPNOTSUPP;
		}

		/* Do we search for filter, attached to class? */
		if (TC_H_MIN(parent)) {
			cl = cops->find(q, parent);
			if (cl == 0) {
				NL_SET_ERR_MSG(extack, "Specified class doesn't exist");
				return -ENOENT;
			}
		}

		/* And the last stroke */
		block = cops->tcf_block(q, cl, extack);
		if (!block) {
			err = -EINVAL;
			goto errout;
		}
		if (tcf_block_shared(block)) {
			NL_SET_ERR_MSG(extack, "This filter block is shared. Please use the block index to manipulate the filters");
			err = -EOPNOTSUPP;
			goto errout;
		}
	}

	chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
	if (chain_index > TC_ACT_EXT_VAL_MASK) {
		NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
		err = -EINVAL;
		goto errout;
	}
	chain = tcf_chain_get(block, chain_index,
			      n->nlmsg_type == RTM_NEWTFILTER);
	if (!chain) {
		NL_SET_ERR_MSG(extack, "Cannot find specified filter chain");
		err = n->nlmsg_type == RTM_NEWTFILTER ? -ENOMEM : -EINVAL;
		goto errout;
	}

	if (n->nlmsg_type == RTM_DELTFILTER && prio == 0) {
		tfilter_notify_chain(net, skb, block, q, parent, n,
				     chain, RTM_DELTFILTER);
		tcf_chain_flush(chain);
		err = 0;
		goto errout;
	}

	tp = tcf_chain_tp_find(chain, &chain_info, protocol,
			       prio, prio_allocate);
	if (IS_ERR(tp)) {
		NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
		err = PTR_ERR(tp);
		goto errout;
	}

	if (tp == NULL) {
		/* Proto-tcf does not exist, create new one */

		if (tca[TCA_KIND] == NULL || !protocol) {
			NL_SET_ERR_MSG(extack, "Filter kind and protocol must be specified");
			err = -EINVAL;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}

		if (prio_allocate)
			prio = tcf_auto_prio(tcf_chain_tp_prev(&chain_info));

		tp = tcf_proto_create(nla_data(tca[TCA_KIND]),
				      protocol, prio, chain, extack);
		if (IS_ERR(tp)) {
			err = PTR_ERR(tp);
			goto errout;
		}
		tp_created = 1;
	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
		NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
		err = -EINVAL;
		goto errout;
	}

	fh = tp->ops->get(tp, t->tcm_handle);

	if (!fh) {
		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
			tcf_chain_tp_remove(chain, &chain_info, tp);
			tfilter_notify(net, skb, n, tp, block, q, parent, fh,
				       RTM_DELTFILTER, false);
			tcf_proto_destroy(tp, extack);
			err = 0;
			goto errout;
		}

		if (n->nlmsg_type != RTM_NEWTFILTER ||
		    !(n->nlmsg_flags & NLM_F_CREATE)) {
			NL_SET_ERR_MSG(extack, "Need both RTM_NEWTFILTER and NLM_F_CREATE to create a new filter");
			err = -ENOENT;
			goto errout;
		}
	} else {
		bool last;

		switch (n->nlmsg_type) {
		case RTM_NEWTFILTER:
			if (n->nlmsg_flags & NLM_F_EXCL) {
				if (tp_created)
					tcf_proto_destroy(tp, NULL);
				NL_SET_ERR_MSG(extack, "Filter already exists");
				err = -EEXIST;
				goto errout;
			}
			break;
		case RTM_DELTFILTER:
			err = tfilter_del_notify(net, skb, n, tp, block,
						 q, parent, fh, false, &last,
						 extack);
			if (err)
				goto errout;
			if (last) {
				tcf_chain_tp_remove(chain, &chain_info, tp);
				tcf_proto_destroy(tp, extack);
			}
			goto errout;
		case RTM_GETTFILTER:
			err = tfilter_notify(net, skb, n, tp, block, q, parent,
					     fh, RTM_NEWTFILTER, true);
			if (err < 0)
				NL_SET_ERR_MSG(extack, "Failed to send filter notify message");
			goto errout;
		default:
			NL_SET_ERR_MSG(extack, "Invalid netlink message type");
			err = -EINVAL;
			goto errout;
		}
	}

	err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE,
			      extack);
	if (err == 0) {
		if (tp_created)
			tcf_chain_tp_insert(chain, &chain_info, tp);
		tfilter_notify(net, skb, n, tp, block, q, parent, fh,
			       RTM_NEWTFILTER, false);
	} else {
		if (tp_created)
			tcf_proto_destroy(tp, NULL);
	}

errout:
	if (chain)
		tcf_chain_put(chain);
	if (err == -EAGAIN)
		/* Replay the request. */
		goto replay;
	return err;
}

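/* Walker context handed to each classifier's ->walk() callback while
 * dumping all filters of a block to user space.
 */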
struct tcf_dump_args {
	struct tcf_walker w;
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct tcf_block *block;
	struct Qdisc *q;
	u32 parent;
};

static int tcf_node_dump(struct tcf_proto *tp, void *n, struct tcf_walker *arg)
{
	struct tcf_dump_args *a = (void *)arg;
	struct net *net = sock_net(a->skb->sk);

	return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
			     n, NETLINK_CB(a->cb->skb).portid,
			     a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			     RTM_NEWTFILTER);
}

static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
			   struct sk_buff *skb, struct netlink_callback *cb,
			   long index_start, long *p_index)
{
	struct net *net = sock_net(skb->sk);
	struct tcf_block *block = chain->block;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct tcf_dump_args arg;
	struct tcf_proto *tp;

	for (tp = rtnl_dereference(chain->filter_chain);
	     tp; tp = rtnl_dereference(tp->next), (*p_index)++) {
		if (*p_index < index_start)
			continue;
		if (TC_H_MAJ(tcm->tcm_info) &&
		    TC_H_MAJ(tcm->tcm_info) != tp->prio)
			continue;
		if (TC_H_MIN(tcm->tcm_info) &&
		    TC_H_MIN(tcm->tcm_info) != tp->protocol)
			continue;
		if (*p_index > index_start)
			memset(&cb->args[1], 0,
			       sizeof(cb->args) - sizeof(cb->args[0]));
		if (cb->args[1] == 0) {
			if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
					  NETLINK_CB(cb->skb).portid,
					  cb->nlh->nlmsg_seq, NLM_F_MULTI,
					  RTM_NEWTFILTER) <= 0)
				return false;

			cb->args[1] = 1;
		}
		if (!tp->ops->walk)
			continue;
		arg.w.fn = tcf_node_dump;
		arg.skb = skb;
		arg.cb = cb;
		arg.block = block;
		arg.q = q;
		arg.parent = parent;
		arg.w.stop = 0;
		arg.w.skip = cb->args[1] - 1;
		arg.w.count = 0;
		tp->ops->walk(tp, &arg.w);
		cb->args[1] = arg.w.count + 1;
		if (arg.w.stop)
			return false;
	}
	return true;
}

/* called with RTNL */
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_MAX + 1];
	struct Qdisc *q = NULL;
	struct tcf_block *block;
	struct tcf_chain *chain;
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	long index_start;
	long index;
	u32 parent;
	int err;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return skb->len;

	err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err)
		return err;

	if (tcm->tcm_ifindex == TCM_IFINDEX_MAGIC_BLOCK) {
		block = tcf_block_lookup(net, tcm->tcm_block_index);
		if (!block)
			goto out;
		/* If we work with block index, q is NULL and parent value
		 * will never be used in the following code. The check
		 * in tcf_fill_node prevents it. However, compiler does not
		 * see that far, so set parent to zero to silence the warning
		 * about parent being uninitialized.
		 */
		parent = 0;
	} else {
		const struct Qdisc_class_ops *cops;
		struct net_device *dev;
		unsigned long cl = 0;

		dev = __dev_get_by_index(net, tcm->tcm_ifindex);
		if (!dev)
			return skb->len;

		parent = tcm->tcm_parent;
		if (!parent) {
			q = dev->qdisc;
			parent = q->handle;
		} else {
			q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
		}
		if (!q)
			goto out;
		cops = q->ops->cl_ops;
		if (!cops)
			goto out;
		if (!cops->tcf_block)
			goto out;
		if (TC_H_MIN(tcm->tcm_parent)) {
			cl = cops->find(q, tcm->tcm_parent);
			if (cl == 0)
				goto out;
		}
		block = cops->tcf_block(q, cl, NULL);
		if (!block)
			goto out;
		if (tcf_block_shared(block))
			q = NULL;
	}

	index_start = cb->args[0];
	index = 0;

	list_for_each_entry(chain, &block->chain_list, list) {
		if (tca[TCA_CHAIN] &&
		    nla_get_u32(tca[TCA_CHAIN]) != chain->index)
			continue;
		if (!tcf_chain_dump(chain, q, parent, skb, cb,
				    index_start, &index)) {
			err = -EMSGSIZE;
			break;
		}
	}

	cb->args[0] = index;

out:
	/* If we did no progress, the error (EMSGSIZE) is real */
	if (skb->len == 0 && err)
		return err;
	return skb->len;
}

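/* The tcf_exts helpers below manage the actions attached to a filter:
 * validation from netlink attributes, replacement, dumping and teardown.
 */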
void tcf_exts_destroy(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	LIST_HEAD(actions);

	ASSERT_RTNL();
	tcf_exts_to_list(exts, &actions);
	tcf_action_destroy(&actions, TCA_ACT_UNBIND);
	kfree(exts->actions);
	exts->nr_actions = 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_destroy);

int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
		      struct nlattr *rate_tlv, struct tcf_exts *exts, bool ovr,
		      struct netlink_ext_ack *extack)
{
#ifdef CONFIG_NET_CLS_ACT
	{
		struct tc_action *act;
		size_t attr_size = 0;

		if (exts->police && tb[exts->police]) {
			act = tcf_action_init_1(net, tp, tb[exts->police],
						rate_tlv, "police", ovr,
						TCA_ACT_BIND, extack);
			if (IS_ERR(act))
				return PTR_ERR(act);

			act->type = exts->type = TCA_OLD_COMPAT;
			exts->actions[0] = act;
			exts->nr_actions = 1;
		} else if (exts->action && tb[exts->action]) {
			LIST_HEAD(actions);
			int err, i = 0;

			err = tcf_action_init(net, tp, tb[exts->action],
					      rate_tlv, NULL, ovr, TCA_ACT_BIND,
					      &actions, &attr_size, extack);
			if (err)
				return err;
			list_for_each_entry(act, &actions, list)
				exts->actions[i++] = act;
			exts->nr_actions = i;
		}
		exts->net = net;
	}
#else
	if ((exts->action && tb[exts->action]) ||
	    (exts->police && tb[exts->police])) {
		NL_SET_ERR_MSG(extack, "Classifier actions are not supported per compile options (CONFIG_NET_CLS_ACT)");
		return -EOPNOTSUPP;
	}
#endif

	return 0;
}
EXPORT_SYMBOL(tcf_exts_validate);

void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_exts old = *dst;

	*dst = *src;
	tcf_exts_destroy(&old);
#endif
}
EXPORT_SYMBOL(tcf_exts_change);

#ifdef CONFIG_NET_CLS_ACT
static struct tc_action *tcf_exts_first_act(struct tcf_exts *exts)
{
	if (exts->nr_actions == 0)
		return NULL;
	else
		return exts->actions[0];
}
#endif

int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct nlattr *nest;

	if (exts->action && tcf_exts_has_actions(exts)) {
		/*
		 * again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (exts->type != TCA_OLD_COMPAT) {
			LIST_HEAD(actions);

			nest = nla_nest_start(skb, exts->action);
			if (nest == NULL)
				goto nla_put_failure;

			tcf_exts_to_list(exts, &actions);
			if (tcf_action_dump(skb, &actions, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		} else if (exts->police) {
			struct tc_action *act = tcf_exts_first_act(exts);
			nest = nla_nest_start(skb, exts->police);
			if (nest == NULL || !act)
				goto nla_put_failure;
			if (tcf_action_dump_old(skb, act, 0, 0) < 0)
				goto nla_put_failure;
			nla_nest_end(skb, nest);
		}
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(tcf_exts_dump);

int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *a = tcf_exts_first_act(exts);
	if (a != NULL && tcf_action_copy_stats(skb, a, 1) < 0)
		return -1;
#endif
	return 0;
}
EXPORT_SYMBOL(tcf_exts_dump_stats);

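/* Besides the block callbacks, filters whose actions point at another net
 * device (via a->ops->get_dev) are also offered to that egress device
 * through tc_setup_cb_egdev_call().
 */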
static int tc_exts_setup_cb_egdev_call(struct tcf_exts *exts,
				       enum tc_setup_type type,
				       void *type_data, bool err_stop)
{
	int ok_count = 0;
#ifdef CONFIG_NET_CLS_ACT
	const struct tc_action *a;
	struct net_device *dev;
	int i, ret;

	if (!tcf_exts_has_actions(exts))
		return 0;

	for (i = 0; i < exts->nr_actions; i++) {
		a = exts->actions[i];
		if (!a->ops->get_dev)
			continue;
		dev = a->ops->get_dev(a);
		if (!dev)
			continue;
		ret = tc_setup_cb_egdev_call(dev, type, type_data, err_stop);
		if (ret < 0)
			return ret;
		ok_count += ret;
	}
#endif
	return ok_count;
}

int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop)
{
	int ok_count;
	int ret;

	ret = tcf_block_cb_call(block, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count = ret;

	if (!exts)
		return ok_count;
	ret = tc_exts_setup_cb_egdev_call(exts, type, type_data, err_stop);
	if (ret < 0)
		return ret;
	ok_count += ret;

	return ok_count;
}
EXPORT_SYMBOL(tc_setup_cb_call);

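/* Per-netns setup and teardown of the IDR that tracks shared blocks. */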
static __net_init int tcf_net_init(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_init(&tn->idr);
	return 0;
}

static void __net_exit tcf_net_exit(struct net *net)
{
	struct tcf_net *tn = net_generic(net, tcf_net_id);

	idr_destroy(&tn->idr);
}

static struct pernet_operations tcf_net_ops = {
	.init = tcf_net_init,
	.exit = tcf_net_exit,
	.id   = &tcf_net_id,
	.size = sizeof(struct tcf_net),
};

static int __init tc_filter_init(void)
{
	int err;

	tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
	if (!tc_filter_wq)
		return -ENOMEM;

	err = register_pernet_subsys(&tcf_net_ops);
	if (err)
		goto err_register_pernet_subsys;

	rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
		      tc_dump_tfilter, 0);

	return 0;

err_register_pernet_subsys:
	destroy_workqueue(tc_filter_wq);
	return err;
}

subsys_initcall(tc_filter_init);