/*
 * net/sched/act_api.c	Packet action API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>
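/*
 * Generic infrastructure for tc actions: per-module hash tables of
 * struct tcf_common entries, registration of struct tc_action_ops,
 * execution of action chains from the classifiers, and the
 * RTM_NEWACTION/RTM_DELACTION/RTM_GETACTION netlink handlers.
 */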
static void tcf_common_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct tcf_common, tcfc_rcu));
}
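/*
 * Unlink @p from its module's hash table and kill its rate estimator.
 * The actual kfree() is deferred through RCU (tcf_common_free_rcu()
 * above) because the estimator timer may still be using tcfc_lock and
 * the bstats.
 */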
void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
	struct tcf_common **p1p;

	for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == p) {
			write_lock_bh(hinfo->lock);
			*p1p = p->tcfc_next;
			write_unlock_bh(hinfo->lock);
			gen_kill_estimator(&p->tcfc_bstats,
					   &p->tcfc_rate_est);
			/*
			 * gen_estimator est_timer() might access p->tcfc_lock
			 * or bstats, wait a RCU grace period before freeing p
			 */
			call_rcu(&p->tcfc_rcu, tcf_common_free_rcu);
			return;
		}
	}
	WARN_ON(1);
}
EXPORT_SYMBOL(tcf_hash_destroy);
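/*
 * Drop one reference (and one binding, if @bind is set) on @p and
 * destroy the entry once both tcfc_bindcnt and tcfc_refcnt have
 * reached zero.  Returns 1 (ACT_P_DELETED) when the entry was
 * destroyed, 0 otherwise.
 */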
int tcf_hash_release(struct tcf_common *p, int bind,
		     struct tcf_hashinfo *hinfo)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfc_bindcnt--;

		p->tcfc_refcnt--;
		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
			tcf_hash_destroy(p, hinfo);
			ret = 1;
		}
	}
	return ret;
}
EXPORT_SYMBOL(tcf_hash_release);
static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tc_action *a, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	read_lock_bh(hinfo->lock);

	s_i = cb->args[0];

	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;

			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nlmsg_trim(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	read_unlock_bh(hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}
static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p, *s_p;
	struct nlattr *nest;
	int i = 0, n_i = 0;

	nest = nla_nest_start(skb, a->order);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		while (p != NULL) {
			s_p = p->tcfc_next;
			if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
				module_put(a->ops->owner);
			n_i++;
			p = s_p;
		}
	}
	NLA_PUT_U32(skb, TCA_FCNT, n_i);
	nla_nest_end(skb, nest);

	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EINVAL;
}
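/*
 * tcf_generic_walker() is the helper behind most ->walk callbacks: it
 * sends RTM_DELACTION requests to tcf_del_walker() (flush) and
 * RTM_GETACTION requests to tcf_dump_walker() (dump).  Illustrative
 * sketch only, with hypothetical names: an action module would
 * typically wire it up as
 *
 *	static int tcf_foo_walker(struct sk_buff *skb,
 *				  struct netlink_callback *cb, int type,
 *				  struct tc_action *a)
 *	{
 *		return tcf_generic_walker(skb, cb, type, a);
 *	}
 *
 * and point its struct tc_action_ops ->walk at tcf_foo_walker.
 */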
int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
		       int type, struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a, hinfo);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a, hinfo);
	} else {
		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);
struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;

	read_lock_bh(hinfo->lock);
	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
	     p = p->tcfc_next) {
		if (p->tcfc_index == index)
			break;
	}
	read_unlock_bh(hinfo->lock);

	return p;
}
EXPORT_SYMBOL(tcf_hash_lookup);
u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	u32 val = *idx_gen;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_hash_lookup(val, hinfo));

	return (*idx_gen = val);
}
EXPORT_SYMBOL(tcf_hash_new_index);
int tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = tcf_hash_lookup(index, hinfo);

	if (p) {
		a->priv = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_search);
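/*
 * tcf_hash_check()/tcf_hash_create()/tcf_hash_insert() below are the
 * building blocks of an action's ->init() callback.  Illustrative
 * sketch only, with hypothetical names (foo_hash_info, foo_idx_gen,
 * struct tcf_foo), of the usual pattern:
 *
 *	pc = tcf_hash_check(parm->index, a, bind, &foo_hash_info);
 *	if (!pc) {
 *		pc = tcf_hash_create(parm->index, est, a,
 *				     sizeof(struct tcf_foo), bind,
 *				     &foo_idx_gen, &foo_hash_info);
 *		if (IS_ERR(pc))
 *			return PTR_ERR(pc);
 *		ret = ACT_P_CREATED;
 *	} else if (!ovr) {
 *		tcf_hash_release(pc, bind, &foo_hash_info);
 *		return -EEXIST;
 *	}
 *	... set up the action's private parameters ...
 *	if (ret == ACT_P_CREATED)
 *		tcf_hash_insert(pc, &foo_hash_info);
 *	return ret;
 */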
struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
				  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = NULL;

	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
		if (bind)
			p->tcfc_bindcnt++;
		p->tcfc_refcnt++;
	}
	return p;
}
EXPORT_SYMBOL(tcf_hash_check);
struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
				   struct tc_action *a, int size, int bind,
				   u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = kzalloc(size, GFP_KERNEL);

	if (unlikely(!p))
		return ERR_PTR(-ENOMEM);
	p->tcfc_refcnt = 1;
	if (bind)
		p->tcfc_bindcnt = 1;

	spin_lock_init(&p->tcfc_lock);
	p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
	p->tcfc_tm.install = jiffies;
	p->tcfc_tm.lastuse = jiffies;
	if (est) {
		int err = gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
					    &p->tcfc_lock, est);
		if (err) {
			kfree(p);
			return ERR_PTR(err);
		}
	}

	a->priv = (void *) p;
	return p;
}
EXPORT_SYMBOL(tcf_hash_create);
void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);

	write_lock_bh(hinfo->lock);
	p->tcfc_next = hinfo->htab[h];
	hinfo->htab[h] = p;
	write_unlock_bh(hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);
static struct tc_action_ops *act_base = NULL;
static DEFINE_RWLOCK(act_mod_lock);
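/*
 * act_base is a singly linked list of every registered tc_action_ops,
 * protected by act_mod_lock.  Illustrative sketch only, with
 * hypothetical names: an action module registers its ops on load and
 * removes them on unload roughly like
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind		= "foo",
 *		.hinfo		= &foo_hash_info,
 *		.type		= TCA_ACT_FOO,	// hypothetical id
 *		.owner		= THIS_MODULE,
 *		.act		= tcf_foo_act,
 *		.dump		= tcf_foo_dump,
 *		.cleanup	= tcf_foo_cleanup,
 *		.init		= tcf_foo_init,
 *		.walk		= tcf_foo_walker,
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops);
 *	}
 *
 *	static void __exit foo_cleanup_module(void)
 *	{
 *		tcf_unregister_action(&act_foo_ops);
 *	}
 */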
int tcf_register_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			return -EEXIST;
		}
	}
	act->next = NULL;
	*ap = act;
	write_unlock(&act_mod_lock);
	return 0;
}
EXPORT_SYMBOL(tcf_register_action);
int tcf_unregister_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next)
		if (a == act)
			break;
	if (a) {
		*ap = a->next;
		a->next = NULL;
		err = 0;
	}
	write_unlock(&act_mod_lock);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}
/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}
/* lookup by id */
static struct tc_action_ops *tc_lookup_action_id(u32 type)
{
	struct tc_action_ops *a = NULL;

	if (type) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (a->type == type) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}
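/*
 * Run a chain of actions on a packet.  Each action's verdict decides
 * what happens next: TC_ACT_PIPE falls through to the next action in
 * the chain, TC_ACT_REPEAT re-runs the same action, and any other
 * verdict stops the walk and is returned to the calling classifier.
 */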
int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
		    struct tcf_result *res)
{
	struct tc_action *a;
	int ret = -1;

	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		ret = TC_ACT_OK;
		goto exec_done;
	}
	while ((a = act) != NULL) {
repeat:
		if (a->ops && a->ops->act) {
			ret = a->ops->act(skb, a, res);
			if (TC_MUNGED & skb->tc_verd) {
				/* copied already, allow trampling */
				skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
				skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
			}
			if (ret == TC_ACT_REPEAT)
				goto repeat;	/* we need a ttl - JHS */
			if (ret != TC_ACT_PIPE)
				goto exec_done;
		}
		act = a->next;
	}
exec_done:
	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
void tcf_action_destroy(struct tc_action *act, int bind)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		if (a->ops && a->ops->cleanup) {
			if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
				module_put(a->ops->owner);
			act = act->next;
			kfree(a);
		} else {
			/* FIXME: Remove later - catch insertion bugs */
			WARN(1, "tcf_action_destroy: BUG? destroying NULL ops\n");
			act = act->next;
			kfree(a);
		}
	}
}
int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;
	return a->ops->dump(skb, a, bind, ref);
}
int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;

	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);
int
tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	while ((a = act) != NULL) {
		act = a->next;
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}
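/*
 * Instantiate a single action from its netlink attributes.  If the
 * requested kind is not registered yet, the matching "act_<kind>"
 * module is loaded with the RTNL dropped; -EAGAIN is then returned so
 * the caller replays the whole request (see the replay label in
 * tc_ctl_action()).
 */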
struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	err = -ENOMEM;
	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (a == NULL)
		goto err_mod;

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
	else
		err = a_o->init(nla, est, a, ovr, bind);
	if (err < 0)
		goto err_free;

	/* module count goes up only when brand new policy is created
	 * if it exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);
	a->ops = a_o;

	return a;

err_free:
	kfree(a);
err_mod:
	module_put(a_o->owner);
err_out:
	return ERR_PTR(err);
}
struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
				  char *name, int ovr, int bind)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (err < 0)
		return ERR_PTR(err);

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(tb[i], est, name, ovr, bind);
		if (IS_ERR(act))
			goto err;
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}
	return head;

err:
	if (head != NULL)
		tcf_action_destroy(head, bind);
	return act;
}
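/*
 * Append the statistics of one action to @skb.  With compat_mode set
 * and an action of type TCA_OLD_COMPAT the counters go out as the old
 * top-level TCA_STATS/TCA_XSTATS TLVs; otherwise they are nested under
 * TCA_ACT_STATS.
 */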
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;
	struct tcf_act_hdr *h = a->priv;

	if (h == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (a->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
				TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &h->tcf_lock, &d);

	if (err < 0)
		goto errout;

	if (a->ops != NULL && a->ops->get_stats != NULL)
		if (a->ops->get_stats(skb, a) < 0)
			goto errout;

	if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &h->tcf_bstats,
				     &h->tcf_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}
static int
tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
	     u16 flags, int event, int bind, int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);

	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, bind, ref) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nla_put_failure:
nlmsg_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int
act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
	       struct tc_action *a, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, pid);
}
static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -ENOMEM;
	a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
	if (a == NULL)
		goto err_out;

	err = -EINVAL;
	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (a->ops == NULL)
		goto err_free;
	if (a->ops->lookup == NULL)
		goto err_mod;
	err = -ENOENT;
	if (a->ops->lookup(a, index) == 0)
		goto err_mod;

	module_put(a->ops->owner);
	return a;

err_mod:
	module_put(a->ops->owner);
err_free:
	kfree(a);
err_out:
	return ERR_PTR(err);
}
static void cleanup_a(struct tc_action *act)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
}
static struct tc_action *create_a(int i)
{
	struct tc_action *act;

	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (act == NULL) {
		pr_debug("create_a: failed to alloc!\n");
		return NULL;
	}
	act->order = i;
	return act;
}
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 pid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	struct tc_action *a = create_a(0);
	int err = -ENOMEM;

	if (a == NULL) {
		pr_debug("tca_action_flush: couldn't create tc_action\n");
		return err;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		pr_debug("tca_action_flush: failed skb alloc\n");
		kfree(a);
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	a->ops = tc_lookup_action(kind);
	if (a->ops == NULL)
		goto err_out;

	nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
	if (err < 0)
		goto nla_put_failure;
	if (err == 0)
		goto noflush_out;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a->ops->owner);
	kfree(a);
	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

nla_put_failure:
nlmsg_failure:
	module_put(a->ops->owner);
err_out:
noflush_out:
	kfree_skb(skb);
	kfree(a);
	return err;
}
static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 pid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(net, tb[1], n, pid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(tb[i], n, pid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}

	if (event == RTM_GETACTION)
		ret = act_get_notify(net, pid, n, head, event);
	else { /* delete */
		struct sk_buff *skb;

		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOBUFS;
			goto err;
		}

		if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
				 0, 1) <= 0) {
			kfree_skb(skb);
			ret = -EINVAL;
			goto err;
		}

		/* now do the delete */
		tcf_action_destroy(head, 0);
		ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
		if (ret > 0)
			return 0;
		return ret;
	}
err:
	cleanup_a(head);
	return ret;
}
static int tcf_add_notify(struct net *net, struct tc_action *a,
			  u32 pid, u32 seq, int event, u16 flags)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct nlattr *nest;
	unsigned char *b;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	b = skb_tail_pointer(skb);

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, 0, 0) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	NETLINK_CB(skb).dst_group = RTNLGRP_TC;

	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;

nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}
static int
tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	       u32 pid, int ovr)
{
	int ret = 0;
	struct tc_action *act;
	struct tc_action *a;
	u32 seq = n->nlmsg_seq;

	act = tcf_action_init(nla, NULL, NULL, ovr, 0);
	if (act == NULL)
		goto done;
	if (IS_ERR(act)) {
		ret = PTR_ERR(act);
		goto done;
	}

	/* dump then free all the actions after update; inserted policy
	 * stays intact
	 */
	ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
done:
	return ret;
}
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ACT_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = 0, ovr = 0;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		pr_notice("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist
		 * Note that CREATE | EXCL implies that
		 * but since we want to avoid ambiguity (eg when flags
		 * is zero) then just set this
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    pid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    pid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}
*
1029 find_dump_kind(const struct nlmsghdr
*n
)
1031 struct nlattr
*tb1
, *tb2
[TCA_ACT_MAX
+1];
1032 struct nlattr
*tb
[TCA_ACT_MAX_PRIO
+ 1];
1033 struct nlattr
*nla
[TCAA_MAX
+ 1];
1034 struct nlattr
*kind
;
1036 if (nlmsg_parse(n
, sizeof(struct tcamsg
), nla
, TCAA_MAX
, NULL
) < 0)
1038 tb1
= nla
[TCA_ACT_TAB
];
1042 if (nla_parse(tb
, TCA_ACT_MAX_PRIO
, nla_data(tb1
),
1043 NLMSG_ALIGN(nla_len(tb1
)), NULL
) < 0)
1048 if (nla_parse(tb2
, TCA_ACT_MAX
, nla_data(tb
[1]),
1049 nla_len(tb
[1]), NULL
) < 0)
1051 kind
= tb2
[TCA_ACT_KIND
];
static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tc_action a;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
	struct nlattr *kind = find_dump_kind(cb->nlh);

	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	memset(&a, 0, sizeof(struct tc_action));
	a.ops = a_o;

	if (a_o->walk == NULL) {
		WARN(1, "tc_dump_action: %s !capable of dumping table\n",
		     a_o->kind);
		goto nla_put_failure;
	}

	nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
	if (ret < 0)
		goto nla_put_failure;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
	} else
		nla_nest_cancel(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).pid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

nla_put_failure:
nlmsg_failure:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}
static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action);

	return 0;
}

subsys_initcall(tc_action_init);