/*
 * net/sched/act_api.c	Packet action API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>

static void tcf_common_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct tcf_common, tcfc_rcu));
}
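
/*
 * Unlink an action's common state from its hash bucket, kill its rate
 * estimator and free the state only after an RCU grace period has elapsed
 * (est_timer() may still be reading tcfc_lock or the bstats).
 */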
void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
	struct tcf_common **p1p;

	for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == p) {
			write_lock_bh(hinfo->lock);
			*p1p = p->tcfc_next;
			write_unlock_bh(hinfo->lock);
			gen_kill_estimator(&p->tcfc_bstats,
					   &p->tcfc_rate_est);
			/*
			 * gen_estimator est_timer() might access p->tcfc_lock
			 * or bstats, wait a RCU grace period before freeing p
			 */
			call_rcu(&p->tcfc_rcu, tcf_common_free_rcu);
			return;
		}
	}
	WARN_ON(1);
}
EXPORT_SYMBOL(tcf_hash_destroy);
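
/*
 * Drop a reference (and a bind reference when bind is set); destroy the
 * entry once both counts have reached zero.  Returns 1 when the action
 * was actually deleted.
 */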
int tcf_hash_release(struct tcf_common *p, int bind,
		     struct tcf_hashinfo *hinfo)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfc_bindcnt--;

		p->tcfc_refcnt--;
		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
			tcf_hash_destroy(p, hinfo);
			ret = 1;
		}
	}
	return ret;
}
EXPORT_SYMBOL(tcf_hash_release);
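
/*
 * Netlink dump helper: walk every hash bucket, skip the entries already
 * dumped (cb->args[0]) and emit at most TCA_ACT_MAX_PRIO actions per call.
 */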
static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tc_action *a, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	read_lock_bh(hinfo->lock);

	s_i = cb->args[0];

	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;

			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nlmsg_trim(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	read_unlock_bh(hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}
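
/*
 * Flush helper: release every instance of this action kind and report how
 * many were freed in the TCA_FCNT attribute.
 */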
static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p, *s_p;
	struct nlattr *nest;
	int i = 0, n_i = 0;

	nest = nla_nest_start(skb, a->order);
	if (nest == NULL)
		goto nla_put_failure;
	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	for (i = 0; i < (hinfo->hmask + 1); i++) {
		p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

		while (p != NULL) {
			s_p = p->tcfc_next;
			if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
				module_put(a->ops->owner);
			n_i++;
			p = s_p;
		}
	}
	NLA_PUT_U32(skb, TCA_FCNT, n_i);
	nla_nest_end(skb, nest);

	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EINVAL;
}

int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
		       int type, struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a, hinfo);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a, hinfo);
	} else {
		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);
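
/* Find the action with the given index, under the hash table read lock. */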
struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p;

	read_lock_bh(hinfo->lock);
	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
	     p = p->tcfc_next) {
		if (p->tcfc_index == index)
			break;
	}
	read_unlock_bh(hinfo->lock);

	return p;
}
EXPORT_SYMBOL(tcf_hash_lookup);

u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	u32 val = *idx_gen;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_hash_lookup(val, hinfo));

	return (*idx_gen = val);
}
EXPORT_SYMBOL(tcf_hash_new_index);

int tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = tcf_hash_lookup(index, hinfo);

	if (p) {
		a->priv = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_search);

struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
				  struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = NULL;

	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
		if (bind)
			p->tcfc_bindcnt++;
		p->tcfc_refcnt++;
		a->priv = p;
	}
	return p;
}
EXPORT_SYMBOL(tcf_hash_check);
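
/*
 * Allocate and initialise the common part of a new action: take the first
 * reference, pick an index if the caller did not supply one and optionally
 * attach a rate estimator described by est.
 */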
struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
				   struct tc_action *a, int size, int bind,
				   u32 *idx_gen, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = kzalloc(size, GFP_KERNEL);

	if (unlikely(!p))
		return ERR_PTR(-ENOMEM);
	p->tcfc_refcnt = 1;
	if (bind)
		p->tcfc_bindcnt = 1;

	spin_lock_init(&p->tcfc_lock);
	p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
	p->tcfc_tm.install = jiffies;
	p->tcfc_tm.lastuse = jiffies;
	if (est) {
		int err = gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
					    &p->tcfc_lock, est);
		if (err) {
			kfree(p);
			return ERR_PTR(err);
		}
	}

	a->priv = (void *) p;
	return p;
}
EXPORT_SYMBOL(tcf_hash_create);

void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);

	write_lock_bh(hinfo->lock);
	p->tcfc_next = hinfo->htab[h];
	hinfo->htab[h] = p;
	write_unlock_bh(hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);

static struct tc_action_ops *act_base = NULL;
static DEFINE_RWLOCK(act_mod_lock);
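
/* Register a new action kind; both its type and name must be unique. */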
int tcf_register_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			return -EEXIST;
		}
	}
	act->next = NULL;
	*ap = act;
	write_unlock(&act_mod_lock);
	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a, **ap;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	for (ap = &act_base; (a = *ap) != NULL; ap = &a->next)
		if (a == act)
			break;
	if (a) {
		*ap = a->next;
		a->next = NULL;
		err = 0;
	}
	write_unlock(&act_mod_lock);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}

#if 0
/* lookup by id */
static struct tc_action_ops *tc_lookup_action_id(u32 type)
{
	struct tc_action_ops *a = NULL;

	if (type) {
		read_lock(&act_mod_lock);
		for (a = act_base; a; a = a->next) {
			if (a->type == type) {
				if (!try_module_get(a->owner)) {
					read_unlock(&act_mod_lock);
					return NULL;
				}
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return a;
}
#endif
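
/*
 * Run the action chain on a packet.  TC_ACT_REPEAT re-runs the current
 * action, TC_ACT_PIPE continues with the next one, any other verdict
 * terminates the walk and is returned to the caller.
 */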
int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
		    struct tcf_result *res)
{
	struct tc_action *a;
	int ret = -1;

	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		ret = TC_ACT_OK;
		goto exec_done;
	}
	while ((a = act) != NULL) {
repeat:
		if (a->ops && a->ops->act) {
			ret = a->ops->act(skb, a, res);
			if (TC_MUNGED & skb->tc_verd) {
				/* copied already, allow trampling */
				skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
				skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
			}
			if (ret == TC_ACT_REPEAT)
				goto repeat;	/* we need a ttl - JHS */
			if (ret != TC_ACT_PIPE)
				goto exec_done;
		}
		act = a->next;
	}
exec_done:
	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

void tcf_action_destroy(struct tc_action *act, int bind)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		if (a->ops && a->ops->cleanup) {
			if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
				module_put(a->ops->owner);
			act = act->next;
			kfree(a);
		} else {
			/*FIXME: Remove later - catch insertion bugs*/
			WARN(1, "tcf_action_destroy: BUG? destroying NULL ops\n");
			act = act->next;
			kfree(a);
		}
	}
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (a->ops == NULL || a->ops->dump == NULL)
		return err;

	NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int
tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	while ((a = act) != NULL) {
		act = a->next;
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}
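
/*
 * Parse and instantiate a single action.  The RTNL lock may be dropped to
 * autoload the act_<kind> module, in which case -EAGAIN is returned so the
 * caller replays the whole request.
 */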
struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	err = -ENOMEM;
	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (a == NULL)
		goto err_mod;

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
	else
		err = a_o->init(nla, est, a, ovr, bind);
	if (err < 0)
		goto err_free;

	/* module count goes up only when brand new policy is created
	 * if it exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);
	a->ops = a_o;

	return a;

err_free:
	kfree(a);
err_mod:
	module_put(a_o->owner);
err_out:
	return ERR_PTR(err);
}

struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
				  char *name, int ovr, int bind)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (err < 0)
		return ERR_PTR(err);

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(tb[i], est, name, ovr, bind);
		if (IS_ERR(act))
			goto err;
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}
	return head;

err:
	if (head != NULL)
		tcf_action_destroy(head, bind);
	return act;
}
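
/*
 * Copy basic, rate-estimator and queue statistics into the netlink message;
 * compat_mode selects the old TCA_STATS/TCA_XSTATS TLV layout.
 */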
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;
	struct tcf_act_hdr *h = a->priv;

	if (h == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (a->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
				TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &h->tcf_lock, &d);

	if (err < 0)
		goto errout;

	if (a->ops != NULL && a->ops->get_stats != NULL)
		if (a->ops->get_stats(skb, a) < 0)
			goto errout;

	if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &h->tcf_bstats,
				     &h->tcf_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int
tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
	     u16 flags, int event, int bind, int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);

	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, bind, ref) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nla_put_failure:
nlmsg_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int
act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
	       struct tc_action *a, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, pid);
}

static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -ENOMEM;
	a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
	if (a == NULL)
		goto err_out;

	err = -EINVAL;
	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (a->ops == NULL)
		goto err_free;
	if (a->ops->lookup == NULL)
		goto err_mod;
	err = -ENOENT;
	if (a->ops->lookup(a, index) == 0)
		goto err_mod;

	module_put(a->ops->owner);
	return a;

err_mod:
	module_put(a->ops->owner);
err_free:
	kfree(a);
err_out:
	return ERR_PTR(err);
}

static void cleanup_a(struct tc_action *act)
{
	struct tc_action *a;

	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
}

static struct tc_action *create_a(int i)
{
	struct tc_action *act;

	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (act == NULL) {
		pr_debug("create_a: failed to alloc!\n");
		return NULL;
	}
	act->order = i;
	return act;
}
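
/*
 * RTM_DELACTION with NLM_F_ROOT: flush every action of the given kind by
 * letting ops->walk() release the entries, then notify userspace.
 */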
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 pid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	struct tc_action *a = create_a(0);
	int err = -ENOMEM;

	if (a == NULL) {
		pr_debug("tca_action_flush: couldnt create tc_action\n");
		return err;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		pr_debug("tca_action_flush: failed skb alloc\n");
		kfree(a);
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	a->ops = tc_lookup_action(kind);
	if (a->ops == NULL)
		goto err_out;

	nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
	if (err < 0)
		goto nla_put_failure;
	if (err == 0)
		goto noflush_out;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a->ops->owner);
	kfree(a);
	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

nla_put_failure:
nlmsg_failure:
	module_put(a->ops->owner);
err_out:
noflush_out:
	kfree_skb(skb);
	kfree(a);
	return err;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 pid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *head = NULL, *act, *act_prev = NULL;

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(net, tb[1], n, pid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(tb[i], n, pid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;

		if (head == NULL)
			head = act;
		else
			act_prev->next = act;
		act_prev = act;
	}

	if (event == RTM_GETACTION)
		ret = act_get_notify(net, pid, n, head, event);
	else { /* delete */
		struct sk_buff *skb;

		skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOBUFS;
			goto err;
		}

		if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,
				 0, 1) <= 0) {
			kfree_skb(skb);
			ret = -EINVAL;
			goto err;
		}

		/* now do the delete */
		tcf_action_destroy(head, 0);
		ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
				     n->nlmsg_flags & NLM_F_ECHO);
		if (ret > 0)
			return 0;
		return ret;
	}
err:
	cleanup_a(head);
	return ret;
}

static int tcf_add_notify(struct net *net, struct tc_action *a,
			  u32 pid, u32 seq, int event, u16 flags)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	struct nlattr *nest;
	unsigned char *b;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	b = skb_tail_pointer(skb);

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_action_dump(skb, a, 0, 0) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	NETLINK_CB(skb).dst_group = RTNLGRP_TC;

	err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;

nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int
tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	       u32 pid, int ovr)
{
	int ret = 0;
	struct tc_action *act;
	struct tc_action *a;
	u32 seq = n->nlmsg_seq;

	act = tcf_action_init(nla, NULL, NULL, ovr, 0);
	if (act == NULL)
		goto done;
	if (IS_ERR(act)) {
		ret = PTR_ERR(act);
		goto done;
	}

	/* dump then free all the actions after update; inserted policy
	 * stays intact
	 */
	ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
	for (a = act; a; a = act) {
		act = a->next;
		kfree(a);
	}
done:
	return ret;
}
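
/*
 * rtnetlink doit handler: dispatch RTM_NEWACTION, RTM_DELACTION and
 * RTM_GETACTION to the add and get/delete paths above.
 */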
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ACT_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = 0, ovr = 0;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		pr_notice("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist
		 * Note that CREATE | EXCL implies that
		 * but since we want avoid ambiguity (eg when flags
		 * is zero) then just set this
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    pid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    pid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *
find_dump_kind(const struct nlmsghdr *n)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *nla[TCAA_MAX + 1];
	struct nlattr *kind;

	if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
		return NULL;
	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
		      nla_len(tb[1]), NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}
static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tc_action a;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
	struct nlattr *kind = find_dump_kind(cb->nlh);

	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	memset(&a, 0, sizeof(struct tc_action));
	a.ops = a_o;

	if (a_o->walk == NULL) {
		WARN(1, "tc_dump_action: %s !capable of dumping table\n",
		     a_o->kind);
		goto nla_put_failure;
	}

	nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t));
	t = NLMSG_DATA(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto nla_put_failure;

	ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
	if (ret < 0)
		goto nla_put_failure;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
	} else
		nla_nest_cancel(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).pid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

nla_put_failure:
nlmsg_failure:
	module_put(a_o->owner);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action);

	return 0;
}

subsys_initcall(tc_action_init);