 * net/sched/act_api.c Packet action API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Author: Jamal Hadi Salim
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <net/net_namespace.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>
void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
        unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
        struct tcf_common **p1p;

        for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
                write_lock_bh(hinfo->lock);
                write_unlock_bh(hinfo->lock);
                gen_kill_estimator(&p->tcfc_bstats,

EXPORT_SYMBOL(tcf_hash_destroy);
int tcf_hash_release(struct tcf_common *p, int bind,
                     struct tcf_hashinfo *hinfo)
        if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
                tcf_hash_destroy(p, hinfo);

EXPORT_SYMBOL(tcf_hash_release);
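
/*
 * Example (illustrative sketch, not part of this file): an action's
 * ->cleanup() callback typically just drops its reference through
 * tcf_hash_release().  The pattern below is modeled on in-tree actions
 * such as act_gact; "struct tcf_foo" and "foo_hash_info" are hypothetical
 * names for a private structure that embeds struct tcf_common as its
 * first member, and its hash table:
 *
 *      static int tcf_foo_cleanup(struct tc_action *a, int bind)
 *      {
 *              struct tcf_foo *p = a->priv;
 *
 *              if (p)
 *                      return tcf_hash_release(&p->common, bind,
 *                                              &foo_hash_info);
 *              return 0;
 *      }
 */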
static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
                           struct tc_action *a, struct tcf_hashinfo *hinfo)
        int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;

        read_lock_bh(hinfo->lock);

        for (i = 0; i < (hinfo->hmask + 1); i++) {
                p = hinfo->htab[tcf_hash(i, hinfo->hmask)];

                for (; p; p = p->tcfc_next) {
                        nest = nla_nest_start(skb, a->order);
                        err = tcf_action_dump_1(skb, a, 0, 0);
                        nlmsg_trim(skb, nest);
                        nla_nest_end(skb, nest);
                        if (n_i >= TCA_ACT_MAX_PRIO)

        read_unlock_bh(hinfo->lock);

        nla_nest_cancel(skb, nest);
static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
                          struct tcf_hashinfo *hinfo)
        struct tcf_common *p, *s_p;

        nest = nla_nest_start(skb, a->order);
                goto nla_put_failure;
        NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
        for (i = 0; i < (hinfo->hmask + 1); i++) {
                p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
                        if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
                                module_put(a->ops->owner);

        NLA_PUT_U32(skb, TCA_FCNT, n_i);
        nla_nest_end(skb, nest);

        nla_nest_cancel(skb, nest);
int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
                       int type, struct tc_action *a)
        struct tcf_hashinfo *hinfo = a->ops->hinfo;

        if (type == RTM_DELACTION) {
                return tcf_del_walker(skb, a, hinfo);
        } else if (type == RTM_GETACTION) {
                return tcf_dump_walker(skb, cb, a, hinfo);
        printk("tcf_generic_walker: unknown action %d\n", type);

EXPORT_SYMBOL(tcf_generic_walker);
struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
        struct tcf_common *p;

        read_lock_bh(hinfo->lock);
        for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
                if (p->tcfc_index == index)
        read_unlock_bh(hinfo->lock);

EXPORT_SYMBOL(tcf_hash_lookup);
u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
        } while (tcf_hash_lookup(val, hinfo));

        return (*idx_gen = val);

EXPORT_SYMBOL(tcf_hash_new_index);
int tcf_hash_search(struct tc_action *a, u32 index)
        struct tcf_hashinfo *hinfo = a->ops->hinfo;
        struct tcf_common *p = tcf_hash_lookup(index, hinfo);

EXPORT_SYMBOL(tcf_hash_search);
struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
                                  struct tcf_hashinfo *hinfo)
        struct tcf_common *p = NULL;

        if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {

EXPORT_SYMBOL(tcf_hash_check);
struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
                                   struct tc_action *a, int size, int bind,
                                   u32 *idx_gen, struct tcf_hashinfo *hinfo)
        struct tcf_common *p = kzalloc(size, GFP_KERNEL);

                return ERR_PTR(-ENOMEM);

        spin_lock_init(&p->tcfc_lock);
        p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
        p->tcfc_tm.install = jiffies;
        p->tcfc_tm.lastuse = jiffies;
                int err = gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
        a->priv = (void *) p;

EXPORT_SYMBOL(tcf_hash_create);
void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
        unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);

        write_lock_bh(hinfo->lock);
        p->tcfc_next = hinfo->htab[h];
        write_unlock_bh(hinfo->lock);

EXPORT_SYMBOL(tcf_hash_insert);
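
/*
 * Example (illustrative sketch, not part of this file): an action's
 * ->init() callback typically combines the hash helpers above: look the
 * requested index up with tcf_hash_check(), create a fresh entry with
 * tcf_hash_create() when none exists, and link it into the table with
 * tcf_hash_insert() once it is fully set up.  The fragment below is
 * modeled on in-tree actions such as act_gact; "struct tcf_foo",
 * "foo_idx_gen" and "foo_hash_info" are hypothetical names, "index" comes
 * from the action's parsed parameters, and "est", "ovr" and "bind" are
 * the values the ->init() callback receives:
 *
 *      pc = tcf_hash_check(index, a, bind, &foo_hash_info);
 *      if (!pc) {
 *              pc = tcf_hash_create(index, est, a, sizeof(struct tcf_foo),
 *                                   bind, &foo_idx_gen, &foo_hash_info);
 *              if (IS_ERR(pc))
 *                      return PTR_ERR(pc);
 *              ret = ACT_P_CREATED;
 *      } else if (!ovr) {
 *              tcf_hash_release(pc, bind, &foo_hash_info);
 *              return -EEXIST;
 *      }
 *
 *      if (ret == ACT_P_CREATED)
 *              tcf_hash_insert(pc, &foo_hash_info);
 *      return ret;
 */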
static struct tc_action_ops *act_base = NULL;
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act)
        struct tc_action_ops *a, **ap;

        write_lock(&act_mod_lock);
        for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
                if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
                        write_unlock(&act_mod_lock);
        write_unlock(&act_mod_lock);

EXPORT_SYMBOL(tcf_register_action);
int tcf_unregister_action(struct tc_action_ops *act)
        struct tc_action_ops *a, **ap;

        write_lock(&act_mod_lock);
        for (ap = &act_base; (a = *ap) != NULL; ap = &a->next)
        write_unlock(&act_mod_lock);

EXPORT_SYMBOL(tcf_unregister_action);
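
/*
 * Example (illustrative sketch, not part of this file): an action module
 * describes itself with a struct tc_action_ops and registers it at module
 * init time.  Generic helpers from this file (tcf_hash_search and
 * tcf_generic_walker) are commonly plugged straight into ->lookup and
 * ->walk.  Modeled on in-tree actions such as act_gact; every "foo" name
 * and TCA_ACT_FOO are hypothetical:
 *
 *      #define FOO_TAB_MASK    15
 *      static struct tcf_common *tcf_foo_ht[FOO_TAB_MASK + 1];
 *      static u32 foo_idx_gen;
 *      static DEFINE_RWLOCK(foo_lock);
 *
 *      static struct tcf_hashinfo foo_hash_info = {
 *              .htab   = tcf_foo_ht,
 *              .hmask  = FOO_TAB_MASK,
 *              .lock   = &foo_lock,
 *      };
 *
 *      static struct tc_action_ops act_foo_ops = {
 *              .kind           = "foo",
 *              .hinfo          = &foo_hash_info,
 *              .type           = TCA_ACT_FOO,
 *              .capab          = TCA_CAP_NONE,
 *              .owner          = THIS_MODULE,
 *              .act            = tcf_foo,
 *              .dump           = tcf_foo_dump,
 *              .cleanup        = tcf_foo_cleanup,
 *              .lookup         = tcf_hash_search,
 *              .init           = tcf_foo_init,
 *              .walk           = tcf_generic_walker,
 *      };
 *
 *      static int __init foo_init_module(void)
 *      {
 *              return tcf_register_action(&act_foo_ops);
 *      }
 *
 *      static void __exit foo_cleanup_module(void)
 *      {
 *              tcf_unregister_action(&act_foo_ops);
 *      }
 *
 *      module_init(foo_init_module);
 *      module_exit(foo_cleanup_module);
 */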
static struct tc_action_ops *tc_lookup_action_n(char *kind)
        struct tc_action_ops *a = NULL;

        read_lock(&act_mod_lock);
        for (a = act_base; a; a = a->next) {
                if (strcmp(kind, a->kind) == 0) {
                        if (!try_module_get(a->owner)) {
                                read_unlock(&act_mod_lock);
        read_unlock(&act_mod_lock);
/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
        struct tc_action_ops *a = NULL;

        read_lock(&act_mod_lock);
        for (a = act_base; a; a = a->next) {
                if (nla_strcmp(kind, a->kind) == 0) {
                        if (!try_module_get(a->owner)) {
                                read_unlock(&act_mod_lock);
        read_unlock(&act_mod_lock);
static struct tc_action_ops *tc_lookup_action_id(u32 type)
        struct tc_action_ops *a = NULL;

        read_lock(&act_mod_lock);
        for (a = act_base; a; a = a->next) {
                if (a->type == type) {
                        if (!try_module_get(a->owner)) {
                                read_unlock(&act_mod_lock);
        read_unlock(&act_mod_lock);
int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
                    struct tcf_result *res)
        if (skb->tc_verd & TC_NCLS) {
                skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
        while ((a = act) != NULL) {
                if (a->ops && a->ops->act) {
                        ret = a->ops->act(skb, a, res);
                        if (TC_MUNGED & skb->tc_verd) {
                                /* copied already, allow trampling */
                                skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
                                skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
                        if (ret == TC_ACT_REPEAT)
                                goto repeat;    /* we need a ttl - JHS */
                        if (ret != TC_ACT_PIPE)

EXPORT_SYMBOL(tcf_action_exec);
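
/*
 * Example (illustrative sketch, not part of this file): the ->act()
 * callback invoked from the loop above normally updates the action's
 * timestamps and byte/packet counters under its lock and then returns a
 * verdict.  TC_ACT_PIPE lets tcf_action_exec() move on to the next action
 * in the chain, TC_ACT_REPEAT re-runs the same action, and any other
 * verdict ends the walk.  Modeled on in-tree actions such as act_gact;
 * "struct tcf_foo" is a hypothetical private structure embedding
 * struct tcf_common as a member named "common", so the tcf_* shorthand
 * macros from act_api.h apply:
 *
 *      static int tcf_foo(struct sk_buff *skb, struct tc_action *a,
 *                         struct tcf_result *res)
 *      {
 *              struct tcf_foo *p = a->priv;
 *
 *              spin_lock(&p->tcf_lock);
 *              p->tcf_tm.lastuse = jiffies;
 *              p->tcf_bstats.bytes += qdisc_pkt_len(skb);
 *              p->tcf_bstats.packets++;
 *              spin_unlock(&p->tcf_lock);
 *
 *              return p->tcf_action;
 *      }
 */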
void tcf_action_destroy(struct tc_action *act, int bind)
        for (a = act; a; a = act) {
                if (a->ops && a->ops->cleanup) {
                        if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
                                module_put(a->ops->owner);
                } else { /*FIXME: Remove later - catch insertion bugs*/
                        printk("tcf_action_destroy: BUG? destroying NULL ops\n");
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        if (a->ops == NULL || a->ops->dump == NULL)

        return a->ops->dump(skb, a, bind, ref);
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
        unsigned char *b = skb_tail_pointer(skb);

        if (a->ops == NULL || a->ops->dump == NULL)

        NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind);
        if (tcf_action_copy_stats(skb, a, 0))
                goto nla_put_failure;
        nest = nla_nest_start(skb, TCA_OPTIONS);
                goto nla_put_failure;
        if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) {
                nla_nest_end(skb, nest);

EXPORT_SYMBOL(tcf_action_dump_1);
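
/*
 * Example (illustrative sketch, not part of this file): the per-action
 * ->dump() callback reached via tcf_action_dump_old() above fills the
 * TCA_OPTIONS nest with the action's own attributes.  Modeled on in-tree
 * actions such as act_gact; "struct tcf_foo", "struct tc_foo" and
 * TCA_FOO_PARMS are hypothetical:
 *
 *      static int tcf_foo_dump(struct sk_buff *skb, struct tc_action *a,
 *                              int bind, int ref)
 *      {
 *              unsigned char *b = skb_tail_pointer(skb);
 *              struct tcf_foo *p = a->priv;
 *              struct tc_foo opt = {
 *                      .index   = p->tcf_index,
 *                      .refcnt  = p->tcf_refcnt - ref,
 *                      .bindcnt = p->tcf_bindcnt - bind,
 *                      .action  = p->tcf_action,
 *              };
 *
 *              NLA_PUT(skb, TCA_FOO_PARMS, sizeof(opt), &opt);
 *              return skb->len;
 *
 *      nla_put_failure:
 *              nlmsg_trim(skb, b);
 *              return -1;
 *      }
 */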
tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
        while ((a = act) != NULL) {
                nest = nla_nest_start(skb, a->order);
                        goto nla_put_failure;
                err = tcf_action_dump_1(skb, a, bind, ref);
                nla_nest_end(skb, nest);

        nla_nest_cancel(skb, nest);
struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind)
        struct tc_action_ops *a_o;
        char act_name[IFNAMSIZ];
        struct nlattr *tb[TCA_ACT_MAX+1];

        err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
                kind = tb[TCA_ACT_KIND];
                if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
                if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)

        a_o = tc_lookup_action_n(act_name);
#ifdef CONFIG_MODULES
                request_module("act_%s", act_name);
                a_o = tc_lookup_action_n(act_name);

                /* We dropped the RTNL semaphore in order to
                 * perform the module load. So, even if we
                 * succeeded in loading the module we have to
                 * tell the caller to replay the request. We
                 * indicate this using -EAGAIN.

        a = kzalloc(sizeof(*a), GFP_KERNEL);

        /* backward compatibility for policer */
                err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
                err = a_o->init(nla, est, a, ovr, bind);

        /* module count goes up only when brand new policy is created
           if it exists and is only bound to in a_o->init() then
           ACT_P_CREATED is not returned (a zero is).
        if (err != ACT_P_CREATED)
                module_put(a_o->owner);

        module_put(a_o->owner);
struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
                                  char *name, int ovr, int bind)
        struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
        struct tc_action *head = NULL, *act, *act_prev = NULL;

        err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);

        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_init_1(tb[i], est, name, ovr, bind);
                        act_prev->next = act;

        tcf_action_destroy(head, bind);
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
        struct tcf_act_hdr *h = a->priv;

        /* compat_mode being true specifies a call that is supposed
         * to add additional backward compatibility statistic TLVs.
        if (a->type == TCA_OLD_COMPAT)
                err = gnet_stats_start_copy_compat(skb, 0,
                        TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
                err = gnet_stats_start_copy(skb, TCA_ACT_STATS,

        if (a->ops != NULL && a->ops->get_stats != NULL)
                if (a->ops->get_stats(skb, a) < 0)

        if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
            gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
            gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)

        if (gnet_stats_finish_copy(&d) < 0)
tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
             u16 flags, int event, int bind, int ref)
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);

        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
        t->tca_family = AF_UNSPEC;

        nest = nla_nest_start(skb, TCA_ACT_TAB);
                goto nla_put_failure;

        if (tcf_action_dump(skb, a, bind, ref) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
        if (tca_get_fill(skb, a, pid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {

        return rtnl_unicast(skb, &init_net, pid);
static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
        struct nlattr *tb[TCA_ACT_MAX+1];

        err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);

        if (tb[TCA_ACT_INDEX] == NULL ||
            nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
        index = nla_get_u32(tb[TCA_ACT_INDEX]);

        a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
        a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
        if (a->ops->lookup == NULL)
        if (a->ops->lookup(a, index) == 0)

        module_put(a->ops->owner);

        module_put(a->ops->owner);
static void cleanup_a(struct tc_action *act)
        for (a = act; a; a = act) {

static struct tc_action *create_a(int i)
        struct tc_action *act;

        act = kzalloc(sizeof(*act), GFP_KERNEL);
                printk("create_a: failed to alloc!\n");
static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
        struct nlmsghdr *nlh;
        struct netlink_callback dcb;
        struct nlattr *tb[TCA_ACT_MAX+1];
        struct tc_action *a = create_a(0);

                printk("tca_action_flush: couldn't create tc_action\n");

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
                printk("tca_action_flush: failed skb alloc\n");

        b = skb_tail_pointer(skb);

        err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);

        kind = tb[TCA_ACT_KIND];
        a->ops = tc_lookup_action(kind);

        nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
        t->tca_family = AF_UNSPEC;

        nest = nla_nest_start(skb, TCA_ACT_TAB);
                goto nla_put_failure;

        err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        nlh->nlmsg_flags |= NLM_F_ROOT;
        module_put(a->ops->owner);

        err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);

        module_put(a->ops->owner);
tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
        struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
        struct tc_action *head = NULL, *act, *act_prev = NULL;

        ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);

        if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
                return tca_action_flush(tb[1], n, pid);

        for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
                act = tcf_action_get_1(tb[i], n, pid);
                        act_prev->next = act;

        if (event == RTM_GETACTION)
                ret = act_get_notify(pid, n, head, event);

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

        if (tca_get_fill(skb, head, pid, n->nlmsg_seq, 0, event,

        /* now do the delete */
        tcf_action_destroy(head, 0);
        ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
                             n->nlmsg_flags&NLM_F_ECHO);
static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
        struct nlmsghdr *nlh;

        skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);

        b = skb_tail_pointer(skb);

        nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
        t->tca_family = AF_UNSPEC;

        nest = nla_nest_start(skb, TCA_ACT_TAB);
                goto nla_put_failure;

        if (tcf_action_dump(skb, a, 0, 0) < 0)
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        NETLINK_CB(skb).dst_group = RTNLGRP_TC;

        err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
        struct tc_action *act;
        u32 seq = n->nlmsg_seq;

        act = tcf_action_init(nla, NULL, NULL, ovr, 0);

        /* dump then free all the actions after update; inserted policy

        ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
        for (a = act; a; a = act) {
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_ACT_MAX + 1];
        u32 pid = skb ? NETLINK_CB(skb).pid : 0;
        int ret = 0, ovr = 0;

        if (net != &init_net)

        ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);

        if (tca[TCA_ACT_TAB] == NULL) {
                printk("tc_ctl_action: received NO action attribs\n");

        /* n->nlmsg_flags&NLM_F_CREATE
        switch (n->nlmsg_type) {
                /* we are going to assume all other flags
                 * imply create only if it doesn't exist
                 * Note that CREATE | EXCL implies that
                 * but since we want to avoid ambiguity (eg when flags
                 * is zero) then just set this
                if (n->nlmsg_flags&NLM_F_REPLACE)
                ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr);

                ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION);

                ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION);
static struct nlattr *
find_dump_kind(struct nlmsghdr *n)
        struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
        struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
        struct nlattr *nla[TCAA_MAX + 1];
        struct nlattr *kind;

        if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
        tb1 = nla[TCA_ACT_TAB];

        if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
                      NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)

        if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
                      nla_len(tb[1]), NULL) < 0)
        kind = tb2[TCA_ACT_KIND];
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
        struct net *net = sock_net(skb->sk);
        struct nlmsghdr *nlh;
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
        struct tc_action_ops *a_o;
        struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
        struct nlattr *kind = find_dump_kind(cb->nlh);

        if (net != &init_net)

                printk("tc_dump_action: action bad kind\n");

        a_o = tc_lookup_action(kind);

        memset(&a, 0, sizeof(struct tc_action));

        if (a_o->walk == NULL) {
                printk("tc_dump_action: %s !capable of dumping table\n", a_o->kind);
                goto nla_put_failure;

        nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
                        cb->nlh->nlmsg_type, sizeof(*t));
        t = NLMSG_DATA(nlh);
        t->tca_family = AF_UNSPEC;

        nest = nla_nest_start(skb, TCA_ACT_TAB);
                goto nla_put_failure;

        ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        nla_nest_cancel(skb, nest);

        nlh->nlmsg_len = skb_tail_pointer(skb) - b;
        if (NETLINK_CB(cb->skb).pid && ret)
                nlh->nlmsg_flags |= NLM_F_MULTI;
        module_put(a_o->owner);

        module_put(a_o->owner);
static int __init tc_action_init(void)
        rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL);
        rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL);
        rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action);

subsys_initcall(tc_action_init);