/*
 * net/sched/police.c	Input police filter.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
13 #include <asm/uaccess.h>
14 #include <asm/system.h>
15 #include <linux/bitops.h>
16 #include <linux/module.h>
17 #include <linux/types.h>
18 #include <linux/kernel.h>
19 #include <linux/string.h>
21 #include <linux/socket.h>
22 #include <linux/sockios.h>
24 #include <linux/errno.h>
25 #include <linux/interrupt.h>
26 #include <linux/netdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/module.h>
29 #include <linux/rtnetlink.h>
30 #include <linux/init.h>
32 #include <net/act_api.h>
33 #include <net/netlink.h>
/* Rate-table lookups: scheduler ticks needed to transmit L bytes at the
 * policer's configured rate (L2T) or peak rate (L2T_P).  Callers must
 * ensure the corresponding tcfp_R_tab/tcfp_P_tab pointer is non-NULL. */
35 #define L2T(p,L) qdisc_l2t((p)->tcfp_R_tab,L)
36 #define L2T_P(p,L) qdisc_l2t((p)->tcfp_P_tab,L)
/* Global policer state: a 16-bucket hash table of all policers, a
 * monotonically-advancing index generator for auto-assigned indices, and a
 * reader/writer lock protecting table membership (per-policer state is
 * protected by each policer's own tcf_lock, not by this lock). */
38 #define POL_TAB_MASK 15
39 static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
40 static u32 police_idx_gen;
41 static DEFINE_RWLOCK(police_lock);
/* Shared hash descriptor handed to the generic action code (act_api).
 * NOTE(review): the initializer is truncated in this extract — original
 * lines 46-47 (presumably a .lock member and the closing brace) are not
 * visible here; confirm against the full file. */
43 static struct tcf_hashinfo police_hash_info = {
44 .htab = tcf_police_ht,
45 .hmask = POL_TAB_MASK,
49 /* old policer structure from before tc actions */
/* Accepted as an alternate TCA_POLICE_TBF payload size for backward
 * compatibility with pre-action userspace.
 * NOTE(review): original lines 51-56 (the opening brace and the leading
 * scalar members before the two ratespecs) are not visible in this
 * extract — do not assume the layout from what is shown here. */
50 struct tc_police_compat
57 struct tc_ratespec rate
;
58 struct tc_ratespec peakrate
;
61 /* Each policer is serialized by its individual spinlock */
63 #ifdef CONFIG_NET_CLS_ACT
/*
 * Netlink dump callback: walk every bucket of the policer hash table and
 * emit one nested rtattr per policer into @skb.  For RTM_DELACTION dumps
 * only the bare reference (index) is emitted; otherwise a full dump.
 * Holds police_lock for reading across the whole walk.
 *
 * NOTE(review): many original lines are missing from this extract
 * (declarations of p/r, the s_i/cb->args skip logic, the rtattr_failure
 * unwind label, and the function's return) — the control flow below is
 * only partially visible; verify against the complete file.
 */
64 static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
65 int type, struct tc_action *a)
68 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
71 read_lock_bh(&police_lock);
75 for (i = 0; i < (POL_TAB_MASK + 1); i++) {
76 p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];
/* Walk the collision chain of this bucket. */
78 for (; p; p = p->tcfc_next) {
/* Remember where this policer's nested attribute starts so its
 * length can be patched in after the payload is written. */
84 r = (struct rtattr *)skb_tail_pointer(skb);
85 RTA_PUT(skb, a->order, 0, NULL);
86 if (type == RTM_DELACTION)
87 err = tcf_action_dump_1(skb, a, 0, 1);
89 err = tcf_action_dump_1(skb, a, 0, 0);
/* Back-patch the nested attribute length now that the payload size
 * is known. */
95 r->rta_len = skb_tail_pointer(skb) - (u8 *)r;
100 read_unlock_bh(&police_lock);
/*
 * Unlink @p from the policer hash table and release the resources it owns:
 * the rate estimator (if CONFIG_NET_ESTIMATOR) and both qdisc rate tables.
 * Called when the last reference is dropped.
 *
 * NOTE(review): the unlink statement itself (original line 119, between the
 * write_lock/write_unlock pair) and the function's closing lines are not
 * visible in this extract.  The estimator/rtab releases below appear inside
 * the "found in chain" branch in the original.
 */
111 void tcf_police_destroy(struct tcf_police *p)
113 unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
114 struct tcf_common **p1p;
/* Scan the bucket's chain for the entry pointing at this policer. */
116 for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
117 if (*p1p == &p->common) {
118 write_lock_bh(&police_lock);
120 write_unlock_bh(&police_lock);
121 #ifdef CONFIG_NET_ESTIMATOR
122 gen_kill_estimator(&p->tcf_bstats,
/* Release both rate tables; qdisc_put_rtab() tolerates NULL. */
126 qdisc_put_rtab(p->tcfp_R_tab);
128 qdisc_put_rtab(p->tcfp_P_tab);
136 #ifdef CONFIG_NET_CLS_ACT
/*
 * tc_action_ops.init hook: locate an existing policer by index (bumping its
 * ref/bind counts) or create and configure a new one from the netlink
 * attributes in @rta.  On creation the policer is fully configured under its
 * own tcf_lock and then inserted into the hash table under police_lock.
 *
 * NOTE(review): this extract omits many original lines (return statements,
 * braces, the error-unwind path around "failure:", and several
 * declarations such as ret/size/h) — read the visible statements as a
 * skeleton only.
 */
137 static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
138 struct tc_action *a, int ovr, int bind)
142 struct rtattr *tb[TCA_POLICE_MAX];
143 struct tc_police *parm;
144 struct tcf_police *police;
145 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
/* Parse the nested TCA_POLICE_* attribute table. */
148 if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
151 if (tb[TCA_POLICE_TBF-1] == NULL)
/* Accept both the current tc_police layout and the pre-action
 * tc_police_compat layout. */
153 size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
154 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
156 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
158 if (tb[TCA_POLICE_RESULT-1] != NULL &&
159 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
/* NOTE(review): BUG — the check below is a copy-paste duplicate of the
 * TCA_POLICE_RESULT check above; it was evidently intended to validate
 * TCA_POLICE_AVRATE (which is read unchecked further down under
 * CONFIG_NET_ESTIMATOR).  Confirm against mainline and fix there. */
161 if (tb[TCA_POLICE_RESULT-1] != NULL &&
162 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
/* If the caller supplied an index, try to reuse an existing policer. */
166 struct tcf_common *pc;
168 pc = tcf_hash_lookup(parm->index, &police_hash_info);
171 police = to_police(pc);
173 police->tcf_bindcnt += 1;
174 police->tcf_refcnt += 1;
/* Not found: allocate and initialize a fresh policer. */
182 police = kzalloc(sizeof(*police), GFP_KERNEL);
186 police->tcf_refcnt = 1;
187 spin_lock_init(&police->tcf_lock);
188 police->tcf_stats_lock = &police->tcf_lock;
190 police->tcf_bindcnt = 1;
/* Fetch rate tables before committing any state, so failure can still
 * back out cleanly. */
192 if (parm->rate.rate) {
194 R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
197 if (parm->peakrate.rate) {
198 P_tab = qdisc_get_rtab(&parm->peakrate,
199 tb[TCA_POLICE_PEAKRATE-1]);
/* Peak-rate table lookup failed: drop the rate table too. */
201 qdisc_put_rtab(R_tab);
206 /* No failure allowed after this point */
207 spin_lock_bh(&police->tcf_lock);
/* Swap in the new rate tables, releasing any previous ones (relevant
 * when overriding an existing policer). */
209 qdisc_put_rtab(police->tcfp_R_tab);
210 police->tcfp_R_tab = R_tab;
213 qdisc_put_rtab(police->tcfp_P_tab);
214 police->tcfp_P_tab = P_tab;
217 if (tb[TCA_POLICE_RESULT-1])
218 police->tcfp_result = *(u32 *)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
/* Start with a full token bucket. */
219 police->tcfp_toks = police->tcfp_burst = parm->burst;
220 police->tcfp_mtu = parm->mtu;
221 if (police->tcfp_mtu == 0) {
/* mtu==0 means "no mtu limit"; bound it by what the rate table can
 * index (255 cells) when a rate table exists. */
222 police->tcfp_mtu = ~0;
223 if (police->tcfp_R_tab)
224 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
226 if (police->tcfp_P_tab)
227 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
228 police->tcf_action = parm->action;
230 #ifdef CONFIG_NET_ESTIMATOR
/* NOTE(review): AVRATE payload size is not validated here — see the
 * duplicated check flagged above. */
231 if (tb[TCA_POLICE_AVRATE-1])
232 police->tcfp_ewma_rate =
233 *(u32 *)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
235 gen_replace_estimator(&police->tcf_bstats,
236 &police->tcf_rate_est,
237 police->tcf_stats_lock, est);
240 spin_unlock_bh(&police->tcf_lock);
/* Existing policer reconfigured: done.  Only a newly created policer
 * needs an index and hash insertion below. */
241 if (ret != ACT_P_CREATED)
244 police->tcfp_t_c = psched_get_time();
245 police->tcf_index = parm->index ? parm->index :
246 tcf_hash_new_index(&police_idx_gen, &police_hash_info);
/* Publish the new policer in the hash table. */
247 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
248 write_lock_bh(&police_lock);
249 police->tcf_next = tcf_police_ht[h];
250 tcf_police_ht[h] = &police->common;
251 write_unlock_bh(&police_lock);
/* Error unwind (original "failure" path): free the policer only if we
 * created it here. */
257 if (ret == ACT_P_CREATED)
/*
 * tc_action_ops.cleanup hook: drop one reference (and one bind if @bind)
 * on the policer attached to @a, destroying it when the last ref goes.
 * NOTE(review): original lines 263/265-266 are not visible — mainline has
 * a NULL check on p before calling tcf_police_release(); confirm whether
 * that guard exists in the full file.
 */
262 static int tcf_act_police_cleanup(struct tc_action *a, int bind)
264 struct tcf_police *p = a->priv;
267 return tcf_police_release(p, bind);
/*
 * tc_action_ops.act hook: classic two-bucket token-bucket policing.
 * Returns tcfp_result (typically TC_ACT_OK/reclassify) when the packet
 * conforms, or tcf_action (typically drop/shot) when it exceeds the
 * configured rate, peak rate, mtu, or EWMA average rate.
 * Runs under the per-policer spinlock; updates byte/packet stats first.
 *
 * NOTE(review): declarations of now/toks/ptoks, the second argument of
 * psched_tdiff_bounded() (the burst bound), and several closing braces are
 * missing from this extract.
 */
271 static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
272 struct tcf_result *res)
274 struct tcf_police *police = a->priv;
279 spin_lock(&police->tcf_lock);
281 police->tcf_bstats.bytes += skb->len;
282 police->tcf_bstats.packets++;
284 #ifdef CONFIG_NET_ESTIMATOR
/* Average-rate policing: if the measured bps already exceeds the
 * configured EWMA ceiling, punish immediately. */
285 if (police->tcfp_ewma_rate &&
286 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
287 police->tcf_qstats.overlimits++;
288 spin_unlock(&police->tcf_lock);
289 return police->tcf_action;
/* Oversized packets (len > mtu) fall through to the overlimit path. */
293 if (skb->len <= police->tcfp_mtu) {
/* No rate table configured: everything under the mtu conforms. */
294 if (police->tcfp_R_tab == NULL) {
295 spin_unlock(&police->tcf_lock);
296 return police->tcfp_result;
/* Credit tokens for the time elapsed since the last update, bounded
 * so the bucket cannot exceed its burst. */
299 now = psched_get_time();
300 toks = psched_tdiff_bounded(now, police->tcfp_t_c,
/* Peak-rate bucket: capped at one mtu's worth of peak tokens. */
302 if (police->tcfp_P_tab) {
303 ptoks = toks + police->tcfp_ptoks;
304 if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
305 ptoks = (long)L2T_P(police, police->tcfp_mtu);
306 ptoks -= L2T_P(police, skb->len);
/* Main bucket: capped at the configured burst. */
308 toks += police->tcfp_toks;
309 if (toks > (long)police->tcfp_burst)
310 toks = police->tcfp_burst;
311 toks -= L2T(police, skb->len);
/* Both buckets non-negative => packet conforms; commit state. */
312 if ((toks|ptoks) >= 0) {
313 police->tcfp_t_c = now;
314 police->tcfp_toks = toks;
315 police->tcfp_ptoks = ptoks;
316 spin_unlock(&police->tcf_lock);
317 return police->tcfp_result;
/* Overlimit: count it and apply the configured exceed action. */
321 police->tcf_qstats.overlimits++;
322 spin_unlock(&police->tcf_lock);
323 return police->tcf_action;
/*
 * tc_action_ops.dump hook: serialize the policer's configuration into @skb
 * as TCA_POLICE_TBF (+ optional TCA_POLICE_RESULT / TCA_POLICE_AVRATE).
 * @b marks the tail so the rtattr_failure path (not visible here) can trim
 * a partial dump with nlmsg_trim.
 *
 * NOTE(review): the return type line, closing brace, and the
 * rtattr_failure label are missing from this extract.
 * NOTE(review): RTA_PUT of tcfp_result uses sizeof(int) while the field is
 * read as u32 elsewhere, and AVRATE uses a magic length of 4 — harmless on
 * common ABIs but worth normalizing to sizeof(u32).
 */
327 tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
329 unsigned char *b = skb_tail_pointer(skb);
330 struct tcf_police *police = a->priv;
/* refcnt/bindcnt are reported net of the caller's own ref/bind. */
331 struct tc_police opt = {
332 .index = police->tcf_index,
333 .action = police->tcf_action,
334 .mtu = police->tcfp_mtu,
335 .burst = police->tcfp_burst,
336 .refcnt = police->tcf_refcnt - ref,
337 .bindcnt = police->tcf_bindcnt - bind,
340 if (police->tcfp_R_tab)
341 opt.rate = police->tcfp_R_tab->rate;
342 if (police->tcfp_P_tab)
343 opt.peakrate = police->tcfp_P_tab->rate;
344 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
345 if (police->tcfp_result)
346 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
347 &police->tcfp_result);
348 #ifdef CONFIG_NET_ESTIMATOR
349 if (police->tcfp_ewma_rate)
350 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
359 MODULE_AUTHOR("Alexey Kuznetsov");
360 MODULE_DESCRIPTION("Policing actions");
361 MODULE_LICENSE("GPL");
/* Registration table wiring this module's handlers into the generic
 * tc action framework.
 * NOTE(review): original line 364 (presumably .kind = "police") is not
 * visible in this extract. */
363 static struct tc_action_ops act_police_ops = {
365 .hinfo = &police_hash_info,
366 .type = TCA_ID_POLICE,
367 .capab = TCA_CAP_NONE,
368 .owner = THIS_MODULE,
369 .act = tcf_act_police,
370 .dump = tcf_act_police_dump,
371 .cleanup = tcf_act_police_cleanup,
372 .lookup = tcf_hash_search,
373 .init = tcf_act_police_locate,
374 .walk = tcf_act_police_walker
/* Module init: register the police action (return type/__init marker on
 * the preceding, non-visible line). */
378 police_init_module(void)
380 return tcf_register_action(&act_police_ops);
/* Module exit: unregister the police action. */
384 police_cleanup_module(void)
386 tcf_unregister_action(&act_police_ops);
389 module_init(police_init_module);
390 module_exit(police_cleanup_module);
392 #else /* CONFIG_NET_CLS_ACT */
/*
 * Non-action build: look up a policer by index in the shared hash table.
 * Returns the matching tcf_common or (per the non-visible tail of the
 * function) NULL when not found.  Takes hinfo->lock for reading.
 * NOTE(review): the loop-increment line (original 401), the found-return,
 * and the closing lines are not visible in this extract.
 */
394 static struct tcf_common *tcf_police_lookup(u32 index)
396 struct tcf_hashinfo *hinfo = &police_hash_info;
397 struct tcf_common *p;
399 read_lock(hinfo->lock);
400 for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
402 if (p->tcfc_index == index)
405 read_unlock(hinfo->lock);
/*
 * Allocate a fresh, unused policer index: advance police_idx_gen until a
 * value is found that no existing policer uses, then commit and return it.
 * NOTE(review): the loop body that increments/wraps val (original lines
 * 413-417) is not visible in this extract.
 */
410 static u32 tcf_police_new_index(void)
412 u32 *idx_gen = &police_idx_gen;
418 } while (tcf_police_lookup(val));
420 return (*idx_gen = val);
/*
 * Non-action twin of tcf_act_police_locate(): find an existing policer by
 * index (taking a reference) or allocate and configure a new one from the
 * TCA_POLICE_* attributes in @rta, insert it into the hash table, and
 * return it.  Returns NULL (per the non-visible error paths) on parse or
 * allocation failure.
 *
 * NOTE(review): return statements, braces, the "failure" unwind label, and
 * declarations such as size/h are missing from this extract; the visible
 * statements form a skeleton only.  Unlike the CLS_ACT variant above, this
 * one DOES size-check TCA_POLICE_AVRATE before reading it.
 */
423 struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
426 struct tcf_police *police;
427 struct rtattr *tb[TCA_POLICE_MAX];
428 struct tc_police *parm;
431 if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
434 if (tb[TCA_POLICE_TBF-1] == NULL)
/* Accept current and pre-action (compat) tc_police layouts. */
436 size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
437 if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
440 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
/* Reuse an existing policer when an index was supplied. */
443 struct tcf_common *pc;
445 pc = tcf_police_lookup(parm->index);
447 police = to_police(pc);
448 police->tcf_refcnt++;
/* Allocate and initialize a fresh policer. */
452 police = kzalloc(sizeof(*police), GFP_KERNEL);
453 if (unlikely(!police))
456 police->tcf_refcnt = 1;
457 spin_lock_init(&police->tcf_lock);
458 police->tcf_stats_lock = &police->tcf_lock;
/* Resolve rate tables; the assignment targets (tcfp_R_tab/tcfp_P_tab)
 * sit on non-visible lines 460/465 before each qdisc_get_rtab(). */
459 if (parm->rate.rate) {
461 qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
462 if (police->tcfp_R_tab == NULL)
464 if (parm->peakrate.rate) {
466 qdisc_get_rtab(&parm->peakrate,
467 tb[TCA_POLICE_PEAKRATE-1]);
468 if (police->tcfp_P_tab == NULL)
472 if (tb[TCA_POLICE_RESULT-1]) {
473 if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
475 police->tcfp_result = *(u32 *)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
477 #ifdef CONFIG_NET_ESTIMATOR
478 if (tb[TCA_POLICE_AVRATE-1]) {
479 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
481 police->tcfp_ewma_rate =
482 *(u32 *)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
/* Start with a full token bucket. */
485 police->tcfp_toks = police->tcfp_burst = parm->burst;
486 police->tcfp_mtu = parm->mtu;
487 if (police->tcfp_mtu == 0) {
/* mtu==0 means "unlimited"; bound by the rate table's 255 cells. */
488 police->tcfp_mtu = ~0;
489 if (police->tcfp_R_tab)
490 police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
492 if (police->tcfp_P_tab)
493 police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
494 police->tcfp_t_c = psched_get_time();
495 police->tcf_index = parm->index ? parm->index :
496 tcf_police_new_index();
497 police->tcf_action = parm->action;
498 #ifdef CONFIG_NET_ESTIMATOR
500 gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
501 police->tcf_stats_lock, est);
/* Publish the new policer in the hash table. */
503 h = tcf_hash(police->tcf_index, POL_TAB_MASK);
504 write_lock_bh(&police_lock);
505 police->tcf_next = tcf_police_ht[h];
506 tcf_police_ht[h] = &police->common;
507 write_unlock_bh(&police_lock);
/* Error unwind (original "failure" path): release what was acquired. */
511 if (police->tcfp_R_tab)
512 qdisc_put_rtab(police->tcfp_R_tab);
/*
 * Non-action entry point: police @skb against @police using the same
 * two-bucket token-bucket algorithm as tcf_act_police() above.  Returns
 * tcfp_result when the packet conforms, tcf_action when it exceeds rate,
 * peak rate, mtu, or the EWMA average rate.  Runs under the per-policer
 * spinlock.
 *
 * NOTE(review): declarations of now/toks/ptoks, the second argument of
 * psched_tdiff_bounded() (the burst bound), and several braces are
 * missing from this extract.
 */
517 int tcf_police(struct sk_buff *skb, struct tcf_police *police)
523 spin_lock(&police->tcf_lock);
525 police->tcf_bstats.bytes += skb->len;
526 police->tcf_bstats.packets++;
528 #ifdef CONFIG_NET_ESTIMATOR
/* Average-rate ceiling exceeded: punish immediately. */
529 if (police->tcfp_ewma_rate &&
530 police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
531 police->tcf_qstats.overlimits++;
532 spin_unlock(&police->tcf_lock);
533 return police->tcf_action;
/* Oversized packets fall through to the overlimit path. */
536 if (skb->len <= police->tcfp_mtu) {
/* No rate table: everything under the mtu conforms. */
537 if (police->tcfp_R_tab == NULL) {
538 spin_unlock(&police->tcf_lock);
539 return police->tcfp_result;
/* Credit tokens for elapsed time, bounded by the burst. */
542 now = psched_get_time();
543 toks = psched_tdiff_bounded(now, police->tcfp_t_c,
/* Peak-rate bucket: capped at one mtu's worth of peak tokens. */
545 if (police->tcfp_P_tab) {
546 ptoks = toks + police->tcfp_ptoks;
547 if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
548 ptoks = (long)L2T_P(police, police->tcfp_mtu);
549 ptoks -= L2T_P(police, skb->len);
/* Main bucket: capped at the configured burst. */
551 toks += police->tcfp_toks;
552 if (toks > (long)police->tcfp_burst)
553 toks = police->tcfp_burst;
554 toks -= L2T(police, skb->len);
/* Both buckets non-negative => conforms; commit the new state. */
555 if ((toks|ptoks) >= 0) {
556 police->tcfp_t_c = now;
557 police->tcfp_toks = toks;
558 police->tcfp_ptoks = ptoks;
559 spin_unlock(&police->tcf_lock);
560 return police->tcfp_result;
/* Overlimit: count it and apply the configured exceed action. */
564 police->tcf_qstats.overlimits++;
565 spin_unlock(&police->tcf_lock);
566 return police->tcf_action;
568 EXPORT_SYMBOL(tcf_police);
/*
 * Serialize @police's configuration into @skb as TCA_POLICE_TBF
 * (+ optional TCA_POLICE_RESULT / TCA_POLICE_AVRATE).  @b marks the tail
 * for the rtattr_failure trim path (not visible in this extract).
 *
 * NOTE(review): the memset() calls below sit in else-branches whose "else"
 * keywords are on non-visible lines 582/586; they are also redundant given
 * the designated initializer zero-fills opt, but match the original.
 * NOTE(review): tcfp_result is emitted with sizeof(int) and AVRATE with a
 * magic 4 — both should be sizeof(u32) for clarity.
 */
570 int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
572 unsigned char *b = skb_tail_pointer(skb);
573 struct tc_police opt = {
574 .index = police->tcf_index,
575 .action = police->tcf_action,
576 .mtu = police->tcfp_mtu,
577 .burst = police->tcfp_burst,
580 if (police->tcfp_R_tab)
581 opt.rate = police->tcfp_R_tab->rate;
583 memset(&opt.rate, 0, sizeof(opt.rate));
584 if (police->tcfp_P_tab)
585 opt.peakrate = police->tcfp_P_tab->rate;
587 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
588 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
589 if (police->tcfp_result)
590 RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
591 &police->tcfp_result);
592 #ifdef CONFIG_NET_ESTIMATOR
593 if (police->tcfp_ewma_rate)
594 RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
/*
 * Dump the policer's statistics (basic byte/packet counters, optional rate
 * estimate, queue/overlimit counters) into @skb via the gnet_stats helpers,
 * in both TCA_STATS2 and compat TCA_STATS form.
 *
 * NOTE(review): the gnet_dump d declaration, the errout label/returns, and
 * the trailing arguments of gnet_stats_start_copy_compat() are on
 * non-visible lines in this extract.
 */
603 int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
607 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
608 TCA_XSTATS, police->tcf_stats_lock,
612 if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
613 #ifdef CONFIG_NET_ESTIMATOR
614 gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
616 gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
619 if (gnet_stats_finish_copy(&d) < 0)
628 #endif /* CONFIG_NET_CLS_ACT */