/*
 * net/sched/police.c	Input police filter.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
13 #include <linux/module.h>
14 #include <linux/types.h>
15 #include <linux/kernel.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/skbuff.h>
19 #include <linux/module.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/init.h>
22 #include <net/act_api.h>
23 #include <net/netlink.h>
/*
 * Token cost of sending L bytes at the configured rate (L2T) or peak rate
 * (L2T_P): index the precomputed qdisc rate table by L >> cell_log.
 * Callers must ensure the corresponding rate table pointer is non-NULL.
 */
#define L2T(p,L)   ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log])
#define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log])
28 #define POL_TAB_MASK 15
29 static struct tcf_common
*tcf_police_ht
[POL_TAB_MASK
+ 1];
30 static u32 police_idx_gen
;
31 static DEFINE_RWLOCK(police_lock
);
33 static struct tcf_hashinfo police_hash_info
= {
34 .htab
= tcf_police_ht
,
35 .hmask
= POL_TAB_MASK
,
39 /* old policer structure from before tc actions */
40 struct tc_police_compat
47 struct tc_ratespec rate
;
48 struct tc_ratespec peakrate
;
51 /* Each policer is serialized by its individual spinlock */
53 #ifdef CONFIG_NET_CLS_ACT
54 static int tcf_act_police_walker(struct sk_buff
*skb
, struct netlink_callback
*cb
,
55 int type
, struct tc_action
*a
)
58 int err
= 0, index
= -1, i
= 0, s_i
= 0, n_i
= 0;
61 read_lock(&police_lock
);
65 for (i
= 0; i
< (POL_TAB_MASK
+ 1); i
++) {
66 p
= tcf_police_ht
[tcf_hash(i
, POL_TAB_MASK
)];
68 for (; p
; p
= p
->tcfc_next
) {
74 r
= (struct rtattr
*)skb_tail_pointer(skb
);
75 RTA_PUT(skb
, a
->order
, 0, NULL
);
76 if (type
== RTM_DELACTION
)
77 err
= tcf_action_dump_1(skb
, a
, 0, 1);
79 err
= tcf_action_dump_1(skb
, a
, 0, 0);
85 r
->rta_len
= skb_tail_pointer(skb
) - (u8
*)r
;
90 read_unlock(&police_lock
);
101 void tcf_police_destroy(struct tcf_police
*p
)
103 unsigned int h
= tcf_hash(p
->tcf_index
, POL_TAB_MASK
);
104 struct tcf_common
**p1p
;
106 for (p1p
= &tcf_police_ht
[h
]; *p1p
; p1p
= &(*p1p
)->tcfc_next
) {
107 if (*p1p
== &p
->common
) {
108 write_lock_bh(&police_lock
);
110 write_unlock_bh(&police_lock
);
111 gen_kill_estimator(&p
->tcf_bstats
,
114 qdisc_put_rtab(p
->tcfp_R_tab
);
116 qdisc_put_rtab(p
->tcfp_P_tab
);
124 #ifdef CONFIG_NET_CLS_ACT
125 static int tcf_act_police_locate(struct rtattr
*rta
, struct rtattr
*est
,
126 struct tc_action
*a
, int ovr
, int bind
)
130 struct rtattr
*tb
[TCA_POLICE_MAX
];
131 struct tc_police
*parm
;
132 struct tcf_police
*police
;
133 struct qdisc_rate_table
*R_tab
= NULL
, *P_tab
= NULL
;
136 if (rta
== NULL
|| rtattr_parse_nested(tb
, TCA_POLICE_MAX
, rta
) < 0)
139 if (tb
[TCA_POLICE_TBF
-1] == NULL
)
141 size
= RTA_PAYLOAD(tb
[TCA_POLICE_TBF
-1]);
142 if (size
!= sizeof(*parm
) && size
!= sizeof(struct tc_police_compat
))
144 parm
= RTA_DATA(tb
[TCA_POLICE_TBF
-1]);
146 if (tb
[TCA_POLICE_RESULT
-1] != NULL
&&
147 RTA_PAYLOAD(tb
[TCA_POLICE_RESULT
-1]) != sizeof(u32
))
149 if (tb
[TCA_POLICE_RESULT
-1] != NULL
&&
150 RTA_PAYLOAD(tb
[TCA_POLICE_RESULT
-1]) != sizeof(u32
))
154 struct tcf_common
*pc
;
156 pc
= tcf_hash_lookup(parm
->index
, &police_hash_info
);
159 police
= to_police(pc
);
161 police
->tcf_bindcnt
+= 1;
162 police
->tcf_refcnt
+= 1;
170 police
= kzalloc(sizeof(*police
), GFP_KERNEL
);
174 police
->tcf_refcnt
= 1;
175 spin_lock_init(&police
->tcf_lock
);
177 police
->tcf_bindcnt
= 1;
179 if (parm
->rate
.rate
) {
181 R_tab
= qdisc_get_rtab(&parm
->rate
, tb
[TCA_POLICE_RATE
-1]);
184 if (parm
->peakrate
.rate
) {
185 P_tab
= qdisc_get_rtab(&parm
->peakrate
,
186 tb
[TCA_POLICE_PEAKRATE
-1]);
188 qdisc_put_rtab(R_tab
);
193 /* No failure allowed after this point */
194 spin_lock_bh(&police
->tcf_lock
);
196 qdisc_put_rtab(police
->tcfp_R_tab
);
197 police
->tcfp_R_tab
= R_tab
;
200 qdisc_put_rtab(police
->tcfp_P_tab
);
201 police
->tcfp_P_tab
= P_tab
;
204 if (tb
[TCA_POLICE_RESULT
-1])
205 police
->tcfp_result
= *(u32
*)RTA_DATA(tb
[TCA_POLICE_RESULT
-1]);
206 police
->tcfp_toks
= police
->tcfp_burst
= parm
->burst
;
207 police
->tcfp_mtu
= parm
->mtu
;
208 if (police
->tcfp_mtu
== 0) {
209 police
->tcfp_mtu
= ~0;
210 if (police
->tcfp_R_tab
)
211 police
->tcfp_mtu
= 255<<police
->tcfp_R_tab
->rate
.cell_log
;
213 if (police
->tcfp_P_tab
)
214 police
->tcfp_ptoks
= L2T_P(police
, police
->tcfp_mtu
);
215 police
->tcf_action
= parm
->action
;
217 if (tb
[TCA_POLICE_AVRATE
-1])
218 police
->tcfp_ewma_rate
=
219 *(u32
*)RTA_DATA(tb
[TCA_POLICE_AVRATE
-1]);
221 gen_replace_estimator(&police
->tcf_bstats
,
222 &police
->tcf_rate_est
,
223 &police
->tcf_lock
, est
);
225 spin_unlock_bh(&police
->tcf_lock
);
226 if (ret
!= ACT_P_CREATED
)
229 police
->tcfp_t_c
= psched_get_time();
230 police
->tcf_index
= parm
->index
? parm
->index
:
231 tcf_hash_new_index(&police_idx_gen
, &police_hash_info
);
232 h
= tcf_hash(police
->tcf_index
, POL_TAB_MASK
);
233 write_lock_bh(&police_lock
);
234 police
->tcf_next
= tcf_police_ht
[h
];
235 tcf_police_ht
[h
] = &police
->common
;
236 write_unlock_bh(&police_lock
);
242 if (ret
== ACT_P_CREATED
)
247 static int tcf_act_police_cleanup(struct tc_action
*a
, int bind
)
249 struct tcf_police
*p
= a
->priv
;
252 return tcf_police_release(p
, bind
);
256 static int tcf_act_police(struct sk_buff
*skb
, struct tc_action
*a
,
257 struct tcf_result
*res
)
259 struct tcf_police
*police
= a
->priv
;
264 spin_lock(&police
->tcf_lock
);
266 police
->tcf_bstats
.bytes
+= skb
->len
;
267 police
->tcf_bstats
.packets
++;
269 if (police
->tcfp_ewma_rate
&&
270 police
->tcf_rate_est
.bps
>= police
->tcfp_ewma_rate
) {
271 police
->tcf_qstats
.overlimits
++;
272 spin_unlock(&police
->tcf_lock
);
273 return police
->tcf_action
;
276 if (skb
->len
<= police
->tcfp_mtu
) {
277 if (police
->tcfp_R_tab
== NULL
) {
278 spin_unlock(&police
->tcf_lock
);
279 return police
->tcfp_result
;
282 now
= psched_get_time();
283 toks
= psched_tdiff_bounded(now
, police
->tcfp_t_c
,
285 if (police
->tcfp_P_tab
) {
286 ptoks
= toks
+ police
->tcfp_ptoks
;
287 if (ptoks
> (long)L2T_P(police
, police
->tcfp_mtu
))
288 ptoks
= (long)L2T_P(police
, police
->tcfp_mtu
);
289 ptoks
-= L2T_P(police
, skb
->len
);
291 toks
+= police
->tcfp_toks
;
292 if (toks
> (long)police
->tcfp_burst
)
293 toks
= police
->tcfp_burst
;
294 toks
-= L2T(police
, skb
->len
);
295 if ((toks
|ptoks
) >= 0) {
296 police
->tcfp_t_c
= now
;
297 police
->tcfp_toks
= toks
;
298 police
->tcfp_ptoks
= ptoks
;
299 spin_unlock(&police
->tcf_lock
);
300 return police
->tcfp_result
;
304 police
->tcf_qstats
.overlimits
++;
305 spin_unlock(&police
->tcf_lock
);
306 return police
->tcf_action
;
310 tcf_act_police_dump(struct sk_buff
*skb
, struct tc_action
*a
, int bind
, int ref
)
312 unsigned char *b
= skb_tail_pointer(skb
);
313 struct tcf_police
*police
= a
->priv
;
314 struct tc_police opt
;
316 opt
.index
= police
->tcf_index
;
317 opt
.action
= police
->tcf_action
;
318 opt
.mtu
= police
->tcfp_mtu
;
319 opt
.burst
= police
->tcfp_burst
;
320 opt
.refcnt
= police
->tcf_refcnt
- ref
;
321 opt
.bindcnt
= police
->tcf_bindcnt
- bind
;
322 if (police
->tcfp_R_tab
)
323 opt
.rate
= police
->tcfp_R_tab
->rate
;
325 memset(&opt
.rate
, 0, sizeof(opt
.rate
));
326 if (police
->tcfp_P_tab
)
327 opt
.peakrate
= police
->tcfp_P_tab
->rate
;
329 memset(&opt
.peakrate
, 0, sizeof(opt
.peakrate
));
330 RTA_PUT(skb
, TCA_POLICE_TBF
, sizeof(opt
), &opt
);
331 if (police
->tcfp_result
)
332 RTA_PUT(skb
, TCA_POLICE_RESULT
, sizeof(int),
333 &police
->tcfp_result
);
334 if (police
->tcfp_ewma_rate
)
335 RTA_PUT(skb
, TCA_POLICE_AVRATE
, 4, &police
->tcfp_ewma_rate
);
343 MODULE_AUTHOR("Alexey Kuznetsov");
344 MODULE_DESCRIPTION("Policing actions");
345 MODULE_LICENSE("GPL");
347 static struct tc_action_ops act_police_ops
= {
349 .hinfo
= &police_hash_info
,
350 .type
= TCA_ID_POLICE
,
351 .capab
= TCA_CAP_NONE
,
352 .owner
= THIS_MODULE
,
353 .act
= tcf_act_police
,
354 .dump
= tcf_act_police_dump
,
355 .cleanup
= tcf_act_police_cleanup
,
356 .lookup
= tcf_hash_search
,
357 .init
= tcf_act_police_locate
,
358 .walk
= tcf_act_police_walker
362 police_init_module(void)
364 return tcf_register_action(&act_police_ops
);
368 police_cleanup_module(void)
370 tcf_unregister_action(&act_police_ops
);
373 module_init(police_init_module
);
374 module_exit(police_cleanup_module
);
376 #else /* CONFIG_NET_CLS_ACT */
378 static struct tcf_common
*tcf_police_lookup(u32 index
)
380 struct tcf_hashinfo
*hinfo
= &police_hash_info
;
381 struct tcf_common
*p
;
383 read_lock(hinfo
->lock
);
384 for (p
= hinfo
->htab
[tcf_hash(index
, hinfo
->hmask
)]; p
;
386 if (p
->tcfc_index
== index
)
389 read_unlock(hinfo
->lock
);
394 static u32
tcf_police_new_index(void)
396 u32
*idx_gen
= &police_idx_gen
;
402 } while (tcf_police_lookup(val
));
404 return (*idx_gen
= val
);
407 struct tcf_police
*tcf_police_locate(struct rtattr
*rta
, struct rtattr
*est
)
410 struct tcf_police
*police
;
411 struct rtattr
*tb
[TCA_POLICE_MAX
];
412 struct tc_police
*parm
;
415 if (rtattr_parse_nested(tb
, TCA_POLICE_MAX
, rta
) < 0)
418 if (tb
[TCA_POLICE_TBF
-1] == NULL
)
420 size
= RTA_PAYLOAD(tb
[TCA_POLICE_TBF
-1]);
421 if (size
!= sizeof(*parm
) && size
!= sizeof(struct tc_police_compat
))
424 parm
= RTA_DATA(tb
[TCA_POLICE_TBF
-1]);
427 struct tcf_common
*pc
;
429 pc
= tcf_police_lookup(parm
->index
);
431 police
= to_police(pc
);
432 police
->tcf_refcnt
++;
436 police
= kzalloc(sizeof(*police
), GFP_KERNEL
);
437 if (unlikely(!police
))
440 police
->tcf_refcnt
= 1;
441 spin_lock_init(&police
->tcf_lock
);
442 if (parm
->rate
.rate
) {
444 qdisc_get_rtab(&parm
->rate
, tb
[TCA_POLICE_RATE
-1]);
445 if (police
->tcfp_R_tab
== NULL
)
447 if (parm
->peakrate
.rate
) {
449 qdisc_get_rtab(&parm
->peakrate
,
450 tb
[TCA_POLICE_PEAKRATE
-1]);
451 if (police
->tcfp_P_tab
== NULL
)
455 if (tb
[TCA_POLICE_RESULT
-1]) {
456 if (RTA_PAYLOAD(tb
[TCA_POLICE_RESULT
-1]) != sizeof(u32
))
458 police
->tcfp_result
= *(u32
*)RTA_DATA(tb
[TCA_POLICE_RESULT
-1]);
460 if (tb
[TCA_POLICE_AVRATE
-1]) {
461 if (RTA_PAYLOAD(tb
[TCA_POLICE_AVRATE
-1]) != sizeof(u32
))
463 police
->tcfp_ewma_rate
=
464 *(u32
*)RTA_DATA(tb
[TCA_POLICE_AVRATE
-1]);
466 police
->tcfp_toks
= police
->tcfp_burst
= parm
->burst
;
467 police
->tcfp_mtu
= parm
->mtu
;
468 if (police
->tcfp_mtu
== 0) {
469 police
->tcfp_mtu
= ~0;
470 if (police
->tcfp_R_tab
)
471 police
->tcfp_mtu
= 255<<police
->tcfp_R_tab
->rate
.cell_log
;
473 if (police
->tcfp_P_tab
)
474 police
->tcfp_ptoks
= L2T_P(police
, police
->tcfp_mtu
);
475 police
->tcfp_t_c
= psched_get_time();
476 police
->tcf_index
= parm
->index
? parm
->index
:
477 tcf_police_new_index();
478 police
->tcf_action
= parm
->action
;
480 gen_new_estimator(&police
->tcf_bstats
, &police
->tcf_rate_est
,
481 &police
->tcf_lock
, est
);
482 h
= tcf_hash(police
->tcf_index
, POL_TAB_MASK
);
483 write_lock_bh(&police_lock
);
484 police
->tcf_next
= tcf_police_ht
[h
];
485 tcf_police_ht
[h
] = &police
->common
;
486 write_unlock_bh(&police_lock
);
490 if (police
->tcfp_R_tab
)
491 qdisc_put_rtab(police
->tcfp_R_tab
);
496 int tcf_police(struct sk_buff
*skb
, struct tcf_police
*police
)
502 spin_lock(&police
->tcf_lock
);
504 police
->tcf_bstats
.bytes
+= skb
->len
;
505 police
->tcf_bstats
.packets
++;
507 if (police
->tcfp_ewma_rate
&&
508 police
->tcf_rate_est
.bps
>= police
->tcfp_ewma_rate
) {
509 police
->tcf_qstats
.overlimits
++;
510 spin_unlock(&police
->tcf_lock
);
511 return police
->tcf_action
;
513 if (skb
->len
<= police
->tcfp_mtu
) {
514 if (police
->tcfp_R_tab
== NULL
) {
515 spin_unlock(&police
->tcf_lock
);
516 return police
->tcfp_result
;
519 now
= psched_get_time();
520 toks
= psched_tdiff_bounded(now
, police
->tcfp_t_c
,
522 if (police
->tcfp_P_tab
) {
523 ptoks
= toks
+ police
->tcfp_ptoks
;
524 if (ptoks
> (long)L2T_P(police
, police
->tcfp_mtu
))
525 ptoks
= (long)L2T_P(police
, police
->tcfp_mtu
);
526 ptoks
-= L2T_P(police
, skb
->len
);
528 toks
+= police
->tcfp_toks
;
529 if (toks
> (long)police
->tcfp_burst
)
530 toks
= police
->tcfp_burst
;
531 toks
-= L2T(police
, skb
->len
);
532 if ((toks
|ptoks
) >= 0) {
533 police
->tcfp_t_c
= now
;
534 police
->tcfp_toks
= toks
;
535 police
->tcfp_ptoks
= ptoks
;
536 spin_unlock(&police
->tcf_lock
);
537 return police
->tcfp_result
;
541 police
->tcf_qstats
.overlimits
++;
542 spin_unlock(&police
->tcf_lock
);
543 return police
->tcf_action
;
545 EXPORT_SYMBOL(tcf_police
);
547 int tcf_police_dump(struct sk_buff
*skb
, struct tcf_police
*police
)
549 unsigned char *b
= skb_tail_pointer(skb
);
550 struct tc_police opt
;
552 opt
.index
= police
->tcf_index
;
553 opt
.action
= police
->tcf_action
;
554 opt
.mtu
= police
->tcfp_mtu
;
555 opt
.burst
= police
->tcfp_burst
;
556 if (police
->tcfp_R_tab
)
557 opt
.rate
= police
->tcfp_R_tab
->rate
;
559 memset(&opt
.rate
, 0, sizeof(opt
.rate
));
560 if (police
->tcfp_P_tab
)
561 opt
.peakrate
= police
->tcfp_P_tab
->rate
;
563 memset(&opt
.peakrate
, 0, sizeof(opt
.peakrate
));
564 RTA_PUT(skb
, TCA_POLICE_TBF
, sizeof(opt
), &opt
);
565 if (police
->tcfp_result
)
566 RTA_PUT(skb
, TCA_POLICE_RESULT
, sizeof(int),
567 &police
->tcfp_result
);
568 if (police
->tcfp_ewma_rate
)
569 RTA_PUT(skb
, TCA_POLICE_AVRATE
, 4, &police
->tcfp_ewma_rate
);
577 int tcf_police_dump_stats(struct sk_buff
*skb
, struct tcf_police
*police
)
581 if (gnet_stats_start_copy_compat(skb
, TCA_STATS2
, TCA_STATS
,
582 TCA_XSTATS
, &police
->tcf_lock
,
586 if (gnet_stats_copy_basic(&d
, &police
->tcf_bstats
) < 0 ||
587 gnet_stats_copy_rate_est(&d
, &police
->tcf_rate_est
) < 0 ||
588 gnet_stats_copy_queue(&d
, &police
->tcf_qstats
) < 0)
591 if (gnet_stats_finish_copy(&d
) < 0)
600 #endif /* CONFIG_NET_CLS_ACT */