checkpatch: if should not continue a preceding brace
[linux-2.6/mini2440.git] / net / sched / act_police.c
blob 5c72a116b1a4513405735349391a01ff1c9b376a
/*
 * net/sched/police.c	Input police filter.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/act_api.h>
#include <net/netlink.h>
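
/*
 * L2T()/L2T_P() below return the transmission time of an L-byte packet
 * under the configured rate and peak-rate tables, i.e. the number of
 * tokens (in scheduler ticks) such a packet costs.
 */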
#define L2T(p,L)	qdisc_l2t((p)->tcfp_R_tab, L)
#define L2T_P(p,L)	qdisc_l2t((p)->tcfp_P_tab, L)

#define POL_TAB_MASK     15
static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
static u32 police_idx_gen;
static DEFINE_RWLOCK(police_lock);

static struct tcf_hashinfo police_hash_info = {
	.htab	= tcf_police_ht,
	.hmask	= POL_TAB_MASK,
	.lock	= &police_lock,
};

/* old policer structure from before tc actions */
struct tc_police_compat
{
	u32			index;
	int			action;
	u32			limit;
	u32			burst;
	u32			mtu;
	struct tc_ratespec	rate;
	struct tc_ratespec	peakrate;
};

/* Each policer is serialized by its individual spinlock */

static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
				 int type, struct tc_action *a)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	read_lock_bh(&police_lock);

	s_i = cb->args[0];
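
	/*
	 * cb->args[0] carries the number of entries already emitted by
	 * previous invocations of this netlink dump, so entries with a
	 * smaller index are skipped and the walk resumes where it stopped.
	 */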
	for (i = 0; i < (POL_TAB_MASK + 1); i++) {
		p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = index;
			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			if (type == RTM_DELACTION)
				err = tcf_action_dump_1(skb, a, 0, 1);
			else
				err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nla_nest_cancel(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
		}
	}
done:
	read_unlock_bh(&police_lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static void tcf_police_destroy(struct tcf_police *p)
{
	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
	struct tcf_common **p1p;

	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == &p->common) {
			write_lock_bh(&police_lock);
			*p1p = p->tcf_next;
			write_unlock_bh(&police_lock);
			gen_kill_estimator(&p->tcf_bstats,
					   &p->tcf_rate_est);
			if (p->tcfp_R_tab)
				qdisc_put_rtab(p->tcfp_R_tab);
			if (p->tcfp_P_tab)
				qdisc_put_rtab(p->tcfp_P_tab);
			kfree(p);
			return;
		}
	}
	WARN_ON(1);
}

static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_PEAKRATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_AVRATE]	= { .type = NLA_U32 },
	[TCA_POLICE_RESULT]	= { .type = NLA_U32 },
};

static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
				 struct tc_action *a, int ovr, int bind)
{
	unsigned h;
	int ret = 0, err;
	struct nlattr *tb[TCA_POLICE_MAX + 1];
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	int size;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_POLICE_MAX, nla, police_policy);
	if (err < 0)
		return err;

	if (tb[TCA_POLICE_TBF] == NULL)
		return -EINVAL;
	size = nla_len(tb[TCA_POLICE_TBF]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;
	parm = nla_data(tb[TCA_POLICE_TBF]);
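
	/*
	 * If user space supplied an index, reuse an existing policer with
	 * that index: only take bind/ref counts, and fall through to the
	 * "override" path solely when replacement of its parameters was
	 * requested.
	 */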
	if (parm->index) {
		struct tcf_common *pc;

		pc = tcf_hash_lookup(parm->index, &police_hash_info);
		if (pc != NULL) {
			a->priv = pc;
			police = to_police(pc);
			if (bind) {
				police->tcf_bindcnt += 1;
				police->tcf_refcnt += 1;
			}
			if (ovr)
				goto override;
			return ret;
		}
	}

	police = kzalloc(sizeof(*police), GFP_KERNEL);
	if (police == NULL)
		return -ENOMEM;
	ret = ACT_P_CREATED;
	police->tcf_refcnt = 1;
	spin_lock_init(&police->tcf_lock);
	if (bind)
		police->tcf_bindcnt = 1;
override:
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE]);
		if (R_tab == NULL)
			goto failure;

		if (!est && (ret == ACT_P_CREATED ||
			     !gen_estimator_active(&police->tcf_bstats,
						   &police->tcf_rate_est))) {
			err = -EINVAL;
			goto failure;
		}

		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE]);
			if (P_tab == NULL)
				goto failure;
		}
	}

	spin_lock_bh(&police->tcf_lock);
	if (est) {
		err = gen_replace_estimator(&police->tcf_bstats,
					    &police->tcf_rate_est,
					    &police->tcf_lock, est);
		if (err)
			goto failure_unlock;
	}

	/* No failure allowed after this point */
	if (R_tab != NULL) {
		qdisc_put_rtab(police->tcfp_R_tab);
		police->tcfp_R_tab = R_tab;
	}
	if (P_tab != NULL) {
		qdisc_put_rtab(police->tcfp_P_tab);
		police->tcfp_P_tab = P_tab;
	}

	if (tb[TCA_POLICE_RESULT])
		police->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
	police->tcfp_toks = police->tcfp_burst = parm->burst;
	police->tcfp_mtu = parm->mtu;
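	/*
	 * An MTU of zero means "no explicit packet size limit": default it
	 * to the largest length the rate table can express (255 cells of
	 * 2^cell_log bytes each), or to ~0 when no rate table is attached.
	 */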
	if (police->tcfp_mtu == 0) {
		police->tcfp_mtu = ~0;
		if (police->tcfp_R_tab)
			police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
	}
	if (police->tcfp_P_tab)
		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
	police->tcf_action = parm->action;

	if (tb[TCA_POLICE_AVRATE])
		police->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

	spin_unlock_bh(&police->tcf_lock);
	if (ret != ACT_P_CREATED)
		return ret;

	police->tcfp_t_c = psched_get_time();
	police->tcf_index = parm->index ? parm->index :
		tcf_hash_new_index(&police_idx_gen, &police_hash_info);
	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
	write_lock_bh(&police_lock);
	police->tcf_next = tcf_police_ht[h];
	tcf_police_ht[h] = &police->common;
	write_unlock_bh(&police_lock);

	a->priv = police;
	return ret;

failure_unlock:
	spin_unlock_bh(&police->tcf_lock);
failure:
	if (P_tab)
		qdisc_put_rtab(P_tab);
	if (R_tab)
		qdisc_put_rtab(R_tab);
	if (ret == ACT_P_CREATED)
		kfree(police);
	return err;
}

static int tcf_act_police_cleanup(struct tc_action *a, int bind)
{
	struct tcf_police *p = a->priv;
	int ret = 0;

	if (p != NULL) {
		if (bind)
			p->tcf_bindcnt--;

		p->tcf_refcnt--;
		if (p->tcf_refcnt <= 0 && !p->tcf_bindcnt) {
			tcf_police_destroy(p);
			ret = 1;
		}
	}
	return ret;
}

static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_police *police = a->priv;
	psched_time_t now;
	long toks;
	long ptoks = 0;
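
	/*
	 * Token bucket policing: "toks" tracks credit accumulated at the
	 * configured rate (capped at the burst size), "ptoks" tracks credit
	 * at the peak rate (capped at the cost of an MTU-sized packet).  A
	 * packet conforms only if both buckets stay non-negative after its
	 * cost is subtracted; otherwise it is counted as overlimit.
	 */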

	spin_lock(&police->tcf_lock);

	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
	police->tcf_bstats.packets++;

	if (police->tcfp_ewma_rate &&
	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
		police->tcf_qstats.overlimits++;
		spin_unlock(&police->tcf_lock);
		return police->tcf_action;
	}

	if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
		if (police->tcfp_R_tab == NULL) {
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, police->tcfp_t_c,
					    police->tcfp_burst);
		if (police->tcfp_P_tab) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
				ptoks = (long)L2T_P(police, police->tcfp_mtu);
			ptoks -= L2T_P(police, qdisc_pkt_len(skb));
		}
		toks += police->tcfp_toks;
		if (toks > (long)police->tcfp_burst)
			toks = police->tcfp_burst;
		toks -= L2T(police, qdisc_pkt_len(skb));
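		/*
		 * Both buckets are non-negative exactly when the bitwise OR
		 * of the two signed values has no sign bit set, so a single
		 * comparison decides whether the packet conforms.
		 */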
		if ((toks|ptoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}
	}

	police->tcf_qstats.overlimits++;
	spin_unlock(&police->tcf_lock);
	return police->tcf_action;
}

static int
tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_police *police = a->priv;
	struct tc_police opt;

	opt.index = police->tcf_index;
	opt.action = police->tcf_action;
	opt.mtu = police->tcfp_mtu;
	opt.burst = police->tcfp_burst;
	opt.refcnt = police->tcf_refcnt - ref;
	opt.bindcnt = police->tcf_bindcnt - bind;
	if (police->tcfp_R_tab)
		opt.rate = police->tcfp_R_tab->rate;
	else
		memset(&opt.rate, 0, sizeof(opt.rate));
	if (police->tcfp_P_tab)
		opt.peakrate = police->tcfp_P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
	if (police->tcfp_result)
		NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
	if (police->tcfp_ewma_rate)
		NLA_PUT_U32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");

static struct tc_action_ops act_police_ops = {
	.kind		= "police",
	.hinfo		= &police_hash_info,
	.type		= TCA_ID_POLICE,
	.capab		= TCA_CAP_NONE,
	.owner		= THIS_MODULE,
	.act		= tcf_act_police,
	.dump		= tcf_act_police_dump,
	.cleanup	= tcf_act_police_cleanup,
	.lookup		= tcf_hash_search,
	.init		= tcf_act_police_locate,
	.walk		= tcf_act_police_walker
};

static int __init
police_init_module(void)
{
	return tcf_register_action(&act_police_ops);
}

static void __exit
police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);