/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.


	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, the other one,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P>R and B>M. If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)

	(see the worked example after this comment block)


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not woken by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak-rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use Alpha with HZ=1000 :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
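/* Worked example of the two bounds above. The figures are illustrative
 * assumptions only, not values taken from this file.
 *
 * Assume rate R = 1 Mbit/s (125000 bytes/sec), bucket depth B = 10000
 * bytes, backlog limit L = 50000 bytes and no peak rate configured
 * (P is infinite, so the second term vanishes). Then:
 *
 *	lat = max((L-B)/R, (L-M)/P)
 *	    = (50000 - 10000) / 125000
 *	    = 0.32 sec
 *
 * The critical-rate note works the same way: with HZ=100 the qdisc may
 * sleep for 1/HZ = 10 ms between wakeups, so sustaining 10Mbit ethernet
 * (R = 1.25 Mbytes/sec) needs B >= R/HZ = 12500 bytes, which is the
 * "~10Kbytes" figure quoted above.
 */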
struct tbf_sched_data
{
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32		mtu;
	u32		max_size;
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long	tokens;			/* Current number of B tokens */
	long	ptokens;		/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};

#define L2T(q,L)   qdisc_l2t((q)->R_tab,L)
#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if (qdisc_pkt_len(skb) > q->max_size)
		return qdisc_reshape_fail(skb, sch);

	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != 0) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	return 0;
}

static int tbf_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		if ((toks|ptoks) >= 0) {
			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog,
					now + max_t(long, -toks, -ptoks));
		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle:
		   we MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
			/* When requeue fails skb is dropped */
			qdisc_tree_decrease_qlen(q->qdisc, 1);
			sch->qstats.drops++;
		}

		sch->qstats.overlimits++;
	}
	return NULL;
}
static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = psched_get_time();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};
static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}
static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = psched_get_time();
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}
static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.change		=	tbf_change_class,
	.delete		=	tbf_delete,
	.walk		=	tbf_walk,
	.tcf_chain	=	tbf_find_tcf,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.requeue	=	tbf_requeue,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");