/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
/*	Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.


	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size; the other one,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P > R and B > M. If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)

	where L is the configured limit (maximal backlog in bytes).


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.

	This means that, with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use alpha with HZ=1000 :-)

	With classful TBF, limit is kept only for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is no longer effective.
*/
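/* Worked example (illustrative only, not part of the original source):
 * with HZ=100 and a bucket of B = 10240 bytes, R_crit = B*HZ gives
 * 1,024,000 bytes/sec, i.e. roughly 8 Mbit/s - consistent with the
 * ~10 Kbyte minimum quoted above for 10 Mbit ethernet.  From user
 * space such a qdisc is typically configured with something like:
 *
 *	tc qdisc add dev eth0 root tbf rate 8mbit burst 10k latency 50ms
 *
 * where "rate" selects R_tab, "burst" sets the bucket depth (buffer)
 * and "latency" is translated by tc into the byte limit of the inner
 * bfifo queue.
 */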
struct tbf_sched_data
{
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32		mtu;
	u32		max_size;
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long		tokens;		/* Current number of B tokens */
	long		ptokens;	/* Current number of P tokens */
	psched_time_t	t_c;		/* Time check-point */
	struct timer_list wd_timer;	/* Watchdog timer */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
};
#define L2T(q,L)   qdisc_l2t((q)->R_tab,L)
#define L2T_P(q,L) qdisc_l2t((q)->P_tab,L)
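/* Enqueue: a packet longer than max_size could never accumulate enough
 * tokens to be sent, so it is dropped immediately (or handed to
 * reshape_fail when classifier policing is compiled in).  Everything
 * else goes to the inner qdisc and is accounted in the stats.
 */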
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	int ret;

	if (skb->len > q->max_size) {
		sch->stats.drops++;
#ifdef CONFIG_NET_CLS_POLICE
		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
			kfree_skb(skb);

		return NET_XMIT_DROP;
	}

	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
		sch->stats.drops++;
		return ret;
	}

	sch->q.qlen++;
	sch->stats.bytes += skb->len;
	sch->stats.packets++;
	return 0;
}
static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0)
		sch->q.qlen++;

	return ret;
}
static unsigned int tbf_drop(struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	unsigned int len;

	if ((len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->stats.drops++;
	}
	return len;
}
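/* Watchdog timer callback: fires once enough tokens should have
 * accumulated, clears the throttled flag and reschedules the device
 * so that dequeue is attempted again.
 */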
static void tbf_watchdog(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc*)arg;

	sch->flags &= ~TCQ_F_THROTTLED;
	netif_schedule(sch->dev);
}
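/* Dequeue: tokens are kept in scheduler clock units.  Work out how much
 * both buckets have refilled since the last check-point t_c, charge the
 * packet's transmission time against the rate bucket (and the peak-rate
 * bucket if configured), and release the packet only if neither bucket
 * goes negative.  Otherwise the packet is put back on the inner qdisc
 * and the watchdog is armed for the moment the deficit is covered.
 */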
static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks, delay;
		long ptoks = 0;
		unsigned int len = skb->len;

		PSCHED_GET_TIME(now);

		toks = PSCHED_TDIFF_SAFE(now, q->t_c, q->buffer, 0);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		if ((toks|ptoks) >= 0) {
			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		}

		delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));

		if (delay == 0)
			delay = 1;

		mod_timer(&q->wd_timer, jiffies+delay);

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but, however, this is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
			/* When requeue fails skb is dropped */
			sch->q.qlen--;
			sch->stats.drops++;
		}

		sch->flags |= TCQ_F_THROTTLED;
		sch->stats.overlimits++;
	}
	return NULL;
}
static void tbf_reset(struct Qdisc* sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	PSCHED_GET_TIME(q->t_c);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	sch->flags &= ~TCQ_F_THROTTLED;
	del_timer(&q->wd_timer);
}
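/* Create the default inner qdisc: a bfifo whose byte limit is set to
 * the TBF "limit" parameter via the fifo's own change() operation.
 */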
static struct Qdisc *tbf_create_dflt_qdisc(struct net_device *dev, u32 limit)
{
	struct Qdisc *q = qdisc_create_dflt(dev, &bfifo_qdisc_ops);
	struct rtattr *rta;
	int ret;

	if (q) {
		rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
		if (rta) {
			rta->rta_type = RTM_NEWQDISC;
			rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
			((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

			ret = q->ops->change(q, rta);
			kfree(rta);

			if (ret == 0)
				return q;
		}
		qdisc_destroy(q);
	}

	return NULL;
}
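/* Parse and apply a new configuration: fetch the TCA_TBF_PARMS block,
 * look up the rate table (and the optional peak-rate table), and derive
 * max_size - the largest packet that can ever earn enough tokens given
 * the configured buffer and mtu.  The new values are swapped in under
 * the qdisc tree lock.
 */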
static int tbf_change(struct Qdisc* sch, struct rtattr *opt)
{
	int err = -EINVAL;
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	struct rtattr *tb[TCA_TBF_PTAB];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size,n;

	if (rtattr_parse(tb, TCA_TBF_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
	    tb[TCA_TBF_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_TBF_PARMS-1]) < sizeof(*qopt))
		goto done;

	qopt = RTA_DATA(tb[TCA_TBF_PARMS-1]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB-1]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB-1]);
		if (ptab == NULL)
			goto done;
	}

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer) break;
	max_size = (n << qopt->rate.cell_log)-1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu) break;
		size = (n << qopt->peakrate.cell_log)-1;
		if (size < max_size) max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (q->qdisc == &noop_qdisc) {
		if ((child = tbf_create_dflt_qdisc(sch->dev, qopt->limit)) == NULL)
			goto done;
	}

	sch_tree_lock(sch);
	if (child) q->qdisc = child;
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	rtab = xchg(&q->R_tab, rtab);
	ptab = xchg(&q->P_tab, ptab);
	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}
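/* Initialise the qdisc: set the time check-point, arm the watchdog
 * callback, start with the noop inner qdisc and then apply the user
 * supplied configuration through tbf_change().
 */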
static int tbf_init(struct Qdisc* sch, struct rtattr *opt)
{
	int err;
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

	if (opt == NULL)
		return -EINVAL;

	MOD_INC_USE_COUNT;

	PSCHED_GET_TIME(q->t_c);
	init_timer(&q->wd_timer);
	q->wd_timer.function = tbf_watchdog;
	q->wd_timer.data = (unsigned long)sch;

	q->qdisc = &noop_qdisc;

	if ((err = tbf_change(sch, opt)) != 0) {
		MOD_DEC_USE_COUNT;
	}
	return err;
}
static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

	del_timer(&q->wd_timer);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
	q->qdisc = &noop_qdisc;

	MOD_DEC_USE_COUNT;
}
static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	struct tc_tbf_qopt opt;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);
	rta->rta_len = skb->tail - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = (struct tbf_sched_data*)sch->data;

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
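/* Classful glue: the qdisc exposes exactly one class (minor 1) that
 * wraps the inner qdisc.  Grafting swaps the inner qdisc under the
 * tree lock; the displaced qdisc is reset and handed back to the
 * caller.
 */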
static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_reset(*old);
	sch->q.qlen = 0;
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
	return q->qdisc;
}
static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}
static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}
static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}
static int tbf_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}
static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}
static struct Qdisc_class_ops tbf_class_ops =
{
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.change		=	tbf_change_class,
	.delete		=	tbf_delete,
	.walk		=	tbf_walk,
	.tcf_chain	=	tbf_find_tcf,
	.dump		=	tbf_dump_class,
};
struct Qdisc_ops tbf_qdisc_ops =
{
	NULL,
	&tbf_class_ops,
	"tbf",
	sizeof(struct tbf_sched_data),

	tbf_enqueue,
	tbf_dequeue,
	tbf_requeue,
	tbf_drop,

	tbf_init,
	tbf_reset,
	tbf_destroy,
	tbf_change,

	tbf_dump,
};
#ifdef MODULE
int init_module(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

void cleanup_module(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
#endif
MODULE_LICENSE("GPL");