/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *		original idea by Martin Devera
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

/*	Simple Token Bucket Filter.
	=======================================

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in the queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.
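
	As an illustration only (not kernel code), here is a minimal
	user-space sketch of this update rule; all names are hypothetical.
	Tokens are measured in units of time (seconds of transmission at
	rate R), just as the implementation below measures them in
	psched ticks:

		#include <stdbool.h>

		static double B;   // bucket depth, bits
		static double R;   // rate, bits per second
		static double N;   // current tokens, seconds

		// Grow N with the elapsed time, clamped at depth B/R.
		static void tbf_advance(double delta)
		{
			N += delta;
			if (N > B / R)
				N = B / R;
		}

		// A packet of S bits may go out only when S/R <= N;
		// sending it consumes S/R seconds worth of tokens.
		static bool tbf_may_send(double S)
		{
			if (S / R > N)
				return false;   // throttle
			N -= S / R;
			return true;
		}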

	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, another one,
	with rate P (peak rate) and depth M (equal to link MTU),
	limits bursts at a smaller time scale.

	It is easy to see that P>R and B>M. If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max((L-B)/R, (L-M)/P)

	where L is the backlog limit in bytes.
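
	For example (the numbers are illustrative only): with limit
	L = 10000 bytes, bucket B = 5000 bytes, rate R = 125000 bytes/sec
	(1 Mbit/s), MTU M = 1500 bytes and peak rate P = 1250000 bytes/sec:

	lat = max((10000-5000)/125000, (10000-1500)/1250000)
	    = max(40 ms, 6.8 ms) = 40 ms.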

	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.

	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak rate TBF is much tougher: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use a kernel with HZ=1000. :-)
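
	(The peak-rate figure follows from the same formula with depth
	M = MTU: P_crit = M*HZ = 1500 bytes * 100/sec = 150 Kbytes/sec.)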

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed, the limit is not effective anymore.
*/
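
/* Example configuration from user space (illustrative; see tc-tbf(8)
 * for the authoritative syntax). Attach a classful TBF and replace its
 * default bfifo with an inner qdisc of your own; the single class of a
 * classful TBF is 1:1 (see tbf_dump_class below):
 *
 *	tc qdisc add dev eth0 root handle 1: tbf \
 *		rate 1mbit burst 10kb limit 30kb
 *	tc qdisc add dev eth0 parent 1:1 pfifo limit 100
 */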

struct tbf_sched_data {
/* Parameters */
	u32			limit;		/* Maximal length of backlog: bytes */
	u32			buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	u32			mtu;		/* Peak token bucket depth: bytes */
	u32			max_size;	/* Largest packet we can pass without overflow */
	struct qdisc_rate_table	*R_tab;
	struct qdisc_rate_table	*P_tab;

/* Variables */
	long			tokens;		/* Current number of B tokens */
	long			ptokens;	/* Current number of P tokens */
	psched_time_t		t_c;		/* Time check-point */
	struct Qdisc		*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog	watchdog;	/* Watchdog timer */
};

/* Map a packet length L to the time needed to send it at the
 * configured rate (R_tab) or peak rate (P_tab). */
#define L2T(q, L)	qdisc_l2t((q)->R_tab, L)
#define L2T_P(q, L)	qdisc_l2t((q)->P_tab, L)

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	int ret;

	/* Oversized packets can never earn enough tokens; reshape or drop. */
	if (qdisc_pkt_len(skb) > q->max_size)
		return qdisc_reshape_fail(skb, sch);

	ret = qdisc_enqueue(skb, q->qdisc);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			sch->qstats.drops++;
		return ret;
	}

	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static unsigned int tbf_drop(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop &&
	    (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		/* Grow the buckets by the time elapsed since the last
		 * check-point, bounded by the bucket depth. */
		now = psched_get_time();
		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);

		if (q->P_tab) {
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= (long)L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= (long)L2T(q, len);

		/* Both buckets must stay non-negative after sending. */
		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog,
					now + max_t(long, -toks, -ptoks));

		/* Maybe there is a shorter packet in the queue which
		   could be sent now. It sounds cool, but it is wrong
		   in principle: we MUST NOT reorder packets under
		   these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC).
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	q->t_c = psched_get_time();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
};

static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_PTAB + 1];
	struct tc_tbf_qopt *qopt;
	struct qdisc_rate_table *rtab = NULL;
	struct qdisc_rate_table *ptab = NULL;
	struct Qdisc *child = NULL;
	int max_size, n;

	err = nla_parse_nested(tb, TCA_TBF_PTAB, opt, tbf_policy);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
	if (rtab == NULL)
		goto done;

	if (qopt->peakrate.rate) {
		if (qopt->peakrate.rate > qopt->rate.rate)
			ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
		if (ptab == NULL)
			goto done;
	}

	/* Derive the largest packet we can ever send: the biggest size
	 * whose transmission time still fits into the bucket. */
	for (n = 0; n < 256; n++)
		if (rtab->data[n] > qopt->buffer)
			break;
	max_size = (n << qopt->rate.cell_log) - 1;
	if (ptab) {
		int size;

		for (n = 0; n < 256; n++)
			if (ptab->data[n] > qopt->mtu)
				break;
		size = (n << qopt->peakrate.cell_log) - 1;
		if (size < max_size)
			max_size = size;
	}
	if (max_size < 0)
		goto done;

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	q->mtu = qopt->mtu;
	q->max_size = max_size;
	q->buffer = qopt->buffer;
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	swap(q->R_tab, rtab);
	swap(q->P_tab, ptab);

	sch_tree_unlock(sch);
	err = 0;
done:
	if (rtab)
		qdisc_put_rtab(rtab);
	if (ptab)
		qdisc_put_rtab(ptab);
	return err;
}

static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (opt == NULL)
		return -EINVAL;

	q->t_c = psched_get_time();
	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	return tbf_change(sch, opt);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);

	if (q->P_tab)
		qdisc_put_rtab(q->P_tab);
	if (q->R_tab)
		qdisc_put_rtab(q->R_tab);

	qdisc_destroy(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	opt.rate = q->R_tab->rate;
	if (q->P_tab)
		opt.peakrate = q->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = q->mtu;
	opt.buffer = q->buffer;
	NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt);

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	/* The single class of a classful TBF always has minor number 1. */
	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		=	tbf_graft,
	.leaf		=	tbf_leaf,
	.get		=	tbf_get,
	.put		=	tbf_put,
	.walk		=	tbf_walk,
	.dump		=	tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&tbf_class_ops,
	.id		=	"tbf",
	.priv_size	=	sizeof(struct tbf_sched_data),
	.enqueue	=	tbf_enqueue,
	.dequeue	=	tbf_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	tbf_drop,
	.init		=	tbf_init,
	.reset		=	tbf_reset,
	.destroy	=	tbf_destroy,
	.change		=	tbf_change,
	.dump		=	tbf_dump,
	.owner		=	THIS_MODULE,
};

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}

module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");