[NETEM]: use better types for time values
/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.2"
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		     Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	The simulator is limited by the Linux timer resolution
	and will create packet bursts on the HZ boundary (1ms).
*/
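
/*
 * Illustrative configuration via the tc(8) front end (device name and
 * numbers here are examples only):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 * delays every packet by 100ms with +/-10ms of jitter, each delay sample
 * 25% correlated with the previous one. Loss, duplication, corruption,
 * and reordering are configured the same way, e.g. "loss 0.3% 25%" or
 * "reorder 25% 50% gap 5".
 */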
struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};
/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}
/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
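
/*
 * Note on the blend above: with rho' = rho + 1, the update is the
 * fixed-point weighted average
 *
 *	answer = ((2^32 - rho') * U + rho' * last) >> 32
 *
 * where U is fresh uniform entropy, so consecutive outputs are
 * correlated with a coefficient of roughly rho / 2^32.
 */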
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
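
/*
 * The final expression splits sigma into quotient and remainder by
 * NETEM_DIST_SCALE before multiplying by the table sample t, so the
 * returned value is mu + t * sigma / NETEM_DIST_SCALE (with rounding)
 * computed without overflowing the intermediate product.
 */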
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, re-insert it at the top of
	 * the qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the
		 * front of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}
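
/*
 * Reordering, as implemented above: while q->counter < q->gap a packet
 * takes the normal delayed path; once the gap is reached, with
 * probability q->reorder a packet is sent immediately (requeued at the
 * head carrying the current time stamp), letting it overtake the
 * delayed packets queued behind it.
 */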
/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* is it time to send this packet yet? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		} else {
			qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);

			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
				qdisc_tree_decrease_qlen(q->qdisc, 1);
				sch->qstats.drops++;
				printk(KERN_ERR "netem: queue discipline %s could not requeue\n",
				       q->qdisc->ops->id);
			}
		}
	}

	return NULL;
}
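
/*
 * Note the throttling pattern above: a packet whose time_to_send has not
 * yet arrived is pushed back into the child qdisc and the watchdog timer
 * is armed for that time stamp, so dequeue is retried exactly when the
 * packet becomes due instead of busy-polling.
 */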
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}
/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}
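
/*
 * The "id + 1" in the strncmp above skips the first character of the
 * qdisc name, so "pfifo", "bfifo", and the local "tfifo" all match,
 * while any other child qdisc is left untouched.
 */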
/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}
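
/*
 * The xchg() under queue_lock swaps the fully-built table in while the
 * enqueue path is excluded; d then points at the previous table (or NULL
 * on first load, which kfree() accepts), so tabledist() never sees a
 * half-initialized table.
 */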
static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}
static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}
static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions:
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	/* Handle nested options after initial queue options.
	 * Should have put all options in nested format but too late now.
	 */
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}
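
/*
 * Wire format handled above, for reference: the TCA_OPTIONS payload
 * begins with a fixed struct tc_netem_qopt, optionally followed by
 * nested rtattrs (TCA_NETEM_CORR, TCA_NETEM_DELAY_DIST,
 * TCA_NETEM_REORDER, TCA_NETEM_CORRUPT) appended after it.
 */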
/*
 * Special case version of FIFO queue for use by netem.
 * It queues packets in order based on the time stamps in their
 * control blocks.
 */
struct fifo_sched_data {
	u32 limit;
};
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	const struct netem_skb_cb *ncb
		= (const struct netem_skb_cb *)nskb->cb;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_drop(nskb, sch);
}
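
/*
 * The reverse walk above is an insertion sort from the tail: new packets
 * normally carry the newest time_to_send, so in the common case the walk
 * stops immediately and the skb lands at the tail. If no earlier-or-equal
 * entry is found, the walk terminates at the list head and the skb is
 * queued at the front, keeping the queue sorted by send time.
 */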
static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	return 0;
}
static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}
static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		= "tfifo",
	.priv_size	= sizeof(struct fifo_sched_data),
	.enqueue	= tfifo_enqueue,
	.dequeue	= qdisc_dequeue_head,
	.requeue	= qdisc_requeue,
	.drop		= qdisc_queue_drop,
	.init		= tfifo_init,
	.reset		= qdisc_reset_queue,
	.change		= tfifo_init,
	.dump		= tfifo_dump,
};
static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}
static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}
static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}
static struct Qdisc_class_ops netem_class_ops = {
	.graft		= netem_graft,
	.leaf		= netem_leaf,
	.get		= netem_get,
	.put		= netem_put,
	.change		= netem_change_class,
	.delete		= netem_delete,
	.walk		= netem_walk,
	.tcf_chain	= netem_find_tcf,
	.dump		= netem_dump_class,
};
static struct Qdisc_ops netem_qdisc_ops = {
	.id		= "netem",
	.cl_ops		= &netem_class_ops,
	.priv_size	= sizeof(struct netem_sched_data),
	.enqueue	= netem_enqueue,
	.dequeue	= netem_dequeue,
	.requeue	= netem_requeue,
	.drop		= netem_drop,
	.init		= netem_init,
	.reset		= netem_reset,
	.destroy	= netem_destroy,
	.change		= netem_change,
	.dump		= netem_dump,
	.owner		= THIS_MODULE,
};
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");