/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet.  It can delay
	packets and add random jitter (and correlation).  The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves.  Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines.  It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/

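/* Illustrative user-space configuration (not part of this file): the
 * features above map onto tc(8) netem options.  A sketch, assuming a
 * typical iproute2 install; exact syntax may vary by version:
 *
 *   # 100ms delay, 10ms jitter with 25% correlation, 0.3% loss, 1% dup
 *   tc qdisc add dev eth0 root netem delay 100ms 10ms 25% \
 *           loss 0.3% duplicate 1%
 */
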
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;
};

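/* Note (assumption about the user-space encoding): the u32 probability
 * fields above (loss, duplicate, reorder, corrupt and the clgstate
 * a1..a5 transition probabilities) are fixed point.  User space scales a
 * probability p in [0,1] to roughly p * 2^32 before passing it down, and
 * the generators below compare them against full-range 32-bit
 * pseudo-random values.  For example, a 1% loss probability arrives as
 * about 0.01 * 2^32 = 42949673.
 */
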
/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return rb_entry(rb, struct sk_buff, rbnode);
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

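/* Worked example for get_crandom(): with state->rho = 0x80000000
 * (correlation 0.5 in 32-bit fixed point), rho becomes ~0.5 * 2^32, so
 *
 *   answer = (value * (2^32 - rho) + last * rho) >> 32
 *          ~ 0.5 * value + 0.5 * last
 *
 * i.e. each output is roughly the average of a fresh prandom_u32() and
 * the previous output, so successive values drift instead of jumping.
 */
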
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

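/* Worked example for loss_4state(): in TX_IN_GAP_PERIOD with
 * a4 = 0.01 * 2^32 and a1 = 0.05 * 2^32, the u32 random space splits as
 *
 *   [0, a4)         -> isolated loss (LOST_IN_BURST_PERIOD), ~1%
 *   [a4, a1 + a4)   -> a burst starts (LOST_IN_GAP_PERIOD),  ~5%
 *   [a1 + a4, 2^32) -> stay in the gap period, packet sent,  ~94%
 */
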
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
		break;
	}

	return false;
}

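/* In loss_gilb_ell() the per-packet loss draw is independent of the
 * state-transition draw: a4 (1-k) is the loss probability in GOOD_STATE,
 * while a3 (h) is the *transmission* probability in BAD_STATE, hence the
 * inverted "> clg->a3" comparison.  Setting h = 0 and k = 1 reduces this
 * to what reference [2] above calls the Simple Gilbert model: every
 * bad-state packet is lost and no good-state packet is.
 */
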
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

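/* Worked example for tabledist(): table entries are samples of the
 * normalized distribution scaled by NETEM_DIST_SCALE (8192).  Splitting
 * sigma into quotient and remainder keeps the multiply from overflowing
 * while still rounding.  With sigma = 10000 and t = 8192 (one standard
 * deviation):
 *
 *   x = (10000 % 8192) * 8192 + 8192/2
 *   result = x / 8192 + (10000 / 8192) * 8192 + mu
 *          = 1808 + 8192 + mu = mu + 10000
 *
 * i.e. exactly mu + sigma, as expected.
 */
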
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}

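/* Worked example for packet_len_2_sched_time(): q->rate is in bytes per
 * second, so at 1 Mbit/s (rate = 125000) a 1500 byte packet with no
 * overhead costs 1500 * NSEC_PER_SEC / 125000 = 12,000,000 ns = 12 ms of
 * transmission time before conversion to scheduler ticks.
 */
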
static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&nskb->rbnode, parent, p);
	rb_insert_color(&nskb->rbnode, &q->t_root);
	sch->q.qlen++;
}

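/* Design note: the tfifo is an rbtree keyed by time_to_send, so inserts
 * stay O(log n) even when jitter makes release times non-monotonic.
 * Walking right on "tnext >= key" places equal timestamps after existing
 * entries, preserving FIFO order among packets due at the same time.
 */
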
/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we instead segment it,
 * returning the first packet to be corrupted and re-enqueueing the
 * remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

static void netem_enqueue_skb_head(struct qdisc_skb_head *qh, struct sk_buff *skb)
{
	skb->next = qh->head;

	if (!qh->head)
		qh->tail = skb;
	qh->head = skb;
	qh->qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make copy if needed since we are modifying
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return NET_XMIT_DROP;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = netem_rb_to_skb(rb_last(&q->t_root));
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send) {
					last = t_last;
				}
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		netem_enqueue_skb_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			segs->next = NULL;
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (skb->tc_redirected && skb->tc_from_ingress)
				skb->tstamp = 0;
#endif

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

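/* Dequeue order above: packets force-reordered to the head of sch->q go
 * out first, then rbtree packets whose time_to_send has arrived (routed
 * through the inner qdisc when one is grafted); if nothing is due, the
 * watchdog is armed for the earliest time_to_send so a later dequeue is
 * kicked at the right time.
 */
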
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}

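/* The table loaded here is produced by user space (iproute2 ships
 * maketable-generated normal, pareto and paretonormal tables, typically
 * installed under /lib/tc or /usr/lib/tc).  Each s16 entry approximates
 * an inverse-CDF sample in units of NETEM_DIST_SCALE, which tabledist()
 * above turns into roughly mu + t * sigma / NETEM_DIST_SCALE.
 */
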
static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

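/* reciprocal_value() precomputes the multiplier/shift pair that lets
 * packet_len_2_sched_time() divide by cell_size with a multiply and a
 * shift on the fast path; the { 0 } value is only a placeholder, since
 * reciprocal_divide() is never reached when q->cell_size is zero.
 */
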
static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

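/* netem's TCA_OPTIONS payload uses the legacy layout: a fixed struct
 * tc_netem_qopt comes first and any nested attributes follow it.
 * parse_attr() therefore skips NLA_ALIGN(sizeof(*qopt)) bytes before
 * handing the remainder to nla_parse(); an options blob containing only
 * the fixed struct parses to an empty attribute table.
 */
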
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case
			 * q->clg and q->loss_model were modified
			 * in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}

static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}

module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");