/*
 * Copyright (c) 1998-2002 Luigi Rizzo, Universita` di Pisa
 * Portions Copyright (c) 2000 Akamba Corp.
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/netinet/ip_dummynet.c,v 1.24.2.22 2003/05/13 09:31:06 maxim Exp $
 * $DragonFly: src/sys/net/dummynet/ip_dummynet.c,v 1.55 2008/09/16 12:30:57 sephe Exp $
 */
#include "opt_ipdn.h"

/*
 * This module implements IP dummynet, a bandwidth limiter/delay emulator.
 * Description of the data structures used is in ip_dummynet.h
 * Here you mainly find the following blocks of code:
 *  + variable declarations;
 *  + heap management functions;
 *  + scheduler and dummynet functions;
 *  + configuration and initialization.
 *
 * Most important Changes:
 *
 * 011004: KLDable
 * 010124: Fixed WF2Q behaviour
 * 010122: Fixed spl protection.
 * 000601: WF2Q support
 * 000106: Large rewrite, use heaps to handle very many pipes.
 * 980513: Initial release
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systimer.h>
#include <sys/thread2.h>

#include <net/ethernet.h>
#include <net/netmsg2.h>
#include <net/route.h>

#include <netinet/in_var.h>
#include <netinet/ip_var.h>

#include <net/dummynet/ip_dummynet.h>
#ifdef DUMMYNET_DEBUG
#define DPRINTF(fmt, ...)	kprintf(fmt, __VA_ARGS__)
#else
#define DPRINTF(fmt, ...)	((void)0)
#endif

#ifndef DN_CALLOUT_FREQ_MAX
#define DN_CALLOUT_FREQ_MAX	10000
#endif

/*
 * The maximum/minimum hash table size for queues.
 * These values must be a power of 2.
 */
#define DN_MIN_HASH_SIZE	4
#define DN_MAX_HASH_SIZE	65536

/*
 * Some macros are used to compare key values and handle wraparounds.
 * MAX64 returns the largest of two key values.
 */
#define DN_KEY_LT(a, b)		((int64_t)((a) - (b)) < 0)
#define DN_KEY_LEQ(a, b)	((int64_t)((a) - (b)) <= 0)
#define DN_KEY_GT(a, b)		((int64_t)((a) - (b)) > 0)
#define DN_KEY_GEQ(a, b)	((int64_t)((a) - (b)) >= 0)
#define MAX64(x, y)		((((int64_t)((y) - (x))) > 0) ? (y) : (x))

#define DN_NR_HASH_MAX		16
#define DN_NR_HASH_MASK		(DN_NR_HASH_MAX - 1)
#define DN_NR_HASH(nr)		\
	((((nr) >> 12) ^ ((nr) >> 8) ^ ((nr) >> 4) ^ (nr)) & DN_NR_HASH_MASK)
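/*
 * The DN_KEY_* macros compare keys by signed 64-bit difference, so the
 * ordering stays correct across counter wraparound as long as the two keys
 * are less than 2^63 ticks apart.  DN_NR_HASH() folds a pipe/flow_set
 * number into one of the DN_NR_HASH_MAX buckets, e.g. nr = 0x1234 maps to
 * (0x1 ^ 0x12 ^ 0x123 ^ 0x1234) & DN_NR_HASH_MASK.
 */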
MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern int	ip_dn_cpu;

static dn_key	curr_time = 0;		/* current simulation time */
static int	dn_hash_size = 64;	/* default hash size */
static int	pipe_expire = 1;	/* expire queue if empty */
static int	dn_max_ratio = 16;	/* max queues/buckets ratio */

/*
 * Statistics on number of queue searches and search steps
 */
static int	searches;
static int	search_steps;

/*
 * RED parameters
 */
static int	red_lookup_depth = 256;	/* default lookup table depth */
static int	red_avg_pkt_size = 512;	/* default medium packet size */
static int	red_max_pkt_size = 1500;/* default max packet size */

/*
 * Three heaps contain queues and pipes that the scheduler handles:
 *
 *  + ready_heap contains all dn_flow_queue related to fixed-rate pipes.
 *  + wfq_ready_heap contains the pipes associated with WF2Q flows
 *  + extract_heap contains pipes associated with delay lines.
 */
static struct dn_heap	ready_heap;
static struct dn_heap	extract_heap;
static struct dn_heap	wfq_ready_heap;

static struct dn_pipe_head	pipe_table[DN_NR_HASH_MAX];
static struct dn_flowset_head	flowset_table[DN_NR_HASH_MAX];

/*
 * Variables for dummynet systimer
 */
static struct netmsg	dn_netmsg;
static struct systimer	dn_clock;
static int		dn_hz = 1000;
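/*
 * dn_hz is the tick rate of the dummynet systimer; it can be changed at
 * runtime through the net.inet.ip.dummynet.hz sysctl (sysctl_dn_hz()),
 * clamped to DN_CALLOUT_FREQ_MAX.
 */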
static int	sysctl_dn_hz(SYSCTL_HANDLER_ARGS);

SYSCTL_DECL(_net_inet_ip_dummynet);

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, hash_size, CTLFLAG_RW,
	   &dn_hash_size, 0, "Default hash table size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, curr_time, CTLFLAG_RD,
	   &curr_time, 0, "Current tick");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, expire, CTLFLAG_RW,
	   &pipe_expire, 0, "Expire queue if empty");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, max_chain_len, CTLFLAG_RW,
	   &dn_max_ratio, 0, "Max ratio between dynamic queues and buckets");

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, ready_heap, CTLFLAG_RD,
	   &ready_heap.size, 0, "Size of ready heap");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, extract_heap, CTLFLAG_RD,
	   &extract_heap.size, 0, "Size of extract heap");

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, searches, CTLFLAG_RD,
	   &searches, 0, "Number of queue searches");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, search_steps, CTLFLAG_RD,
	   &search_steps, 0, "Number of queue search steps");

SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth, CTLFLAG_RD,
	   &red_lookup_depth, 0, "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size, CTLFLAG_RD,
	   &red_avg_pkt_size, 0, "RED Medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size, CTLFLAG_RD,
	   &red_max_pkt_size, 0, "RED Max packet size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hz, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_dn_hz, "I", "Dummynet callout frequency");
static int	heap_init(struct dn_heap *, int);
static int	heap_insert(struct dn_heap *, dn_key, void *);
static void	heap_extract(struct dn_heap *, void *);

static void	transmit_event(struct dn_pipe *);
static void	ready_event(struct dn_flow_queue *);
static void	ready_event_wfq(struct dn_pipe *);

static int	config_pipe(struct dn_ioc_pipe *);
static void	dummynet_flush(void);

static void	dummynet_clock(systimer_t, struct intrframe *);
static void	dummynet(struct netmsg *);

static struct dn_pipe *dn_find_pipe(int);
static struct dn_flow_set *dn_locate_flowset(int, int);

typedef void	(*dn_pipe_iter_t)(struct dn_pipe *, void *);
static void	dn_iterate_pipe(dn_pipe_iter_t, void *);

typedef void	(*dn_flowset_iter_t)(struct dn_flow_set *, void *);
static void	dn_iterate_flowset(dn_flowset_iter_t, void *);

static ip_dn_io_t	dummynet_io;
static ip_dn_ctl_t	dummynet_ctl;
/*
 * Heap management functions.
 *
 * In the heap, first node is element 0. Children of i are 2i+1 and 2i+2.
 * Some macros help finding parent/children so we can optimize them.
 *
 * heap_init() is called to expand the heap when needed.
 * Increment size in blocks of 16 entries.
 * XXX failure to allocate a new element is a pretty bad failure
 * as we basically stall a whole queue forever!!
 * Returns 1 on error, 0 on success
 */
#define HEAP_FATHER(x)		(((x) - 1) / 2)
#define HEAP_LEFT(x)		(2*(x) + 1)
#define HEAP_IS_LEFT(x)		((x) & 1)
#define HEAP_RIGHT(x)		(2*(x) + 2)
#define HEAP_SWAP(a, b, buffer)	{ buffer = a; a = b; b = buffer; }
#define HEAP_INCREMENT		15
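/*
 * Example: the root is p[0]; HEAP_LEFT(0) is 1, HEAP_RIGHT(0) is 2 and
 * HEAP_FATHER(2) is 0.  heap_init() rounds the requested size up to the
 * next multiple of 16 via HEAP_INCREMENT, so growing one element at a time
 * reallocates only once every 16 insertions.
 */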
static int
heap_init(struct dn_heap *h, int new_size)
{
	struct dn_heap_entry *p;

	if (h->size >= new_size) {
		kprintf("%s, Bogus call, have %d want %d\n", __func__,
			h->size, new_size);
		return 0;
	}

	new_size = (new_size + HEAP_INCREMENT) & ~HEAP_INCREMENT;
	p = kmalloc(new_size * sizeof(*p), M_DUMMYNET, M_WAITOK | M_ZERO);
	if (h->size > 0) {
		bcopy(h->p, p, h->size * sizeof(*p));
		kfree(h->p, M_DUMMYNET);
	}
	h->p = p;
	h->size = new_size;
	return 0;
}
/*
 * Insert element in heap. Normally, p != NULL, we insert p in
 * a new position and bubble up. If p == NULL, then the element is
 * already in place, and key is the position where to start the
 * bubble-up.
 * Returns 1 on failure (cannot allocate new heap entry)
 *
 * If offset > 0 the position (index, int) of the element in the heap is
 * also stored in the element itself at the given offset in bytes.
 */
#define SET_OFFSET(heap, node) \
	if (heap->offset > 0) \
	    *((int *)((char *)(heap->p[node].object) + heap->offset)) = node;

/*
 * RESET_OFFSET is used for sanity checks. It sets the stored index to an
 * invalid value.
 */
#define RESET_OFFSET(heap, node) \
	if (heap->offset > 0) \
	    *((int *)((char *)(heap->p[node].object) + heap->offset)) = -1;
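/*
 * When heap->offset is non-zero the heap stores each object's current index
 * back into the object itself (SET_OFFSET), which is what lets heap_extract()
 * remove an element from the middle in O(log n).  The idle_heap is the only
 * heap configured this way: config_pipe() points its offset at the heap_pos
 * field of struct dn_flow_queue.
 */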
static int
heap_insert(struct dn_heap *h, dn_key key1, void *p)
{
	int son;

	if (p == NULL) {	/* Data already there, set starting point */
		son = key1;
	} else {		/* Insert new element at the end, possibly resize */
		son = h->elements;
		if (son == h->size) { /* Need resize... */
			if (heap_init(h, h->elements + 1))
				return 1; /* Failure... */
		}
		h->p[son].object = p;
		h->p[son].key = key1;
		h->elements++;
	}

	while (son > 0) {	/* Bubble up */
		int father = HEAP_FATHER(son);
		struct dn_heap_entry tmp;

		if (DN_KEY_LT(h->p[father].key, h->p[son].key))
			break;	/* Found right position */

		/* 'son' smaller than 'father', swap and repeat */
		HEAP_SWAP(h->p[son], h->p[father], tmp);
		SET_OFFSET(h, son);
		son = father;
	}
	SET_OFFSET(h, son);
	return 0;
}

/*
 * Remove top element from heap, or obj if obj != NULL
 */
static void
heap_extract(struct dn_heap *h, void *obj)
{
	int child, father, max = h->elements - 1;

	if (max < 0) {
		kprintf("warning, extract from empty heap 0x%p\n", h);
		return;
	}

	father = 0;	/* Default: move up smallest child */
	if (obj != NULL) { /* Extract specific element, index is at offset */
		if (h->offset <= 0)
			panic("%s from middle not supported on this heap!!!\n", __func__);

		father = *((int *)((char *)obj + h->offset));
		if (father < 0 || father >= h->elements) {
			panic("%s father %d out of bound 0..%d\n", __func__,
			      father, h->elements);
		}
	}
	RESET_OFFSET(h, father);

	child = HEAP_LEFT(father);		/* Left child */
	while (child <= max) {			/* Valid entry */
		if (child != max && DN_KEY_LT(h->p[child + 1].key, h->p[child].key))
			child = child + 1;	/* Take right child, otherwise left */
		h->p[father] = h->p[child];
		SET_OFFSET(h, father);
		father = child;
		child = HEAP_LEFT(child);	/* Left child for next loop */
	}
	h->elements--;
	if (father != max) {
		/*
		 * Fill hole with last entry and bubble up, reusing the insert code
		 */
		h->p[father] = h->p[max];
		heap_insert(h, father, NULL);	/* This one cannot fail */
	}
}

/*
 * heapify() will reorganize data inside an array to maintain the
 * heap property. It is needed when we delete a bunch of entries.
 */
static void
heapify(struct dn_heap *h)
{
	int i;

	for (i = 0; i < h->elements; i++)
		heap_insert(h, i, NULL);
}

/*
 * Cleanup the heap and free data structure
 */
static void
heap_free(struct dn_heap *h)
{
	if (h->size > 0)
		kfree(h->p, M_DUMMYNET);
	bzero(h, sizeof(*h));
}

/*
 * --- End of heap management functions ---
 */
374 * Scheduler functions:
376 * transmit_event() is called when the delay-line needs to enter
377 * the scheduler, either because of existing pkts getting ready,
378 * or new packets entering the queue. The event handled is the delivery
379 * time of the packet.
381 * ready_event() does something similar with fixed-rate queues, and the
382 * event handled is the finish time of the head pkt.
384 * ready_event_wfq() does something similar with WF2Q queues, and the
385 * event handled is the start time of the head pkt.
387 * In all cases, we make sure that the data structures are consistent
388 * before passing pkts out, because this might trigger recursive
389 * invocations of the procedures.
391 static void
392 transmit_event(struct dn_pipe *pipe)
394 struct dn_pkt *pkt;
396 while ((pkt = TAILQ_FIRST(&pipe->p_queue)) &&
397 DN_KEY_LEQ(pkt->output_time, curr_time)) {
398 TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
399 ip_dn_packet_redispatch(pkt);
403 * If there are leftover packets, put into the heap for next event
405 if ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
407 * XXX should check errors on heap_insert, by draining the
408 * whole pipe and hoping in the future we are more successful
410 heap_insert(&extract_heap, pkt->output_time, pipe);
415 * The following macro computes how many ticks we have to wait
416 * before being able to transmit a packet. The credit is taken from
417 * either a pipe (WF2Q) or a flow_queue (per-flow queueing)
419 #define SET_TICKS(pkt, q, p) \
420 (pkt->dn_m->m_pkthdr.len*8*dn_hz - (q)->numbytes + p->bandwidth - 1 ) / \
421 p->bandwidth;
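/*
 * Credit accounting: numbytes is kept in units of bits scaled by dn_hz.
 * A queue earns p->bandwidth such units per tick (bandwidth is configured
 * in bits/second), while a packet of len bytes costs len*8*dn_hz units, so
 * SET_TICKS() is the ceiling of (cost - credit) / bandwidth, i.e. the
 * number of ticks to wait before the head packet completes transmission.
 */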
424 * Extract pkt from queue, compute output time (could be now)
425 * and put into delay line (p_queue)
427 static void
428 move_pkt(struct dn_pkt *pkt, struct dn_flow_queue *q,
429 struct dn_pipe *p, int len)
431 TAILQ_REMOVE(&q->queue, pkt, dn_next);
432 q->len--;
433 q->len_bytes -= len;
435 pkt->output_time = curr_time + p->delay;
437 TAILQ_INSERT_TAIL(&p->p_queue, pkt, dn_next);
441 * ready_event() is invoked every time the queue must enter the
442 * scheduler, either because the first packet arrives, or because
443 * a previously scheduled event fired.
 * On invocation, drain as many pkts as possible (could be 0) and then,
 * if there are leftover packets, reinsert them in the scheduler.
447 static void
448 ready_event(struct dn_flow_queue *q)
450 struct dn_pkt *pkt;
451 struct dn_pipe *p = q->fs->pipe;
452 int p_was_empty;
454 if (p == NULL) {
455 kprintf("ready_event- pipe is gone\n");
456 return;
458 p_was_empty = TAILQ_EMPTY(&p->p_queue);
461 * Schedule fixed-rate queues linked to this pipe:
462 * Account for the bw accumulated since last scheduling, then
463 * drain as many pkts as allowed by q->numbytes and move to
464 * the delay line (in p) computing output time.
465 * bandwidth==0 (no limit) means we can drain the whole queue,
466 * setting len_scaled = 0 does the job.
468 q->numbytes += (curr_time - q->sched_time) * p->bandwidth;
469 while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
470 int len = pkt->dn_m->m_pkthdr.len;
471 int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
473 if (len_scaled > q->numbytes)
474 break;
475 q->numbytes -= len_scaled;
476 move_pkt(pkt, q, p, len);
480 * If we have more packets queued, schedule next ready event
481 * (can only occur when bandwidth != 0, otherwise we would have
482 * flushed the whole queue in the previous loop).
483 * To this purpose we record the current time and compute how many
484 * ticks to go for the finish time of the packet.
486 if ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
487 /* This implies bandwidth != 0 */
488 dn_key t = SET_TICKS(pkt, q, p); /* ticks i have to wait */
490 q->sched_time = curr_time;
493 * XXX should check errors on heap_insert, and drain the whole
494 * queue on error hoping next time we are luckier.
496 heap_insert(&ready_heap, curr_time + t, q);
497 } else { /* RED needs to know when the queue becomes empty */
498 q->q_time = curr_time;
499 q->numbytes = 0;
503 * If the delay line was empty call transmit_event(p) now.
504 * Otherwise, the scheduler will take care of it.
506 if (p_was_empty)
507 transmit_event(p);
511 * Called when we can transmit packets on WF2Q queues. Take pkts out of
512 * the queues at their start time, and enqueue into the delay line.
513 * Packets are drained until p->numbytes < 0. As long as
514 * len_scaled >= p->numbytes, the packet goes into the delay line
515 * with a deadline p->delay. For the last packet, if p->numbytes < 0,
516 * there is an additional delay.
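 *
 * WF2Q bookkeeping: q->S and q->F are the flow's virtual start/finish times
 * and p->V the pipe's virtual time (note the MY_M fixed-point shift);
 * F advances by (len << MY_M) / weight and V by (len << MY_M) / p->sum,
 * where p->sum is the total weight of the currently backlogged flows.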
518 static void
519 ready_event_wfq(struct dn_pipe *p)
521 int p_was_empty = TAILQ_EMPTY(&p->p_queue);
522 struct dn_heap *sch = &p->scheduler_heap;
523 struct dn_heap *neh = &p->not_eligible_heap;
525 p->numbytes += (curr_time - p->sched_time) * p->bandwidth;
528 * While we have backlogged traffic AND credit, we need to do
529 * something on the queue.
531 while (p->numbytes >= 0 && (sch->elements > 0 || neh->elements > 0)) {
532 if (sch->elements > 0) { /* Have some eligible pkts to send out */
533 struct dn_flow_queue *q = sch->p[0].object;
534 struct dn_pkt *pkt = TAILQ_FIRST(&q->queue);
535 struct dn_flow_set *fs = q->fs;
536 uint64_t len = pkt->dn_m->m_pkthdr.len;
537 int len_scaled = p->bandwidth ? len*8*dn_hz : 0;
539 heap_extract(sch, NULL); /* Remove queue from heap */
540 p->numbytes -= len_scaled;
541 move_pkt(pkt, q, p, len);
543 p->V += (len << MY_M) / p->sum; /* Update V */
544 q->S = q->F; /* Update start time */
546 if (q->len == 0) { /* Flow not backlogged any more */
547 fs->backlogged--;
548 heap_insert(&p->idle_heap, q->F, q);
549 } else { /* Still backlogged */
551 * Update F and position in backlogged queue, then
552 * put flow in not_eligible_heap (we will fix this later).
554 len = TAILQ_FIRST(&q->queue)->dn_m->m_pkthdr.len;
555 q->F += (len << MY_M) / (uint64_t)fs->weight;
556 if (DN_KEY_LEQ(q->S, p->V))
557 heap_insert(neh, q->S, q);
558 else
559 heap_insert(sch, q->F, q);
564 * Now compute V = max(V, min(S_i)). Remember that all elements in
565 * sch have by definition S_i <= V so if sch is not empty, V is surely
566 * the max and we must not update it. Conversely, if sch is empty
567 * we only need to look at neh.
569 if (sch->elements == 0 && neh->elements > 0)
570 p->V = MAX64(p->V, neh->p[0].key);
573 * Move from neh to sch any packets that have become eligible
575 while (neh->elements > 0 && DN_KEY_LEQ(neh->p[0].key, p->V)) {
576 struct dn_flow_queue *q = neh->p[0].object;
578 heap_extract(neh, NULL);
579 heap_insert(sch, q->F, q);
583 if (sch->elements == 0 && neh->elements == 0 && p->numbytes >= 0 &&
584 p->idle_heap.elements > 0) {
586 * No traffic and no events scheduled. We can get rid of idle-heap.
588 int i;
590 for (i = 0; i < p->idle_heap.elements; i++) {
591 struct dn_flow_queue *q = p->idle_heap.p[i].object;
593 q->F = 0;
594 q->S = q->F + 1;
596 p->sum = 0;
597 p->V = 0;
598 p->idle_heap.elements = 0;
602 * If we are getting clocks from dummynet and if we are under credit,
603 * schedule the next ready event.
604 * Also fix the delivery time of the last packet.
606 if (p->numbytes < 0) { /* This implies bandwidth>0 */
607 dn_key t = 0; /* Number of ticks i have to wait */
609 if (p->bandwidth > 0)
610 t = (p->bandwidth - 1 - p->numbytes) / p->bandwidth;
611 TAILQ_LAST(&p->p_queue, dn_pkt_queue)->output_time += t;
612 p->sched_time = curr_time;
615 * XXX should check errors on heap_insert, and drain the whole
616 * queue on error hoping next time we are luckier.
618 heap_insert(&wfq_ready_heap, curr_time + t, p);
622 * If the delay line was empty call transmit_event(p) now.
623 * Otherwise, the scheduler will take care of it.
625 if (p_was_empty)
626 transmit_event(p);
629 static void
630 dn_expire_pipe_cb(struct dn_pipe *pipe, void *dummy __unused)
632 if (pipe->idle_heap.elements > 0 &&
633 DN_KEY_LT(pipe->idle_heap.p[0].key, pipe->V)) {
634 struct dn_flow_queue *q = pipe->idle_heap.p[0].object;
636 heap_extract(&pipe->idle_heap, NULL);
637 q->S = q->F + 1; /* Mark timestamp as invalid */
638 pipe->sum -= q->fs->weight;
643 * This is called once per tick, or dn_hz times per second. It is used to
644 * increment the current tick counter and schedule expired events.
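 *
 * It is dispatched as a netmsg on ip_dn_cpu (see dummynet_clock() below),
 * replies to the message right away, then advances curr_time and drains
 * the ready, WF2Q-ready and delay-line heaps whose head keys are due.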
646 static void
647 dummynet(struct netmsg *msg)
649 void *p;
650 struct dn_heap *h;
651 struct dn_heap *heaps[3];
652 int i;
654 heaps[0] = &ready_heap; /* Fixed-rate queues */
655 heaps[1] = &wfq_ready_heap; /* WF2Q queues */
656 heaps[2] = &extract_heap; /* Delay line */
658 /* Reply ASAP */
659 crit_enter();
660 lwkt_replymsg(&msg->nm_lmsg, 0);
661 crit_exit();
663 curr_time++;
664 for (i = 0; i < 3; i++) {
665 h = heaps[i];
666 while (h->elements > 0 && DN_KEY_LEQ(h->p[0].key, curr_time)) {
667 if (h->p[0].key > curr_time) {
668 kprintf("-- dummynet: warning, heap %d is %d ticks late\n",
669 i, (int)(curr_time - h->p[0].key));
672 p = h->p[0].object; /* Store a copy before heap_extract */
673 heap_extract(h, NULL); /* Need to extract before processing */
675 if (i == 0)
676 ready_event(p);
677 else if (i == 1)
678 ready_event_wfq(p);
679 else
680 transmit_event(p);
684 /* Sweep pipes trying to expire idle flow_queues */
685 dn_iterate_pipe(dn_expire_pipe_cb, NULL);
689 * Unconditionally expire empty queues in case of shortage.
690 * Returns the number of queues freed.
692 static int
693 expire_queues(struct dn_flow_set *fs)
695 int i, initial_elements = fs->rq_elements;
697 if (fs->last_expired == time_second)
698 return 0;
700 fs->last_expired = time_second;
702 for (i = 0; i <= fs->rq_size; i++) { /* Last one is overflow */
703 struct dn_flow_queue *q, *qn;
705 LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
706 if (!TAILQ_EMPTY(&q->queue) || q->S != q->F + 1)
707 continue;
710 * Entry is idle, expire it
712 LIST_REMOVE(q, q_link);
713 kfree(q, M_DUMMYNET);
715 KASSERT(fs->rq_elements > 0,
716 ("invalid rq_elements %d\n", fs->rq_elements));
717 fs->rq_elements--;
720 return initial_elements - fs->rq_elements;
724 * If room, create a new queue and put at head of slot i;
725 * otherwise, create or use the default queue.
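 *
 * The "default" (overflow) queue lives in the extra slot at index
 * fs->rq_size, one past the regular hash buckets, which is why loops over
 * the table iterate with i <= fs->rq_size.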
727 static struct dn_flow_queue *
728 create_queue(struct dn_flow_set *fs, int i)
730 struct dn_flow_queue *q;
732 if (fs->rq_elements > fs->rq_size * dn_max_ratio &&
733 expire_queues(fs) == 0) {
735 * No way to get room, use or create overflow queue.
737 i = fs->rq_size;
738 if (!LIST_EMPTY(&fs->rq[i]))
739 return LIST_FIRST(&fs->rq[i]);
742 q = kmalloc(sizeof(*q), M_DUMMYNET, M_INTWAIT | M_NULLOK | M_ZERO);
743 if (q == NULL)
744 return NULL;
746 q->fs = fs;
747 q->hash_slot = i;
748 q->S = q->F + 1; /* hack - mark timestamp as invalid */
749 TAILQ_INIT(&q->queue);
751 LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
752 fs->rq_elements++;
754 return q;
 * Given a flow_set and a flow id, find a matching queue
759 * after appropriate masking. The queue is moved to front
760 * so that further searches take less time.
762 static struct dn_flow_queue *
763 find_queue(struct dn_flow_set *fs, struct dn_flow_id *id)
765 struct dn_flow_queue *q;
766 int i = 0;
768 if (!(fs->flags_fs & DN_HAVE_FLOW_MASK)) {
769 q = LIST_FIRST(&fs->rq[0]);
770 } else {
771 struct dn_flow_queue *qn;
773 /* First, do the masking */
774 id->fid_dst_ip &= fs->flow_mask.fid_dst_ip;
775 id->fid_src_ip &= fs->flow_mask.fid_src_ip;
776 id->fid_dst_port &= fs->flow_mask.fid_dst_port;
777 id->fid_src_port &= fs->flow_mask.fid_src_port;
778 id->fid_proto &= fs->flow_mask.fid_proto;
779 id->fid_flags = 0; /* we don't care about this one */
781 /* Then, hash function */
782 i = ((id->fid_dst_ip) & 0xffff) ^
783 ((id->fid_dst_ip >> 15) & 0xffff) ^
784 ((id->fid_src_ip << 1) & 0xffff) ^
785 ((id->fid_src_ip >> 16 ) & 0xffff) ^
786 (id->fid_dst_port << 1) ^ (id->fid_src_port) ^
787 (id->fid_proto);
788 i = i % fs->rq_size;
791 * Finally, scan the current list for a match and
792 * expire idle flow queues
794 searches++;
795 LIST_FOREACH_MUTABLE(q, &fs->rq[i], q_link, qn) {
796 search_steps++;
797 if (id->fid_dst_ip == q->id.fid_dst_ip &&
798 id->fid_src_ip == q->id.fid_src_ip &&
799 id->fid_dst_port == q->id.fid_dst_port &&
800 id->fid_src_port == q->id.fid_src_port &&
801 id->fid_proto == q->id.fid_proto &&
802 id->fid_flags == q->id.fid_flags) {
803 break; /* Found */
804 } else if (pipe_expire && TAILQ_EMPTY(&q->queue) &&
805 q->S == q->F + 1) {
807 * Entry is idle and not in any heap, expire it
809 LIST_REMOVE(q, q_link);
810 kfree(q, M_DUMMYNET);
812 KASSERT(fs->rq_elements > 0,
813 ("invalid rq_elements %d\n", fs->rq_elements));
814 fs->rq_elements--;
817 if (q && LIST_FIRST(&fs->rq[i]) != q) { /* Found and not in front */
818 LIST_REMOVE(q, q_link);
819 LIST_INSERT_HEAD(&fs->rq[i], q, q_link);
822 if (q == NULL) { /* No match, need to allocate a new entry */
823 q = create_queue(fs, i);
824 if (q != NULL)
825 q->id = *id;
827 return q;
830 static int
831 red_drops(struct dn_flow_set *fs, struct dn_flow_queue *q, int len)
834 * RED algorithm
836 * RED calculates the average queue size (avg) using a low-pass filter
837 * with an exponential weighted (w_q) moving average:
838 * avg <- (1-w_q) * avg + w_q * q_size
839 * where q_size is the queue length (measured in bytes or * packets).
841 * If q_size == 0, we compute the idle time for the link, and set
842 * avg = (1 - w_q)^(idle/s)
843 * where s is the time needed for transmitting a medium-sized packet.
845 * Now, if avg < min_th the packet is enqueued.
846 * If avg > max_th the packet is dropped. Otherwise, the packet is
847 * dropped with probability P function of avg.
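 *
 * avg, the thresholds and the c_* coefficients are kept in the SCALE()
 * fixed-point format (hence the SCALE_MUL() products below); the final
 * decision compares p_b, weighted by the number of packets since the last
 * drop, against a 16-bit random value.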
850 int64_t p_b = 0;
851 u_int q_size = (fs->flags_fs & DN_QSIZE_IS_BYTES) ? q->len_bytes : q->len;
853 DPRINTF("\n%d q: %2u ", (int)curr_time, q_size);
855 /* Average queue size estimation */
856 if (q_size != 0) {
858 * Queue is not empty, avg <- avg + (q_size - avg) * w_q
860 int diff = SCALE(q_size) - q->avg;
861 int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);
863 q->avg += (int)v;
864 } else {
 * Queue is empty: compute how long it has been empty and use a
 * lookup table to evaluate (1 - w_q)^(idle_time/s), where s is the
 * time needed to send a (small) packet.
870 * XXX check wraps...
872 if (q->avg) {
873 u_int t = (curr_time - q->q_time) / fs->lookup_step;
875 q->avg = (t < fs->lookup_depth) ?
876 SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
879 DPRINTF("avg: %u ", SCALE_VAL(q->avg));
881 /* Should i drop? */
883 if (q->avg < fs->min_th) {
884 /* Accept packet */
885 q->count = -1;
886 return 0;
889 if (q->avg >= fs->max_th) { /* Average queue >= Max threshold */
890 if (fs->flags_fs & DN_IS_GENTLE_RED) {
892 * According to Gentle-RED, if avg is greater than max_th the
893 * packet is dropped with a probability
894 * p_b = c_3 * avg - c_4
895 * where c_3 = (1 - max_p) / max_th, and c_4 = 1 - 2 * max_p
897 p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) - fs->c_4;
898 } else {
899 q->count = -1;
900 kprintf("- drop\n");
901 return 1;
903 } else if (q->avg > fs->min_th) {
905 * We compute p_b using the linear dropping function p_b = c_1 *
906 * avg - c_2, where c_1 = max_p / (max_th - min_th), and c_2 =
907 * max_p * min_th / (max_th - min_th)
909 p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
911 if (fs->flags_fs & DN_QSIZE_IS_BYTES)
912 p_b = (p_b * len) / fs->max_pkt_size;
914 if (++q->count == 0) {
915 q->random = krandom() & 0xffff;
916 } else {
918 * q->count counts packets arrived since last drop, so a greater
919 * value of q->count means a greater packet drop probability.
921 if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
922 q->count = 0;
923 DPRINTF("%s", "- red drop");
924 /* After a drop we calculate a new random value */
925 q->random = krandom() & 0xffff;
926 return 1; /* Drop */
929 /* End of RED algorithm */
930 return 0; /* Accept */
933 static void
934 dn_iterate_pipe(dn_pipe_iter_t func, void *arg)
936 int i;
938 for (i = 0; i < DN_NR_HASH_MAX; ++i) {
939 struct dn_pipe_head *pipe_hdr = &pipe_table[i];
940 struct dn_pipe *pipe, *pipe_next;
942 LIST_FOREACH_MUTABLE(pipe, pipe_hdr, p_link, pipe_next)
943 func(pipe, arg);
947 static void
948 dn_iterate_flowset(dn_flowset_iter_t func, void *arg)
950 int i;
952 for (i = 0; i < DN_NR_HASH_MAX; ++i) {
953 struct dn_flowset_head *fs_hdr = &flowset_table[i];
954 struct dn_flow_set *fs, *fs_next;
956 LIST_FOREACH_MUTABLE(fs, fs_hdr, fs_link, fs_next)
957 func(fs, arg);
961 static struct dn_pipe *
962 dn_find_pipe(int pipe_nr)
964 struct dn_pipe_head *pipe_hdr;
965 struct dn_pipe *p;
967 pipe_hdr = &pipe_table[DN_NR_HASH(pipe_nr)];
968 LIST_FOREACH(p, pipe_hdr, p_link) {
969 if (p->pipe_nr == pipe_nr)
970 break;
972 return p;
975 static struct dn_flow_set *
976 dn_find_flowset(int fs_nr)
978 struct dn_flowset_head *fs_hdr;
979 struct dn_flow_set *fs;
981 fs_hdr = &flowset_table[DN_NR_HASH(fs_nr)];
982 LIST_FOREACH(fs, fs_hdr, fs_link) {
983 if (fs->fs_nr == fs_nr)
984 break;
986 return fs;
989 static struct dn_flow_set *
990 dn_locate_flowset(int pipe_nr, int is_pipe)
992 struct dn_flow_set *fs = NULL;
994 if (!is_pipe) {
995 fs = dn_find_flowset(pipe_nr);
996 } else {
997 struct dn_pipe *p;
999 p = dn_find_pipe(pipe_nr);
1000 if (p != NULL)
1001 fs = &p->fs;
1003 return fs;
1007 * Dummynet hook for packets. Below 'pipe' is a pipe or a queue
1008 * depending on whether WF2Q or fixed bw is used.
1010 * pipe_nr pipe or queue the packet is destined for.
1011 * dir where shall we send the packet after dummynet.
1012 * m the mbuf with the packet
1013 * fwa->oif the 'ifp' parameter from the caller.
1014 * NULL in ip_input, destination interface in ip_output
1015 * fwa->ro route parameter (only used in ip_output, NULL otherwise)
1016 * fwa->dst destination address, only used by ip_output
1017 * fwa->rule matching rule, in case of multiple passes
1018 * fwa->flags flags from the caller, only used in ip_output
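 *
 * In this version these values are not passed as arguments: the packet
 * carries a PACKET_TAG_DUMMYNET mbuf tag holding a struct dn_pkt, from
 * which pipe_nr, the pipe/queue flag and the flow id are recovered below.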
1020 static int
1021 dummynet_io(struct mbuf *m)
1023 struct dn_pkt *pkt;
1024 struct m_tag *tag;
1025 struct dn_flow_set *fs;
1026 struct dn_pipe *pipe;
1027 uint64_t len = m->m_pkthdr.len;
1028 struct dn_flow_queue *q = NULL;
1029 int is_pipe, pipe_nr;
1031 tag = m_tag_find(m, PACKET_TAG_DUMMYNET, NULL);
1032 pkt = m_tag_data(tag);
1034 is_pipe = pkt->dn_flags & DN_FLAGS_IS_PIPE;
1035 pipe_nr = pkt->pipe_nr;
 * This is a dummynet rule, so we expect an O_PIPE or O_QUEUE rule
1040 fs = dn_locate_flowset(pipe_nr, is_pipe);
1041 if (fs == NULL)
1042 goto dropit; /* This queue/pipe does not exist! */
1044 pipe = fs->pipe;
1045 if (pipe == NULL) { /* Must be a queue, try find a matching pipe */
1046 pipe = dn_find_pipe(fs->parent_nr);
1047 if (pipe != NULL) {
1048 fs->pipe = pipe;
1049 } else {
1050 kprintf("No pipe %d for queue %d, drop pkt\n",
1051 fs->parent_nr, fs->fs_nr);
1052 goto dropit;
1056 q = find_queue(fs, &pkt->id);
1057 if (q == NULL)
1058 goto dropit; /* Cannot allocate queue */
1061 * Update statistics, then check reasons to drop pkt
1063 q->tot_bytes += len;
1064 q->tot_pkts++;
1066 if (fs->plr && krandom() < fs->plr)
1067 goto dropit; /* Random pkt drop */
1069 if (fs->flags_fs & DN_QSIZE_IS_BYTES) {
1070 if (q->len_bytes > fs->qsize)
1071 goto dropit; /* Queue size overflow */
1072 } else {
1073 if (q->len >= fs->qsize)
1074 goto dropit; /* Queue count overflow */
1077 if ((fs->flags_fs & DN_IS_RED) && red_drops(fs, q, len))
1078 goto dropit;
1080 TAILQ_INSERT_TAIL(&q->queue, pkt, dn_next);
1081 q->len++;
1082 q->len_bytes += len;
1084 if (TAILQ_FIRST(&q->queue) != pkt) /* Flow was not idle, we are done */
1085 goto done;
1088 * If we reach this point the flow was previously idle, so we need
1089 * to schedule it. This involves different actions for fixed-rate
1090 * or WF2Q queues.
1092 if (is_pipe) {
1094 * Fixed-rate queue: just insert into the ready_heap.
1096 dn_key t = 0;
1098 if (pipe->bandwidth)
1099 t = SET_TICKS(pkt, q, pipe);
1101 q->sched_time = curr_time;
1102 if (t == 0) /* Must process it now */
1103 ready_event(q);
1104 else
1105 heap_insert(&ready_heap, curr_time + t, q);
1106 } else {
1108 * WF2Q:
1109 * First, compute start time S: if the flow was idle (S=F+1)
1110 * set S to the virtual time V for the controlling pipe, and update
1111 * the sum of weights for the pipe; otherwise, remove flow from
1112 * idle_heap and set S to max(F, V).
1113 * Second, compute finish time F = S + len/weight.
1114 * Third, if pipe was idle, update V = max(S, V).
1115 * Fourth, count one more backlogged flow.
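 *
 * Example: a 1500-byte packet on a flow of weight 50 advances F by
 * (1500 << MY_M) / 50 virtual-time units (see the q->F update below).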
1117 if (DN_KEY_GT(q->S, q->F)) { /* Means timestamps are invalid */
1118 q->S = pipe->V;
1119 pipe->sum += fs->weight; /* Add weight of new queue */
1120 } else {
1121 heap_extract(&pipe->idle_heap, q);
1122 q->S = MAX64(q->F, pipe->V);
1124 q->F = q->S + (len << MY_M) / (uint64_t)fs->weight;
1126 if (pipe->not_eligible_heap.elements == 0 &&
1127 pipe->scheduler_heap.elements == 0)
1128 pipe->V = MAX64(q->S, pipe->V);
1130 fs->backlogged++;
 * Look at eligibility. A flow is not eligible if S>V (when
1134 * this happens, it means that there is some other flow already
1135 * scheduled for the same pipe, so the scheduler_heap cannot be
1136 * empty). If the flow is not eligible we just store it in the
1137 * not_eligible_heap. Otherwise, we store in the scheduler_heap
1138 * and possibly invoke ready_event_wfq() right now if there is
1139 * leftover credit.
1140 * Note that for all flows in scheduler_heap (SCH), S_i <= V,
1141 * and for all flows in not_eligible_heap (NEH), S_i > V.
1142 * So when we need to compute max(V, min(S_i)) forall i in SCH+NEH,
1143 * we only need to look into NEH.
1145 if (DN_KEY_GT(q->S, pipe->V)) { /* Not eligible */
1146 if (pipe->scheduler_heap.elements == 0)
1147 kprintf("++ ouch! not eligible but empty scheduler!\n");
1148 heap_insert(&pipe->not_eligible_heap, q->S, q);
1149 } else {
1150 heap_insert(&pipe->scheduler_heap, q->F, q);
1151 if (pipe->numbytes >= 0) { /* Pipe is idle */
1152 if (pipe->scheduler_heap.elements != 1)
1153 kprintf("*** OUCH! pipe should have been idle!\n");
1154 DPRINTF("Waking up pipe %d at %d\n",
1155 pipe->pipe_nr, (int)(q->F >> MY_M));
1156 pipe->sched_time = curr_time;
1157 ready_event_wfq(pipe);
1161 done:
1162 return 0;
1164 dropit:
1165 if (q)
1166 q->drops++;
1167 return ENOBUFS;
1171 * Dispose all packets and flow_queues on a flow_set.
1172 * If all=1, also remove red lookup table and other storage,
1173 * including the descriptor itself.
1174 * For the one in dn_pipe MUST also cleanup ready_heap...
1176 static void
1177 purge_flow_set(struct dn_flow_set *fs, int all)
1179 int i;
1180 #ifdef INVARIANTS
1181 int rq_elements = 0;
1182 #endif
1184 for (i = 0; i <= fs->rq_size; i++) {
1185 struct dn_flow_queue *q;
1187 while ((q = LIST_FIRST(&fs->rq[i])) != NULL) {
1188 struct dn_pkt *pkt;
1190 while ((pkt = TAILQ_FIRST(&q->queue)) != NULL) {
1191 TAILQ_REMOVE(&q->queue, pkt, dn_next);
1192 ip_dn_packet_free(pkt);
1195 LIST_REMOVE(q, q_link);
1196 kfree(q, M_DUMMYNET);
1198 #ifdef INVARIANTS
1199 rq_elements++;
1200 #endif
1203 KASSERT(rq_elements == fs->rq_elements,
1204 ("# rq elements mismatch, freed %d, total %d\n",
1205 rq_elements, fs->rq_elements));
1206 fs->rq_elements = 0;
1208 if (all) {
1209 /* RED - free lookup table */
1210 if (fs->w_q_lookup)
1211 kfree(fs->w_q_lookup, M_DUMMYNET);
1213 if (fs->rq)
1214 kfree(fs->rq, M_DUMMYNET);
1217 * If this fs is not part of a pipe, free it
1219 * fs->pipe == NULL could happen, if 'fs' is a WF2Q and
 * - No packet belonging to that flow set has been delivered by
 *   dummynet_io(), i.e. the parent pipe is not installed yet.
1222 * - Parent pipe is deleted.
1224 if (fs->pipe == NULL || (fs->pipe && fs != &fs->pipe->fs))
1225 kfree(fs, M_DUMMYNET);
1230 * Dispose all packets queued on a pipe (not a flow_set).
1231 * Also free all resources associated to a pipe, which is about
1232 * to be deleted.
1234 static void
1235 purge_pipe(struct dn_pipe *pipe)
1237 struct dn_pkt *pkt;
1239 purge_flow_set(&pipe->fs, 1);
1241 while ((pkt = TAILQ_FIRST(&pipe->p_queue)) != NULL) {
1242 TAILQ_REMOVE(&pipe->p_queue, pkt, dn_next);
1243 ip_dn_packet_free(pkt);
1246 heap_free(&pipe->scheduler_heap);
1247 heap_free(&pipe->not_eligible_heap);
1248 heap_free(&pipe->idle_heap);
1252 * Delete all pipes and heaps returning memory.
1254 static void
1255 dummynet_flush(void)
1257 struct dn_pipe_head pipe_list;
1258 struct dn_flowset_head fs_list;
1259 struct dn_pipe *p;
1260 struct dn_flow_set *fs;
1261 int i;
1264 * Prevent future matches...
1266 LIST_INIT(&pipe_list);
1267 for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1268 struct dn_pipe_head *pipe_hdr = &pipe_table[i];
1270 while ((p = LIST_FIRST(pipe_hdr)) != NULL) {
1271 LIST_REMOVE(p, p_link);
1272 LIST_INSERT_HEAD(&pipe_list, p, p_link);
1276 LIST_INIT(&fs_list);
1277 for (i = 0; i < DN_NR_HASH_MAX; ++i) {
1278 struct dn_flowset_head *fs_hdr = &flowset_table[i];
1280 while ((fs = LIST_FIRST(fs_hdr)) != NULL) {
1281 LIST_REMOVE(fs, fs_link);
1282 LIST_INSERT_HEAD(&fs_list, fs, fs_link);
1286 /* Free heaps so we don't have unwanted events */
1287 heap_free(&ready_heap);
1288 heap_free(&wfq_ready_heap);
1289 heap_free(&extract_heap);
1292 * Now purge all queued pkts and delete all pipes
1294 /* Scan and purge all flow_sets. */
1295 while ((fs = LIST_FIRST(&fs_list)) != NULL) {
1296 LIST_REMOVE(fs, fs_link);
1297 purge_flow_set(fs, 1);
1300 while ((p = LIST_FIRST(&pipe_list)) != NULL) {
1301 LIST_REMOVE(p, p_link);
1302 purge_pipe(p);
1303 kfree(p, M_DUMMYNET);
1308 * setup RED parameters
1310 static int
1311 config_red(const struct dn_ioc_flowset *ioc_fs, struct dn_flow_set *x)
1313 int i;
1315 x->w_q = ioc_fs->w_q;
1316 x->min_th = SCALE(ioc_fs->min_th);
1317 x->max_th = SCALE(ioc_fs->max_th);
1318 x->max_p = ioc_fs->max_p;
1320 x->c_1 = ioc_fs->max_p / (ioc_fs->max_th - ioc_fs->min_th);
1321 x->c_2 = SCALE_MUL(x->c_1, SCALE(ioc_fs->min_th));
1322 if (x->flags_fs & DN_IS_GENTLE_RED) {
1323 x->c_3 = (SCALE(1) - ioc_fs->max_p) / ioc_fs->max_th;
1324 x->c_4 = (SCALE(1) - 2 * ioc_fs->max_p);
	/* If the lookup table already exists, free and create it again */
1328 if (x->w_q_lookup) {
1329 kfree(x->w_q_lookup, M_DUMMYNET);
1330 x->w_q_lookup = NULL ;
1333 if (red_lookup_depth == 0) {
1334 kprintf("net.inet.ip.dummynet.red_lookup_depth must be > 0\n");
1335 kfree(x, M_DUMMYNET);
1336 return EINVAL;
1338 x->lookup_depth = red_lookup_depth;
1339 x->w_q_lookup = kmalloc(x->lookup_depth * sizeof(int),
1340 M_DUMMYNET, M_WAITOK);
1342 /* Fill the lookup table with (1 - w_q)^x */
1343 x->lookup_step = ioc_fs->lookup_step;
1344 x->lookup_weight = ioc_fs->lookup_weight;
1346 x->w_q_lookup[0] = SCALE(1) - x->w_q;
1347 for (i = 1; i < x->lookup_depth; i++)
1348 x->w_q_lookup[i] = SCALE_MUL(x->w_q_lookup[i - 1], x->lookup_weight);
1350 if (red_avg_pkt_size < 1)
1351 red_avg_pkt_size = 512;
1352 x->avg_pkt_size = red_avg_pkt_size;
1354 if (red_max_pkt_size < 1)
1355 red_max_pkt_size = 1500;
1356 x->max_pkt_size = red_max_pkt_size;
1358 return 0;
1361 static void
1362 alloc_hash(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1364 int i, alloc_size;
1366 if (x->flags_fs & DN_HAVE_FLOW_MASK) {
1367 int l = ioc_fs->rq_size;
1369 /* Allocate some slots */
1370 if (l == 0)
1371 l = dn_hash_size;
1373 if (l < DN_MIN_HASH_SIZE)
1374 l = DN_MIN_HASH_SIZE;
1375 else if (l > DN_MAX_HASH_SIZE)
1376 l = DN_MAX_HASH_SIZE;
1378 x->rq_size = l;
1379 } else {
1380 /* One is enough for null mask */
1381 x->rq_size = 1;
1383 alloc_size = x->rq_size + 1;
1385 x->rq = kmalloc(alloc_size * sizeof(struct dn_flowqueue_head),
1386 M_DUMMYNET, M_WAITOK | M_ZERO);
1387 x->rq_elements = 0;
1389 for (i = 0; i < alloc_size; ++i)
1390 LIST_INIT(&x->rq[i]);
1393 static void
1394 set_flowid_parms(struct dn_flow_id *id, const struct dn_ioc_flowid *ioc_id)
1396 id->fid_dst_ip = ioc_id->u.ip.dst_ip;
1397 id->fid_src_ip = ioc_id->u.ip.src_ip;
1398 id->fid_dst_port = ioc_id->u.ip.dst_port;
1399 id->fid_src_port = ioc_id->u.ip.src_port;
1400 id->fid_proto = ioc_id->u.ip.proto;
1401 id->fid_flags = ioc_id->u.ip.flags;
1404 static void
1405 set_fs_parms(struct dn_flow_set *x, const struct dn_ioc_flowset *ioc_fs)
1407 x->flags_fs = ioc_fs->flags_fs;
1408 x->qsize = ioc_fs->qsize;
1409 x->plr = ioc_fs->plr;
1410 set_flowid_parms(&x->flow_mask, &ioc_fs->flow_mask);
1411 if (x->flags_fs & DN_QSIZE_IS_BYTES) {
1412 if (x->qsize > 1024 * 1024)
1413 x->qsize = 1024 * 1024;
1414 } else {
1415 if (x->qsize == 0 || x->qsize > 100)
1416 x->qsize = 50;
1419 /* Configuring RED */
1420 if (x->flags_fs & DN_IS_RED)
1421 config_red(ioc_fs, x); /* XXX should check errors */
1425 * setup pipe or queue parameters.
1428 static int
1429 config_pipe(struct dn_ioc_pipe *ioc_pipe)
1431 struct dn_ioc_flowset *ioc_fs = &ioc_pipe->fs;
1432 int error;
1435 * The config program passes parameters as follows:
1436 * bw bits/second (0 means no limits)
1437 * delay ms (must be translated into ticks)
1438 * qsize slots or bytes
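 *
 * Example: with the default dn_hz of 1000, a configured delay of 20 ms
 * becomes 20 * 1000 / 1000 = 20 ticks in the conversion below.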
1440 ioc_pipe->delay = (ioc_pipe->delay * dn_hz) / 1000;
1443 * We need either a pipe number or a flow_set number
1445 if (ioc_pipe->pipe_nr == 0 && ioc_fs->fs_nr == 0)
1446 return EINVAL;
1447 if (ioc_pipe->pipe_nr != 0 && ioc_fs->fs_nr != 0)
1448 return EINVAL;
1451 * Validate pipe number
1453 if (ioc_pipe->pipe_nr > DN_PIPE_NR_MAX || ioc_pipe->pipe_nr < 0)
1454 return EINVAL;
1456 error = EINVAL;
1457 if (ioc_pipe->pipe_nr != 0) { /* This is a pipe */
1458 struct dn_pipe *x, *p;
1460 /* Locate pipe */
1461 p = dn_find_pipe(ioc_pipe->pipe_nr);
1463 if (p == NULL) { /* New pipe */
1464 x = kmalloc(sizeof(struct dn_pipe), M_DUMMYNET, M_WAITOK | M_ZERO);
1465 x->pipe_nr = ioc_pipe->pipe_nr;
1466 x->fs.pipe = x;
1467 TAILQ_INIT(&x->p_queue);
1470 * idle_heap is the only one from which we extract from the middle.
1472 x->idle_heap.size = x->idle_heap.elements = 0;
1473 x->idle_heap.offset = __offsetof(struct dn_flow_queue, heap_pos);
1474 } else {
1475 int i;
1477 x = p;
1479 /* Flush accumulated credit for all queues */
1480 for (i = 0; i <= x->fs.rq_size; i++) {
1481 struct dn_flow_queue *q;
1483 LIST_FOREACH(q, &x->fs.rq[i], q_link)
1484 q->numbytes = 0;
1488 x->bandwidth = ioc_pipe->bandwidth;
1489 x->numbytes = 0; /* Just in case... */
1490 x->delay = ioc_pipe->delay;
1492 set_fs_parms(&x->fs, ioc_fs);
1494 if (x->fs.rq == NULL) { /* A new pipe */
1495 struct dn_pipe_head *pipe_hdr;
1497 alloc_hash(&x->fs, ioc_fs);
1499 pipe_hdr = &pipe_table[DN_NR_HASH(x->pipe_nr)];
1500 LIST_INSERT_HEAD(pipe_hdr, x, p_link);
1502 } else { /* Config flow_set */
1503 struct dn_flow_set *x, *fs;
1505 /* Locate flow_set */
1506 fs = dn_find_flowset(ioc_fs->fs_nr);
1508 if (fs == NULL) { /* New flow_set */
1509 if (ioc_fs->parent_nr == 0) /* Need link to a pipe */
1510 goto back;
1512 x = kmalloc(sizeof(struct dn_flow_set), M_DUMMYNET,
1513 M_WAITOK | M_ZERO);
1514 x->fs_nr = ioc_fs->fs_nr;
1515 x->parent_nr = ioc_fs->parent_nr;
1516 x->weight = ioc_fs->weight;
1517 if (x->weight == 0)
1518 x->weight = 1;
1519 else if (x->weight > 100)
1520 x->weight = 100;
1521 } else {
	/* Changing the parent pipe is not allowed; delete and recreate instead */
1523 if (ioc_fs->parent_nr != 0 && fs->parent_nr != ioc_fs->parent_nr)
1524 goto back;
1525 x = fs;
1528 set_fs_parms(x, ioc_fs);
1530 if (x->rq == NULL) { /* A new flow_set */
1531 struct dn_flowset_head *fs_hdr;
1533 alloc_hash(x, ioc_fs);
1535 fs_hdr = &flowset_table[DN_NR_HASH(x->fs_nr)];
1536 LIST_INSERT_HEAD(fs_hdr, x, fs_link);
1539 error = 0;
1541 back:
1542 return error;
1546 * Helper function to remove from a heap queues which are linked to
1547 * a flow_set about to be deleted.
1549 static void
1550 fs_remove_from_heap(struct dn_heap *h, struct dn_flow_set *fs)
1552 int i = 0, found = 0;
1554 while (i < h->elements) {
1555 if (((struct dn_flow_queue *)h->p[i].object)->fs == fs) {
1556 h->elements--;
1557 h->p[i] = h->p[h->elements];
1558 found++;
1559 } else {
1560 i++;
1563 if (found)
1564 heapify(h);
1568 * helper function to remove a pipe from a heap (can be there at most once)
1570 static void
1571 pipe_remove_from_heap(struct dn_heap *h, struct dn_pipe *p)
1573 if (h->elements > 0) {
1574 int i;
1576 for (i = 0; i < h->elements; i++) {
1577 if (h->p[i].object == p) { /* found it */
1578 h->elements--;
1579 h->p[i] = h->p[h->elements];
1580 heapify(h);
1581 break;
1587 static void
1588 dn_unref_pipe_cb(struct dn_flow_set *fs, void *pipe0)
1590 struct dn_pipe *pipe = pipe0;
1592 if (fs->pipe == pipe) {
1593 kprintf("++ ref to pipe %d from fs %d\n",
1594 pipe->pipe_nr, fs->fs_nr);
1595 fs->pipe = NULL;
1596 purge_flow_set(fs, 0);
1601 * Fully delete a pipe or a queue, cleaning up associated info.
1603 static int
1604 delete_pipe(const struct dn_ioc_pipe *ioc_pipe)
1606 struct dn_pipe *p;
1607 int error;
1609 if (ioc_pipe->pipe_nr == 0 && ioc_pipe->fs.fs_nr == 0)
1610 return EINVAL;
1611 if (ioc_pipe->pipe_nr != 0 && ioc_pipe->fs.fs_nr != 0)
1612 return EINVAL;
1614 if (ioc_pipe->pipe_nr > DN_NR_HASH_MAX || ioc_pipe->pipe_nr < 0)
1615 return EINVAL;
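	/*
	 * XXX config_pipe() bounds the pipe number with DN_PIPE_NR_MAX;
	 * the DN_NR_HASH_MAX bound used above looks inconsistent with that.
	 */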
1617 error = EINVAL;
1618 if (ioc_pipe->pipe_nr != 0) { /* This is an old-style pipe */
1619 /* Locate pipe */
1620 p = dn_find_pipe(ioc_pipe->pipe_nr);
1621 if (p == NULL)
1622 goto back; /* Not found */
1624 /* Unlink from pipe hash table */
1625 LIST_REMOVE(p, p_link);
1627 /* Remove all references to this pipe from flow_sets */
1628 dn_iterate_flowset(dn_unref_pipe_cb, p);
1630 fs_remove_from_heap(&ready_heap, &p->fs);
1631 purge_pipe(p); /* Remove all data associated to this pipe */
1633 /* Remove reference to here from extract_heap and wfq_ready_heap */
1634 pipe_remove_from_heap(&extract_heap, p);
1635 pipe_remove_from_heap(&wfq_ready_heap, p);
1637 kfree(p, M_DUMMYNET);
1638 } else { /* This is a WF2Q queue (dn_flow_set) */
1639 struct dn_flow_set *fs;
1641 /* Locate flow_set */
1642 fs = dn_find_flowset(ioc_pipe->fs.fs_nr);
1643 if (fs == NULL)
1644 goto back; /* Not found */
1646 LIST_REMOVE(fs, fs_link);
1648 if ((p = fs->pipe) != NULL) {
1649 /* Update total weight on parent pipe and cleanup parent heaps */
1650 p->sum -= fs->weight * fs->backlogged;
1651 fs_remove_from_heap(&p->not_eligible_heap, fs);
1652 fs_remove_from_heap(&p->scheduler_heap, fs);
1653 #if 1 /* XXX should i remove from idle_heap as well ? */
1654 fs_remove_from_heap(&p->idle_heap, fs);
1655 #endif
1657 purge_flow_set(fs, 1);
1659 error = 0;
1661 back:
1662 return error;
1666 * helper function used to copy data from kernel in DUMMYNET_GET
1668 static void
1669 dn_copy_flowid(const struct dn_flow_id *id, struct dn_ioc_flowid *ioc_id)
1671 ioc_id->type = ETHERTYPE_IP;
1672 ioc_id->u.ip.dst_ip = id->fid_dst_ip;
1673 ioc_id->u.ip.src_ip = id->fid_src_ip;
1674 ioc_id->u.ip.dst_port = id->fid_dst_port;
1675 ioc_id->u.ip.src_port = id->fid_src_port;
1676 ioc_id->u.ip.proto = id->fid_proto;
1677 ioc_id->u.ip.flags = id->fid_flags;
1680 static void *
1681 dn_copy_flowqueues(const struct dn_flow_set *fs, void *bp)
1683 struct dn_ioc_flowqueue *ioc_fq = bp;
1684 int i, copied = 0;
1686 for (i = 0; i <= fs->rq_size; i++) {
1687 const struct dn_flow_queue *q;
1689 LIST_FOREACH(q, &fs->rq[i], q_link) {
1690 if (q->hash_slot != i) { /* XXX ASSERT */
1691 kprintf("++ at %d: wrong slot (have %d, "
1692 "should be %d)\n", copied, q->hash_slot, i);
1694 if (q->fs != fs) { /* XXX ASSERT */
1695 kprintf("++ at %d: wrong fs ptr (have %p, should be %p)\n",
1696 i, q->fs, fs);
1699 copied++;
1701 ioc_fq->len = q->len;
1702 ioc_fq->len_bytes = q->len_bytes;
1703 ioc_fq->tot_pkts = q->tot_pkts;
1704 ioc_fq->tot_bytes = q->tot_bytes;
1705 ioc_fq->drops = q->drops;
1706 ioc_fq->hash_slot = q->hash_slot;
1707 ioc_fq->S = q->S;
1708 ioc_fq->F = q->F;
1709 dn_copy_flowid(&q->id, &ioc_fq->id);
1711 ioc_fq++;
1715 if (copied != fs->rq_elements) { /* XXX ASSERT */
1716 kprintf("++ wrong count, have %d should be %d\n",
1717 copied, fs->rq_elements);
1719 return ioc_fq;
1722 static void
1723 dn_copy_flowset(const struct dn_flow_set *fs, struct dn_ioc_flowset *ioc_fs,
1724 u_short fs_type)
1726 ioc_fs->fs_type = fs_type;
1728 ioc_fs->fs_nr = fs->fs_nr;
1729 ioc_fs->flags_fs = fs->flags_fs;
1730 ioc_fs->parent_nr = fs->parent_nr;
1732 ioc_fs->weight = fs->weight;
1733 ioc_fs->qsize = fs->qsize;
1734 ioc_fs->plr = fs->plr;
1736 ioc_fs->rq_size = fs->rq_size;
1737 ioc_fs->rq_elements = fs->rq_elements;
1739 ioc_fs->w_q = fs->w_q;
1740 ioc_fs->max_th = fs->max_th;
1741 ioc_fs->min_th = fs->min_th;
1742 ioc_fs->max_p = fs->max_p;
1744 dn_copy_flowid(&fs->flow_mask, &ioc_fs->flow_mask);
1747 static void
1748 dn_calc_pipe_size_cb(struct dn_pipe *pipe, void *sz)
1750 size_t *size = sz;
1752 *size += sizeof(struct dn_ioc_pipe) +
1753 pipe->fs.rq_elements * sizeof(struct dn_ioc_flowqueue);
1756 static void
1757 dn_calc_fs_size_cb(struct dn_flow_set *fs, void *sz)
1759 size_t *size = sz;
1761 *size += sizeof(struct dn_ioc_flowset) +
1762 fs->rq_elements * sizeof(struct dn_ioc_flowqueue);
1765 static void
1766 dn_copyout_pipe_cb(struct dn_pipe *pipe, void *bp0)
1768 char **bp = bp0;
1769 struct dn_ioc_pipe *ioc_pipe = (struct dn_ioc_pipe *)(*bp);
1772 * Copy flow set descriptor associated with this pipe
1774 dn_copy_flowset(&pipe->fs, &ioc_pipe->fs, DN_IS_PIPE);
1777 * Copy pipe descriptor
1779 ioc_pipe->bandwidth = pipe->bandwidth;
1780 ioc_pipe->pipe_nr = pipe->pipe_nr;
1781 ioc_pipe->V = pipe->V;
1782 /* Convert delay to milliseconds */
1783 ioc_pipe->delay = (pipe->delay * 1000) / dn_hz;
1786 * Copy flow queue descriptors
1788 *bp += sizeof(*ioc_pipe);
1789 *bp = dn_copy_flowqueues(&pipe->fs, *bp);
1792 static void
1793 dn_copyout_fs_cb(struct dn_flow_set *fs, void *bp0)
1795 char **bp = bp0;
1796 struct dn_ioc_flowset *ioc_fs = (struct dn_ioc_flowset *)(*bp);
1799 * Copy flow set descriptor
1801 dn_copy_flowset(fs, ioc_fs, DN_IS_QUEUE);
1804 * Copy flow queue descriptors
1806 *bp += sizeof(*ioc_fs);
1807 *bp = dn_copy_flowqueues(fs, *bp);
1810 static int
1811 dummynet_get(struct dn_sopt *dn_sopt)
1813 char *buf, *bp;
1814 size_t size = 0;
1817 * Compute size of data structures: list of pipes and flow_sets.
1819 dn_iterate_pipe(dn_calc_pipe_size_cb, &size);
1820 dn_iterate_flowset(dn_calc_fs_size_cb, &size);
1823 * Copyout pipe/flow_set/flow_queue
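 * The buffer mirrors the iteration order: for each pipe a struct
 * dn_ioc_pipe followed by one struct dn_ioc_flowqueue per flow queue, then
 * for each flow_set a struct dn_ioc_flowset followed by its flow queues.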
1825 bp = buf = kmalloc(size, M_TEMP, M_WAITOK | M_ZERO);
1826 dn_iterate_pipe(dn_copyout_pipe_cb, &bp);
1827 dn_iterate_flowset(dn_copyout_fs_cb, &bp);
1829 /* Temp memory will be freed by caller */
1830 dn_sopt->dn_sopt_arg = buf;
1831 dn_sopt->dn_sopt_arglen = size;
1832 return 0;
1836 * Handler for the various dummynet socket options (get, flush, config, del)
1838 static int
1839 dummynet_ctl(struct dn_sopt *dn_sopt)
1841 int error = 0;
1843 switch (dn_sopt->dn_sopt_name) {
1844 case IP_DUMMYNET_GET:
1845 error = dummynet_get(dn_sopt);
1846 break;
1848 case IP_DUMMYNET_FLUSH:
1849 dummynet_flush();
1850 break;
1852 case IP_DUMMYNET_CONFIGURE:
1853 KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1854 error = config_pipe(dn_sopt->dn_sopt_arg);
1855 break;
1857 case IP_DUMMYNET_DEL: /* Remove a pipe or flow_set */
1858 KKASSERT(dn_sopt->dn_sopt_arglen == sizeof(struct dn_ioc_pipe));
1859 error = delete_pipe(dn_sopt->dn_sopt_arg);
1860 break;
1862 default:
1863 kprintf("%s -- unknown option %d\n", __func__, dn_sopt->dn_sopt_name);
1864 error = EINVAL;
1865 break;
1867 return error;
1870 static void
1871 dummynet_clock(systimer_t info __unused, struct intrframe *frame __unused)
1873 KASSERT(mycpuid == ip_dn_cpu,
1874 ("dummynet systimer comes on cpu%d, should be %d!\n",
1875 mycpuid, ip_dn_cpu));
1877 crit_enter();
1878 if (DUMMYNET_LOADED && (dn_netmsg.nm_lmsg.ms_flags & MSGF_DONE))
1879 lwkt_sendmsg(cpu_portfn(mycpuid), &dn_netmsg.nm_lmsg);
1880 crit_exit();
1883 static int
1884 sysctl_dn_hz(SYSCTL_HANDLER_ARGS)
1886 int error, val;
1888 val = dn_hz;
1889 error = sysctl_handle_int(oidp, &val, 0, req);
1890 if (error || req->newptr == NULL)
1891 return error;
1892 if (val <= 0)
1893 return EINVAL;
1894 else if (val > DN_CALLOUT_FREQ_MAX)
1895 val = DN_CALLOUT_FREQ_MAX;
1897 crit_enter();
1898 dn_hz = val;
1899 systimer_adjust_periodic(&dn_clock, val);
1900 crit_exit();
1902 return 0;
1905 static void
1906 ip_dn_init_dispatch(struct netmsg *msg)
1908 int i, error = 0;
1910 KASSERT(mycpuid == ip_dn_cpu,
1911 ("%s runs on cpu%d, instead of cpu%d", __func__,
1912 mycpuid, ip_dn_cpu));
1914 crit_enter();
1916 if (DUMMYNET_LOADED) {
1917 kprintf("DUMMYNET already loaded\n");
1918 error = EEXIST;
1919 goto back;
1922 kprintf("DUMMYNET initialized (011031)\n");
1924 for (i = 0; i < DN_NR_HASH_MAX; ++i)
1925 LIST_INIT(&pipe_table[i]);
1927 for (i = 0; i < DN_NR_HASH_MAX; ++i)
1928 LIST_INIT(&flowset_table[i]);
1930 ready_heap.size = ready_heap.elements = 0;
1931 ready_heap.offset = 0;
1933 wfq_ready_heap.size = wfq_ready_heap.elements = 0;
1934 wfq_ready_heap.offset = 0;
1936 extract_heap.size = extract_heap.elements = 0;
1937 extract_heap.offset = 0;
1939 ip_dn_ctl_ptr = dummynet_ctl;
1940 ip_dn_io_ptr = dummynet_io;
1942 netmsg_init(&dn_netmsg, NULL, &netisr_adone_rport,
1943 0, dummynet);
1944 systimer_init_periodic_nq(&dn_clock, dummynet_clock, NULL, dn_hz);
1946 back:
1947 crit_exit();
1948 lwkt_replymsg(&msg->nm_lmsg, error);
1951 static int
1952 ip_dn_init(void)
1954 struct netmsg smsg;
1956 if (ip_dn_cpu >= ncpus) {
1957 kprintf("%s: CPU%d does not exist, switch to CPU0\n",
1958 __func__, ip_dn_cpu);
1959 ip_dn_cpu = 0;
1962 netmsg_init(&smsg, NULL, &curthread->td_msgport,
1963 0, ip_dn_init_dispatch);
1964 lwkt_domsg(cpu_portfn(ip_dn_cpu), &smsg.nm_lmsg, 0);
1965 return smsg.nm_lmsg.ms_error;
1968 #ifdef KLD_MODULE
1970 static void
1971 ip_dn_stop_dispatch(struct netmsg *msg)
1973 crit_enter();
1975 dummynet_flush();
1977 ip_dn_ctl_ptr = NULL;
1978 ip_dn_io_ptr = NULL;
1980 systimer_del(&dn_clock);
1982 crit_exit();
1983 lwkt_replymsg(&msg->nm_lmsg, 0);
1987 static void
1988 ip_dn_stop(void)
1990 struct netmsg smsg;
1992 netmsg_init(&smsg, NULL, &curthread->td_msgport,
1993 0, ip_dn_stop_dispatch);
1994 lwkt_domsg(cpu_portfn(ip_dn_cpu), &smsg.nm_lmsg, 0);
1996 netmsg_service_sync();
1999 #endif /* KLD_MODULE */
2001 static int
2002 dummynet_modevent(module_t mod, int type, void *data)
2004 switch (type) {
2005 case MOD_LOAD:
2006 return ip_dn_init();
2008 case MOD_UNLOAD:
2009 #ifndef KLD_MODULE
2010 kprintf("dummynet statically compiled, cannot unload\n");
2011 return EINVAL;
2012 #else
2013 ip_dn_stop();
2014 #endif
2015 break;
2017 default:
2018 break;
2020 return 0;
2023 static moduledata_t dummynet_mod = {
2024 "dummynet",
2025 dummynet_modevent,
2026 NULL
2028 DECLARE_MODULE(dummynet, dummynet_mod, SI_SUB_PROTO_END, SI_ORDER_ANY);
2029 MODULE_VERSION(dummynet, 1);