/*	$KAME: altq_rmclass.c,v 1.18 2003/11/06 06:32:53 kjc Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_rmclass.c,v 1.8 2006/12/22 23:44:55 swildner Exp $ */
/*
 * Copyright (c) 1991-1997 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the Network Research
 *	Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LBL code modified by speer@eng.sun.com, May 1977.
 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
 */
#ident "@(#)rm_class.c  1.48     97/12/05 SMI"

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/thread.h>

#include <net/if.h>

#include <net/altq/altq.h>
#include <net/altq/altq_rmclass.h>
#include <net/altq/altq_rmclass_debug.h>
#include <net/altq/altq_red.h>
#include <net/altq/altq_rio.h>

#include <sys/thread2.h>
#ifdef CBQ_TRACE
static struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
static struct cbqtrace *cbqtrace_ptr = NULL;
static int cbqtrace_count;
#endif
/*
 * Local Macros
 */

#define	reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }
/*
 * Local routines.
 */

static int	rmc_satisfied(struct rm_class *, struct timeval *);
static void	rmc_wrr_set_weights(struct rm_ifdat *);
static void	rmc_depth_compute(struct rm_class *);
static void	rmc_depth_recompute(rm_class_t *);

static struct mbuf *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
static struct mbuf *_rmc_prr_dequeue_next(struct rm_ifdat *, int);

static int	_rmc_addq(rm_class_t *, struct mbuf *);
static void	_rmc_dropq(rm_class_t *);
static struct mbuf *_rmc_getq(rm_class_t *);
static struct mbuf *_rmc_pollq(rm_class_t *);

static int	rmc_under_limit(struct rm_class *, struct timeval *);
static void	rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
static void	rmc_drop_action(struct rm_class *);
static void	rmc_restart(void *);
static void	rmc_root_overlimit(struct rm_class *, struct rm_class *);
#define	BORROW_OFFTIME
/*
 * BORROW_OFFTIME (experimental):
 * borrow the offtime of the class borrowing from.
 * the reason is that when its own offtime is set, the class is unable
 * to borrow much, especially when cutoff is taking effect.
 * but when the borrowed class is overloaded (avgidle is close to minidle),
 * use the borrowing class's offtime to avoid overload.
 */
#define	ADJUST_CUTOFF
/*
 * ADJUST_CUTOFF (experimental):
 * if no underlimit class is found due to cutoff, increase cutoff and
 * retry the scheduling loop.
 * also, don't invoke delay_actions while cutoff is taking effect,
 * since a sleeping class won't have a chance to be scheduled in the
 * next loop.
 *
 * now heuristics for setting the top-level variable (cutoff_) becomes:
 * 1. if a packet arrives for a not-overlimit class, set cutoff
 *    to the depth of the class.
 * 2. if cutoff is i, and a packet arrives for an overlimit class
 *    with an underlimit ancestor at a lower level than i (say j),
 *    then set cutoff to j.
 * 3. at scheduling a packet, if there is no underlimit class
 *    due to the current cutoff level, increase cutoff by 1 and
 *    then try to schedule again.
 */
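
/*
 * Illustrative note (values hypothetical, not from the original
 * source): with a three-level tree -- root at depth 2, an interior
 * class at depth 1, a leaf at depth 0 -- the heuristics above play
 * out as follows.  A packet for a not-overlimit leaf sets cutoff_ to
 * 0, disabling all borrowing.  A packet for an overlimit leaf whose
 * depth-1 ancestor is underlimit raises cutoff_ to 1, so the leaf may
 * borrow from its parent but not from the root.  If scheduling then
 * finds no underlimit class, cutoff_ is bumped to 2 and the loop is
 * retried, now allowing borrowing from the root.
 */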
/*
 * rm_class_t *
 * rmc_newclass(...) - Create a new resource management class at priority
 * 'pri' on the interface given by 'ifd'.
 *
 * nsecPerByte  is the data rate of the interface in nanoseconds/byte.
 *              E.g., 800 for a 10Mb/s ethernet.  If the class gets less
 *              than 100% of the bandwidth, this number should be the
 *              'effective' rate for the class.  Let f be the
 *              bandwidth fraction allocated to this class, and let
 *              nsPerByte be the data rate of the output link in
 *              nanoseconds/byte.  Then nsecPerByte is set to
 *              nsPerByte / f.  E.g., 1600 (= 800 / .5)
 *              for a class that gets 50% of an ethernet's bandwidth.
 *
 * action       the routine to call when the class is over limit.
 *
 * maxq         max allowable queue size for class (in packets).
 *
 * parent       parent class pointer.
 *
 * borrow       class to borrow from (should be either 'parent' or null).
 *
 * maxidle      max value allowed for class 'idle' time estimate (this
 *              parameter determines how large an initial burst of packets
 *              can be before overlimit action is invoked.
 *
 * offtime      how long 'delay' action will delay when class goes over
 *              limit (this parameter determines the steady-state burst
 *              size when a class is running over its limit).
 *
 * Maxidle and offtime have to be computed from the following:  If the
 * average packet size is s, the bandwidth fraction allocated to this
 * class is f, we want to allow b packet bursts, and the gain of the
 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
 *
 *	ptime = s * nsPerByte * (1 - f) / f
 *	maxidle = ptime * (1 - g^b) / g^b
 *	minidle = -ptime * (1 / (f - 1))
 *	offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
 *
 * Operationally, it's convenient to specify maxidle & offtime in units
 * independent of the link bandwidth so the maxidle & offtime passed to
 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
 * (The constant factor is a scale factor needed to make the parameters
 * integers.  This scaling also means that the 'unscaled' values of
 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
 * maxidle also must be scaled upward by this value.  Thus, the passed
 * values for maxidle and offtime can be computed as follows:
 *
 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
 * offtime = offtime * 8 / (1000 * nsecPerByte)
 *
 * When USE_HRTIME is employed, then maxidle and offtime become:
 *	maxidle = maxidle * (8.0 / nsecPerByte);
 *	offtime = offtime * (8.0 / nsecPerByte);
 */
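
/*
 * Worked example of the scaling above (a sketch; the traffic figures
 * are hypothetical).  Assume a 10Mb/s link (nsPerByte = 800), a class
 * with f = 0.5 (so nsecPerByte = 1600), average packet size s = 1000
 * bytes, burst b = 16, and RM_FILTER_GAIN = 5 (g = 31/32 ~= 0.969):
 *
 *	ptime = 1000 * 800 * (1 - .5) / .5 = 800000 ns
 *	g^b  ~= 0.60, so maxidle ~= 800000 * 0.40/0.60 ~= 533000 ns
 *
 * The value actually passed to this routine is then roughly
 *
 *	maxidle = 533000 * 2^5 * 8 / (1000 * 1600) ~= 85
 */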
struct rm_class *
rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
	     void (*action)(rm_class_t *, rm_class_t *), int maxq,
	     struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
	     int minidle, u_int offtime, int pktsize, int flags)
{
	struct rm_class *cl;
	struct rm_class *peer;

	if (pri >= RM_MAXPRIO)
		return (NULL);
#ifndef ALTQ_RED
	if (flags & RMCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("rmc_newclass: RED not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_RIO
	if (flags & RMCF_RIO) {
#ifdef ALTQ_DEBUG
		kprintf("rmc_newclass: RIO not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	callout_init(&cl->callout_);
	cl->q_ = kmalloc(sizeof(*cl->q_), M_ALTQ, M_WAITOK | M_ZERO);
	/*
	 * Class initialization.
	 */
	cl->children_ = NULL;
	cl->parent_ = parent;
	cl->borrow_ = borrow;
	cl->leaf_ = 1;
	cl->ifdat_ = ifd;
	cl->pri_ = pri;
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->depth_ = 0;
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;
	qtype(cl->q_) = Q_DROPHEAD;
	qlen(cl->q_) = 0;
	cl->flags_ = flags;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif
	cl->overlimit = action;
#ifdef ALTQ_RED
	if (flags & (RMCF_RED|RMCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & RMCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & RMCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		red_pkttime = nsecPerByte * pktsize / 1000;

		if (flags & RMCF_RED) {
			cl->red_ = red_alloc(0, 0,
					     qlimit(cl->q_) * 10/100,
					     qlimit(cl->q_) * 30/100,
					     red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->red_ = (red_t *)rio_alloc(0, NULL,
						      red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */
	/*
	 * put the class into the class tree
	 */
	crit_enter();
	if ((peer = ifd->active_[pri]) != NULL) {
		/* find the last class at this pri */
		cl->peer_ = peer;
		while (peer->peer_ != ifd->active_[pri])
			peer = peer->peer_;
		peer->peer_ = cl;
	} else {
		ifd->active_[pri] = cl;
		cl->peer_ = cl;
	}

	if (cl->parent_) {
		cl->next_ = parent->children_;
		parent->children_ = cl;
		parent->leaf_ = 0;
	}

	/*
	 * Compute the depth of this class and its ancestors in the class
	 * hierarchy.
	 */
	rmc_depth_compute(cl);

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->num_[pri]++;
		ifd->alloc_[pri] += cl->allotment_;
		rmc_wrr_set_weights(ifd);
	}
	crit_exit();
	return (cl);
}
int
rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
	     int minidle, u_int offtime, int pktsize)
{
	struct rm_ifdat *ifd;
	u_int old_allotment;

	ifd = cl->ifdat_;
	old_allotment = cl->allotment_;

	crit_enter();
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
		rmc_wrr_set_weights(ifd);
	}
	crit_exit();
	return (0);
}
/*
 * static void
 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
 *	the appropriate round robin weights for the CBQ weighted round
 *	robin algorithm.
 *
 *	Returns: NONE
 */
static void
rmc_wrr_set_weights(struct rm_ifdat *ifd)
{
	int i;
	struct rm_class *cl, *clh;

	for (i = 0; i < RM_MAXPRIO; i++) {
		/*
		 * This is inverted from that of the simulator to
		 * maintain precision.
		 */
		if (ifd->num_[i] == 0)
			ifd->M_[i] = 0;
		else
			ifd->M_[i] = ifd->alloc_[i] /
				(ifd->num_[i] * ifd->maxpkt_);
		/*
		 * Compute the weighted allotment for each class.
		 * This takes the expensive div instruction out
		 * of the main loop for the wrr scheduling path.
		 * These only get recomputed when a class comes or
		 * goes.
		 */
		if (ifd->active_[i] != NULL) {
			clh = cl = ifd->active_[i];
			do {
				/* safe-guard for slow link or alloc_ == 0 */
				if (ifd->M_[i] == 0)
					cl->w_allotment_ = 0;
				else
					cl->w_allotment_ = cl->allotment_ /
						ifd->M_[i];
				cl = cl->peer_;
			} while ((cl != NULL) && (cl != clh));
		}
	}
}
int
rmc_get_weight(struct rm_ifdat *ifd, int pri)
{
	if ((pri >= 0) && (pri < RM_MAXPRIO))
		return (ifd->M_[pri]);
	else
		return (0);
}

/*
 * static void
 * rmc_depth_compute(struct rm_class *cl) - This function computes the
 *	appropriate depth of class 'cl' and its ancestors.
 *
 *	Returns:	NONE
 */
static void
rmc_depth_compute(struct rm_class *cl)
{
	rm_class_t *t = cl, *p;

	/*
	 * Recompute the depth for the branch of the tree.
	 */
	while (t != NULL) {
		p = t->parent_;
		if (p && (t->depth_ >= p->depth_)) {
			p->depth_ = t->depth_ + 1;
			t = p;
		} else
			t = NULL;
	}
}

/*
 * static void
 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
 *	the depth of the tree after a class has been deleted.
 *
 *	Returns:	NONE
 */
static void
rmc_depth_recompute(rm_class_t *cl)
{
#if 1 /* ALTQ */
	rm_class_t *p, *t;

	p = cl;
	while (p != NULL) {
		if ((t = p->children_) == NULL) {
			p->depth_ = 0;
		} else {
			int cdepth = 0;

			while (t != NULL) {
				if (t->depth_ > cdepth)
					cdepth = t->depth_;
				t = t->next_;
			}

			if (p->depth_ == cdepth + 1)
				/* no change to this parent */
				return;

			p->depth_ = cdepth + 1;
		}

		p = p->parent_;
	}
#else
	rm_class_t *t;

	if (cl->depth_ >= 1) {
		if (cl->children_ == NULL) {
			cl->depth_ = 0;
		} else if ((t = cl->children_) != NULL) {
			while (t != NULL) {
				if (t->children_ != NULL)
					rmc_depth_recompute(t);
				t = t->next_;
			}
		} else
			rmc_depth_compute(cl);
	}
#endif
}
/*
 * void
 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
 *	function deletes a class from the link-sharing structure and frees
 *	all resources associated with the class.
 *
 *	Returns:	NONE
 */
void
rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
{
	struct rm_class *p, *head, *previous;

	KKASSERT(cl->children_ == NULL);

	if (cl->sleeping_)
		callout_stop(&cl->callout_);

	crit_enter();

	/*
	 * Free packets in the packet queue.
	 * XXX - this may not be a desired behavior.  Packets should be
	 *	 re-queued.
	 */
	rmc_dropall(cl);

	/*
	 * If the class has a parent, then remove the class from the
	 * parent's children chain.
	 */
	if (cl->parent_ != NULL) {
		head = cl->parent_->children_;
		p = previous = head;
		if (head->next_ == NULL) {
			KKASSERT(head == cl);
			cl->parent_->children_ = NULL;
			cl->parent_->leaf_ = 1;
		} else while (p != NULL) {
			if (p == cl) {
				if (cl == head)
					cl->parent_->children_ = cl->next_;
				else
					previous->next_ = cl->next_;
				cl->next_ = NULL;
				p = NULL;
			} else {
				previous = p;
				p = p->next_;
			}
		}
	}

	/*
	 * Delete class from class priority peer list.
	 */
	if ((p = ifd->active_[cl->pri_]) != NULL) {
		/*
		 * If there is more than one member of this priority
		 * level, then look for class(cl) in the priority level.
		 */
		if (p != p->peer_) {
			while (p->peer_ != cl)
				p = p->peer_;
			p->peer_ = cl->peer_;

			if (ifd->active_[cl->pri_] == cl)
				ifd->active_[cl->pri_] = cl->peer_;
		} else {
			KKASSERT(p == cl);
			ifd->active_[cl->pri_] = NULL;
		}
	}

	/*
	 * Recompute the WRR weights.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] -= cl->allotment_;
		ifd->num_[cl->pri_]--;
		rmc_wrr_set_weights(ifd);
	}

	/*
	 * Re-compute the depth of the tree.
	 */
#if 1 /* ALTQ */
	rmc_depth_recompute(cl->parent_);
#else
	rmc_depth_recompute(ifd->root_);
#endif

	crit_exit();

	/*
	 * Free the class structure.
	 */
	if (cl->red_ != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->q_))
			rio_destroy((rio_t *)cl->red_);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->q_))
			red_destroy(cl->red_);
#endif
	}
	kfree(cl->q_, M_ALTQ);
	kfree(cl, M_ALTQ);
}
/*
 * void
 * rmc_init(...) - Initialize the resource management data structures
 *	associated with the output portion of interface 'ifp'.  'ifd' is
 *	where the structures will be built (for backwards compatibility, the
 *	structures aren't kept in the ifnet struct).  'nsecPerByte'
 *	gives the link speed (inverse of bandwidth) in nanoseconds/byte.
 *	'restart' is the driver-specific routine that the generic 'delay
 *	until under limit' action will call to restart output.  `maxq'
 *	is the queue size of the 'link' & 'default' classes.  'maxqueued'
 *	is the maximum number of packets that the resource management
 *	code will allow to be queued 'downstream' (this is typically 1).
 *
 *	Returns:	NONE
 */
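
/*
 * Added usage sketch (not part of the original file): a hypothetical
 * driver attach path for a 10Mb/s link (nsecPerByte = 800) might wire
 * things up roughly as below; 'cbq_restart', 'maxidle', 'minidle' and
 * 'offtime' are placeholders for driver-supplied values.
 *
 *	static struct rm_ifdat ifd;
 *
 *	rmc_init(ifq, &ifd, 800, cbq_restart, 64, 1,
 *		 maxidle, minidle, offtime, RMCF_WRR);
 *	cl = rmc_newclass(1, &ifd, 1600, rmc_delay_action, 64,
 *			  ifd.root_, ifd.root_, maxidle, minidle,
 *			  offtime, 1000, 0);
 */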
void
rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
	 void (*restart)(struct ifaltq *), int maxq, int maxqueued,
	 u_int maxidle, int minidle, u_int offtime, int flags)
{
	int i, mtu;

	/*
	 * Initialize the CBQ tracing/debug facility.
	 */
	CBQTRACEINIT();

	bzero(ifd, sizeof (*ifd));
	mtu = ifq->altq_ifp->if_mtu;
	ifd->ifq_ = ifq;
	ifd->restart = restart;
	ifd->maxqueued_ = maxqueued;
	ifd->ns_per_byte_ = nsecPerByte;
	ifd->maxpkt_ = mtu;
	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
#if 1
	ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
	if (mtu * nsecPerByte > 10 * 1000000)
		ifd->maxiftime_ /= 4;
#endif

	reset_cutoff(ifd);
	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);

	/*
	 * Initialize the CBQ's WRR state.
	 */
	for (i = 0; i < RM_MAXPRIO; i++) {
		ifd->alloc_[i] = 0;
		ifd->M_[i] = 0;
		ifd->num_[i] = 0;
		ifd->na_[i] = 0;
		ifd->active_[i] = NULL;
	}

	/*
	 * Initialize current packet state.
	 */
	ifd->qi_ = 0;
	ifd->qo_ = 0;
	for (i = 0; i < RM_MAXQUEUED; i++) {
		ifd->class_[i] = NULL;
		ifd->curlen_[i] = 0;
		ifd->borrowed_[i] = NULL;
	}

	/*
	 * Create the root class of the link-sharing structure.
	 */
	ifd->root_ = rmc_newclass(0, ifd, nsecPerByte, rmc_root_overlimit,
				  maxq, 0, 0, maxidle, minidle, offtime, 0, 0);
	if (ifd->root_ == NULL) {
		kprintf("rmc_init: root class not allocated\n");
		return;
	}
	ifd->root_->depth_ = 0;
}
/*
 * void
 * rmc_queue_packet(struct rm_class *cl, struct mbuf *m) - Add packet given by
 *	mbuf 'm' to queue for resource class 'cl'.  This routine is called
 *	by a driver's if_output routine.  This routine must be called with
 *	output packet completion interrupts locked out (to avoid racing with
 *	rmc_dequeue_next).
 *
 *	Returns:	0 on successful queueing
 *			-1 when packet drop occurs
 */
int
rmc_queue_packet(struct rm_class *cl, struct mbuf *m)
{
	struct timeval now;
	struct rm_ifdat *ifd = cl->ifdat_;
	int cpri = cl->pri_;
	int is_empty = qempty(cl->q_);

	RM_GETTIME(now);
	if (ifd->cutoff_ > 0) {
		if (TV_LT(&cl->undertime_, &now)) {
			if (ifd->cutoff_ > cl->depth_)
				ifd->cutoff_ = cl->depth_;
			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
		}
#if 1 /* ALTQ */
		else {
			/*
			 * the class is overlimit. if the class has
			 * underlimit ancestors, set cutoff to the lowest
			 * depth among them.
			 */
			struct rm_class *borrow = cl->borrow_;

			while (borrow != NULL &&
			       borrow->depth_ < ifd->cutoff_) {
				if (TV_LT(&borrow->undertime_, &now)) {
					ifd->cutoff_ = borrow->depth_;
					CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
					break;
				}
				borrow = borrow->borrow_;
			}
		}
#else /* !ALTQ */
		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
			if (TV_LT(&cl->borrow_->undertime_, &now)) {
				ifd->cutoff_ = cl->borrow_->depth_;
				CBQTRACE(rmc_queue_packet, 'ffob',
					 cl->borrow_->depth_);
			}
		}
#endif /* !ALTQ */
	}

	if (_rmc_addq(cl, m) < 0)
		/* failed */
		return (-1);

	if (is_empty) {
		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
		ifd->na_[cpri]++;
	}

	if (qlen(cl->q_) > qlimit(cl->q_)) {
		/* note: qlimit can be set to 0 or 1 */
		rmc_drop_action(cl);
		return (-1);
	}
	return (0);
}
/*
 * void
 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
 *	classes to see if they are satisfied.
 */
static void
rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
{
	int i;
	rm_class_t *p, *bp;

	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
		if ((bp = ifd->active_[i]) != NULL) {
			p = bp;
			do {
				if (!rmc_satisfied(p, now)) {
					ifd->cutoff_ = p->depth_;
					return;
				}
				p = p->peer_;
			} while (p != bp);
		}
	}

	reset_cutoff(ifd);
}
/*
 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
 */
static int
rmc_satisfied(struct rm_class *cl, struct timeval *now)
{
	rm_class_t *p;

	if (cl == NULL)
		return (1);
	if (TV_LT(now, &cl->undertime_))
		return (1);
	if (cl->depth_ == 0) {
		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
			return (0);
		else
			return (1);
	}
	if (cl->children_ != NULL) {
		p = cl->children_;
		while (p != NULL) {
			if (!rmc_satisfied(p, now))
				return (0);
			p = p->next_;
		}
	}

	return (1);
}
/*
 * Return 1 if class 'cl' is under limit or can borrow from a parent,
 * 0 if overlimit.  As a side-effect, this routine will invoke the
 * class overlimit action if the class is overlimit.
 */
static int
rmc_under_limit(struct rm_class *cl, struct timeval *now)
{
	rm_class_t *p = cl;
	rm_class_t *top;
	struct rm_ifdat *ifd = cl->ifdat_;

	ifd->borrowed_[ifd->qi_] = NULL;
	/*
	 * If cl is the root class, then always return that it is
	 * underlimit.  Otherwise, check to see if the class is underlimit.
	 */
	if (cl->parent_ == NULL)
		return (1);

	if (cl->sleeping_) {
		if (TV_LT(now, &cl->undertime_))
			return (0);

		callout_stop(&cl->callout_);
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;
		return (1);
	}

	top = NULL;
	while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
		if (((cl = cl->borrow_) == NULL) ||
		    (cl->depth_ > ifd->cutoff_)) {
#ifdef ADJUST_CUTOFF
			if (cl != NULL)
				/* cutoff is taking effect, just
				   return false without calling
				   the delay action. */
				return (0);
#endif
#ifdef BORROW_OFFTIME
			/*
			 * check if the class can borrow offtime too.
			 * borrow offtime from the top of the borrow
			 * chain if the top class is not overloaded.
			 */
			if (cl != NULL) {
				/* cutoff is taking effect, use this class as top. */
				top = cl;
				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
			}
			if (top != NULL && top->avgidle_ == top->minidle_)
				top = NULL;
			p->overtime_ = *now;
			(p->overlimit)(p, top);
#else
			p->overtime_ = *now;
			(p->overlimit)(p, NULL);
#endif
			return (0);
		}
		top = cl;
	}

	if (cl != p)
		ifd->borrowed_[ifd->qi_] = cl;
	return (1);
}
/*
 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
 *	packet-by-packet round robin.
 *
 * The heart of the weighted round-robin scheduler, which decides which
 * class next gets to send a packet.  Highest priority first, then
 * weighted round-robin within priorities.
 *
 * Each able-to-send class gets to send until its byte allocation is
 * exhausted.  Thus, the active pointer is only changed after a class has
 * exhausted its allocation.
 *
 * If the scheduler finds no class that is underlimit or able to borrow,
 * then the first class found that had a nonzero queue and is allowed to
 * borrow gets to send.
 */
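
/*
 * Illustrative note (numbers hypothetical): consider two classes at
 * one priority with w_allotment_ of 1500 and 500 bytes, both with
 * 1000-byte packets queued.  Each refill round the first class can
 * send 1500 bytes' worth before its bytes_alloc_ goes non-positive,
 * the second only 500, so over time they share the priority level
 * roughly 3:1 -- the ratio of their allotments.
 */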
static struct mbuf *
_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct rm_class *cl = NULL, *first = NULL;
	u_int deficit;
	int cpri;
	struct mbuf *m;
	struct timeval now;
	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		if (ifd->efficient_) {
			/* check if this class is overlimit */
			if (cl->undertime_.tv_sec != 0 &&
			    rmc_under_limit(cl, &now) == 0)
				first = cl;
		}
		ifd->pollcache_ = NULL;
		goto _wrr_out;
	}
	else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		deficit = 0;
		/*
		 * Loop through twice for a priority level, if some class
		 * was unable to send a packet the first round because
		 * of the weighted round-robin mechanism.
		 * During the second loop at this level, deficit==2.
		 * (This second loop is not needed if for every class,
		 * "M[cl->pri_])" times "cl->allotment" is greater than
		 * the byte size for the largest packet in the class.)
		 */
 _wrr_loop:
		cl = ifd->active_[cpri];
		KKASSERT(cl != NULL);
		do {
			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
				cl->bytes_alloc_ += cl->w_allotment_;
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now)) {
					if (cl->bytes_alloc_ > 0 || deficit > 1)
						goto _wrr_out;

					/* underlimit but no alloc */
					deficit = 1;
#if 1
					ifd->borrowed_[ifd->qi_] = NULL;
#endif
				}
				else if (first == NULL && cl->borrow_ != NULL)
					first = cl; /* borrowing candidate */
			}

			cl->bytes_alloc_ = 0;
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);
		if (deficit == 1) {
			/* first loop found an underlimit class with deficit */
			/* Loop on same priority level, with new deficit. */
			deficit = 2;
			goto _wrr_loop;
		}
	}
#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect,
	 * increase cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);

	if (!ifd->efficient_ || first == NULL)
		return (NULL);
	cl = first;
	cpri = cl->pri_;
#if 0	/* too time-consuming for nothing */
	if (cl->sleeping_)
		callout_stop(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;
	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _wrr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_wrr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		/*
		 * Update class statistics and link data.
		 */
		if (cl->bytes_alloc_ > 0)
			cl->bytes_alloc_ -= m_pktlen(m);

		if ((cl->bytes_alloc_ <= 0) || first == cl)
			ifd->active_[cl->pri_] = cl->peer_;
		else
			ifd->active_[cl->pri_] = cl;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_PPOLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}
/*
 * Dequeue & return next packet from the highest priority class that
 * has a packet to send & has enough allocation to send it.  This
 * routine is called by a driver whenever it needs a new packet to
 * output.
 */
static struct mbuf *
_rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct mbuf *m;
	int cpri;
	struct rm_class *cl, *first = NULL;
	struct timeval now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		ifd->pollcache_ = NULL;
		goto _prr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		cl = ifd->active_[cpri];
		KKASSERT(cl != NULL);
		do {
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now))
					goto _prr_out;
				if (first == NULL && cl->borrow_ != NULL)
					first = cl;
			}
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);
	}
#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect, increase
	 * cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	if (!ifd->efficient_ || first == NULL)
		return (NULL);
	cl = first;
	cpri = cl->pri_;
#if 0	/* too time-consuming for nothing */
	if (cl->sleeping_)
		callout_stop(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;
	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _prr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_prr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		ifd->active_[cpri] = cl->peer_;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}
/*
 * struct mbuf *
 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
 *	is invoked by the packet driver to get the next packet to be
 *	dequeued and output on the link.  If WRR is enabled, then the
 *	WRR dequeue next routine will determine the next packet to be sent.
 *	Otherwise, packet-by-packet round robin is invoked.
 *
 *	Returns:	NULL, if a packet is not available or if all
 *			classes are overlimit.
 *
 *			Otherwise, Pointer to the next packet.
 */
struct mbuf *
rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
{
	if (ifd->queued_ >= ifd->maxqueued_)
		return (NULL);
	else if (ifd->wrr_)
		return (_rmc_wrr_dequeue_next(ifd, mode));
	else
		return (_rmc_prr_dequeue_next(ifd, mode));
}
/*
 * Update the utilization estimate for the packet that just completed.
 * The packet's class & the parent(s) of that class all get their
 * estimators updated.  This routine is called by the driver's output-
 * packet-completion interrupt service routine.
 */

/*
 * a macro to approximate "divide by 1000" that gives 0.000999,
 * if a value has enough effective digits.
 * (on pentium, mul takes 9 cycles but div takes 46!)
 */
#define	NSEC_TO_USEC(t)	(((t) >> 10) + ((t) >> 16) + ((t) >> 17))
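
/*
 * Added arithmetic check (not in the original source): the shifts sum
 * to 1/1024 + 1/65536 + 1/131072 = 0.00099945, within 0.06% of a true
 * divide by 1000.  E.g., for a 1500-byte packet at 800 ns/byte,
 * t = 1200000 and
 *
 *	NSEC_TO_USEC(t) = 1171 + 18 + 9 = 1198 usec
 *
 * versus the exact 1200 usec.
 */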
void
rmc_update_class_util(struct rm_ifdat *ifd)
{
	int idle, avgidle, pktlen;
	int pkt_time, tidle;
	rm_class_t *cl, *borrowed;
	rm_class_t *borrows;
	struct timeval *nowp;

	/*
	 * Get the most recent completed class.
	 */
	if ((cl = ifd->class_[ifd->qo_]) == NULL)
		return;

	pktlen = ifd->curlen_[ifd->qo_];
	borrowed = ifd->borrowed_[ifd->qo_];
	borrows = borrowed;

	PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);

	/*
	 * Run estimator on class and its ancestors.
	 */
	/*
	 * rm_update_class_util is designed to be called when the
	 * transfer is completed from a xmit complete interrupt,
	 * but most drivers don't implement an upcall for that.
	 * so, just use estimated completion time.
	 * as a result, ifd->qi_ and ifd->qo_ are always synced.
	 */
	nowp = &ifd->now_[ifd->qo_];
	/* get pkt_time (for link) in usec */
#if 1 /* use approximation */
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
	pkt_time = NSEC_TO_USEC(pkt_time);
#else
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
#endif
#if 1 /* ALTQ4PPP */
	if (TV_LT(nowp, &ifd->ifnow_)) {
		int iftime;

		/*
		 * make sure the estimated completion time does not go
		 * too far.  it can happen when the link layer supports
		 * data compression or the interface speed is set to
		 * a much lower value.
		 */
		TV_DELTA(&ifd->ifnow_, nowp, iftime);
		if (iftime+pkt_time < ifd->maxiftime_) {
			TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
		} else {
			TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
		}
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#else
	if (TV_LT(nowp, &ifd->ifnow_)) {
		TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#endif
	while (cl != NULL) {
		TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
		if (idle >= 2000000)
			/*
			 * this class is idle enough, reset avgidle.
			 * (TV_DELTA returns 2000000 us when delta is large.)
			 */
			cl->avgidle_ = cl->maxidle_;

		/* get pkt_time (for class) in usec */
#if 1 /* use approximation */
		pkt_time = pktlen * cl->ns_per_byte_;
		pkt_time = NSEC_TO_USEC(pkt_time);
#else
		pkt_time = pktlen * cl->ns_per_byte_ / 1000;
#endif
		idle -= pkt_time;

		avgidle = cl->avgidle_;
		avgidle += idle - (avgidle >> RM_FILTER_GAIN);
		cl->avgidle_ = avgidle;

		/* Are we overlimit ? */
		if (avgidle <= 0) {
			CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
#if 1 /* ALTQ */
			/*
			 * need some lower bound for avgidle, otherwise
			 * a borrowing class gets unbounded penalty.
			 */
			if (avgidle < cl->minidle_)
				avgidle = cl->avgidle_ = cl->minidle_;
#endif
			/* set next idle to make avgidle 0 */
			tidle = pkt_time +
				(((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
			TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
			++cl->stats_.over;
		} else {
			cl->avgidle_ =
			    (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
			cl->undertime_.tv_sec = 0;
			if (cl->sleeping_) {
				callout_stop(&cl->callout_);
				cl->sleeping_ = 0;
			}
		}

		if (borrows != NULL) {
			if (borrows != cl)
				++cl->stats_.borrows;
			else
				borrows = NULL;
		}
		cl->last_ = ifd->ifnow_;
		cl->last_pkttime_ = pkt_time;

#if 1
		if (cl->parent_ == NULL) {
			/* take stats of root class */
			PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
		}
#endif

		cl = cl->parent_;
	}
	/*
	 * Check to see if cutoff needs to be set to a new level.
	 */
	cl = ifd->class_[ifd->qo_];
	if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
#if 1 /* ALTQ */
		if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
			rmc_tl_satisfied(ifd, nowp);
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#else /* !ALTQ */
		if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
			reset_cutoff(ifd);
#ifdef notdef
			rmc_tl_satisfied(ifd, &now);
#endif
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#endif /* !ALTQ */
	}

	/*
	 * Release class slot
	 */
	ifd->borrowed_[ifd->qo_] = NULL;
	ifd->class_[ifd->qo_] = NULL;
	ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
	ifd->queued_--;
}
/*
 * void
 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
 *	over-limit action routines.  These get invoked by rmc_under_limit()
 *	if a class with packets to send is over its bandwidth limit & can't
 *	borrow from a parent class.
 *
 *	Returns:	NONE
 */
static void
rmc_drop_action(struct rm_class *cl)
{
	struct rm_ifdat *ifd = cl->ifdat_;

	KKASSERT(qlen(cl->q_) > 0);
	_rmc_dropq(cl);
	if (qempty(cl->q_))
		ifd->na_[cl->pri_]--;
}
void
rmc_dropall(struct rm_class *cl)
{
	struct rm_ifdat *ifd = cl->ifdat_;

	if (!qempty(cl->q_)) {
		_flushq(cl->q_);

		ifd->na_[cl->pri_]--;
	}
}
/*
 * void
 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
 *	delay action routine.  It is invoked via rmc_under_limit when the
 *	packet is discovered to be overlimit.
 *
 *	If the delay action is the result of the borrow class being
 *	overlimit, then delay for the offtime of the borrowing class that
 *	is overlimit.
 *
 *	Returns:	NONE
 */
void
rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
{
	int delay, t, extradelay;

	cl->stats_.overactions++;
	TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
#ifndef BORROW_OFFTIME
	delay += cl->offtime_;
#endif

	if (!cl->sleeping_) {
		CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
#ifdef BORROW_OFFTIME
		if (borrow != NULL)
			extradelay = borrow->offtime_;
		else
#endif
			extradelay = cl->offtime_;

#ifdef ALTQ
		/*
		 * XXX recalculate suspend time:
		 * current undertime is (tidle + pkt_time) calculated
		 * from the last transmission.
		 *	tidle: time required to bring avgidle back to 0
		 *	pkt_time: target waiting time for this class
		 * we need to replace pkt_time by offtime
		 */
		extradelay -= cl->last_pkttime_;
#endif
		if (extradelay > 0) {
			TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
			delay += extradelay;
		}

		cl->sleeping_ = 1;
		cl->stats_.delays++;

		/*
		 * Since packets are phased randomly with respect to the
		 * clock, 1 tick (the next clock tick) can be an arbitrarily
		 * short time so we have to wait for at least two ticks.
		 * NOTE:  If there's no other traffic, we need the timer as
		 * a 'backstop' to restart this class.
		 */
		if (delay > ustick * 2)
			t = (delay + ustick - 1) / ustick;
		else
			t = 2;
		callout_reset(&cl->callout_, t, rmc_restart, cl);
	}
}
/*
 * void
 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
 *	called by the system timer code & is responsible for checking if the
 *	class is still sleeping (it might have been restarted as a side
 *	effect of the queue scan on a packet arrival) and, if so, restarting
 *	output for the class.  Inspecting the class state & restarting output
 *	require locking the class structure.  In general the driver is
 *	responsible for locking but this is the only routine that is not
 *	called directly or indirectly from the interface driver so it has to
 *	know about system locking conventions.  Under bsd, locking is done
 *	by raising IPL to splimp so that's what's implemented here.  On a
 *	different system this would probably need to be changed.
 *
 *	Since this function is called from an independent timeout, we
 *	have to set up the lock conditions expected for the ALTQ operation.
 *	Note that the restart will probably fall through to an if_start.
 *
 *	Returns:	NONE
 */
static void
rmc_restart(void *arg)
{
	struct rm_class *cl = arg;
	struct rm_ifdat *ifd = cl->ifdat_;

	ALTQ_LOCK(ifd->ifq_);
	if (cl->sleeping_) {
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;

		if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
			CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
			(ifd->restart)(ifd->ifq_);
		}
	}
	ALTQ_UNLOCK(ifd->ifq_);
}
/*
 * void
 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
 *	handling routine for the root class of the link sharing structure.
 *
 *	Returns:	NONE
 */
static void
rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
{
	panic("rmc_root_overlimit");
}
/*
 * Packet Queue handling routines.  Eventually, this is to localize the
 *	effects on the code whether queues are red queues or droptail
 *	queues.
 */
static int
_rmc_addq(rm_class_t *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
#endif /* ALTQ_RED */

	if (cl->flags_ & RMCF_CLEARDSCP)
		write_dsfield(m, cl->pktattr_, 0);

	_addq(cl->q_, m);
	return (0);
}
/* note: _rmc_dropq is not called for red */
static void
_rmc_dropq(rm_class_t *cl)
{
	struct mbuf *m;

	if ((m = _getq(cl->q_)) != NULL)
		m_freem(m);
}
static struct mbuf *
_rmc_getq(rm_class_t *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_getq((rio_t *)cl->red_, cl->q_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_getq(cl->red_, cl->q_);
#endif
	return _getq(cl->q_);
}
static struct mbuf *
_rmc_pollq(rm_class_t *cl)
{
	return qhead(cl->q_);
}
#ifdef CBQ_TRACE
/*
 * DDB hook to trace cbq events:
 *  the last 1024 events are held in a circular buffer.
 *  use "call cbqtrace_dump(N)" to display 20 events from Nth event.
 */
void cbqtrace_dump(int);
static char *rmc_funcname(void *);

static struct rmc_funcs {
	void *func;
	char *name;
} rmc_funcs[] = {
	rmc_init,		"rmc_init",
	rmc_queue_packet,	"rmc_queue_packet",
	rmc_under_limit,	"rmc_under_limit",
	rmc_update_class_util,	"rmc_update_class_util",
	rmc_delay_action,	"rmc_delay_action",
	rmc_restart,		"rmc_restart",
	_rmc_wrr_dequeue_next,	"_rmc_wrr_dequeue_next",
	NULL,			NULL
};
static char *
rmc_funcname(void *func)
{
	struct rmc_funcs *fp;

	for (fp = rmc_funcs; fp->func != NULL; fp++) {
		if (fp->func == func)
			return (fp->name);
	}
	return ("unknown");
}
void
cbqtrace_dump(int counter)
{
	int i, *p;
	char *cp;

	counter = counter % NCBQTRACE;
	p = (int *)&cbqtrace_buffer[counter];

	for (i=0; i<20; i++) {
		kprintf("[0x%x] ", *p++);
		kprintf("%s: ", rmc_funcname((void *)*p++));
		cp = (char *)p++;
		kprintf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
		kprintf("%d\n",*p++);

		if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
			p = (int *)cbqtrace_buffer;
	}
}
#endif /* CBQ_TRACE */
#endif /* ALTQ_CBQ */