[dragonfly.git] sys/net/altq/altq_rmclass.c
1 /* @(#)rm_class.c 1.48 97/12/05 SMI */
2 /* $KAME: altq_rmclass.c,v 1.18 2003/11/06 06:32:53 kjc Exp $ */
4 /*
5 * Copyright (c) 1991-1997 Regents of the University of California.
6 * All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by the Network Research
19 * Group at Lawrence Berkeley Laboratory.
20 * 4. Neither the name of the University nor of the Laboratory may be used
21 * to endorse or promote products derived from this software without
22 * specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
  36  * LBL code modified by speer@eng.sun.com, May 1997.
37 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
40 #include "opt_altq.h"
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
44 #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
46 #include <sys/param.h>
47 #include <sys/malloc.h>
48 #include <sys/mbuf.h>
49 #include <sys/socket.h>
50 #include <sys/systm.h>
51 #include <sys/callout.h>
52 #include <sys/errno.h>
53 #include <sys/time.h>
54 #include <sys/thread.h>
56 #include <net/if.h>
58 #include <net/altq/altq.h>
59 #include <net/altq/altq_rmclass.h>
60 #include <net/altq/altq_rmclass_debug.h>
61 #include <net/altq/altq_red.h>
62 #include <net/altq/altq_rio.h>
64 #include <sys/thread2.h>
66 #ifdef CBQ_TRACE
67 static struct cbqtrace cbqtrace_buffer[NCBQTRACE+1];
68 static struct cbqtrace *cbqtrace_ptr = NULL;
69 static int cbqtrace_count;
70 #endif
73 * Local Macros
76 #define reset_cutoff(ifd) { ifd->cutoff_ = RM_MAXDEPTH; }
79 * Local routines.
82 static int rmc_satisfied(struct rm_class *, struct timeval *);
83 static void rmc_wrr_set_weights(struct rm_ifdat *);
84 static void rmc_depth_compute(struct rm_class *);
85 static void rmc_depth_recompute(rm_class_t *);
87 static struct mbuf *_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
88 static struct mbuf *_rmc_prr_dequeue_next(struct rm_ifdat *, int);
90 static int _rmc_addq(rm_class_t *, struct mbuf *);
91 static void _rmc_dropq(rm_class_t *);
92 static struct mbuf *_rmc_getq(rm_class_t *);
93 static struct mbuf *_rmc_pollq(rm_class_t *);
95 static int rmc_under_limit(struct rm_class *, struct timeval *);
96 static void rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
97 static void rmc_drop_action(struct rm_class *);
98 static void rmc_restart(void *);
99 static void rmc_root_overlimit(struct rm_class *, struct rm_class *);
101 #define BORROW_OFFTIME
103 * BORROW_OFFTIME (experimental):
104 * borrow the offtime of the class borrowing from.
105 * the reason is that when its own offtime is set, the class is unable
106 * to borrow much, especially when cutoff is taking effect.
 107  * but when the borrowed class is overloaded (avgidle is close to minidle),
108 * use the borrowing class's offtime to avoid overload.
110 #define ADJUST_CUTOFF
112 * ADJUST_CUTOFF (experimental):
113 * if no underlimit class is found due to cutoff, increase cutoff and
114 * retry the scheduling loop.
115 * also, don't invoke delay_actions while cutoff is taking effect,
116 * since a sleeping class won't have a chance to be scheduled in the
117 * next loop.
 119  * now the heuristics for setting the top-level variable (cutoff_) become:
120 * 1. if a packet arrives for a not-overlimit class, set cutoff
121 * to the depth of the class.
122 * 2. if cutoff is i, and a packet arrives for an overlimit class
123 * with an underlimit ancestor at a lower level than i (say j),
124 * then set cutoff to j.
125 * 3. at scheduling a packet, if there is no underlimit class
126 * due to the current cutoff level, increase cutoff by 1 and
127 * then try to schedule again.
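/*
 * (Editor's note, not part of the original comment: rules 1 and 2 above
 *  are applied at enqueue time in rmc_queue_packet() below, and rule 3 is
 *  applied at dequeue time in _rmc_wrr_dequeue_next() and
 *  _rmc_prr_dequeue_next() under the ADJUST_CUTOFF option.)
 */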
131 * rm_class_t *
132 * rmc_newclass(...) - Create a new resource management class at priority
133 * 'pri' on the interface given by 'ifd'.
135 * nsecPerByte is the data rate of the interface in nanoseconds/byte.
136 * E.g., 800 for a 10Mb/s ethernet. If the class gets less
137 * than 100% of the bandwidth, this number should be the
138 * 'effective' rate for the class. Let f be the
139 * bandwidth fraction allocated to this class, and let
140 * nsPerByte be the data rate of the output link in
141 * nanoseconds/byte. Then nsecPerByte is set to
142 * nsPerByte / f. E.g., 1600 (= 800 / .5)
143 * for a class that gets 50% of an ethernet's bandwidth.
145 * action the routine to call when the class is over limit.
147 * maxq max allowable queue size for class (in packets).
149 * parent parent class pointer.
151 * borrow class to borrow from (should be either 'parent' or null).
153 * maxidle max value allowed for class 'idle' time estimate (this
154 * parameter determines how large an initial burst of packets
 155  *              can be before overlimit action is invoked.)
157 * offtime how long 'delay' action will delay when class goes over
158 * limit (this parameter determines the steady-state burst
159 * size when a class is running over its limit).
161 * Maxidle and offtime have to be computed from the following: If the
162 * average packet size is s, the bandwidth fraction allocated to this
163 * class is f, we want to allow b packet bursts, and the gain of the
164 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
166 * ptime = s * nsPerByte * (1 - f) / f
167 * maxidle = ptime * (1 - g^b) / g^b
168 * minidle = -ptime * (1 / (f - 1))
 169  *      offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
171 * Operationally, it's convenient to specify maxidle & offtime in units
172 * independent of the link bandwidth so the maxidle & offtime passed to
173 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
174 * (The constant factor is a scale factor needed to make the parameters
175 * integers. This scaling also means that the 'unscaled' values of
176 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
177 * not nanoseconds.) Also note that the 'idle' filter computation keeps
178 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
179 * maxidle also must be scaled upward by this value. Thus, the passed
180 * values for maxidle and offtime can be computed as follows:
182 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
183 * offtime = offtime * 8 / (1000 * nsecPerByte)
185 * When USE_HRTIME is employed, then maxidle and offtime become:
 186  *      maxidle = maxidle * (8.0 / nsecPerByte);
187 * offtime = offtime * (8.0 / nsecPerByte);
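/*
 * Illustrative worked example (an editor's addition, not from the original
 * source; it assumes RM_FILTER_GAIN is 5, so g = 31/32):
 * for a 10Mb/s ethernet, nsPerByte = 800.  A class allocated f = 25% of
 * the link uses nsecPerByte = 800 / 0.25 = 3200.  With an average packet
 * size s = 1000 bytes and a permitted burst of b = 16 packets:
 *
 *	ptime   = 1000 * 800 * (1 - 0.25) / 0.25   = 2,400,000 ns
 *	g^b     = (31/32)^16                      ~= 0.602
 *	maxidle = 2,400,000 * (1 - 0.602) / 0.602 ~= 1,590,000 ns
 *
 * which is then rescaled as described above before being passed in:
 *
 *	maxidle = 1,590,000 * 2^5 * 8 / (1000 * 3200) ~= 127
 */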
189 struct rm_class *
190 rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
191 void (*action)(rm_class_t *, rm_class_t *), int maxq,
192 struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
193 int minidle, u_int offtime, int pktsize, int flags)
195 struct rm_class *cl;
196 struct rm_class *peer;
198 if (pri >= RM_MAXPRIO)
199 return (NULL);
200 #ifndef ALTQ_RED
201 if (flags & RMCF_RED) {
202 #ifdef ALTQ_DEBUG
203 kprintf("rmc_newclass: RED not configured for CBQ!\n");
204 #endif
205 return (NULL);
207 #endif
208 #ifndef ALTQ_RIO
209 if (flags & RMCF_RIO) {
210 #ifdef ALTQ_DEBUG
211 kprintf("rmc_newclass: RIO not configured for CBQ!\n");
212 #endif
213 return (NULL);
215 #endif
217 cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
218 callout_init(&cl->callout_);
219 cl->q_ = kmalloc(sizeof(*cl->q_), M_ALTQ, M_WAITOK | M_ZERO);
222 * Class initialization.
224 cl->children_ = NULL;
225 cl->parent_ = parent;
226 cl->borrow_ = borrow;
227 cl->leaf_ = 1;
228 cl->ifdat_ = ifd;
229 cl->pri_ = pri;
230 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
231 cl->depth_ = 0;
232 cl->qthresh_ = 0;
233 cl->ns_per_byte_ = nsecPerByte;
235 qlimit(cl->q_) = maxq;
236 qtype(cl->q_) = Q_DROPHEAD;
237 qlen(cl->q_) = 0;
238 cl->flags_ = flags;
240 #if 1 /* minidle is also scaled in ALTQ */
241 cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
242 if (cl->minidle_ > 0)
243 cl->minidle_ = 0;
244 #else
245 cl->minidle_ = minidle;
246 #endif
247 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
248 if (cl->maxidle_ == 0)
249 cl->maxidle_ = 1;
250 #if 1 /* offtime is also scaled in ALTQ */
251 cl->avgidle_ = cl->maxidle_;
252 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
253 if (cl->offtime_ == 0)
254 cl->offtime_ = 1;
255 #else
256 cl->avgidle_ = 0;
257 cl->offtime_ = (offtime * nsecPerByte) / 8;
258 #endif
259 cl->overlimit = action;
261 #ifdef ALTQ_RED
262 if (flags & (RMCF_RED|RMCF_RIO)) {
263 int red_flags, red_pkttime;
265 red_flags = 0;
266 if (flags & RMCF_ECN)
267 red_flags |= REDF_ECN;
268 #ifdef ALTQ_RIO
269 if (flags & RMCF_CLEARDSCP)
270 red_flags |= RIOF_CLEARDSCP;
271 #endif
272 red_pkttime = nsecPerByte * pktsize / 1000;
274 if (flags & RMCF_RED) {
275 cl->red_ = red_alloc(0, 0,
276 qlimit(cl->q_) * 10/100,
277 qlimit(cl->q_) * 30/100,
278 red_flags, red_pkttime);
279 if (cl->red_ != NULL)
280 qtype(cl->q_) = Q_RED;
282 #ifdef ALTQ_RIO
283 else {
284 cl->red_ = (red_t *)rio_alloc(0, NULL,
285 red_flags, red_pkttime);
286 if (cl->red_ != NULL)
287 qtype(cl->q_) = Q_RIO;
289 #endif
291 #endif /* ALTQ_RED */
294 * put the class into the class tree
296 crit_enter();
297 if ((peer = ifd->active_[pri]) != NULL) {
298 /* find the last class at this pri */
299 cl->peer_ = peer;
300 while (peer->peer_ != ifd->active_[pri])
301 peer = peer->peer_;
302 peer->peer_ = cl;
303 } else {
304 ifd->active_[pri] = cl;
305 cl->peer_ = cl;
308 if (cl->parent_) {
309 cl->next_ = parent->children_;
310 parent->children_ = cl;
311 parent->leaf_ = 0;
315 * Compute the depth of this class and its ancestors in the class
316 * hierarchy.
318 rmc_depth_compute(cl);
321 * If CBQ's WRR is enabled, then initialize the class WRR state.
323 if (ifd->wrr_) {
324 ifd->num_[pri]++;
325 ifd->alloc_[pri] += cl->allotment_;
326 rmc_wrr_set_weights(ifd);
328 crit_exit();
329 return (cl);
333 rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
334 int minidle, u_int offtime, int pktsize)
336 struct rm_ifdat *ifd;
337 u_int old_allotment;
339 ifd = cl->ifdat_;
340 old_allotment = cl->allotment_;
342 crit_enter();
343 cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
344 cl->qthresh_ = 0;
345 cl->ns_per_byte_ = nsecPerByte;
347 qlimit(cl->q_) = maxq;
349 #if 1 /* minidle is also scaled in ALTQ */
350 cl->minidle_ = (minidle * nsecPerByte) / 8;
351 if (cl->minidle_ > 0)
352 cl->minidle_ = 0;
353 #else
354 cl->minidle_ = minidle;
355 #endif
356 cl->maxidle_ = (maxidle * nsecPerByte) / 8;
357 if (cl->maxidle_ == 0)
358 cl->maxidle_ = 1;
359 #if 1 /* offtime is also scaled in ALTQ */
360 cl->avgidle_ = cl->maxidle_;
361 cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
362 if (cl->offtime_ == 0)
363 cl->offtime_ = 1;
364 #else
365 cl->avgidle_ = 0;
366 cl->offtime_ = (offtime * nsecPerByte) / 8;
367 #endif
370 * If CBQ's WRR is enabled, then initialize the class WRR state.
372 if (ifd->wrr_) {
373 ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
374 rmc_wrr_set_weights(ifd);
376 crit_exit();
377 return (0);
381 * static void
382 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
 383  *      the appropriate round robin weights for the CBQ weighted round robin
384 * algorithm.
386 * Returns: NONE
389 static void
390 rmc_wrr_set_weights(struct rm_ifdat *ifd)
392 int i;
393 struct rm_class *cl, *clh;
395 for (i = 0; i < RM_MAXPRIO; i++) {
397 * This is inverted from that of the simulator to
398 * maintain precision.
400 if (ifd->num_[i] == 0)
401 ifd->M_[i] = 0;
402 else
403 ifd->M_[i] = ifd->alloc_[i] /
404 (ifd->num_[i] * ifd->maxpkt_);
406 * Compute the weighted allotment for each class.
407 * This takes the expensive div instruction out
408 * of the main loop for the wrr scheduling path.
409 * These only get recomputed when a class comes or
410 * goes.
412 if (ifd->active_[i] != NULL) {
413 clh = cl = ifd->active_[i];
414 do {
415 /* safe-guard for slow link or alloc_ == 0 */
416 if (ifd->M_[i] == 0)
417 cl->w_allotment_ = 0;
418 else
419 cl->w_allotment_ = cl->allotment_ /
420 ifd->M_[i];
421 cl = cl->peer_;
422 } while ((cl != NULL) && (cl != clh));
428 rmc_get_weight(struct rm_ifdat *ifd, int pri)
430 if ((pri >= 0) && (pri < RM_MAXPRIO))
431 return (ifd->M_[pri]);
432 else
433 return (0);
437 * static void
438 * rmc_depth_compute(struct rm_class *cl) - This function computes the
439 * appropriate depth of class 'cl' and its ancestors.
441 * Returns: NONE
444 static void
445 rmc_depth_compute(struct rm_class *cl)
447 rm_class_t *t = cl, *p;
450 * Recompute the depth for the branch of the tree.
452 while (t != NULL) {
453 p = t->parent_;
454 if (p && (t->depth_ >= p->depth_)) {
455 p->depth_ = t->depth_ + 1;
456 t = p;
457 } else
458 t = NULL;
463 * static void
464 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
465 * the depth of the tree after a class has been deleted.
467 * Returns: NONE
470 static void
471 rmc_depth_recompute(rm_class_t *cl)
473 #if 1 /* ALTQ */
474 rm_class_t *p, *t;
476 p = cl;
477 while (p != NULL) {
478 if ((t = p->children_) == NULL) {
479 p->depth_ = 0;
480 } else {
481 int cdepth = 0;
483 while (t != NULL) {
484 if (t->depth_ > cdepth)
485 cdepth = t->depth_;
486 t = t->next_;
489 if (p->depth_ == cdepth + 1)
490 /* no change to this parent */
491 return;
493 p->depth_ = cdepth + 1;
496 p = p->parent_;
498 #else
499 rm_class_t *t;
501 if (cl->depth_ >= 1) {
502 if (cl->children_ == NULL) {
503 cl->depth_ = 0;
504 } else if ((t = cl->children_) != NULL) {
505 while (t != NULL) {
506 if (t->children_ != NULL)
507 rmc_depth_recompute(t);
508 t = t->next_;
510 } else
511 rmc_depth_compute(cl);
513 #endif
517 * void
518 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
519 * function deletes a class from the link-sharing structure and frees
520 * all resources associated with the class.
522 * Returns: NONE
525 void
526 rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
528 struct rm_class *p, *head, *previous;
530 KKASSERT(cl->children_ == NULL);
532 if (cl->sleeping_)
533 callout_stop(&cl->callout_);
535 crit_enter();
537 if (ifd->pollcache_ == cl)
538 ifd->pollcache_ = NULL;
541 * Free packets in the packet queue.
542 * XXX - this may not be a desired behavior. Packets should be
543 * re-queued.
545 rmc_dropall(cl);
 548  * If the class has a parent, then remove the class from the
 549  * parent's children chain.
551 if (cl->parent_ != NULL) {
552 head = cl->parent_->children_;
553 p = previous = head;
554 if (head->next_ == NULL) {
555 KKASSERT(head == cl);
556 cl->parent_->children_ = NULL;
557 cl->parent_->leaf_ = 1;
558 } else while (p != NULL) {
559 if (p == cl) {
560 if (cl == head)
561 cl->parent_->children_ = cl->next_;
562 else
563 previous->next_ = cl->next_;
564 cl->next_ = NULL;
565 p = NULL;
566 } else {
567 previous = p;
568 p = p->next_;
574 * Delete class from class priority peer list.
576 if ((p = ifd->active_[cl->pri_]) != NULL) {
578 * If there is more than one member of this priority
579 * level, then look for class(cl) in the priority level.
581 if (p != p->peer_) {
582 while (p->peer_ != cl)
583 p = p->peer_;
584 p->peer_ = cl->peer_;
586 if (ifd->active_[cl->pri_] == cl)
587 ifd->active_[cl->pri_] = cl->peer_;
588 } else {
589 KKASSERT(p == cl);
590 ifd->active_[cl->pri_] = NULL;
595 * Recompute the WRR weights.
597 if (ifd->wrr_) {
598 ifd->alloc_[cl->pri_] -= cl->allotment_;
599 ifd->num_[cl->pri_]--;
600 rmc_wrr_set_weights(ifd);
604 * Re-compute the depth of the tree.
606 #if 1 /* ALTQ */
607 rmc_depth_recompute(cl->parent_);
608 #else
609 rmc_depth_recompute(ifd->root_);
610 #endif
612 crit_exit();
615 * Free the class structure.
617 if (cl->red_ != NULL) {
618 #ifdef ALTQ_RIO
619 if (q_is_rio(cl->q_))
620 rio_destroy((rio_t *)cl->red_);
621 #endif
622 #ifdef ALTQ_RED
623 if (q_is_red(cl->q_))
624 red_destroy(cl->red_);
625 #endif
627 kfree(cl->q_, M_ALTQ);
628 kfree(cl, M_ALTQ);
632 * void
633 * rmc_init(...) - Initialize the resource management data structures
634 * associated with the output portion of interface 'ifp'. 'ifd' is
635 * where the structures will be built (for backwards compatibility, the
636 * structures aren't kept in the ifnet struct). 'nsecPerByte'
637 * gives the link speed (inverse of bandwidth) in nanoseconds/byte.
638 * 'restart' is the driver-specific routine that the generic 'delay
639 * until under limit' action will call to restart output. `maxq'
640 * is the queue size of the 'link' & 'default' classes. 'maxqueued'
641 * is the maximum number of packets that the resource management
642 * code will allow to be queued 'downstream' (this is typically 1).
644 * Returns: NONE
647 void
648 rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
649 void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
650 int minidle, u_int offtime, int flags)
652 int i, mtu;
655 * Initialize the CBQ tracing/debug facility.
657 CBQTRACEINIT();
659 bzero(ifd, sizeof (*ifd));
660 mtu = ifq->altq_ifp->if_mtu;
661 ifd->ifq_ = ifq;
662 ifd->restart = restart;
663 ifd->maxqueued_ = maxqueued;
664 ifd->ns_per_byte_ = nsecPerByte;
665 ifd->maxpkt_ = mtu;
666 ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
667 ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
668 #if 1
669 ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
670 if (mtu * nsecPerByte > 10 * 1000000)
671 ifd->maxiftime_ /= 4;
672 #endif
674 reset_cutoff(ifd);
675 CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);
678 * Initialize the CBQ's WRR state.
680 for (i = 0; i < RM_MAXPRIO; i++) {
681 ifd->alloc_[i] = 0;
682 ifd->M_[i] = 0;
683 ifd->num_[i] = 0;
684 ifd->na_[i] = 0;
685 ifd->active_[i] = NULL;
689 * Initialize current packet state.
691 ifd->qi_ = 0;
692 ifd->qo_ = 0;
693 for (i = 0; i < RM_MAXQUEUED; i++) {
694 ifd->class_[i] = NULL;
695 ifd->curlen_[i] = 0;
696 ifd->borrowed_[i] = NULL;
700 * Create the root class of the link-sharing structure.
702 ifd->root_ = rmc_newclass(0, ifd, nsecPerByte, rmc_root_overlimit,
703 maxq, 0, 0, maxidle, minidle, offtime, 0, 0);
704 if (ifd->root_ == NULL) {
705 kprintf("rmc_init: root class not allocated\n");
706 return ;
708 ifd->root_->depth_ = 0;
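/*
 * Illustrative usage sketch (an editor's addition, not from the original
 * source).  A hypothetical CBQ-style discipline would set up the
 * link-sharing structure roughly as follows; my_ifd, my_restart and the
 * numeric values are made-up placeholders, and maxidle/minidle/offtime
 * are assumed to have been scaled as described above rmc_newclass():
 *
 *	rmc_init(ifq, &my_ifd, 800, my_restart, 64, 1,
 *	    maxidle, minidle, offtime, RMCF_WRR);
 *	cl = rmc_newclass(1, &my_ifd, 3200, rmc_delay_action, 32,
 *	    my_ifd.root_, my_ifd.root_, maxidle, minidle, offtime, 1000, 0);
 *
 * Packets are then handed to rmc_queue_packet() on enqueue, pulled with
 * rmc_dequeue_next() on transmit, and rmc_update_class_util() is called
 * when each transmission completes.
 */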
712 * void
713 * rmc_queue_packet(struct rm_class *cl, struct mbuf *m) - Add packet given by
714 * mbuf 'm' to queue for resource class 'cl'. This routine is called
715 * by a driver's if_output routine. This routine must be called with
716 * output packet completion interrupts locked out (to avoid racing with
717 * rmc_dequeue_next).
719 * Returns: 0 on successful queueing
720 * -1 when packet drop occurs
723 rmc_queue_packet(struct rm_class *cl, struct mbuf *m)
725 struct timeval now;
726 struct rm_ifdat *ifd = cl->ifdat_;
727 int cpri = cl->pri_;
728 int is_empty = qempty(cl->q_);
730 RM_GETTIME(now);
731 if (ifd->cutoff_ > 0) {
732 if (TV_LT(&cl->undertime_, &now)) {
733 if (ifd->cutoff_ > cl->depth_)
734 ifd->cutoff_ = cl->depth_;
735 CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
737 #if 1 /* ALTQ */
738 else {
740 * the class is overlimit. if the class has
741 * underlimit ancestors, set cutoff to the lowest
742 * depth among them.
744 struct rm_class *borrow = cl->borrow_;
746 while (borrow != NULL &&
747 borrow->depth_ < ifd->cutoff_) {
748 if (TV_LT(&borrow->undertime_, &now)) {
749 ifd->cutoff_ = borrow->depth_;
750 CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
751 break;
753 borrow = borrow->borrow_;
756 #else /* !ALTQ */
757 else if ((ifd->cutoff_ > 1) && cl->borrow_) {
758 if (TV_LT(&cl->borrow_->undertime_, &now)) {
759 ifd->cutoff_ = cl->borrow_->depth_;
760 CBQTRACE(rmc_queue_packet, 'ffob',
761 cl->borrow_->depth_);
764 #endif /* !ALTQ */
767 if (_rmc_addq(cl, m) < 0)
768 /* failed */
769 return (-1);
771 if (is_empty) {
772 CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
773 ifd->na_[cpri]++;
776 if (qlen(cl->q_) > qlimit(cl->q_)) {
777 /* note: qlimit can be set to 0 or 1 */
778 rmc_drop_action(cl);
779 return (-1);
781 return (0);
785 * void
786 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
 787  *      classes to see if they are satisfied.
790 static void
791 rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
793 int i;
794 rm_class_t *p, *bp;
796 for (i = RM_MAXPRIO - 1; i >= 0; i--) {
797 if ((bp = ifd->active_[i]) != NULL) {
798 p = bp;
799 do {
800 if (!rmc_satisfied(p, now)) {
801 ifd->cutoff_ = p->depth_;
802 return;
804 p = p->peer_;
805 } while (p != bp);
809 reset_cutoff(ifd);
 813  * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
816 static int
817 rmc_satisfied(struct rm_class *cl, struct timeval *now)
819 rm_class_t *p;
821 if (cl == NULL)
822 return (1);
823 if (TV_LT(now, &cl->undertime_))
824 return (1);
825 if (cl->depth_ == 0) {
826 if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
827 return (0);
828 else
829 return (1);
831 if (cl->children_ != NULL) {
832 p = cl->children_;
833 while (p != NULL) {
834 if (!rmc_satisfied(p, now))
835 return (0);
836 p = p->next_;
840 return (1);
844 * Return 1 if class 'cl' is under limit or can borrow from a parent,
845 * 0 if overlimit. As a side-effect, this routine will invoke the
 846  * class overlimit action if the class is overlimit.
849 static int
850 rmc_under_limit(struct rm_class *cl, struct timeval *now)
852 rm_class_t *p = cl;
853 rm_class_t *top;
854 struct rm_ifdat *ifd = cl->ifdat_;
856 ifd->borrowed_[ifd->qi_] = NULL;
858 * If cl is the root class, then always return that it is
859 * underlimit. Otherwise, check to see if the class is underlimit.
861 if (cl->parent_ == NULL)
862 return (1);
864 if (cl->sleeping_) {
865 if (TV_LT(now, &cl->undertime_))
866 return (0);
868 callout_stop(&cl->callout_);
869 cl->sleeping_ = 0;
870 cl->undertime_.tv_sec = 0;
871 return (1);
874 top = NULL;
875 while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
876 if (((cl = cl->borrow_) == NULL) ||
877 (cl->depth_ > ifd->cutoff_)) {
878 #ifdef ADJUST_CUTOFF
879 if (cl != NULL)
880 /* cutoff is taking effect, just
881 return false without calling
882 the delay action. */
883 return (0);
884 #endif
885 #ifdef BORROW_OFFTIME
887 * check if the class can borrow offtime too.
888 * borrow offtime from the top of the borrow
889 * chain if the top class is not overloaded.
891 if (cl != NULL) {
892 /* cutoff is taking effect, use this class as top. */
893 top = cl;
894 CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
896 if (top != NULL && top->avgidle_ == top->minidle_)
897 top = NULL;
898 p->overtime_ = *now;
899 (p->overlimit)(p, top);
900 #else
901 p->overtime_ = *now;
902 (p->overlimit)(p, NULL);
903 #endif
904 return (0);
906 top = cl;
909 if (cl != p)
910 ifd->borrowed_[ifd->qi_] = cl;
911 return (1);
 915  * _rmc_wrr_dequeue_next() - This is the scheduler for WRR, as opposed to
 916  * packet-by-packet round robin.
918 * The heart of the weighted round-robin scheduler, which decides which
919 * class next gets to send a packet. Highest priority first, then
 920  * weighted round-robin within priorities.
922 * Each able-to-send class gets to send until its byte allocation is
923 * exhausted. Thus, the active pointer is only changed after a class has
924 * exhausted its allocation.
926 * If the scheduler finds no class that is underlimit or able to borrow,
927 * then the first class found that had a nonzero queue and is allowed to
928 * borrow gets to send.
931 static struct mbuf *
932 _rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
934 struct rm_class *cl = NULL, *first = NULL;
935 u_int deficit;
936 int cpri;
937 struct mbuf *m;
938 struct timeval now;
940 RM_GETTIME(now);
943 * if the driver polls the top of the queue and then removes
944 * the polled packet, we must return the same packet.
946 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
947 cl = ifd->pollcache_;
948 cpri = cl->pri_;
949 if (ifd->efficient_) {
950 /* check if this class is overlimit */
951 if (cl->undertime_.tv_sec != 0 &&
952 rmc_under_limit(cl, &now) == 0)
953 first = cl;
955 ifd->pollcache_ = NULL;
956 goto _wrr_out;
958 /* mode == ALTDQ_POLL || pollcache == NULL */
959 ifd->pollcache_ = NULL;
960 ifd->borrowed_[ifd->qi_] = NULL;
961 #ifdef ADJUST_CUTOFF
962 _again:
963 #endif
964 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
965 if (ifd->na_[cpri] == 0)
966 continue;
967 deficit = 0;
969 * Loop through twice for a priority level, if some class
970 * was unable to send a packet the first round because
971 * of the weighted round-robin mechanism.
972 * During the second loop at this level, deficit==2.
973 * (This second loop is not needed if for every class,
974 * "M[cl->pri_])" times "cl->allotment" is greater than
975 * the byte size for the largest packet in the class.)
977 _wrr_loop:
978 cl = ifd->active_[cpri];
979 KKASSERT(cl != NULL);
980 do {
981 if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
982 cl->bytes_alloc_ += cl->w_allotment_;
983 if (!qempty(cl->q_)) {
984 if ((cl->undertime_.tv_sec == 0) ||
985 rmc_under_limit(cl, &now)) {
986 if (cl->bytes_alloc_ > 0 || deficit > 1)
987 goto _wrr_out;
989 /* underlimit but no alloc */
990 deficit = 1;
991 #if 1
992 ifd->borrowed_[ifd->qi_] = NULL;
993 #endif
995 else if (first == NULL && cl->borrow_ != NULL)
996 first = cl; /* borrowing candidate */
999 cl->bytes_alloc_ = 0;
1000 cl = cl->peer_;
1001 } while (cl != ifd->active_[cpri]);
1003 if (deficit == 1) {
1004 /* first loop found an underlimit class with deficit */
1005 /* Loop on same priority level, with new deficit. */
1006 deficit = 2;
1007 goto _wrr_loop;
1011 #ifdef ADJUST_CUTOFF
1013 * no underlimit class found. if cutoff is taking effect,
1014 * increase cutoff and try again.
1016 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1017 ifd->cutoff_++;
1018 CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
1019 goto _again;
1021 #endif /* ADJUST_CUTOFF */
1023 * If LINK_EFFICIENCY is turned on, then the first overlimit
1024 * class we encounter will send a packet if all the classes
1025 * of the link-sharing structure are overlimit.
1027 reset_cutoff(ifd);
1028 CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);
1030 if (!ifd->efficient_ || first == NULL)
1031 return (NULL);
1033 cl = first;
1034 cpri = cl->pri_;
1035 #if 0 /* too time-consuming for nothing */
1036 if (cl->sleeping_)
1037 callout_stop(&cl->callout_);
1038 cl->sleeping_ = 0;
1039 cl->undertime_.tv_sec = 0;
1040 #endif
1041 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1042 ifd->cutoff_ = cl->borrow_->depth_;
1045  * Dequeue the packet and do the bookkeeping...
1047 _wrr_out:
1048 if (op == ALTDQ_REMOVE) {
1049 m = _rmc_getq(cl);
1050 if (m == NULL)
1051 panic("_rmc_wrr_dequeue_next");
1052 if (qempty(cl->q_))
1053 ifd->na_[cpri]--;
1056 * Update class statistics and link data.
1058 if (cl->bytes_alloc_ > 0)
1059 cl->bytes_alloc_ -= m_pktlen(m);
1061 if ((cl->bytes_alloc_ <= 0) || first == cl)
1062 ifd->active_[cl->pri_] = cl->peer_;
1063 else
1064 ifd->active_[cl->pri_] = cl;
1066 ifd->class_[ifd->qi_] = cl;
1067 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1068 ifd->now_[ifd->qi_] = now;
1069 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1070 ifd->queued_++;
1071 } else {
1072 /* mode == ALTDQ_PPOLL */
1073 m = _rmc_pollq(cl);
1074 #ifdef foo
1076 * Don't use poll cache; the poll/dequeue
1077          * model is no longer applicable to an SMP
1078 * system. e.g.
1079 * CPU-A CPU-B
1080 * : :
1081 * poll :
1082 * : poll
1083 * dequeue (+) :
1085 * The dequeue at (+) will hit the poll
1086 * cache set by CPU-B.
1088 ifd->pollcache_ = cl;
1089 #endif
1091 return (m);
1095 * Dequeue & return next packet from the highest priority class that
1096 * has a packet to send & has enough allocation to send it. This
1097 * routine is called by a driver whenever it needs a new packet to
1098 * output.
1100 static struct mbuf *
1101 _rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
1103 struct mbuf *m;
1104 int cpri;
1105 struct rm_class *cl, *first = NULL;
1106 struct timeval now;
1108 RM_GETTIME(now);
1111 * if the driver polls the top of the queue and then removes
1112 * the polled packet, we must return the same packet.
1114 if (op == ALTDQ_REMOVE && ifd->pollcache_) {
1115 cl = ifd->pollcache_;
1116 cpri = cl->pri_;
1117 ifd->pollcache_ = NULL;
1118 goto _prr_out;
1120 /* mode == ALTDQ_POLL || pollcache == NULL */
1121 ifd->pollcache_ = NULL;
1122 ifd->borrowed_[ifd->qi_] = NULL;
1123 #ifdef ADJUST_CUTOFF
1124 _again:
1125 #endif
1126 for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
1127 if (ifd->na_[cpri] == 0)
1128 continue;
1129 cl = ifd->active_[cpri];
1130 KKASSERT(cl != NULL);
1131 do {
1132 if (!qempty(cl->q_)) {
1133 if ((cl->undertime_.tv_sec == 0) ||
1134 rmc_under_limit(cl, &now))
1135 goto _prr_out;
1136 if (first == NULL && cl->borrow_ != NULL)
1137 first = cl;
1139 cl = cl->peer_;
1140 } while (cl != ifd->active_[cpri]);
1143 #ifdef ADJUST_CUTOFF
1145 * no underlimit class found. if cutoff is taking effect, increase
1146 * cutoff and try again.
1148 if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
1149 ifd->cutoff_++;
1150 goto _again;
1152 #endif /* ADJUST_CUTOFF */
1154 * If LINK_EFFICIENCY is turned on, then the first overlimit
1155 * class we encounter will send a packet if all the classes
1156 * of the link-sharing structure are overlimit.
1158 reset_cutoff(ifd);
1159 if (!ifd->efficient_ || first == NULL)
1160 return (NULL);
1162 cl = first;
1163 cpri = cl->pri_;
1164 #if 0 /* too time-consuming for nothing */
1165 if (cl->sleeping_)
1166 callout_stop(&cl->callout_);
1167 cl->sleeping_ = 0;
1168 cl->undertime_.tv_sec = 0;
1169 #endif
1170 ifd->borrowed_[ifd->qi_] = cl->borrow_;
1171 ifd->cutoff_ = cl->borrow_->depth_;
1174  * Dequeue the packet and do the bookkeeping...
1176 _prr_out:
1177 if (op == ALTDQ_REMOVE) {
1178 m = _rmc_getq(cl);
1179 if (m == NULL)
1180 panic("_rmc_prr_dequeue_next");
1181 if (qempty(cl->q_))
1182 ifd->na_[cpri]--;
1184 ifd->active_[cpri] = cl->peer_;
1186 ifd->class_[ifd->qi_] = cl;
1187 ifd->curlen_[ifd->qi_] = m_pktlen(m);
1188 ifd->now_[ifd->qi_] = now;
1189 ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
1190 ifd->queued_++;
1191 } else {
1192 /* mode == ALTDQ_POLL */
1193 m = _rmc_pollq(cl);
1194 #ifdef foo
1196 * Don't use poll cache; the poll/dequeue
1197          * model is no longer applicable to an SMP
1198 * system. e.g.
1199 * CPU-A CPU-B
1200 * : :
1201 * poll :
1202 * : poll
1203 * dequeue (+) :
1205 * The dequeue at (+) will hit the poll
1206 * cache set by CPU-B.
1208 ifd->pollcache_ = cl;
1209 #endif
1211 return (m);
1215 * struct mbuf *
1216 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
1217 * is invoked by the packet driver to get the next packet to be
1218 * dequeued and output on the link. If WRR is enabled, then the
1219  *      WRR dequeue next routine will determine the next packet to be sent.
1220 * Otherwise, packet-by-packet round robin is invoked.
1222 * Returns: NULL, if a packet is not available or if all
1223 * classes are overlimit.
1225 * Otherwise, Pointer to the next packet.
1228 struct mbuf *
1229 rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
1231 if (ifd->queued_ >= ifd->maxqueued_)
1232 return (NULL);
1233 else if (ifd->wrr_)
1234 return (_rmc_wrr_dequeue_next(ifd, mode));
1235 else
1236 return (_rmc_prr_dequeue_next(ifd, mode));
1240 * Update the utilization estimate for the packet that just completed.
1241 * The packet's class & the parent(s) of that class all get their
1242 * estimators updated. This routine is called by the driver's output-
1243 * packet-completion interrupt service routine.
1247  * a macro to approximate "divide by 1000": it effectively multiplies by
1248  * 0.000999, accurate when the value has enough effective digits.
1249 * (on pentium, mul takes 9 cycles but div takes 46!)
1251 #define NSEC_TO_USEC(t) (((t) >> 10) + ((t) >> 16) + ((t) >> 17))
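/*
 * (Editor's note, worked arithmetic for the approximation above:
 *  1/2^10 + 1/2^16 + 1/2^17 = 0.00097656 + 0.00001526 + 0.00000763
 *  ~= 0.000999, i.e. within roughly 0.1% of a true divide by 1000.)
 */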
1252 void
1253 rmc_update_class_util(struct rm_ifdat *ifd)
1255 int idle, avgidle, pktlen;
1256 int pkt_time, tidle;
1257 rm_class_t *cl, *borrowed;
1258 rm_class_t *borrows;
1259 struct timeval *nowp;
1262 * Get the most recent completed class.
1264 if ((cl = ifd->class_[ifd->qo_]) == NULL)
1265 return;
1267 pktlen = ifd->curlen_[ifd->qo_];
1268 borrowed = ifd->borrowed_[ifd->qo_];
1269 borrows = borrowed;
1271 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1274 * Run estimator on class and its ancestors.
1277          * rmc_update_class_util is designed to be called when the
1278          * transfer is completed from an xmit complete interrupt,
1279          * but most drivers don't implement an upcall for that.
1280          * so, just use estimated completion time.
1281          * as a result, ifd->qi_ and ifd->qo_ are always synced.
1283 nowp = &ifd->now_[ifd->qo_];
1284 /* get pkt_time (for link) in usec */
1285 #if 1 /* use approximation */
1286 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
1287 pkt_time = NSEC_TO_USEC(pkt_time);
1288 #else
1289 pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
1290 #endif
1291 #if 1 /* ALTQ4PPP */
1292 if (TV_LT(nowp, &ifd->ifnow_)) {
1293 int iftime;
1296 * make sure the estimated completion time does not go
1297 * too far. it can happen when the link layer supports
1298 * data compression or the interface speed is set to
1299 * a much lower value.
1301 TV_DELTA(&ifd->ifnow_, nowp, iftime);
1302 if (iftime+pkt_time < ifd->maxiftime_) {
1303 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1304 } else {
1305 TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
1307 } else {
1308 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1310 #else
1311 if (TV_LT(nowp, &ifd->ifnow_)) {
1312 TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
1313 } else {
1314 TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
1316 #endif
1318 while (cl != NULL) {
1319 TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
1320 if (idle >= 2000000)
1322 * this class is idle enough, reset avgidle.
1323 * (TV_DELTA returns 2000000 us when delta is large.)
1325 cl->avgidle_ = cl->maxidle_;
1327 /* get pkt_time (for class) in usec */
1328 #if 1 /* use approximation */
1329 pkt_time = pktlen * cl->ns_per_byte_;
1330 pkt_time = NSEC_TO_USEC(pkt_time);
1331 #else
1332 pkt_time = pktlen * cl->ns_per_byte_ / 1000;
1333 #endif
1334 idle -= pkt_time;
1336 avgidle = cl->avgidle_;
1337 avgidle += idle - (avgidle >> RM_FILTER_GAIN);
1338 cl->avgidle_ = avgidle;
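/*
 * (Editor's note: since avgidle_ is kept scaled up by 2^RM_FILTER_GAIN,
 *  the update above is the EWMA
 *      avg = g * avg + (1 - g) * idle
 *  with g = 1 - 2^(-RM_FILTER_GAIN), i.e. the filter gain described in
 *  the comment above rmc_newclass().)
 */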
1340 /* Are we overlimit ? */
1341 if (avgidle <= 0) {
1342 CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
1343 #if 1 /* ALTQ */
1345 * need some lower bound for avgidle, otherwise
1346 * a borrowing class gets unbounded penalty.
1348 if (avgidle < cl->minidle_)
1349 avgidle = cl->avgidle_ = cl->minidle_;
1350 #endif
1351 /* set next idle to make avgidle 0 */
1352 tidle = pkt_time +
1353 (((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
1354 TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
1355 ++cl->stats_.over;
1356 } else {
1357 cl->avgidle_ =
1358 (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
1359 cl->undertime_.tv_sec = 0;
1360 if (cl->sleeping_) {
1361 callout_stop(&cl->callout_);
1362 cl->sleeping_ = 0;
1366 if (borrows != NULL) {
1367 if (borrows != cl)
1368 ++cl->stats_.borrows;
1369 else
1370 borrows = NULL;
1372 cl->last_ = ifd->ifnow_;
1373 cl->last_pkttime_ = pkt_time;
1375 #if 1
1376 if (cl->parent_ == NULL) {
1377 /* take stats of root class */
1378 PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
1380 #endif
1382 cl = cl->parent_;
1386  * Check to see if cutoff needs to be set to a new level.
1388 cl = ifd->class_[ifd->qo_];
1389 if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
1390 #if 1 /* ALTQ */
1391 if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
1392 rmc_tl_satisfied(ifd, nowp);
1393 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1394 } else {
1395 ifd->cutoff_ = borrowed->depth_;
1396 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1398 #else /* !ALTQ */
1399 if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
1400 reset_cutoff(ifd);
1401 #ifdef notdef
1402 rmc_tl_satisfied(ifd, &now);
1403 #endif
1404 CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
1405 } else {
1406 ifd->cutoff_ = borrowed->depth_;
1407 CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
1409 #endif /* !ALTQ */
1413 * Release class slot
1415 ifd->borrowed_[ifd->qo_] = NULL;
1416 ifd->class_[ifd->qo_] = NULL;
1417 ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
1418 ifd->queued_--;
1422 * void
1423 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
1424 * over-limit action routines. These get invoked by rmc_under_limit()
1425  *      if a class with packets to send is over its bandwidth limit & can't
1426 * borrow from a parent class.
1428 * Returns: NONE
1431 static void
1432 rmc_drop_action(struct rm_class *cl)
1434 struct rm_ifdat *ifd = cl->ifdat_;
1436 KKASSERT(qlen(cl->q_) > 0);
1437 _rmc_dropq(cl);
1438 if (qempty(cl->q_))
1439 ifd->na_[cl->pri_]--;
1442 void
1443 rmc_dropall(struct rm_class *cl)
1445 struct rm_ifdat *ifd = cl->ifdat_;
1447 if (!qempty(cl->q_)) {
1448 _flushq(cl->q_);
1450 ifd->na_[cl->pri_]--;
1455 * void
1456 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
1457 * delay action routine. It is invoked via rmc_under_limit when the
1458  *      packet is discovered to be overlimit.
1460  * If the delay action is the result of the borrow class being overlimit, then
1461 * delay for the offtime of the borrowing class that is overlimit.
1463 * Returns: NONE
1466 void
1467 rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
1469 int delay, t, extradelay;
1471 cl->stats_.overactions++;
1472 TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
1473 #ifndef BORROW_OFFTIME
1474 delay += cl->offtime_;
1475 #endif
1477 if (!cl->sleeping_) {
1478 CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
1479 #ifdef BORROW_OFFTIME
1480 if (borrow != NULL)
1481 extradelay = borrow->offtime_;
1482 else
1483 #endif
1484 extradelay = cl->offtime_;
1486 #ifdef ALTQ
1488 * XXX recalculate suspend time:
1489 * current undertime is (tidle + pkt_time) calculated
1490 * from the last transmission.
1491 * tidle: time required to bring avgidle back to 0
1492 * pkt_time: target waiting time for this class
1493 * we need to replace pkt_time by offtime
1495 extradelay -= cl->last_pkttime_;
1496 #endif
1497 if (extradelay > 0) {
1498 TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
1499 delay += extradelay;
1502 cl->sleeping_ = 1;
1503 cl->stats_.delays++;
1506 * Since packets are phased randomly with respect to the
1507 * clock, 1 tick (the next clock tick) can be an arbitrarily
1508 * short time so we have to wait for at least two ticks.
1509 * NOTE: If there's no other traffic, we need the timer as
1510 * a 'backstop' to restart this class.
1512 if (delay > ustick * 2)
1513 t = (delay + ustick - 1) / ustick;
1514 else
1515 t = 2;
1516 callout_reset(&cl->callout_, t, rmc_restart, cl);
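/*
 * (Editor's note, illustrative arithmetic, not from the original source:
 *  assuming hz = 100 so that ustick = 10000 us per tick, a computed delay
 *  of 25000 us exceeds 2 * ustick and gives
 *  t = (25000 + 10000 - 1) / 10000 = 3 ticks, while any delay of
 *  20000 us or less is rounded up to the 2-tick minimum.)
 */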
1521 * void
1522 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
1523  *      called by the system timer code & is responsible for checking if the
1524 * class is still sleeping (it might have been restarted as a side
1525 * effect of the queue scan on a packet arrival) and, if so, restarting
1526 * output for the class. Inspecting the class state & restarting output
1527 * require locking the class structure. In general the driver is
1528 * responsible for locking but this is the only routine that is not
1529 * called directly or indirectly from the interface driver so it has
1530  *      to know about system locking conventions.  Under bsd, locking is done
1531 * by raising IPL to splimp so that's what's implemented here. On a
1532 * different system this would probably need to be changed.
1534  * Since this function is called from an independent timeout, we
1535 * have to set up the lock conditions expected for the ALTQ operation.
1536 * Note that the restart will probably fall through to an if_start.
1538 * Returns: NONE
1541 static void
1542 rmc_restart(void *arg)
1544 struct rm_class *cl = arg;
1545 struct rm_ifdat *ifd = cl->ifdat_;
1546 struct ifaltq_subque *ifsq = &ifd->ifq_->altq_subq[0];
1548 ALTQ_SQ_LOCK(ifsq);
1549 if (cl->sleeping_) {
1550 cl->sleeping_ = 0;
1551 cl->undertime_.tv_sec = 0;
1553 if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
1554 CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
1555 (ifd->restart)(ifd->ifq_);
1558 ALTQ_SQ_UNLOCK(ifsq);
1562 * void
1563  * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
1564 * handling routine for the root class of the link sharing structure.
1566 * Returns: NONE
1569 static void
1570 rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
1572 panic("rmc_root_overlimit");
1576 * Packet Queue handling routines. Eventually, this is to localize the
1577 * effects on the code whether queues are red queues or droptail
1578 * queues.
1581 static int
1582 _rmc_addq(rm_class_t *cl, struct mbuf *m)
1584 #ifdef ALTQ_RIO
1585 if (q_is_rio(cl->q_))
1586 return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
1587 #endif
1588 #ifdef ALTQ_RED
1589 if (q_is_red(cl->q_))
1590 return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
1591 #endif /* ALTQ_RED */
1593 if (cl->flags_ & RMCF_CLEARDSCP)
1594 write_dsfield(m, cl->pktattr_, 0);
1596 _addq(cl->q_, m);
1597 return (0);
1600 /* note: _rmc_dropq is not called for red */
1601 static void
1602 _rmc_dropq(rm_class_t *cl)
1604 struct mbuf *m;
1606 if ((m = _getq(cl->q_)) != NULL)
1607 m_freem(m);
1610 static struct mbuf *
1611 _rmc_getq(rm_class_t *cl)
1613 #ifdef ALTQ_RIO
1614 if (q_is_rio(cl->q_))
1615 return rio_getq((rio_t *)cl->red_, cl->q_);
1616 #endif
1617 #ifdef ALTQ_RED
1618 if (q_is_red(cl->q_))
1619 return red_getq(cl->red_, cl->q_);
1620 #endif
1621 return _getq(cl->q_);
1624 static struct mbuf *
1625 _rmc_pollq(rm_class_t *cl)
1627 return qhead(cl->q_);
1630 #ifdef CBQ_TRACE
1632 * DDB hook to trace cbq events:
1633 * the last 1024 events are held in a circular buffer.
1634  * use "call cbqtrace_dump(N)" to display 20 events from the Nth event.
1636 void cbqtrace_dump(int);
1637 static char *rmc_funcname(void *);
1639 static struct rmc_funcs {
1640 void *func;
1641 char *name;
1642 } rmc_funcs[] = {
1643 rmc_init, "rmc_init",
1644 rmc_queue_packet, "rmc_queue_packet",
1645 rmc_under_limit, "rmc_under_limit",
1646 rmc_update_class_util, "rmc_update_class_util",
1647 rmc_delay_action, "rmc_delay_action",
1648 rmc_restart, "rmc_restart",
1649 _rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next",
1650 NULL, NULL
1653 static char *
1654 rmc_funcname(void *func)
1656 struct rmc_funcs *fp;
1658 for (fp = rmc_funcs; fp->func != NULL; fp++) {
1659 if (fp->func == func)
1660 return (fp->name);
1663 return ("unknown");
1666 void
1667 cbqtrace_dump(int counter)
1669 int i, *p;
1670 char *cp;
1672 counter = counter % NCBQTRACE;
1673 p = (int *)&cbqtrace_buffer[counter];
1675 for (i=0; i<20; i++) {
1676 kprintf("[0x%x] ", *p++);
1677 kprintf("%s: ", rmc_funcname((void *)*p++));
1678 cp = (char *)p++;
1679 kprintf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
1680 kprintf("%d\n",*p++);
1682 if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
1683 p = (int *)cbqtrace_buffer;
1686 #endif /* CBQ_TRACE */
1687 #endif /* ALTQ_CBQ */