1 /* $KAME: altq_hfsc.c,v 1.25 2004/04/17 10:54:48 kjc Exp $ */
2 /* $DragonFly: src/sys/net/altq/altq_hfsc.c,v 1.8 2006/12/22 23:44:55 swildner Exp $ */
4 /*
5 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
7 * Permission to use, copy, modify, and distribute this software and
8 * its documentation is hereby granted (including for commercial or
9 * for-profit use), provided that both the copyright notice and this
10 * permission notice appear in all copies of the software, derivative
11 * works, or modified versions, and any portions thereof.
13 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
14 * WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON PROVIDES THIS
15 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
16 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
21 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
22 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
23 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
25 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
26 * DAMAGE.
28 * Carnegie Mellon encourages (but does not require) users of this
29 * software to return any improvements or extensions that they make,
30 * and to grant Carnegie Mellon the rights to redistribute these
31 * changes without encumbrance.
34 * H-FSC is described in Proceedings of SIGCOMM'97,
35 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
36 * Real-Time and Priority Service"
37 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
39 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
40 * when a class has an upperlimit, the fit-time is computed from the
41 * upperlimit service curve. the link-sharing scheduler does not schedule
42 * a class whose fit-time exceeds the current time.
45 #include "opt_altq.h"
46 #include "opt_inet.h"
47 #include "opt_inet6.h"
49 #ifdef ALTQ_HFSC /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */
51 #include <sys/param.h>
52 #include <sys/malloc.h>
53 #include <sys/mbuf.h>
54 #include <sys/socket.h>
55 #include <sys/systm.h>
56 #include <sys/errno.h>
57 #include <sys/queue.h>
58 #include <sys/thread.h>
60 #include <net/if.h>
61 #include <net/ifq_var.h>
62 #include <netinet/in.h>
64 #include <net/pf/pfvar.h>
65 #include <net/altq/altq.h>
66 #include <net/altq/altq_hfsc.h>
68 #include <sys/thread2.h>
71 * function prototypes
73 static int hfsc_clear_interface(struct hfsc_if *);
74 static int hfsc_request(struct ifaltq *, int, void *);
75 static void hfsc_purge(struct hfsc_if *);
76 static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
77 struct service_curve *,
78 struct service_curve *,
79 struct service_curve *,
80 struct hfsc_class *, int, int, int);
81 static int hfsc_class_destroy(struct hfsc_class *);
82 static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
83 static int hfsc_enqueue(struct ifaltq *, struct mbuf *,
84 struct altq_pktattr *);
85 static struct mbuf *hfsc_dequeue(struct ifaltq *, struct mbuf *, int);
87 static int hfsc_addq(struct hfsc_class *, struct mbuf *);
88 static struct mbuf *hfsc_getq(struct hfsc_class *);
89 static struct mbuf *hfsc_pollq(struct hfsc_class *);
90 static void hfsc_purgeq(struct hfsc_class *);
92 static void update_cfmin(struct hfsc_class *);
93 static void set_active(struct hfsc_class *, int);
94 static void set_passive(struct hfsc_class *);
96 static void init_ed(struct hfsc_class *, int);
97 static void update_ed(struct hfsc_class *, int);
98 static void update_d(struct hfsc_class *, int);
99 static void init_vf(struct hfsc_class *, int);
100 static void update_vf(struct hfsc_class *, int, uint64_t);
101 static ellist_t *ellist_alloc(void);
102 static void ellist_destroy(ellist_t *);
103 static void ellist_insert(struct hfsc_class *);
104 static void ellist_remove(struct hfsc_class *);
105 static void ellist_update(struct hfsc_class *);
106 struct hfsc_class *ellist_get_mindl(ellist_t *, uint64_t);
107 static actlist_t *actlist_alloc(void);
108 static void actlist_destroy(actlist_t *);
109 static void actlist_insert(struct hfsc_class *);
110 static void actlist_remove(struct hfsc_class *);
111 static void actlist_update(struct hfsc_class *);
113 static struct hfsc_class *actlist_firstfit(struct hfsc_class *, uint64_t);
115 static __inline uint64_t seg_x2y(uint64_t, uint64_t);
116 static __inline uint64_t seg_y2x(uint64_t, uint64_t);
117 static __inline uint64_t m2sm(u_int);
118 static __inline uint64_t m2ism(u_int);
119 static __inline uint64_t d2dx(u_int);
120 static u_int sm2m(uint64_t);
121 static u_int dx2d(uint64_t);
123 static void sc2isc(struct service_curve *, struct internal_sc *);
124 static void rtsc_init(struct runtime_sc *, struct internal_sc *,
125 uint64_t, uint64_t);
126 static uint64_t rtsc_y2x(struct runtime_sc *, uint64_t);
127 static uint64_t rtsc_x2y(struct runtime_sc *, uint64_t);
128 static void rtsc_min(struct runtime_sc *, struct internal_sc *,
129 uint64_t, uint64_t);
131 static void get_class_stats(struct hfsc_classstats *, struct hfsc_class *);
132 static struct hfsc_class *clh_to_clp(struct hfsc_if *, uint32_t);
135 * macros
137 #define is_a_parent_class(cl) ((cl)->cl_children != NULL)
139 #define HT_INFINITY 0xffffffffffffffffLL /* infinite time value */
142 hfsc_pfattach(struct pf_altq *a)
144 struct ifnet *ifp;
145 int error;
147 if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
148 return (EINVAL);
149 crit_enter();
150 error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
151 hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
152 crit_exit();
153 return (error);
157 hfsc_add_altq(struct pf_altq *a)
159 struct hfsc_if *hif;
160 struct ifnet *ifp;
162 if ((ifp = ifunit(a->ifname)) == NULL)
163 return (EINVAL);
164 if (!ifq_is_ready(&ifp->if_snd))
165 return (ENODEV);
167 hif = kmalloc(sizeof(struct hfsc_if), M_ALTQ, M_WAITOK | M_ZERO);
169 hif->hif_eligible = ellist_alloc();
170 hif->hif_ifq = &ifp->if_snd;
171 ifq_purge(&ifp->if_snd);
173 /* keep the state in pf_altq */
174 a->altq_disc = hif;
176 return (0);
180 hfsc_remove_altq(struct pf_altq *a)
182 struct hfsc_if *hif;
184 if ((hif = a->altq_disc) == NULL)
185 return (EINVAL);
186 a->altq_disc = NULL;
188 hfsc_clear_interface(hif);
189 hfsc_class_destroy(hif->hif_rootclass);
191 ellist_destroy(hif->hif_eligible);
193 kfree(hif, M_ALTQ);
195 return (0);
199 hfsc_add_queue(struct pf_altq *a)
201 struct hfsc_if *hif;
202 struct hfsc_class *cl, *parent;
203 struct hfsc_opts *opts;
204 struct service_curve rtsc, lssc, ulsc;
206 if ((hif = a->altq_disc) == NULL)
207 return (EINVAL);
209 opts = &a->pq_u.hfsc_opts;
211 if (a->parent_qid == HFSC_NULLCLASS_HANDLE && hif->hif_rootclass == NULL)
212 parent = NULL;
213 else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
214 return (EINVAL);
216 if (a->qid == 0)
217 return (EINVAL);
219 if (clh_to_clp(hif, a->qid) != NULL)
220 return (EBUSY);
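	/*
	 * each of the three curves (real-time rtsc, link-sharing lssc,
	 * upper-limit ulsc) is a two-piece linear service curve given as
	 * (m1, d, m2): slope m1 in bits/sec for the first d msec of a
	 * backlogged period, then slope m2 in bits/sec afterwards.
	 */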
222 rtsc.m1 = opts->rtsc_m1;
223 rtsc.d = opts->rtsc_d;
224 rtsc.m2 = opts->rtsc_m2;
225 lssc.m1 = opts->lssc_m1;
226 lssc.d = opts->lssc_d;
227 lssc.m2 = opts->lssc_m2;
228 ulsc.m1 = opts->ulsc_m1;
229 ulsc.d = opts->ulsc_d;
230 ulsc.m2 = opts->ulsc_m2;
232 cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc, parent, a->qlimit,
233 opts->flags, a->qid);
234 if (cl == NULL)
235 return (ENOMEM);
237 return (0);
241 hfsc_remove_queue(struct pf_altq *a)
243 struct hfsc_if *hif;
244 struct hfsc_class *cl;
246 if ((hif = a->altq_disc) == NULL)
247 return (EINVAL);
249 if ((cl = clh_to_clp(hif, a->qid)) == NULL)
250 return (EINVAL);
252 return (hfsc_class_destroy(cl));
256 hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
258 struct hfsc_if *hif;
259 struct hfsc_class *cl;
260 struct hfsc_classstats stats;
261 int error = 0;
263 if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
264 return (EBADF);
266 if ((cl = clh_to_clp(hif, a->qid)) == NULL)
267 return (EINVAL);
269 if (*nbytes < sizeof(stats))
270 return (EINVAL);
272 get_class_stats(&stats, cl);
274 if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
275 return (error);
276 *nbytes = sizeof(stats);
277 return (0);
281 * bring the interface back to the initial state by discarding
282 * all the filters and classes except the root class.
284 static int
285 hfsc_clear_interface(struct hfsc_if *hif)
287 struct hfsc_class *cl;
289 if (hif->hif_rootclass == NULL)
290 return (0);
293 /* clear out the classes */
294 while ((cl = hif->hif_rootclass->cl_children) != NULL) {
296 * remove the first leaf class found in the hierarchy
297 * then start over
299 for (; cl != NULL; cl = hfsc_nextclass(cl)) {
300 if (!is_a_parent_class(cl)) {
301 hfsc_class_destroy(cl);
302 break;
307 return (0);
310 static int
311 hfsc_request(struct ifaltq *ifq, int req, void *arg)
313 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
315 crit_enter();
316 switch (req) {
317 case ALTRQ_PURGE:
318 hfsc_purge(hif);
319 break;
321 crit_exit();
322 return (0);
325 /* discard all the queued packets on the interface */
326 static void
327 hfsc_purge(struct hfsc_if *hif)
329 struct hfsc_class *cl;
331 for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) {
332 if (!qempty(cl->cl_q))
333 hfsc_purgeq(cl);
335 if (ifq_is_enabled(hif->hif_ifq))
336 hif->hif_ifq->ifq_len = 0;
339 struct hfsc_class *
340 hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
341 struct service_curve *fsc, struct service_curve *usc,
342 struct hfsc_class *parent, int qlimit, int flags, int qid)
344 struct hfsc_class *cl, *p;
345 int i;
347 if (hif->hif_classes >= HFSC_MAX_CLASSES)
348 return (NULL);
350 #ifndef ALTQ_RED
351 if (flags & HFCF_RED) {
352 #ifdef ALTQ_DEBUG
353 kprintf("hfsc_class_create: RED not configured for HFSC!\n");
354 #endif
355 return (NULL);
357 #endif
359 cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
360 cl->cl_q = kmalloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO);
361 cl->cl_actc = actlist_alloc();
363 if (qlimit == 0)
364 qlimit = 50; /* use default */
365 qlimit(cl->cl_q) = qlimit;
366 qtype(cl->cl_q) = Q_DROPTAIL;
367 qlen(cl->cl_q) = 0;
368 cl->cl_flags = flags;
369 #ifdef ALTQ_RED
370 if (flags & (HFCF_RED|HFCF_RIO)) {
371 int red_flags, red_pkttime;
372 u_int m2;
374 m2 = 0;
375 if (rsc != NULL && rsc->m2 > m2)
376 m2 = rsc->m2;
377 if (fsc != NULL && fsc->m2 > m2)
378 m2 = fsc->m2;
379 if (usc != NULL && usc->m2 > m2)
380 m2 = usc->m2;
382 red_flags = 0;
383 if (flags & HFCF_ECN)
384 red_flags |= REDF_ECN;
385 #ifdef ALTQ_RIO
386 if (flags & HFCF_CLEARDSCP)
387 red_flags |= RIOF_CLEARDSCP;
388 #endif
389 if (m2 < 8)
390 red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
391 else
392 red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
393 * 1000 * 1000 * 1000 / (m2 / 8);
394 if (flags & HFCF_RED) {
395 cl->cl_red = red_alloc(0, 0,
396 qlimit(cl->cl_q) * 10/100,
397 qlimit(cl->cl_q) * 30/100,
398 red_flags, red_pkttime);
399 if (cl->cl_red != NULL)
400 qtype(cl->cl_q) = Q_RED;
402 #ifdef ALTQ_RIO
403 else {
404 cl->cl_red = (red_t *)rio_alloc(0, NULL,
405 red_flags, red_pkttime);
406 if (cl->cl_red != NULL)
407 qtype(cl->cl_q) = Q_RIO;
409 #endif
411 #endif /* ALTQ_RED */
413 if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
414 cl->cl_rsc = kmalloc(sizeof(*cl->cl_rsc), M_ALTQ, M_WAITOK);
415 sc2isc(rsc, cl->cl_rsc);
416 rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
417 rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
419 if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
420 cl->cl_fsc = kmalloc(sizeof(*cl->cl_fsc), M_ALTQ, M_WAITOK);
421 if (cl->cl_fsc == NULL)
422 goto err_ret;
423 sc2isc(fsc, cl->cl_fsc);
424 rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
426 if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
427 cl->cl_usc = kmalloc(sizeof(*cl->cl_usc), M_ALTQ, M_WAITOK);
428 if (cl->cl_usc == NULL)
429 goto err_ret;
430 sc2isc(usc, cl->cl_usc);
431 rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
434 cl->cl_id = hif->hif_classid++;
435 cl->cl_handle = qid;
436 cl->cl_hif = hif;
437 cl->cl_parent = parent;
439 crit_enter();
440 hif->hif_classes++;
443 * find a free slot in the class table. if the slot matching
444 * the lower bits of qid is free, use this slot. otherwise,
445 * use the first free slot.
447 i = qid % HFSC_MAX_CLASSES;
448 if (hif->hif_class_tbl[i] == NULL)
449 hif->hif_class_tbl[i] = cl;
450 else {
451 for (i = 0; i < HFSC_MAX_CLASSES; i++) {
452 if (hif->hif_class_tbl[i] == NULL) {
453 hif->hif_class_tbl[i] = cl;
454 break;
457 if (i == HFSC_MAX_CLASSES) {
458 crit_exit();
459 goto err_ret;
463 if (flags & HFCF_DEFAULTCLASS)
464 hif->hif_defaultclass = cl;
466 if (parent == NULL) {
467 /* this is root class */
468 hif->hif_rootclass = cl;
469 } else if (parent->cl_children == NULL) {
470 /* add this class to the children list of the parent */
471 parent->cl_children = cl;
472 } else {
473 p = parent->cl_children;
474 while (p->cl_siblings != NULL)
475 p = p->cl_siblings;
476 p->cl_siblings = cl;
478 crit_exit();
480 return (cl);
482 err_ret:
483 if (cl->cl_actc != NULL)
484 actlist_destroy(cl->cl_actc);
485 if (cl->cl_red != NULL) {
486 #ifdef ALTQ_RIO
487 if (q_is_rio(cl->cl_q))
488 rio_destroy((rio_t *)cl->cl_red);
489 #endif
490 #ifdef ALTQ_RED
491 if (q_is_red(cl->cl_q))
492 red_destroy(cl->cl_red);
493 #endif
495 if (cl->cl_fsc != NULL)
496 kfree(cl->cl_fsc, M_ALTQ);
497 if (cl->cl_rsc != NULL)
498 kfree(cl->cl_rsc, M_ALTQ);
499 if (cl->cl_usc != NULL)
500 kfree(cl->cl_usc, M_ALTQ);
501 if (cl->cl_q != NULL)
502 kfree(cl->cl_q, M_ALTQ);
503 kfree(cl, M_ALTQ);
504 return (NULL);
507 static int
508 hfsc_class_destroy(struct hfsc_class *cl)
510 int i;
512 if (cl == NULL)
513 return (0);
515 if (is_a_parent_class(cl))
516 return (EBUSY);
518 crit_enter();
520 if (!qempty(cl->cl_q))
521 hfsc_purgeq(cl);
523 if (cl->cl_parent == NULL) {
524 /* this is root class */
525 } else {
526 struct hfsc_class *p = cl->cl_parent->cl_children;
528 if (p == cl) {
529 cl->cl_parent->cl_children = cl->cl_siblings;
530 } else {
531 do {
532 if (p->cl_siblings == cl) {
533 p->cl_siblings = cl->cl_siblings;
534 break;
536 } while ((p = p->cl_siblings) != NULL);
538 KKASSERT(p != NULL);
541 for (i = 0; i < HFSC_MAX_CLASSES; i++) {
542 if (cl->cl_hif->hif_class_tbl[i] == cl) {
543 cl->cl_hif->hif_class_tbl[i] = NULL;
544 break;
548 cl->cl_hif->hif_classes--;
549 crit_exit();
551 actlist_destroy(cl->cl_actc);
553 if (cl->cl_red != NULL) {
554 #ifdef ALTQ_RIO
555 if (q_is_rio(cl->cl_q))
556 rio_destroy((rio_t *)cl->cl_red);
557 #endif
558 #ifdef ALTQ_RED
559 if (q_is_red(cl->cl_q))
560 red_destroy(cl->cl_red);
561 #endif
564 if (cl == cl->cl_hif->hif_rootclass)
565 cl->cl_hif->hif_rootclass = NULL;
566 if (cl == cl->cl_hif->hif_defaultclass)
567 cl->cl_hif->hif_defaultclass = NULL;
569 if (cl->cl_usc != NULL)
570 kfree(cl->cl_usc, M_ALTQ);
571 if (cl->cl_fsc != NULL)
572 kfree(cl->cl_fsc, M_ALTQ);
573 if (cl->cl_rsc != NULL)
574 kfree(cl->cl_rsc, M_ALTQ);
575 kfree(cl->cl_q, M_ALTQ);
576 kfree(cl, M_ALTQ);
578 return (0);
582 * hfsc_nextclass returns the next class in the tree.
583 * usage:
584 * for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
585 * do_something;
587 static struct hfsc_class *
588 hfsc_nextclass(struct hfsc_class *cl)
590 if (cl->cl_children != NULL) {
591 cl = cl->cl_children;
592 } else if (cl->cl_siblings != NULL) {
593 cl = cl->cl_siblings;
594 } else {
595 while ((cl = cl->cl_parent) != NULL) {
596 if (cl->cl_siblings != NULL) {
597 cl = cl->cl_siblings;
598 break;
603 return (cl);
607 * hfsc_enqueue is an enqueue function to be registered to
608 * (*altq_enqueue) in struct ifaltq.
610 static int
611 hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
613 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
614 struct hfsc_class *cl;
615 int len;
617 /* grab class set by classifier */
618 if ((m->m_flags & M_PKTHDR) == 0) {
619 /* should not happen */
620 if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
621 m_freem(m);
622 return (ENOBUFS);
624 crit_enter();
625 if (m->m_pkthdr.fw_flags & ALTQ_MBUF_TAGGED)
626 cl = clh_to_clp(hif, m->m_pkthdr.altq_qid);
627 else
628 cl = NULL;
629 if (cl == NULL || is_a_parent_class(cl)) {
630 cl = hif->hif_defaultclass;
631 if (cl == NULL) {
632 m_freem(m);
633 crit_exit();
634 return (ENOBUFS);
637 cl->cl_pktattr = NULL;
638 len = m_pktlen(m);
639 if (hfsc_addq(cl, m) != 0) {
640 /* drop occurred. mbuf was freed in hfsc_addq. */
641 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
642 crit_exit();
643 return (ENOBUFS);
645 ifq->ifq_len++;
646 cl->cl_hif->hif_packets++;
648 /* successfully queued. */
649 if (qlen(cl->cl_q) == 1)
650 set_active(cl, m_pktlen(m));
651 crit_exit();
652 return (0);
656 * hfsc_dequeue is a dequeue function to be registered to
657 * (*altq_dequeue) in struct ifaltq.
659 * note: ALTDQ_POLL returns the next packet without removing the packet
660 * from the queue. ALTDQ_REMOVE is a normal dequeue operation.
661 * ALTDQ_REMOVE must return the same packet if called immediately
662 * after ALTDQ_POLL.
664 static struct mbuf *
665 hfsc_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
667 struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
668 struct hfsc_class *cl;
669 struct mbuf *m;
670 int len, next_len;
671 int realtime = 0;
672 uint64_t cur_time;
674 if (hif->hif_packets == 0) {
675 /* no packet in the tree */
676 return (NULL);
679 crit_enter();
680 cur_time = read_machclk();
682 if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
683 cl = hif->hif_pollcache;
684 hif->hif_pollcache = NULL;
685 /* check if the class was scheduled by real-time criteria */
686 if (cl->cl_rsc != NULL)
687 realtime = (cl->cl_e <= cur_time);
688 } else {
690 * if there are eligible classes, use real-time criteria.
691 * find the class with the minimum deadline among
692 * the eligible classes.
694 if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time)) != NULL) {
695 realtime = 1;
696 } else {
697 #ifdef ALTQ_DEBUG
698 int fits = 0;
699 #endif
701 * use link-sharing criteria
702 * get the class with the minimum vt in the hierarchy
704 cl = hif->hif_rootclass;
705 while (is_a_parent_class(cl)) {
707 cl = actlist_firstfit(cl, cur_time);
708 if (cl == NULL) {
709 #ifdef ALTQ_DEBUG
710 if (fits > 0)
711 kprintf("%d fit but none found\n",fits);
712 #endif
713 m = NULL;
714 goto done;
717 * update parent's cl_cvtmin.
718 * don't update if the new vt is smaller.
720 if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
721 cl->cl_parent->cl_cvtmin = cl->cl_vt;
722 #ifdef ALTQ_DEBUG
723 fits++;
724 #endif
728 if (op == ALTDQ_POLL) {
729 hif->hif_pollcache = cl;
730 m = hfsc_pollq(cl);
731 goto done;
735 m = hfsc_getq(cl);
736 if (m == NULL)
737 panic("hfsc_dequeue:");
738 len = m_pktlen(m);
739 cl->cl_hif->hif_packets--;
740 ifq->ifq_len--;
741 PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);
743 update_vf(cl, len, cur_time);
744 if (realtime)
745 cl->cl_cumul += len;
747 if (!qempty(cl->cl_q)) {
748 if (cl->cl_rsc != NULL) {
749 /* update ed */
750 next_len = m_pktlen(qhead(cl->cl_q));
752 if (realtime)
753 update_ed(cl, next_len);
754 else
755 update_d(cl, next_len);
757 } else {
758 /* the class becomes passive */
759 set_passive(cl);
761 done:
762 crit_exit();
763 KKASSERT(mpolled == NULL || m == mpolled);
764 return (m);
767 static int
768 hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
771 #ifdef ALTQ_RIO
772 if (q_is_rio(cl->cl_q))
773 return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
774 m, cl->cl_pktattr);
775 #endif
776 #ifdef ALTQ_RED
777 if (q_is_red(cl->cl_q))
778 return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
779 #endif
780 if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
781 m_freem(m);
782 return (-1);
785 if (cl->cl_flags & HFCF_CLEARDSCP)
786 write_dsfield(m, cl->cl_pktattr, 0);
788 _addq(cl->cl_q, m);
790 return (0);
793 static struct mbuf *
794 hfsc_getq(struct hfsc_class *cl)
796 #ifdef ALTQ_RIO
797 if (q_is_rio(cl->cl_q))
798 return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
799 #endif
800 #ifdef ALTQ_RED
801 if (q_is_red(cl->cl_q))
802 return red_getq(cl->cl_red, cl->cl_q);
803 #endif
804 return _getq(cl->cl_q);
807 static struct mbuf *
808 hfsc_pollq(struct hfsc_class *cl)
810 return qhead(cl->cl_q);
813 static void
814 hfsc_purgeq(struct hfsc_class *cl)
816 struct mbuf *m;
818 if (qempty(cl->cl_q))
819 return;
821 while ((m = _getq(cl->cl_q)) != NULL) {
822 PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
823 m_freem(m);
824 cl->cl_hif->hif_packets--;
825 cl->cl_hif->hif_ifq->ifq_len--;
827 KKASSERT(qlen(cl->cl_q) == 0);
829 update_vf(cl, 0, 0); /* remove cl from the actlist */
830 set_passive(cl);
833 static void
834 set_active(struct hfsc_class *cl, int len)
836 if (cl->cl_rsc != NULL)
837 init_ed(cl, len);
838 if (cl->cl_fsc != NULL)
839 init_vf(cl, len);
841 cl->cl_stats.period++;
844 static void
845 set_passive(struct hfsc_class *cl)
847 if (cl->cl_rsc != NULL)
848 ellist_remove(cl);
851 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
852 * needs to be called explicitly to remove a class from actlist
856 static void
857 init_ed(struct hfsc_class *cl, int next_len)
859 uint64_t cur_time;
861 cur_time = read_machclk();
863 /* update the deadline curve */
864 rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);
867 * update the eligible curve.
868 * for concave, it is equal to the deadline curve.
869 * for convex, it is a linear curve with slope m2.
871 cl->cl_eligible = cl->cl_deadline;
872 if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
873 cl->cl_eligible.dx = 0;
874 cl->cl_eligible.dy = 0;
877 /* compute e and d */
878 cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
879 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
881 ellist_insert(cl);
884 static void
885 update_ed(struct hfsc_class *cl, int next_len)
887 cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
888 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
890 ellist_update(cl);
893 static void
894 update_d(struct hfsc_class *cl, int next_len)
896 cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
899 static void
900 init_vf(struct hfsc_class *cl, int len)
902 struct hfsc_class *max_cl, *p;
903 uint64_t vt, f, cur_time;
904 int go_active;
906 cur_time = 0;
907 go_active = 1;
908 for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
909 if (go_active && cl->cl_nactive++ == 0)
910 go_active = 1;
911 else
912 go_active = 0;
914 if (go_active) {
915 max_cl = actlist_last(cl->cl_parent->cl_actc);
916 if (max_cl != NULL) {
918 * set vt to the average of the min and max
919 * classes. if the parent's period didn't
920 * change, don't decrease vt of the class.
922 vt = max_cl->cl_vt;
923 if (cl->cl_parent->cl_cvtmin != 0)
924 vt = (cl->cl_parent->cl_cvtmin + vt)/2;
926 if (cl->cl_parent->cl_vtperiod !=
927 cl->cl_parentperiod || vt > cl->cl_vt)
928 cl->cl_vt = vt;
929 } else {
931 * first child for a new parent backlog period.
932 * add parent's cvtmax to vtoff of children
933 * to make a new vt (vtoff + vt) larger than
934 * the vt in the last period for all children.
936 vt = cl->cl_parent->cl_cvtmax;
937 for (p = cl->cl_parent->cl_children; p != NULL;
938 p = p->cl_siblings)
939 p->cl_vtoff += vt;
940 cl->cl_vt = 0;
941 cl->cl_parent->cl_cvtmax = 0;
942 cl->cl_parent->cl_cvtmin = 0;
944 cl->cl_initvt = cl->cl_vt;
946 /* update the virtual curve */
947 vt = cl->cl_vt + cl->cl_vtoff;
948 rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
949 if (cl->cl_virtual.x == vt) {
950 cl->cl_virtual.x -= cl->cl_vtoff;
951 cl->cl_vtoff = 0;
953 cl->cl_vtadj = 0;
955 cl->cl_vtperiod++; /* increment vt period */
956 cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
957 if (cl->cl_parent->cl_nactive == 0)
958 cl->cl_parentperiod++;
959 cl->cl_f = 0;
961 actlist_insert(cl);
963 if (cl->cl_usc != NULL) {
964 /* class has upper limit curve */
965 if (cur_time == 0)
966 cur_time = read_machclk();
968 /* update the ulimit curve */
969 rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
970 cl->cl_total);
971 /* compute myf */
972 cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
973 cl->cl_total);
974 cl->cl_myfadj = 0;
978 if (cl->cl_myf > cl->cl_cfmin)
979 f = cl->cl_myf;
980 else
981 f = cl->cl_cfmin;
982 if (f != cl->cl_f) {
983 cl->cl_f = f;
984 update_cfmin(cl->cl_parent);
989 static void
990 update_vf(struct hfsc_class *cl, int len, uint64_t cur_time)
992 uint64_t f, myf_bound, delta;
993 int go_passive;
995 go_passive = qempty(cl->cl_q);
997 for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
998 cl->cl_total += len;
1000 if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
1001 continue;
1003 if (go_passive && --cl->cl_nactive == 0)
1004 go_passive = 1;
1005 else
1006 go_passive = 0;
1008 if (go_passive) {
1009 /* no more active child, going passive */
1011 /* update cvtmax of the parent class */
1012 if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
1013 cl->cl_parent->cl_cvtmax = cl->cl_vt;
1015 /* remove this class from the vt list */
1016 actlist_remove(cl);
1018 update_cfmin(cl->cl_parent);
1020 continue;
1024 * update vt and f
1026 cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
1027 - cl->cl_vtoff + cl->cl_vtadj;
1030 * if vt of the class is smaller than cvtmin,
1031 * the class was skipped in the past due to non-fit.
1032 * if so, we need to adjust vtadj.
1034 if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
1035 cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
1036 cl->cl_vt = cl->cl_parent->cl_cvtmin;
1039 /* update the vt list */
1040 actlist_update(cl);
1042 if (cl->cl_usc != NULL) {
1043 cl->cl_myf = cl->cl_myfadj
1044 + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
1047 * if myf lags behind by more than one clock tick
1048 * from the current time, adjust myfadj to prevent
1049 * a rate-limited class from going greedy.
1050 * in a steady state under rate-limiting, myf
1051 * fluctuates within one clock tick.
1053 myf_bound = cur_time - machclk_per_tick;
1054 if (cl->cl_myf < myf_bound) {
1055 delta = cur_time - cl->cl_myf;
1056 cl->cl_myfadj += delta;
1057 cl->cl_myf += delta;
1061 /* cl_f is max(cl_myf, cl_cfmin) */
1062 if (cl->cl_myf > cl->cl_cfmin)
1063 f = cl->cl_myf;
1064 else
1065 f = cl->cl_cfmin;
1066 if (f != cl->cl_f) {
1067 cl->cl_f = f;
1068 update_cfmin(cl->cl_parent);
1073 static void
1074 update_cfmin(struct hfsc_class *cl)
1076 struct hfsc_class *p;
1077 uint64_t cfmin;
1079 if (TAILQ_EMPTY(cl->cl_actc)) {
1080 cl->cl_cfmin = 0;
1081 return;
1083 cfmin = HT_INFINITY;
1084 TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
1085 if (p->cl_f == 0) {
1086 cl->cl_cfmin = 0;
1087 return;
1089 if (p->cl_f < cfmin)
1090 cfmin = p->cl_f;
1092 cl->cl_cfmin = cfmin;
1096 * TAILQ based ellist and actlist implementation
1097 * (ion wanted to make a calendar queue based implementation)
1100 * eligible list holds backlogged classes being sorted by their eligible times.
1101 * there is one eligible list per interface.
1104 static ellist_t *
1105 ellist_alloc(void)
1107 ellist_t *head;
1109 	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK); /* whole TAILQ head, not a pointer */
1110 TAILQ_INIT(head);
1111 return (head);
1114 static void
1115 ellist_destroy(ellist_t *head)
1117 kfree(head, M_ALTQ);
1120 static void
1121 ellist_insert(struct hfsc_class *cl)
1123 struct hfsc_if *hif = cl->cl_hif;
1124 struct hfsc_class *p;
1126 /* check the last entry first */
1127 if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
1128 p->cl_e <= cl->cl_e) {
1129 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
1130 return;
1133 TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
1134 if (cl->cl_e < p->cl_e) {
1135 TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
1136 return;
1139 KKASSERT(0); /* should not reach here */
1142 static void
1143 ellist_remove(struct hfsc_class *cl)
1145 struct hfsc_if *hif = cl->cl_hif;
1147 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1150 static void
1151 ellist_update(struct hfsc_class *cl)
1153 struct hfsc_if *hif = cl->cl_hif;
1154 struct hfsc_class *p, *last;
1157 * the eligible time of a class increases monotonically.
1158 * if the next entry has a larger eligible time, nothing to do.
1160 p = TAILQ_NEXT(cl, cl_ellist);
1161 if (p == NULL || cl->cl_e <= p->cl_e)
1162 return;
1164 /* check the last entry */
1165 last = TAILQ_LAST(hif->hif_eligible, _eligible);
1166 KKASSERT(last != NULL);
1167 if (last->cl_e <= cl->cl_e) {
1168 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1169 TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
1170 return;
1174 * the new position must be between the next entry
1175 * and the last entry
1177 while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
1178 if (cl->cl_e < p->cl_e) {
1179 TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
1180 TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
1181 return;
1184 KKASSERT(0); /* should not reach here */
1187 /* find the class with the minimum deadline among the eligible classes */
1188 struct hfsc_class *
1189 ellist_get_mindl(ellist_t *head, uint64_t cur_time)
1191 struct hfsc_class *p, *cl = NULL;
1193 TAILQ_FOREACH(p, head, cl_ellist) {
1194 if (p->cl_e > cur_time)
1195 break;
1196 if (cl == NULL || p->cl_d < cl->cl_d)
1197 cl = p;
1199 return (cl);
1203 * active children list holds backlogged child classes being sorted
1204 * by their virtual time.
1205 * each intermediate class has one active children list.
1207 static actlist_t *
1208 actlist_alloc(void)
1210 actlist_t *head;
1212 head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
1213 TAILQ_INIT(head);
1214 return (head);
1217 static void
1218 actlist_destroy(actlist_t *head)
1220 kfree(head, M_ALTQ);
1222 static void
1223 actlist_insert(struct hfsc_class *cl)
1225 struct hfsc_class *p;
1227 /* check the last entry first */
1228 if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
1229 || p->cl_vt <= cl->cl_vt) {
1230 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
1231 return;
1234 TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
1235 if (cl->cl_vt < p->cl_vt) {
1236 TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
1237 return;
1240 KKASSERT(0); /* should not reach here */
1243 static void
1244 actlist_remove(struct hfsc_class *cl)
1246 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1249 static void
1250 actlist_update(struct hfsc_class *cl)
1252 struct hfsc_class *p, *last;
1255 * the virtual time of a class increases monotonically during its
1256 * backlogged period.
1257 * if the next entry has a larger virtual time, nothing to do.
1259 p = TAILQ_NEXT(cl, cl_actlist);
1260 if (p == NULL || cl->cl_vt < p->cl_vt)
1261 return;
1263 /* check the last entry */
1264 last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
1265 KKASSERT(last != NULL);
1266 if (last->cl_vt <= cl->cl_vt) {
1267 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1268 TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
1269 return;
1273 * the new position must be between the next entry
1274 * and the last entry
1276 while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
1277 if (cl->cl_vt < p->cl_vt) {
1278 TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
1279 TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
1280 return;
1283 KKASSERT(0); /* should not reach here */
1286 static struct hfsc_class *
1287 actlist_firstfit(struct hfsc_class *cl, uint64_t cur_time)
1289 struct hfsc_class *p;
1291 TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
1292 if (p->cl_f <= cur_time)
1293 return (p);
1295 return (NULL);
1299 * service curve support functions
1301 * external service curve parameters
1302 * m: bits/sec
1303 * d: msec
1304 * internal service curve parameters
1305 * sm: (bytes/tsc_interval) << SM_SHIFT
1306 * ism: (tsc_count/byte) << ISM_SHIFT
1307 * dx: tsc_count
1309  * sm and ism are scaled (by SM_SHIFT and ISM_SHIFT) in order to keep effective digits.
1310  * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
1311 * speed. SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
1312 * digits in decimal using the following table.
1314 * bits/sec 100Kbps 1Mbps 10Mbps 100Mbps 1Gbps
1315 * ----------+-------------------------------------------------------
1316 * bytes/nsec 12.5e-6 125e-6 1250e-6 12500e-6 125000e-6
1317 * sm(500MHz) 25.0e-6 250e-6 2500e-6 25000e-6 250000e-6
1318 * sm(200MHz) 62.5e-6 625e-6 6250e-6 62500e-6 625000e-6
1320 * nsec/byte 80000 8000 800 80 8
1321 * ism(500MHz) 40000 4000 400 40 4
1322 * ism(200MHz) 16000 1600 160 16 1.6
1324 #define SM_SHIFT 24
1325 #define ISM_SHIFT 10
1327 #define SM_MASK ((1LL << SM_SHIFT) - 1)
1328 #define ISM_MASK ((1LL << ISM_SHIFT) - 1)
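/*
 * example conversion, assuming a hypothetical machclk_freq of 1GHz
 * (the real frequency is measured at boot time):
 *
 *	m = 10Mbps:  sm  = (10e6 << 24) / 8 / 1e9 = 20971
 *	                   (1.25e-3 bytes/tick scaled by 2^24)
 *	             ism = (1e9 << 10) * 8 / 10e6 = 819200
 *	                   (800 ticks/byte scaled by 2^10)
 *	d = 20msec:  dx  = 20 * 1e9 / 1000        = 2e7 ticks
 *
 * with these values seg_x2y(1e9, sm) is roughly 1.25e6 bytes, i.e.
 * one second's worth of traffic at 10Mbps.
 */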
1330 static __inline uint64_t
1331 seg_x2y(uint64_t x, uint64_t sm)
1333 uint64_t y;
1336 * compute
1337 * y = x * sm >> SM_SHIFT
1338  *	but split x into its bits above and below SM_SHIFT so the multiply cannot overflow
1340 y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
1341 return (y);
1344 static __inline uint64_t
1345 seg_y2x(uint64_t y, uint64_t ism)
1347 uint64_t x;
1349 if (y == 0)
1350 x = 0;
1351 else if (ism == HT_INFINITY)
1352 x = HT_INFINITY;
1353 else
1354 x = (y >> ISM_SHIFT) * ism + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
1356 return (x);
1359 static __inline uint64_t
1360 m2sm(u_int m)
1362 uint64_t sm;
1364 sm = ((uint64_t)m << SM_SHIFT) / 8 / machclk_freq;
1365 return (sm);
1368 static __inline uint64_t
1369 m2ism(u_int m)
1371 uint64_t ism;
1373 if (m == 0)
1374 ism = HT_INFINITY;
1375 else
1376 ism = ((uint64_t)machclk_freq << ISM_SHIFT) * 8 / m;
1377 return (ism);
1380 static __inline uint64_t
1381 d2dx(u_int d)
1383 uint64_t dx;
1385 dx = ((uint64_t)d * machclk_freq) / 1000;
1386 return (dx);
1389 static u_int
1390 sm2m(uint64_t sm)
1392 uint64_t m;
1394 m = (sm * 8 * machclk_freq) >> SM_SHIFT;
1395 return ((u_int)m);
1398 static u_int
1399 dx2d(uint64_t dx)
1401 uint64_t d;
1403 d = dx * 1000 / machclk_freq;
1404 return ((u_int)d);
1407 static void
1408 sc2isc(struct service_curve *sc, struct internal_sc *isc)
1410 isc->sm1 = m2sm(sc->m1);
1411 isc->ism1 = m2ism(sc->m1);
1412 isc->dx = d2dx(sc->d);
1413 isc->dy = seg_x2y(isc->dx, isc->sm1);
1414 isc->sm2 = m2sm(sc->m2);
1415 isc->ism2 = m2ism(sc->m2);
1419 * initialize the runtime service curve with the given internal
1420 * service curve starting at (x, y).
1422 static void
1423 rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x, uint64_t y)
1425 rtsc->x = x;
1426 rtsc->y = y;
1427 rtsc->sm1 = isc->sm1;
1428 rtsc->ism1 = isc->ism1;
1429 rtsc->dx = isc->dx;
1430 rtsc->dy = isc->dy;
1431 rtsc->sm2 = isc->sm2;
1432 rtsc->ism2 = isc->ism2;
1436 * calculate the y-projection of the runtime service curve by the
1437 * given x-projection value
1439 static uint64_t
1440 rtsc_y2x(struct runtime_sc *rtsc, uint64_t y)
1442 uint64_t x;
1444 if (y < rtsc->y) {
1445 x = rtsc->x;
1446 } else if (y <= rtsc->y + rtsc->dy) {
1447 /* x belongs to the 1st segment */
1448 if (rtsc->dy == 0)
1449 x = rtsc->x + rtsc->dx;
1450 else
1451 x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
1452 } else {
1453 /* x belongs to the 2nd segment */
1454 x = rtsc->x + rtsc->dx
1455 + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
1457 return (x);
1460 static uint64_t
1461 rtsc_x2y(struct runtime_sc *rtsc, uint64_t x)
1463 uint64_t y;
1465 if (x <= rtsc->x) {
1466 y = rtsc->y;
1467 } else if (x <= rtsc->x + rtsc->dx) {
1468 /* y belongs to the 1st segment */
1469 y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
1470 } else
1471 /* y belongs to the 2nd segment */
1472 y = rtsc->y + rtsc->dy
1473 + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
1474 return (y);
1478 * update the runtime service curve by taking the minimum of the current
1479 * runtime service curve and the service curve starting at (x, y).
1481 static void
1482 rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x, uint64_t y)
1484 uint64_t y1, y2, dx, dy;
1486 if (isc->sm1 <= isc->sm2) {
1487 /* service curve is convex */
1488 y1 = rtsc_x2y(rtsc, x);
1489 if (y1 < y)
1490 /* the current rtsc is smaller */
1491 return;
1492 rtsc->x = x;
1493 rtsc->y = y;
1494 return;
1498 * service curve is concave
1499 * compute the two y values of the current rtsc
1500 * y1: at x
1501 * y2: at (x + dx)
1503 y1 = rtsc_x2y(rtsc, x);
1504 if (y1 <= y) {
1505 /* rtsc is below isc, no change to rtsc */
1506 return;
1509 y2 = rtsc_x2y(rtsc, x + isc->dx);
1510 if (y2 >= y + isc->dy) {
1511 /* rtsc is above isc, replace rtsc by isc */
1512 rtsc->x = x;
1513 rtsc->y = y;
1514 rtsc->dx = isc->dx;
1515 rtsc->dy = isc->dy;
1516 return;
1520 * the two curves intersect
1521 * compute the offsets (dx, dy) using the reverse
1522 * function of seg_x2y()
1523 * seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
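 *	solving for dx: dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2)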
1525 dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
1527 * check if (x, y1) belongs to the 1st segment of rtsc.
1528 * if so, add the offset.
1530 if (rtsc->x + rtsc->dx > x)
1531 dx += rtsc->x + rtsc->dx - x;
1532 dy = seg_x2y(dx, isc->sm1);
1534 rtsc->x = x;
1535 rtsc->y = y;
1536 rtsc->dx = dx;
1537 rtsc->dy = dy;
1540 static void
1541 get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
1543 sp->class_id = cl->cl_id;
1544 sp->class_handle = cl->cl_handle;
1546 if (cl->cl_rsc != NULL) {
1547 sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
1548 sp->rsc.d = dx2d(cl->cl_rsc->dx);
1549 sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
1550 } else {
1551 sp->rsc.m1 = 0;
1552 sp->rsc.d = 0;
1553 sp->rsc.m2 = 0;
1555 if (cl->cl_fsc != NULL) {
1556 sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
1557 sp->fsc.d = dx2d(cl->cl_fsc->dx);
1558 sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
1559 } else {
1560 sp->fsc.m1 = 0;
1561 sp->fsc.d = 0;
1562 sp->fsc.m2 = 0;
1564 if (cl->cl_usc != NULL) {
1565 sp->usc.m1 = sm2m(cl->cl_usc->sm1);
1566 sp->usc.d = dx2d(cl->cl_usc->dx);
1567 sp->usc.m2 = sm2m(cl->cl_usc->sm2);
1568 } else {
1569 sp->usc.m1 = 0;
1570 sp->usc.d = 0;
1571 sp->usc.m2 = 0;
1574 sp->total = cl->cl_total;
1575 sp->cumul = cl->cl_cumul;
1577 sp->d = cl->cl_d;
1578 sp->e = cl->cl_e;
1579 sp->vt = cl->cl_vt;
1580 sp->f = cl->cl_f;
1582 sp->initvt = cl->cl_initvt;
1583 sp->vtperiod = cl->cl_vtperiod;
1584 sp->parentperiod = cl->cl_parentperiod;
1585 sp->nactive = cl->cl_nactive;
1586 sp->vtoff = cl->cl_vtoff;
1587 sp->cvtmax = cl->cl_cvtmax;
1588 sp->myf = cl->cl_myf;
1589 sp->cfmin = cl->cl_cfmin;
1590 sp->cvtmin = cl->cl_cvtmin;
1591 sp->myfadj = cl->cl_myfadj;
1592 sp->vtadj = cl->cl_vtadj;
1594 sp->cur_time = read_machclk();
1595 sp->machclk_freq = machclk_freq;
1597 sp->qlength = qlen(cl->cl_q);
1598 sp->qlimit = qlimit(cl->cl_q);
1599 sp->xmit_cnt = cl->cl_stats.xmit_cnt;
1600 sp->drop_cnt = cl->cl_stats.drop_cnt;
1601 sp->period = cl->cl_stats.period;
1603 sp->qtype = qtype(cl->cl_q);
1604 #ifdef ALTQ_RED
1605 if (q_is_red(cl->cl_q))
1606 red_getstats(cl->cl_red, &sp->red[0]);
1607 #endif
1608 #ifdef ALTQ_RIO
1609 if (q_is_rio(cl->cl_q))
1610 rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
1611 #endif
1614 /* convert a class handle to the corresponding class pointer */
1615 static struct hfsc_class *
1616 clh_to_clp(struct hfsc_if *hif, uint32_t chandle)
1618 int i;
1619 struct hfsc_class *cl;
1621 if (chandle == 0)
1622 return (NULL);
1624 * first, try optimistically the slot matching the lower bits of
1625 * the handle. if it fails, do the linear table search.
1627 i = chandle % HFSC_MAX_CLASSES;
1628 if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
1629 return (cl);
1630 for (i = 0; i < HFSC_MAX_CLASSES; i++)
1631 if ((cl = hif->hif_class_tbl[i]) != NULL &&
1632 cl->cl_handle == chandle)
1633 return (cl);
1634 return (NULL);
1637 #endif /* ALTQ_HFSC */