/*	$KAME: altq_hfsc.c,v 1.25 2004/04/17 10:54:48 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#include <sys/thread.h>

#include <net/if.h>
#include <net/ifq_var.h>
#include <netinet/in.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>
#include <net/altq/altq_hfsc.h>

#include <sys/thread2.h>
#define HFSC_SUBQ_INDEX		ALTQ_SUBQ_INDEX_DEFAULT
#define HFSC_LOCK(ifq) \
    ALTQ_SQ_LOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])
#define HFSC_UNLOCK(ifq) \
    ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[HFSC_SUBQ_INDEX])
/*
 * function prototypes
 */
static int	hfsc_clear_interface(struct hfsc_if *);
static int	hfsc_request(struct ifaltq_subque *, int, void *);
static void	hfsc_purge(struct hfsc_if *);
static struct hfsc_class *hfsc_class_create(struct hfsc_if *,
					    struct service_curve *,
					    struct service_curve *,
					    struct service_curve *,
					    struct hfsc_class *, int, int, int);
static int	hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class *hfsc_nextclass(struct hfsc_class *);
static int	hfsc_enqueue(struct ifaltq_subque *, struct mbuf *,
			     struct altq_pktattr *);
static struct mbuf *hfsc_dequeue(struct ifaltq_subque *, int);

static int	hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf *hfsc_getq(struct hfsc_class *);
static struct mbuf *hfsc_pollq(struct hfsc_class *);
static void	hfsc_purgeq(struct hfsc_class *);

static void	update_cfmin(struct hfsc_class *);
static void	set_active(struct hfsc_class *, int);
static void	set_passive(struct hfsc_class *);

static void	init_ed(struct hfsc_class *, int);
static void	update_ed(struct hfsc_class *, int);
static void	update_d(struct hfsc_class *, int);
static void	init_vf(struct hfsc_class *, int);
static void	update_vf(struct hfsc_class *, int, uint64_t);
static ellist_t *ellist_alloc(void);
static void	ellist_destroy(ellist_t *);
static void	ellist_insert(struct hfsc_class *);
static void	ellist_remove(struct hfsc_class *);
static void	ellist_update(struct hfsc_class *);
struct hfsc_class *ellist_get_mindl(ellist_t *, uint64_t);
static actlist_t *actlist_alloc(void);
static void	actlist_destroy(actlist_t *);
static void	actlist_insert(struct hfsc_class *);
static void	actlist_remove(struct hfsc_class *);
static void	actlist_update(struct hfsc_class *);

static struct hfsc_class *actlist_firstfit(struct hfsc_class *, uint64_t);

static __inline uint64_t	seg_x2y(uint64_t, uint64_t);
static __inline uint64_t	seg_y2x(uint64_t, uint64_t);
static __inline uint64_t	m2sm(u_int);
static __inline uint64_t	m2ism(u_int);
static __inline uint64_t	d2dx(u_int);
static u_int			sm2m(uint64_t);
static u_int			dx2d(uint64_t);

static void	sc2isc(struct service_curve *, struct internal_sc *);
static void	rtsc_init(struct runtime_sc *, struct internal_sc *,
			  uint64_t, uint64_t);
static uint64_t	rtsc_y2x(struct runtime_sc *, uint64_t);
static uint64_t	rtsc_x2y(struct runtime_sc *, uint64_t);
static void	rtsc_min(struct runtime_sc *, struct internal_sc *,
			 uint64_t, uint64_t);

static void	get_class_stats(struct hfsc_classstats *, struct hfsc_class *);
static struct hfsc_class *clh_to_clp(struct hfsc_if *, uint32_t);

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define	HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */
int
hfsc_pfattach(struct pf_altq *a, struct ifaltq *ifq)
{
	return altq_attach(ifq, ALTQT_HFSC, a->altq_disc, ifq_mapsubq_default,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
}
int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	ifnet_lock();

	if ((ifp = ifunit(a->ifname)) == NULL) {
		ifnet_unlock();
		return (EINVAL);
	}
	if (!ifq_is_ready(&ifp->if_snd)) {
		ifnet_unlock();
		return (ENODEV);
	}

	hif = kmalloc(sizeof(struct hfsc_if), M_ALTQ, M_WAITOK | M_ZERO);

	hif->hif_eligible = ellist_alloc();
	hif->hif_ifq = &ifp->if_snd;
	ifq_purge_all(&ifp->if_snd);

	ifnet_unlock();

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}
int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	hfsc_clear_interface(hif);
	hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	kfree(hif, M_ALTQ);

	return (0);
}
static int
hfsc_add_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	KKASSERT(a->qid != 0);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc, parent, a->qlimit,
	    opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}
int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	if (a->qid == 0)
		return (EINVAL);

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_add_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return error;
}
static int
hfsc_remove_queue_locked(struct pf_altq *a, struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifaltq *ifq;
	int error;

	/* XXX not MP safe */
	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);
	error = hfsc_remove_queue_locked(a, hif);
	HFSC_UNLOCK(ifq);

	return error;
}
int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	struct ifaltq *ifq;
	int error = 0;

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	ifnet_lock();

	/* XXX not MP safe */
	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL) {
		ifnet_unlock();
		return (EBADF);
	}
	ifq = hif->hif_ifq;

	HFSC_LOCK(ifq);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL) {
		HFSC_UNLOCK(ifq);
		ifnet_unlock();
		return (EINVAL);
	}

	get_class_stats(&stats, cl);

	HFSC_UNLOCK(ifq);

	ifnet_unlock();

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}
/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	if (hif->hif_rootclass == NULL)
		return (0);

	/* clear out the classes */
	while ((cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}
static int
hfsc_request(struct ifaltq_subque *ifsq, int req, void *arg)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;

	crit_enter();
	switch (req) {
	case ALTRQ_PURGE:
		if (ifsq_get_index(ifsq) == HFSC_SUBQ_INDEX) {
			hfsc_purge(hif);
		} else {
			/*
			 * Race happened, the unrelated subqueue was
			 * picked during the packet scheduler transition.
			 */
			ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		}
		break;
	}
	crit_exit();
	return (0);
}
/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl)) {
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	}
	if (ifq_is_enabled(hif->hif_ifq))
		ALTQ_SQ_CNTR_RESET(&hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX]);
}
static struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
		  struct service_curve *fsc, struct service_curve *usc,
		  struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		kprintf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = kmalloc(sizeof(*cl), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_q = kmalloc(sizeof(*cl->cl_q), M_ALTQ, M_WAITOK | M_ZERO);
	cl->cl_actc = actlist_alloc();

	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = kmalloc(sizeof(*cl->cl_rsc), M_ALTQ, M_WAITOK);
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = kmalloc(sizeof(*cl->cl_fsc), M_ALTQ, M_WAITOK);
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = kmalloc(sizeof(*cl->cl_usc), M_ALTQ, M_WAITOK);
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	crit_enter();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++) {
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == HFSC_MAX_CLASSES) {
			crit_exit();
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else if (parent->cl_children == NULL) {
		/* add this class to the children list of the parent */
		parent->cl_children = cl;
	} else {
		p = parent->cl_children;
		while (p->cl_siblings != NULL)
			p = p->cl_siblings;
		p->cl_siblings = cl;
	}
	crit_exit();

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_q != NULL)
		kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);
	return (NULL);
}
static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	struct hfsc_if *hif;
	int i;

	if (cl == NULL)
		return (0);
	hif = cl->cl_hif;

	if (is_a_parent_class(cl))
		return (EBUSY);

	crit_enter();

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl) {
			cl->cl_parent->cl_children = cl->cl_siblings;
		} else {
			do {
				if (p->cl_siblings == cl) {
					p->cl_siblings = cl->cl_siblings;
					break;
				}
			} while ((p = p->cl_siblings) != NULL);
		}
		KKASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++) {
		if (hif->hif_class_tbl[i] == cl) {
			hif->hif_class_tbl[i] = NULL;
			break;
		}
	}

	hif->hif_classes--;
	crit_exit();

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == hif->hif_rootclass)
		hif->hif_rootclass = NULL;
	if (cl == hif->hif_defaultclass)
		hif->hif_defaultclass = NULL;
	if (cl == hif->hif_pollcache)
		hif->hif_pollcache = NULL;

	if (cl->cl_usc != NULL)
		kfree(cl->cl_usc, M_ALTQ);
	if (cl->cl_fsc != NULL)
		kfree(cl->cl_fsc, M_ALTQ);
	if (cl->cl_rsc != NULL)
		kfree(cl->cl_rsc, M_ALTQ);
	kfree(cl->cl_q, M_ALTQ);
	kfree(cl, M_ALTQ);

	return (0);
}
/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL) {
		cl = cl->cl_children;
	} else if (cl->cl_siblings != NULL) {
		cl = cl->cl_siblings;
	} else {
		while ((cl = cl->cl_parent) != NULL) {
			if (cl->cl_siblings != NULL) {
				cl = cl->cl_siblings;
				break;
			}
		}
	}
	return (cl);
}
/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*ifsq_enqueue) in struct ifaltq_subque.
 */
static int
hfsc_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	int len;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return ENOBUFS;
	}

	/* grab class set by classifier */
	M_ASSERTPKTHDR(m);
	crit_enter();
	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(hif, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			crit_exit();
			return (ENOBUFS);
		}
	}
	cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		crit_exit();
		return (ENOBUFS);
	}
	ALTQ_SQ_PKTCNT_INC(ifsq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));
	crit_exit();
	return (0);
}
/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*ifsq_dequeue) in struct ifaltq_subque.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct hfsc_if *hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	uint64_t cur_time;

	if (ifsq_get_index(ifsq) != HFSC_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return NULL;
	}

	if (hif->hif_packets == 0) {
		/* no packet in the tree */
		return (NULL);
	}

	crit_enter();
	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {
		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {
				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						kprintf("%d fit but none found\n", fits);
#endif
					m = NULL;
					goto done;
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
#ifdef foo
			/*
			 * Don't use poll cache; the poll/dequeue
			 * model is no longer applicable to SMP
			 * system.  e.g.
			 *    CPU-A            CPU-B
			 *      :                :
			 *    poll               :
			 *      :              poll
			 *    dequeue (+)        :
			 *
			 * The dequeue at (+) will hit the poll
			 * cache set by CPU-B.
			 */
			hif->hif_pollcache = cl;
#endif
			m = hfsc_pollq(cl);
			goto done;
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	ALTQ_SQ_PKTCNT_DEC(ifsq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

 done:
	crit_exit();
	return (m);
}
static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}
static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}
static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		ALTQ_SQ_PKTCNT_DEC(
		    &cl->cl_hif->hif_ifq->altq_subq[HFSC_SUBQ_INDEX]);
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
	}
	KKASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}
static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}
static void
init_ed(struct hfsc_class *cl, int next_len)
{
	uint64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}
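/*
 * illustrative note on the sm1 <= sm2 test above (example values are
 * ours, not from the original code): sm1 <= sm2 means the first segment
 * is no steeper than the second, i.e. the service curve is convex.
 * a curve with m1 = 1Mbps, d = 20ms, m2 = 10Mbps is convex, so its
 * eligible curve degenerates to the single m2-slope segment (dx = dy
 * = 0); a concave curve with m1 = 10Mbps, d = 20ms, m2 = 1Mbps keeps
 * the two-segment deadline curve as its eligible curve.
 */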
static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}
static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	uint64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
update_vf(struct hfsc_class *cl, int len, uint64_t cur_time)
{
	uint64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}
static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	uint64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}
/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */
static ellist_t *
ellist_alloc(void)
{
	ellist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(ellist_t *head)
{
	kfree(head, M_ALTQ);
}

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if *hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	KKASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
ellist_get_mindl(ellist_t *head, uint64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}
/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	head = kmalloc(sizeof(*head), M_ALTQ, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(actlist_t *head)
{
	kfree(head, M_ALTQ);
}

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	KKASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	KKASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, uint64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}
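/*
 * sketch of how the fit-time test above enforces the upperlimit
 * extension described at the top of this file: actlist_firstfit()
 * returns the first backlogged child (in vt order) whose fit-time
 * cl_f has already passed.  cl_f is max(cl_myf, cl_cfmin) (see
 * update_vf()), so a child whose upperlimit curve places cl_myf in
 * the future is skipped by the link-sharing scheduler until cur_time
 * catches up with it.
 */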
/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200Hz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000      8000       800        80         8
 *  ism(500MHz) 40000      4000       400        40         4
 *  ism(200MHz) 16000      1600       160        16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SM_MASK		((1LL << SM_SHIFT) - 1)
#define	ISM_MASK	((1LL << ISM_SHIFT) - 1)
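/*
 * worked example (illustrative figures, assuming m = 10Mbps and
 * machclk_freq = 500MHz): m2sm() below computes
 *	sm = ((uint64_t)10000000 << 24) / 8 / 500000000 = 41943
 * i.e. 41943 / 2^24 ~= 0.0025 bytes per clock tick, which matches
 * 10Mbps = 1.25e6 bytes/sec at 5e8 ticks/sec.  likewise m2ism() gives
 *	ism = ((uint64_t)500000000 << 10) * 8 / 10000000 = 409600
 * i.e. 409600 / 2^10 = 400 clock ticks per byte, the ism(500MHz)
 * entry for 10Mbps in the table above.
 */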
static __inline uint64_t
seg_x2y(uint64_t x, uint64_t sm)
{
	uint64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but divide it for the upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}
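/*
 * sketch of the split above: writing
 *	x = (x >> SM_SHIFT) * 2^SM_SHIFT + (x & SM_MASK)
 * gives
 *	x * sm >> SM_SHIFT
 *	    = (x >> SM_SHIFT) * sm + ((x & SM_MASK) * sm >> SM_SHIFT)
 * up to truncation of the low SM_SHIFT bits of the second product.
 * each partial product keeps one factor below 2^SM_SHIFT (or x itself
 * reduced by SM_SHIFT bits), so neither multiplication overflows
 * 64 bits in cases where the full x * sm would.
 */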
static __inline uint64_t
seg_y2x(uint64_t y, uint64_t ism)
{
	uint64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);

	return (x);
}
static __inline uint64_t
m2sm(u_int m)
{
	uint64_t sm;

	sm = ((uint64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}

static __inline uint64_t
m2ism(u_int m)
{
	uint64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((uint64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static __inline uint64_t
d2dx(u_int d)
{
	uint64_t dx;

	dx = ((uint64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(uint64_t sm)
{
	uint64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(uint64_t dx)
{
	uint64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}
static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x,
    uint64_t y)
{
	rtsc->x = x;
	rtsc->y = y;
	rtsc->sm1 = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx = isc->dx;
	rtsc->dy = isc->dy;
	rtsc->sm2 = isc->sm2;
	rtsc->ism2 = isc->ism2;
}
/*
 * calculate the x-projection (time) of the runtime service curve at
 * which the given y-value (amount of service) is reached
 */
static uint64_t
rtsc_y2x(struct runtime_sc *rtsc, uint64_t y)
{
	uint64_t x;

	if (y < rtsc->y) {
		x = rtsc->x;
	} else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}
static uint64_t
rtsc_x2y(struct runtime_sc *rtsc, uint64_t x)
{
	uint64_t y;

	if (x <= rtsc->x) {
		y = rtsc->y;
	} else if (x <= rtsc->x + rtsc->dx) {
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	} else {
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	}
	return (y);
}
/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, uint64_t x,
    uint64_t y)
{
	uint64_t y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}
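/*
 * sketch of the intersection offset computed above: seg_x2y(t, sm) is
 * approximately t * sm >> SM_SHIFT, so the intersection condition
 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
 * becomes
 *	dx * sm1 == dx * sm2 + ((y1 - y) << SM_SHIFT)
 * which solves to
 *	dx = ((y1 - y) << SM_SHIFT) / (sm1 - sm2)
 * this is well defined here because the concave case guarantees
 * sm1 > sm2.
 */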
static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}
/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, uint32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, try optimistically the slot matching the lower bits of
	 * the handle.  if it fails, do the linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}
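/*
 * illustrative note: the fast path above mirrors the insertion policy
 * in hfsc_class_create(), which prefers slot (qid % HFSC_MAX_CLASSES).
 * for example, a handle equal to HFSC_MAX_CLASSES + 3 is probed at
 * slot 3 first; only when that slot holds a different class (a
 * collision at creation time) does the lookup fall back to the linear
 * scan of the whole table.
 */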
#endif /* ALTQ_HFSC */