/*	$OpenBSD: pfctl_altq.c,v 1.91 2006/11/28 00:08:50 henning Exp $	*/

/*
 * Copyright (c) 2002
 *	Sony Computer Science Laboratories Inc.
 * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_mib.h>
#include <netinet/in.h>
#include <net/pf/pfvar.h>

#include <err.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>
#include <net/altq/altq_priq.h>
#include <net/altq/altq_hfsc.h>
#include <net/altq/altq_fairq.h>

#include "pfctl_parser.h"
#include "pfctl.h"

#define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))

TAILQ_HEAD(altqs, pf_altq) altqs = TAILQ_HEAD_INITIALIZER(altqs);
LIST_HEAD(gen_sc, segment) rtsc, lssc;

struct pf_altq	*qname_to_pfaltq(const char *, const char *);
u_int32_t	 qname_to_qid(const char *);

static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *);
static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
static int	check_commit_cbq(int, int, struct pf_altq *);
static int	print_cbq_opts(const struct pf_altq *);

static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *);
static int	check_commit_priq(int, int, struct pf_altq *);
static int	print_priq_opts(const struct pf_altq *);

static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *);
static int	check_commit_hfsc(int, int, struct pf_altq *);
static int	print_hfsc_opts(const struct pf_altq *,
		    const struct node_queue_opt *);

static int	eval_pfqueue_fairq(struct pfctl *, struct pf_altq *);
static int	print_fairq_opts(const struct pf_altq *,
		    const struct node_queue_opt *);
static int	check_commit_fairq(int, int, struct pf_altq *);

static void	 gsc_add_sc(struct gen_sc *, struct service_curve *);
static int	 is_gsc_under_sc(struct gen_sc *,
		     struct service_curve *);
static void	 gsc_destroy(struct gen_sc *);
static struct segment	*gsc_getentry(struct gen_sc *, double);
static int	 gsc_add_seg(struct gen_sc *, double, double, double,
		     double);
static double	 sc_x2y(struct service_curve *, double);

u_int32_t	 getifspeed(const char *);
u_long		 getifmtu(char *);
int		 eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
		     u_int32_t);
u_int32_t	 eval_bwspec(struct node_queue_bw *, u_int32_t);
void		 print_hfsc_sc(const char *, u_int, u_int, u_int,
		     const struct node_hfsc_sc *);
void		 print_fairq_sc(const char *, u_int, u_int, u_int,
		     const struct node_fairq_sc *);
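
/*
 * Every altq and queue definition seen while parsing is collected in the
 * global "altqs" tail queue.  The entry describing the interface itself
 * is stored with an empty qname, which is how pfaltq_lookup() tells it
 * apart from the per-queue entries.
 */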

void
pfaltq_store(struct pf_altq *a)
{
	struct pf_altq	*altq;

	if ((altq = malloc(sizeof(*altq))) == NULL)
		err(1, "malloc");
	memcpy(altq, a, sizeof(struct pf_altq));
	TAILQ_INSERT_TAIL(&altqs, altq, entries);
}

struct pf_altq *
pfaltq_lookup(const char *ifname)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    altq->qname[0] == 0)
			return (altq);
	}
	return (NULL);
}

struct pf_altq *
qname_to_pfaltq(const char *qname, const char *ifname)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
			return (altq);
	}
	return (NULL);
}

u_int32_t
qname_to_qid(const char *qname)
{
	struct pf_altq	*altq;

	/*
	 * We guarantee that same named queues on different interfaces
	 * have the same qid, so we do NOT need to limit matching on
	 * one interface!
	 */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
			return (altq->qid);
	}
	return (0);
}
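
/*
 * print_altq() prints the "altq on <ifname>" rule for an interface entry;
 * entries that carry a queue name are handed off to print_queue() instead.
 */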

void
print_altq(const struct pf_altq *a, unsigned level, struct node_queue_bw *bw,
    struct node_queue_opt *qopts)
{
	if (a->qname[0] != 0) {
		print_queue(a, level, bw, 1, qopts);
		return;
	}

	printf("altq on %s ", a->ifname);

	switch (a->scheduler) {
	case ALTQT_CBQ:
		if (!print_cbq_opts(a))
			printf("cbq ");
		break;
	case ALTQT_PRIQ:
		if (!print_priq_opts(a))
			printf("priq ");
		break;
	case ALTQT_HFSC:
		if (!print_hfsc_opts(a, qopts))
			printf("hfsc ");
		break;
	case ALTQT_FAIRQ:
		if (!print_fairq_opts(a, qopts))
			printf("fairq ");
		break;
	}

	if (bw != NULL && bw->bw_percent > 0) {
		if (bw->bw_percent < 100)
			printf("bandwidth %u%% ", bw->bw_percent);
	} else
		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));

	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	printf("tbrsize %u ", a->tbrsize);
}
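
/*
 * print_queue() prints a "queue" rule, indented by one space per nesting
 * level, followed by bandwidth/priority/qlimit and the scheduler-specific
 * options.
 */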

void
print_queue(const struct pf_altq *a, unsigned level, struct node_queue_bw *bw,
    int print_interface, struct node_queue_opt *qopts)
{
	unsigned	i;

	printf("queue ");
	for (i = 0; i < level; ++i)
		printf(" ");
	printf("%s ", a->qname);
	if (print_interface)
		printf("on %s ", a->ifname);
	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC ||
	    a->scheduler == ALTQT_FAIRQ) {
		if (bw != NULL && bw->bw_percent > 0) {
			if (bw->bw_percent < 100)
				printf("bandwidth %u%% ", bw->bw_percent);
		} else
			printf("bandwidth %s ", rate2str((double)a->bandwidth));
	}
	if (a->priority != DEFAULT_PRIORITY)
		printf("priority %u ", a->priority);
	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	switch (a->scheduler) {
	case ALTQT_CBQ:
		print_cbq_opts(a);
		break;
	case ALTQT_PRIQ:
		print_priq_opts(a);
		break;
	case ALTQT_HFSC:
		print_hfsc_opts(a, qopts);
		break;
	case ALTQT_FAIRQ:
		print_fairq_opts(a, qopts);
		break;
	}
}

/*
 * eval_pfaltq computes the discipline parameters.
 */
int
eval_pfaltq(struct pfctl *pf __unused, struct pf_altq *pa,
    struct node_queue_bw *bw, struct node_queue_opt *opts)
{
	u_int	rate, size, errors = 0;

	if (bw->bw_absolute > 0)
		pa->ifbandwidth = bw->bw_absolute;
	else if ((rate = getifspeed(pa->ifname)) == 0) {
		fprintf(stderr, "interface %s does not know its bandwidth, "
		    "please specify an absolute bandwidth\n",
		    pa->ifname);
		errors++;
	} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
		pa->ifbandwidth = rate;

	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);

	/* if tbrsize is not specified, use heuristics */
	if (pa->tbrsize == 0) {
		rate = pa->ifbandwidth;
		if (rate <= 1 * 1000 * 1000)
			size = 1;
		else if (rate <= 10 * 1000 * 1000)
			size = 4;
		else if (rate <= 200 * 1000 * 1000)
			size = 8;
		else
			size = 24;
		size = size * getifmtu(pa->ifname);
		if (size > 0xffff)
			size = 0xffff;
		pa->tbrsize = size;
	}
	return (errors);
}
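
/*
 * For example, with the heuristic above a 100Mbps interface with a
 * 1500-byte MTU gets tbrsize = 8 * 1500 = 12000 bytes: the 1M/10M/200M
 * thresholds select a multiplier of 1, 4, 8 or 24 MTUs, capped at
 * 0xffff bytes.
 */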

/*
 * check_commit_altq does consistency check for each interface
 */
int
check_commit_altq(int dev, int opts)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* call the discipline check for each interface. */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (altq->qname[0] == 0) {
			switch (altq->scheduler) {
			case ALTQT_CBQ:
				error = check_commit_cbq(dev, opts, altq);
				break;
			case ALTQT_PRIQ:
				error = check_commit_priq(dev, opts, altq);
				break;
			case ALTQT_HFSC:
				error = check_commit_hfsc(dev, opts, altq);
				break;
			case ALTQT_FAIRQ:
				error = check_commit_fairq(dev, opts, altq);
				break;
			default:
				break;
			}
		}
	}

	return (error);
}

/*
 * eval_pfqueue computes the queue parameters.
 */
int
eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	/* should be merged with expand_queue */
	struct pf_altq	*if_pa, *parent, *altq;
	u_int32_t	 bwsum;
	int		 error = 0;

	/* find the corresponding interface and copy fields used by queues */
	if ((if_pa = pfaltq_lookup(pa->ifname)) == NULL) {
		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
		return (1);
	}
	pa->scheduler = if_pa->scheduler;
	pa->ifbandwidth = if_pa->ifbandwidth;

	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
		fprintf(stderr, "queue %s already exists on interface %s\n",
		    pa->qname, pa->ifname);
		return (1);
	}
	pa->qid = qname_to_qid(pa->qname);

	parent = NULL;
	if (pa->parent[0] != 0) {
		parent = qname_to_pfaltq(pa->parent, pa->ifname);
		if (parent == NULL) {
			fprintf(stderr, "parent %s not found for %s\n",
			    pa->parent, pa->qname);
			return (1);
		}
		pa->parent_qid = parent->qid;
	}
	if (pa->qlimit == 0)
		pa->qlimit = DEFAULT_QLIMIT;

	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC ||
	    pa->scheduler == ALTQT_FAIRQ) {
		pa->bandwidth = eval_bwspec(bw,
		    parent == NULL ? 0 : parent->bandwidth);

		if (pa->bandwidth > pa->ifbandwidth) {
			fprintf(stderr, "bandwidth for %s higher than "
			    "interface\n", pa->qname);
			return (1);
		}
		/* check the sum of the child bandwidth is under parent's */
		if (parent != NULL) {
			if (pa->bandwidth > parent->bandwidth) {
				warnx("bandwidth for %s higher than parent",
				    pa->qname);
				return (1);
			}
			bwsum = 0;
			TAILQ_FOREACH(altq, &altqs, entries) {
				if (strncmp(altq->ifname, pa->ifname,
				    IFNAMSIZ) == 0 &&
				    altq->qname[0] != 0 &&
				    strncmp(altq->parent, pa->parent,
				    PF_QNAME_SIZE) == 0)
					bwsum += altq->bandwidth;
			}
			bwsum += pa->bandwidth;
			if (bwsum > parent->bandwidth) {
				warnx("the sum of the child bandwidth higher"
				    " than parent \"%s\"", parent->qname);
			}
		}
	}

	if (eval_queue_opts(pa, opts, parent == NULL ? 0 : parent->bandwidth))
		return (1);

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		error = eval_pfqueue_cbq(pf, pa);
		break;
	case ALTQT_PRIQ:
		error = eval_pfqueue_priq(pf, pa);
		break;
	case ALTQT_HFSC:
		error = eval_pfqueue_hfsc(pf, pa);
		break;
	case ALTQT_FAIRQ:
		error = eval_pfqueue_fairq(pf, pa);
		break;
	default:
		break;
	}
	return (error);
}

/*
 * CBQ support functions
 */
#define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
#define	RM_NS_PER_SEC	(1000000000)

static int
eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	u_int		 ifmtu;

	if (pa->priority >= CBQ_MAXPRI) {
		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
		return (-1);
	}

	ifmtu = getifmtu(pa->ifname);
	opts = &pa->pq_u.cbq_opts;

	if (opts->pktsize == 0) {	/* use default */
		opts->pktsize = ifmtu;
		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
			opts->pktsize &= ~MCLBYTES;
	} else if (opts->pktsize > ifmtu)
		opts->pktsize = ifmtu;
	if (opts->maxpktsize == 0)	/* use default */
		opts->maxpktsize = ifmtu;
	else if (opts->maxpktsize > ifmtu)
		opts->maxpktsize = ifmtu;

	if (opts->pktsize > opts->maxpktsize)
		opts->pktsize = opts->maxpktsize;

	if (pa->parent[0] == 0)
		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);

	cbq_compute_idletime(pf, pa);
	return (0);
}
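
/*
 * Note on the constants used below: RM_FILTER_GAIN = 5 makes the CBQ
 * averaging gain g = 1 - 1/2^5 = 31/32, and f is the fraction of the
 * interface bandwidth assigned to this class.  The computed maxidle,
 * minidle and offtime are rescaled (multiplied by 8 * 2^RM_FILTER_GAIN
 * and divided by nsPerByte and then by 1000) before being stored in the
 * cbq_opts handed to the kernel.
 */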

/*
 * compute ns_per_byte, maxidle, minidle, and offtime
 */
static int
cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	double		 maxidle_s, maxidle, minidle;
	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
	double		 z, g, f, gton, gtom;
	u_int		 minburst, maxburst;

	opts = &pa->pq_u.cbq_opts;
	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
	minburst = opts->minburst;
	maxburst = opts->maxburst;

	if (pa->bandwidth == 0)
		f = 0.0001;	/* small enough? */
	else
		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);

	nsPerByte = ifnsPerByte / f;
	ptime = (double)opts->pktsize * ifnsPerByte;
	cptime = ptime * (1.0 - f) / f;

	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
		/*
		 * this causes integer overflow in kernel!
		 * (bandwidth < 6Kbps when max_pkt_size=1500)
		 */
		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0)
			warnx("queue bandwidth must be larger than %s",
			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
			    (double)INT_MAX * (double)pa->ifbandwidth));
		fprintf(stderr, "cbq: queue %s is too slow!\n",
		    pa->qname);
		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
	}

	if (maxburst == 0) {	/* use default */
		if (cptime > 10.0 * 1000000)
			maxburst = 4;
		else
			maxburst = 16;
	}
	if (minburst == 0)	/* use default */
		minburst = 2;
	if (minburst > maxburst)
		minburst = maxburst;

	z = (double)(1 << RM_FILTER_GAIN);
	g = (1.0 - 1.0 / z);
	gton = pow(g, (double)maxburst);
	gtom = pow(g, (double)(minburst-1));
	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
	maxidle_s = (1.0 - g);
	if (maxidle > maxidle_s)
		maxidle = ptime * maxidle;
	else
		maxidle = ptime * maxidle_s;
	offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
	minidle = -((double)opts->maxpktsize * (double)nsPerByte);

	/* scale parameters */
	maxidle = ((maxidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);
	offtime = (offtime * 8.0) / nsPerByte *
	    pow(2.0, (double)RM_FILTER_GAIN);
	minidle = ((minidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);

	maxidle = maxidle / 1000.0;
	offtime = offtime / 1000.0;
	minidle = minidle / 1000.0;

	opts->minburst = minburst;
	opts->maxburst = maxburst;
	opts->ns_per_byte = (u_int)nsPerByte;
	opts->maxidle = (u_int)fabs(maxidle);
	opts->minidle = (int)minidle;
	opts->offtime = (u_int)fabs(offtime);

	return (0);
}
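
/*
 * check_commit_cbq() is the per-discipline consistency hook invoked from
 * check_commit_altq(): CBQ requires exactly one root class and exactly
 * one default class per interface.
 */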

static int
check_commit_cbq(int dev __unused, int opts __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq;
	int		 root_class, default_class;
	int		 error = 0;

	/*
	 * check if cbq has one root queue and one default queue
	 * for this interface
	 */
	root_class = default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
			root_class++;
		if (altq->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
			default_class++;
	}
	if (root_class != 1) {
		warnx("should have one root queue on %s", pa->ifname);
		error++;
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		error++;
	}
	return (error);
}

static int
print_cbq_opts(const struct pf_altq *a)
{
	const struct cbq_opts	*opts;

	opts = &a->pq_u.cbq_opts;
	if (opts->flags) {
		printf("cbq(");
		if (opts->flags & CBQCLF_RED)
			printf(" red");
		if (opts->flags & CBQCLF_ECN)
			printf(" ecn");
		if (opts->flags & CBQCLF_RIO)
			printf(" rio");
		if (opts->flags & CBQCLF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & CBQCLF_BORROW)
			printf(" borrow");
		if (opts->flags & CBQCLF_WRR)
			printf(" wrr");
		if (opts->flags & CBQCLF_EFFICIENT)
			printf(" efficient");
		if (opts->flags & CBQCLF_ROOTCLASS)
			printf(" root");
		if (opts->flags & CBQCLF_DEFCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * PRIQ support functions
 */
static int
eval_pfqueue_priq(struct pfctl *pf __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq;

	if (pa->priority >= PRIQ_MAXPRI) {
		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
		return (-1);
	}
	/* the priority should be unique for the interface */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) == 0 &&
		    altq->qname[0] != 0 && altq->priority == pa->priority) {
			warnx("%s and %s have the same priority",
			    altq->qname, pa->qname);
			return (-1);
		}
	}

	return (0);
}

static int
check_commit_priq(int dev __unused, int opts __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq;
	int		 default_class;
	int		 error = 0;

	/*
	 * check if priq has one default class for this interface
	 */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
			default_class++;
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		error++;
	}
	return (error);
}

static int
print_priq_opts(const struct pf_altq *a)
{
	const struct priq_opts	*opts;

	opts = &a->pq_u.priq_opts;

	if (opts->flags) {
		printf("priq(");
		if (opts->flags & PRCF_RED)
			printf(" red");
		if (opts->flags & PRCF_ECN)
			printf(" ecn");
		if (opts->flags & PRCF_RIO)
			printf(" rio");
		if (opts->flags & PRCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & PRCF_DEFAULTCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * HFSC support functions
 */
static int
eval_pfqueue_hfsc(struct pfctl *pf __unused, struct pf_altq *pa)
{
	struct pf_altq		*altq, *parent;
	struct hfsc_opts	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.hfsc_opts;

	if (pa->parent[0] == 0) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	LIST_INIT(&rtsc);
	LIST_INIT(&lssc);

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
		warnx("m2 is zero for %s", pa->qname);
		return (-1);
	}

	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
		warnx("m1 must be zero for convex curve: %s", pa->qname);
		return (-1);
	}

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the linkshare service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */
	parent = qname_to_pfaltq(pa->parent, pa->ifname);
	if (parent == NULL)
		errx(1, "parent %s not found for %s", pa->parent, pa->qname);

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;

		/* if the class has a real-time service curve, add it. */
		if (opts->rtsc_m2 != 0 && altq->pq_u.hfsc_opts.rtsc_m2 != 0) {
			sc.m1 = altq->pq_u.hfsc_opts.rtsc_m1;
			sc.d = altq->pq_u.hfsc_opts.rtsc_d;
			sc.m2 = altq->pq_u.hfsc_opts.rtsc_m2;
			gsc_add_sc(&rtsc, &sc);
		}

		if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0)
			continue;

		/* if the class has a linkshare service curve, add it. */
		if (opts->lssc_m2 != 0 && altq->pq_u.hfsc_opts.lssc_m2 != 0) {
			sc.m1 = altq->pq_u.hfsc_opts.lssc_m1;
			sc.d = altq->pq_u.hfsc_opts.lssc_d;
			sc.m2 = altq->pq_u.hfsc_opts.lssc_m2;
			gsc_add_sc(&lssc, &sc);
		}
	}

	/* check the real-time service curve.  reserve 20% of interface bw */
	if (opts->rtsc_m2 != 0) {
		/* add this queue to the sum */
		sc.m1 = opts->rtsc_m1;
		sc.d = opts->rtsc_d;
		sc.m2 = opts->rtsc_m2;
		gsc_add_sc(&rtsc, &sc);
		/* compare the sum with 80% of the interface */
		sc.m1 = 0;
		sc.d = 0;
		sc.m2 = pa->ifbandwidth / 100 * 80;
		if (!is_gsc_under_sc(&rtsc, &sc)) {
			warnx("real-time sc exceeds 80%% of the interface "
			    "bandwidth (%s)", rate2str((double)sc.m2));
			goto err_ret;
		}
	}

	/* check the linkshare service curve. */
	if (opts->lssc_m2 != 0) {
		/* add this queue to the child sum */
		sc.m1 = opts->lssc_m1;
		sc.d = opts->lssc_d;
		sc.m2 = opts->lssc_m2;
		gsc_add_sc(&lssc, &sc);
		/* compare the sum of the children with parent's sc */
		sc.m1 = parent->pq_u.hfsc_opts.lssc_m1;
		sc.d = parent->pq_u.hfsc_opts.lssc_d;
		sc.m2 = parent->pq_u.hfsc_opts.lssc_m2;
		if (!is_gsc_under_sc(&lssc, &sc)) {
			warnx("linkshare sc exceeds parent's sc");
			goto err_ret;
		}
	}

	/* check the upper-limit service curve. */
	if (opts->ulsc_m2 != 0) {
		if (opts->ulsc_m1 > pa->ifbandwidth ||
		    opts->ulsc_m2 > pa->ifbandwidth) {
			warnx("upper-limit larger than interface bandwidth");
			goto err_ret;
		}
		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
			warnx("upper-limit sc smaller than real-time sc");
			goto err_ret;
		}
	}

	gsc_destroy(&rtsc);
	gsc_destroy(&lssc);

	return (0);

err_ret:
	gsc_destroy(&rtsc);
	gsc_destroy(&lssc);
	return (-1);
}
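
/*
 * An illustrative pf.conf fragment that the HFSC checks above apply to
 * (interface name and rates are made up):
 *
 *	altq on em0 hfsc bandwidth 100Mb queue { std, ssh }
 *	queue std hfsc(default linkshare 60Mb)
 *	queue ssh hfsc(realtime 10Mb linkshare 40Mb)
 *
 * The real-time curves of all queues on em0 must sum to at most 80% of
 * the interface bandwidth, and the link-share curves of sibling queues
 * must stay under their parent's link-share curve.
 */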

/*
 * FAIRQ support functions
 */
static int
eval_pfqueue_fairq(struct pfctl *pf __unused, struct pf_altq *pa)
{
	struct pf_altq		*altq, *parent;
	struct fairq_opts	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.fairq_opts;

	if (pa->parent[0] == 0) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	LIST_INIT(&lssc);

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the link-sharing service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */
	parent = qname_to_pfaltq(pa->parent, pa->ifname);
	if (parent == NULL)
		errx(1, "parent %s not found for %s", pa->parent, pa->qname);

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;

		if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0)
			continue;

		/* if the class has a link-sharing service curve, add it. */
		if (opts->lssc_m2 != 0 && altq->pq_u.fairq_opts.lssc_m2 != 0) {
			sc.m1 = altq->pq_u.fairq_opts.lssc_m1;
			sc.d = altq->pq_u.fairq_opts.lssc_d;
			sc.m2 = altq->pq_u.fairq_opts.lssc_m2;
			gsc_add_sc(&lssc, &sc);
		}
	}

	/* check the link-sharing service curve. */
	if (opts->lssc_m2 != 0) {
		sc.m1 = parent->pq_u.fairq_opts.lssc_m1;
		sc.d = parent->pq_u.fairq_opts.lssc_d;
		sc.m2 = parent->pq_u.fairq_opts.lssc_m2;
		if (!is_gsc_under_sc(&lssc, &sc)) {
			warnx("link-sharing sc exceeds parent's sc");
			goto err_ret;
		}
	}

	gsc_destroy(&lssc);

	return (0);

err_ret:
	gsc_destroy(&lssc);
	return (-1);
}
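
/*
 * HFSC and FAIRQ share the same commit-time rule: each interface must
 * have exactly one default queue, and that default queue must be a leaf
 * (no other queue may name it as its parent).
 */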

static int
check_commit_hfsc(int dev __unused, int opts __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq, *def = NULL;
	int		 default_class;
	int		 error = 0;

	/* check if hfsc has one default queue for this interface */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->parent[0] == 0)	/* dummy root */
			continue;
		if (altq->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
			default_class++;
			def = altq;
		}
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		return (1);
	}
	/* make sure the default queue is a leaf */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) {
			warnx("default queue is not a leaf");
			error++;
		}
	}
	return (error);
}

static int
check_commit_fairq(int dev __unused, int opts __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq, *def = NULL;
	int		 default_class;
	int		 error = 0;

	/* check if fairq has one default queue for this interface */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->pq_u.fairq_opts.flags & FARF_DEFAULTCLASS) {
			default_class++;
			def = altq;
		}
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		return (1);
	}
	/* make sure the default queue is a leaf */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) {
			warnx("default queue is not a leaf");
			error++;
		}
	}
	return (error);
}

static int
print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct hfsc_opts		*opts;
	const struct node_hfsc_sc	*loc_rtsc, *loc_lssc, *ulsc;

	opts = &a->pq_u.hfsc_opts;
	if (qopts == NULL)
		loc_rtsc = loc_lssc = ulsc = NULL;
	else {
		loc_rtsc = &qopts->data.hfsc_opts.realtime;
		loc_lssc = &qopts->data.hfsc_opts.linkshare;
		ulsc = &qopts->data.hfsc_opts.upperlimit;
	}

	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("hfsc(");
		if (opts->flags & HFCF_RED)
			printf(" red");
		if (opts->flags & HFCF_ECN)
			printf(" ecn");
		if (opts->flags & HFCF_RIO)
			printf(" rio");
		if (opts->flags & HFCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & HFCF_DEFAULTCLASS)
			printf(" default");
		if (opts->rtsc_m2 != 0)
			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
			    opts->rtsc_m2, loc_rtsc);
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, loc_lssc);
		if (opts->ulsc_m2 != 0)
			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
			    opts->ulsc_m2, ulsc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}

static int
print_fairq_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct fairq_opts		*opts;
	const struct node_fairq_sc	*loc_lssc;

	opts = &a->pq_u.fairq_opts;
	if (qopts == NULL)
		loc_lssc = NULL;
	else
		loc_lssc = &qopts->data.fairq_opts.linkshare;

	if (opts->flags ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("fairq(");
		if (opts->flags & FARF_RED)
			printf(" red");
		if (opts->flags & FARF_ECN)
			printf(" ecn");
		if (opts->flags & FARF_RIO)
			printf(" rio");
		if (opts->flags & FARF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & FARF_DEFAULTCLASS)
			printf(" default");
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_fairq_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, loc_lssc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * admission control using generalized service curve
 */
#ifndef INFINITY
#define	INFINITY	HUGE_VAL	/* positive infinity defined in <math.h> */
#endif

/* add a new service curve to a generalized service curve */
static void
gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	if (is_sc_null(sc))
		return;
	if (sc->d != 0)
		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
}
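
/*
 * A gen_sc is a sorted list of segments; each segment starts at x with
 * offset y, length d and slope m, and the list is terminated by a dummy
 * segment at x = INFINITY (created on demand by gsc_getentry()).
 * gsc_add_sc() above decomposes a two-piece service curve into a slope-m1
 * segment on [0, d) and a slope-m2 segment on [d, INFINITY).
 */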

/*
 * check whether all points of a generalized service curve have
 * their y-coordinates no larger than a given two-piece linear
 * service curve.
 */
static int
is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	struct segment	*s, *last, *end;
	double		 y;

	if (is_sc_null(sc)) {
		if (LIST_EMPTY(gsc))
			return (1);
		LIST_FOREACH(s, gsc, _next) {
			if (s->m != 0)
				return (0);
		}
		return (1);
	}
	/*
	 * gsc has a dummy entry at the end with x = INFINITY.
	 * loop through up to this dummy entry.
	 */
	end = gsc_getentry(gsc, INFINITY);
	if (end == NULL)
		return (1);
	last = NULL;
	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
		if (s->y > sc_x2y(sc, s->x))
			return (0);
		last = s;
	}
	/* last now holds the real last segment */
	if (last == NULL)
		return (1);
	if (last->m > sc->m2)
		return (0);
	if (last->x < sc->d && last->m > sc->m1) {
		y = last->y + (sc->d - last->x) * last->m;
		if (y > sc_x2y(sc, sc->d))
			return (0);
	}
	return (1);
}

static void
gsc_destroy(struct gen_sc *gsc)
{
	struct segment	*s;

	while ((s = LIST_FIRST(gsc)) != NULL) {
		LIST_REMOVE(s, _next);
		free(s);
	}
}

/*
 * return a segment entry starting at x.
 * if gsc has no entry starting at x, a new entry is created at x.
 */
static struct segment *
gsc_getentry(struct gen_sc *gsc, double x)
{
	struct segment	*new, *prev, *s;

	prev = NULL;
	LIST_FOREACH(s, gsc, _next) {
		if (s->x == x)
			return (s);	/* matching entry found */
		else if (s->x < x)
			prev = s;
		else
			break;
	}

	/* we have to create a new entry */
	if ((new = calloc(1, sizeof(struct segment))) == NULL)
		return (NULL);

	new->x = x;
	if (x == INFINITY || s == NULL)
		new->d = 0;
	else if (s->x == INFINITY)
		new->d = INFINITY;
	else
		new->d = s->x - x;
	if (prev == NULL) {
		/* insert the new entry at the head of the list */
		new->y = 0;
		new->m = 0;
		LIST_INSERT_HEAD(gsc, new, _next);
	} else {
		/*
		 * the start point intersects with the segment pointed by
		 * prev.  divide prev into 2 segments
		 */
		if (x == INFINITY) {
			prev->d = INFINITY;
			if (prev->m == 0)
				new->y = prev->y;
			else
				new->y = INFINITY;
		} else {
			prev->d = x - prev->x;
			new->y = prev->d * prev->m + prev->y;
		}
		new->m = prev->m;
		LIST_INSERT_AFTER(prev, new, _next);
	}
	return (new);
}

/* add a segment to a generalized service curve */
static int
gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
{
	struct segment	*start, *end, *s;
	double		 x2;

	if (d == INFINITY)
		x2 = INFINITY;
	else
		x2 = x + d;
	start = gsc_getentry(gsc, x);
	end = gsc_getentry(gsc, x2);
	if (start == NULL || end == NULL)
		return (-1);

	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
		s->m += m;
		s->y += y + (s->x - x) * m;
	}

	end = gsc_getentry(gsc, INFINITY);
	for (; s != end; s = LIST_NEXT(s, _next)) {
		s->y += m * d;
	}

	return (0);
}
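
/*
 * sc_x2y() below evaluates a two-piece service curve at x.  For instance,
 * with sc = { m1 = 2, d = 10, m2 = 1 }, sc_x2y(sc, 15) returns
 * 10 * 2 + (15 - 10) * 1 = 25: the first d units accumulate at slope m1,
 * the remainder at slope m2.
 */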

/* get y-projection of a service curve */
static double
sc_x2y(struct service_curve *sc, double x)
{
	double	y;

	if (x <= (double)sc->d)
		/* y belongs to the 1st segment */
		y = x * (double)sc->m1;
	else
		/* y belongs to the 2nd segment */
		y = (double)sc->d * (double)sc->m1
		    + (x - (double)sc->d) * (double)sc->m2;
	return (y);
}

/*
 * misc utilities
 */
#define	R2S_BUFS	8
#define	RATESTR_MAX	16

char *
rate2str(double rate)
{
	char		*buf;
	static char	 r2sbuf[R2S_BUFS][RATESTR_MAX];	/* ring buffer */
	static int	 idx = 0;
	int		 i;
	static const char unit[] = " KMG";

	buf = r2sbuf[idx++];
	if (idx == R2S_BUFS)
		idx = 0;

	for (i = 0; rate >= 1000 && i <= 3; i++)
		rate /= 1000;

	if ((int)(rate * 100) % 100)
		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
	else
		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);

	return (buf);
}
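
/*
 * rate2str() formats a bits-per-second value with a K/M/G suffix, e.g.
 * rate2str(25000000.0) yields "25Mb" and rate2str(1536000.0) yields
 * "1.54Mb".  Because the result comes from a small ring of static
 * buffers, up to R2S_BUFS results can be referenced at the same time.
 */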

u_int32_t
getifspeed(const char *ifname)
{
	size_t		datalen;
	int		idx;
	struct ifmibdata data;
	int		name[] = {
		CTL_NET,
		PF_LINK,
		NETLINK_GENERIC,
		IFMIB_IFDATA,
		0,
		IFDATA_GENERAL
	};

	if ((idx = (int)if_nametoindex(ifname)) == 0)
		err(1, "getifspeed: if_nametoindex");
	name[4] = idx;

	datalen = sizeof(data);
	if (sysctl(name, 6, &data, &datalen, NULL, 0))
		err(1, "getifspeed: sysctl");

	return(data.ifmd_data.ifi_baudrate);
}

u_long
getifmtu(char *ifname)
{
	int		s;
	struct ifreq	ifr;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		err(1, "socket");
	bzero(&ifr, sizeof(ifr));
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifmtu: strlcpy");
	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
		err(1, "SIOCGIFMTU");
	if (shutdown(s, SHUT_RDWR) == -1)
		err(1, "shutdown");
	if (close(s))
		err(1, "close");
	if (ifr.ifr_mtu > 0)
		return (ifr.ifr_mtu);
	else {
		warnx("could not get mtu for %s, assuming 1500", ifname);
		return (1500);
	}
}

int
eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
    u_int32_t ref_bw)
{
	int	errors = 0;

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		pa->pq_u.cbq_opts = opts->data.cbq_opts;
		break;
	case ALTQT_PRIQ:
		pa->pq_u.priq_opts = opts->data.priq_opts;
		break;
	case ALTQT_HFSC:
		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
		if (opts->data.hfsc_opts.linkshare.used) {
			pa->pq_u.hfsc_opts.lssc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_d =
			    opts->data.hfsc_opts.linkshare.d;
		}
		if (opts->data.hfsc_opts.realtime.used) {
			pa->pq_u.hfsc_opts.rtsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_d =
			    opts->data.hfsc_opts.realtime.d;
		}
		if (opts->data.hfsc_opts.upperlimit.used) {
			pa->pq_u.hfsc_opts.ulsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_d =
			    opts->data.hfsc_opts.upperlimit.d;
		}
		break;
	case ALTQT_FAIRQ:
		pa->pq_u.fairq_opts.flags = opts->data.fairq_opts.flags;
		pa->pq_u.fairq_opts.nbuckets = opts->data.fairq_opts.nbuckets;
		pa->pq_u.fairq_opts.hogs_m1 =
		    eval_bwspec(&opts->data.fairq_opts.hogs_bw, ref_bw);

		if (opts->data.fairq_opts.linkshare.used) {
			pa->pq_u.fairq_opts.lssc_m1 =
			    eval_bwspec(&opts->data.fairq_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.fairq_opts.lssc_m2 =
			    eval_bwspec(&opts->data.fairq_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.fairq_opts.lssc_d =
			    opts->data.fairq_opts.linkshare.d;
		}
		break;
	default:
		warnx("eval_queue_opts: unknown scheduler type %u",
		    opts->qtype);
		errors++;
		break;
	}

	return (errors);
}
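
/*
 * eval_bwspec() resolves a bandwidth specification to bits per second:
 * absolute values are returned unchanged, percentages are taken relative
 * to ref_bw (e.g. 25% of a 100000000 bps reference yields 25000000), and
 * an empty spec yields 0 so that callers can fall back to a default.
 */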

u_int32_t
eval_bwspec(struct node_queue_bw *bw, u_int32_t ref_bw)
{
	if (bw->bw_absolute > 0)
		return (bw->bw_absolute);

	if (bw->bw_percent > 0)
		return (ref_bw / 100 * bw->bw_percent);

	return (0);
}
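
/*
 * print_hfsc_sc()/print_fairq_sc() print a service curve as
 * "name(m1 d m2)" when d is non-zero and as "name m2" otherwise, using
 * the percentage form whenever the original specification was given in
 * percent.
 */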

void
print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
    const struct node_hfsc_sc *sc)
{
	printf(" %s", scname);

	if (d != 0) {
		printf("(");
		if (sc != NULL && sc->m1.bw_percent > 0)
			printf("%u%%", sc->m1.bw_percent);
		else
			printf("%s", rate2str((double)m1));
		printf(" %u", d);
	}

	if (sc != NULL && sc->m2.bw_percent > 0)
		printf(" %u%%", sc->m2.bw_percent);
	else
		printf(" %s", rate2str((double)m2));

	if (d != 0)
		printf(")");
}

void
print_fairq_sc(const char *scname, u_int m1, u_int d, u_int m2,
    const struct node_fairq_sc *sc)
{
	printf(" %s", scname);

	if (d != 0) {
		printf("(");
		if (sc != NULL && sc->m1.bw_percent > 0)
			printf("%u%%", sc->m1.bw_percent);
		else
			printf("%s", rate2str((double)m1));
		printf(" %u", d);
	}

	if (sc != NULL && sc->m2.bw_percent > 0)
		printf(" %u%%", sc->m2.bw_percent);
	else
		printf(" %s", rate2str((double)m2));

	if (d != 0)
		printf(")");
}