/*	$OpenBSD: pfctl_altq.c,v 1.83 2004/03/14 21:51:44 dhartmei Exp $	*/
/*	$DragonFly: src/usr.sbin/pfctl/pfctl_altq.c,v 1.2 2005/02/11 22:31:45 joerg Exp $ */

/*
 * Copyright (c) 2002
 *	Sony Computer Science Laboratories Inc.
 * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_mib.h>
#include <netinet/in.h>
#include <net/pf/pfvar.h>

#include <err.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <net/altq/altq.h>
#include <net/altq/altq_cbq.h>
#include <net/altq/altq_priq.h>
#include <net/altq/altq_hfsc.h>

#include "pfctl_parser.h"
#include "pfctl.h"

#define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))

TAILQ_HEAD(altqs, pf_altq) altqs = TAILQ_HEAD_INITIALIZER(altqs);
LIST_HEAD(gen_sc, segment) rtsc, lssc;

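/*
 * rtsc and lssc are scratch lists used by eval_pfqueue_hfsc(): while a
 * queue is evaluated, the real-time service curves of all classes on the
 * interface and the link-sharing curves of the sibling classes are
 * accumulated here, checked against the admission-control limits, and the
 * lists are destroyed again before the function returns.
 */
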
struct pf_altq	*qname_to_pfaltq(const char *, const char *);
u_int32_t	 qname_to_qid(const char *);

static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *);
static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
static int	check_commit_cbq(int, int, struct pf_altq *);
static int	print_cbq_opts(const struct pf_altq *);

static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *);
static int	check_commit_priq(int, int, struct pf_altq *);
static int	print_priq_opts(const struct pf_altq *);

static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *);
static int	check_commit_hfsc(int, int, struct pf_altq *);
static int	print_hfsc_opts(const struct pf_altq *,
		    const struct node_queue_opt *);

static void		 gsc_add_sc(struct gen_sc *, struct service_curve *);
static int		 is_gsc_under_sc(struct gen_sc *,
			     struct service_curve *);
static void		 gsc_destroy(struct gen_sc *);
static struct segment	*gsc_getentry(struct gen_sc *, double);
static int		 gsc_add_seg(struct gen_sc *, double, double, double,
			     double);
static double		 sc_x2y(struct service_curve *, double);

u_int32_t	 getifspeed(const char *);
u_long		 getifmtu(char *);
int		 eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
		     u_int32_t);
u_int32_t	 eval_bwspec(struct node_queue_bw *, u_int32_t);
void		 print_hfsc_sc(const char *, u_int, u_int, u_int,
		     const struct node_hfsc_sc *);

void
pfaltq_store(struct pf_altq *a)
{
	struct pf_altq	*altq;

	if ((altq = malloc(sizeof(*altq))) == NULL)
		err(1, "malloc");
	memcpy(altq, a, sizeof(struct pf_altq));
	TAILQ_INSERT_TAIL(&altqs, altq, entries);
}

void
pfaltq_free(struct pf_altq *a)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(a->ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    strncmp(a->qname, altq->qname, PF_QNAME_SIZE) == 0) {
			TAILQ_REMOVE(&altqs, altq, entries);
			free(altq);
			return;
		}
	}
}

struct pf_altq *
pfaltq_lookup(const char *ifname)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    altq->qname[0] == 0)
			return (altq);
	}
	return (NULL);
}

struct pf_altq *
qname_to_pfaltq(const char *qname, const char *ifname)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
			return (altq);
	}
	return (NULL);
}

u_int32_t
qname_to_qid(const char *qname)
{
	struct pf_altq	*altq;

	/*
	 * We guarantee that same named queues on different interfaces
	 * have the same qid, so we do NOT need to limit matching on
	 * one interface!
	 */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
			return (altq->qid);
	}
	return (0);
}

void
print_altq(const struct pf_altq *a, unsigned level, struct node_queue_bw *bw,
    struct node_queue_opt *qopts)
{
	if (a->qname[0] != 0) {
		print_queue(a, level, bw, 0, qopts);
		return;
	}

	printf("altq on %s ", a->ifname);

	switch (a->scheduler) {
	case ALTQT_CBQ:
		if (!print_cbq_opts(a))
			printf("cbq ");
		break;
	case ALTQT_PRIQ:
		if (!print_priq_opts(a))
			printf("priq ");
		break;
	case ALTQT_HFSC:
		if (!print_hfsc_opts(a, qopts))
			printf("hfsc ");
		break;
	}

	if (bw != NULL && bw->bw_percent > 0) {
		if (bw->bw_percent < 100)
			printf("bandwidth %u%% ", bw->bw_percent);
	} else
		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));

	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	printf("tbrsize %u ", a->tbrsize);
}

void
print_queue(const struct pf_altq *a, unsigned level, struct node_queue_bw *bw,
    int print_interface, struct node_queue_opt *qopts)
{
	unsigned	i;

	printf("queue ");
	for (i = 0; i < level; ++i)
		printf(" ");
	printf("%s ", a->qname);
	if (print_interface)
		printf("on %s ", a->ifname);
	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC) {
		if (bw != NULL && bw->bw_percent > 0) {
			if (bw->bw_percent < 100)
				printf("bandwidth %u%% ", bw->bw_percent);
		} else
			printf("bandwidth %s ", rate2str((double)a->bandwidth));
	}
	if (a->priority != DEFAULT_PRIORITY)
		printf("priority %u ", a->priority);
	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	switch (a->scheduler) {
	case ALTQT_CBQ:
		print_cbq_opts(a);
		break;
	case ALTQT_PRIQ:
		print_priq_opts(a);
		break;
	case ALTQT_HFSC:
		print_hfsc_opts(a, qopts);
		break;
	}
}

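/*
 * print_altq() and print_queue() emit the same syntax that pf.conf uses to
 * declare ALTQ disciplines and queues.  An illustrative CBQ ruleset (not
 * taken from this file) that round-trips through these printers:
 *
 *	altq on fxp0 cbq bandwidth 10Mb queue { std, http }
 *	queue std bandwidth 50% cbq(default)
 *	queue http bandwidth 50% priority 3 cbq(borrow red)
 */
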
/*
 * eval_pfaltq computes the discipline parameters.
 */
int
eval_pfaltq(struct pfctl *pf __unused, struct pf_altq *pa,
    struct node_queue_bw *bw, struct node_queue_opt *opts)
{
	u_int	rate, size, errors = 0;

	if (bw->bw_absolute > 0)
		pa->ifbandwidth = bw->bw_absolute;
	else
		if ((rate = getifspeed(pa->ifname)) == 0) {
			fprintf(stderr, "cannot determine interface bandwidth "
			    "for %s, specify an absolute bandwidth\n",
			    pa->ifname);
			errors++;
		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
			pa->ifbandwidth = rate;

	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);

	/* if tbrsize is not specified, use heuristics */
	if (pa->tbrsize == 0) {
		rate = pa->ifbandwidth;
		if (rate <= 1 * 1000 * 1000)
			size = 1;
		else if (rate <= 10 * 1000 * 1000)
			size = 4;
		else if (rate <= 200 * 1000 * 1000)
			size = 8;
		else
			size = 24;
		size = size * getifmtu(pa->ifname);
		if (size > 0xffff)
			size = 0xffff;
		pa->tbrsize = size;
	}
	return (errors);
}

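/*
 * Token-bucket sizing example for the heuristic above: a 100Mbps interface
 * falls in the "<= 200Mbps" bucket, so size = 8 packets; with a 1500-byte
 * MTU this yields tbrsize = 8 * 1500 = 12000 bytes (capped at 65535 if the
 * product ever exceeds 16 bits).
 */
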
/*
 * check_commit_altq does consistency check for each interface
 */
int
check_commit_altq(int dev, int opts)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* call the discipline check for each interface. */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (altq->qname[0] == 0) {
			switch (altq->scheduler) {
			case ALTQT_CBQ:
				error = check_commit_cbq(dev, opts, altq);
				break;
			case ALTQT_PRIQ:
				error = check_commit_priq(dev, opts, altq);
				break;
			case ALTQT_HFSC:
				error = check_commit_hfsc(dev, opts, altq);
				break;
			default:
				break;
			}
		}
	}
	return (error);
}

/*
 * eval_pfqueue computes the queue parameters.
 */
int
eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	/* should be merged with expand_queue */
	struct pf_altq	*if_pa, *parent;
	int		 error = 0;

	/* find the corresponding interface and copy fields used by queues */
	if ((if_pa = pfaltq_lookup(pa->ifname)) == NULL) {
		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
		return (1);
	}
	pa->scheduler = if_pa->scheduler;
	pa->ifbandwidth = if_pa->ifbandwidth;

	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
		fprintf(stderr, "queue %s already exists on interface %s\n",
		    pa->qname, pa->ifname);
		return (1);
	}
	pa->qid = qname_to_qid(pa->qname);

	parent = NULL;
	if (pa->parent[0] != 0) {
		parent = qname_to_pfaltq(pa->parent, pa->ifname);
		if (parent == NULL) {
			fprintf(stderr, "parent %s not found for %s\n",
			    pa->parent, pa->qname);
			return (1);
		}
		pa->parent_qid = parent->qid;
	}
	if (pa->qlimit == 0)
		pa->qlimit = DEFAULT_QLIMIT;

	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC) {
		if ((pa->bandwidth = eval_bwspec(bw,
		    parent == NULL ? 0 : parent->bandwidth)) == 0) {
			fprintf(stderr, "bandwidth for %s invalid (%d / %d)\n",
			    pa->qname, bw->bw_absolute, bw->bw_percent);
			return (1);
		}

		if (pa->bandwidth > pa->ifbandwidth) {
			fprintf(stderr, "bandwidth for %s higher than "
			    "interface\n", pa->qname);
			return (1);
		}
		if (parent != NULL && pa->bandwidth > parent->bandwidth) {
			fprintf(stderr, "bandwidth for %s higher than parent\n",
			    pa->qname);
			return (1);
		}
	}

	if (eval_queue_opts(pa, opts, parent == NULL ? 0 : parent->bandwidth))
		return (1);

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		error = eval_pfqueue_cbq(pf, pa);
		break;
	case ALTQT_PRIQ:
		error = eval_pfqueue_priq(pf, pa);
		break;
	case ALTQT_HFSC:
		error = eval_pfqueue_hfsc(pf, pa);
		break;
	default:
		break;
	}
	return (error);
}

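/*
 * Note that eval_pfqueue() relies on the parent "altq on <ifname>" entry
 * already being present in the altqs list (pfaltq_lookup() fails with
 * "altq not defined" otherwise), so queue definitions are only evaluated
 * after the discipline for their interface has been stored.
 */
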
/*
 * CBQ support functions
 */
#define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
#define	RM_NS_PER_SEC	(1000000000)

static int
eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	u_int		 ifmtu;

	if (pa->priority >= CBQ_MAXPRI) {
		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
		return (-1);
	}

	ifmtu = getifmtu(pa->ifname);
	opts = &pa->pq_u.cbq_opts;

	if (opts->pktsize == 0) {	/* use default */
		opts->pktsize = ifmtu;
		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
			opts->pktsize &= ~MCLBYTES;
	} else if (opts->pktsize > ifmtu)
		opts->pktsize = ifmtu;
	if (opts->maxpktsize == 0)	/* use default */
		opts->maxpktsize = ifmtu;
	else if (opts->maxpktsize > ifmtu)
		opts->maxpktsize = ifmtu;	/* clamp to the interface MTU */

	if (opts->pktsize > opts->maxpktsize)
		opts->pktsize = opts->maxpktsize;

	if (pa->parent[0] == 0)
		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);

	cbq_compute_idletime(pf, pa);
	return (0);
}

/*
 * compute ns_per_byte, maxidle, minidle, and offtime
 */
static int
cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	double		 maxidle_s, maxidle, minidle;
	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
	double		 z, g, f, gton, gtom;
	u_int		 minburst, maxburst;

	opts = &pa->pq_u.cbq_opts;
	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
	minburst = opts->minburst;
	maxburst = opts->maxburst;

	if (pa->bandwidth == 0)
		f = 0.0001;	/* small enough? */
	else
		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);

	nsPerByte = ifnsPerByte / f;
	ptime = (double)opts->pktsize * ifnsPerByte;
	cptime = ptime * (1.0 - f) / f;

	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
		/*
		 * this causes integer overflow in kernel!
		 * (bandwidth < 6Kbps when max_pkt_size=1500)
		 */
		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
			warnx("queue bandwidth must be larger than %s",
			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
			    (double)INT_MAX * (double)pa->ifbandwidth));
			fprintf(stderr, "cbq: queue %s is too slow!\n",
			    pa->qname);
		}
		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
	}

	if (maxburst == 0) {	/* use default */
		if (cptime > 10.0 * 1000000)
			maxburst = 4;
		else
			maxburst = 16;
	}
	if (minburst == 0)	/* use default */
		minburst = 2;
	if (minburst > maxburst)
		minburst = maxburst;

	z = (double)(1 << RM_FILTER_GAIN);
	g = (1.0 - 1.0 / z);
	gton = pow(g, (double)maxburst);
	gtom = pow(g, (double)(minburst-1));
	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
	maxidle_s = (1.0 - g);
	if (maxidle > maxidle_s)
		maxidle = ptime * maxidle;
	else
		maxidle = ptime * maxidle_s;
	if (minburst)
		offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
	else
		offtime = cptime;
	minidle = -((double)opts->maxpktsize * (double)nsPerByte);

	/* scale parameters */
	maxidle = ((maxidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);
	offtime = (offtime * 8.0) / nsPerByte *
	    pow(2.0, (double)RM_FILTER_GAIN);
	minidle = ((minidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);

	maxidle = maxidle / 1000.0;
	offtime = offtime / 1000.0;
	minidle = minidle / 1000.0;

	opts->minburst = minburst;
	opts->maxburst = maxburst;
	opts->ns_per_byte = (u_int)nsPerByte;
	opts->maxidle = (u_int)fabs(maxidle);
	opts->minidle = (int)minidle;
	opts->offtime = (u_int)fabs(offtime);

	return (0);
}

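/*
 * Summary of the computation above, with f = bandwidth/ifbandwidth and
 * g = 1 - 2^-RM_FILTER_GAIN:
 *
 *	nsPerByte = ifnsPerByte / f
 *	maxidle   = ptime * max((1/f - 1) * (1 - g^maxburst) / g^maxburst,
 *	                        1 - g)
 *	offtime   = cptime * (1 + (1 - g^(minburst-1)) /
 *	                          ((1 - g) * g^(minburst-1)))
 *	            (or just cptime when minburst is zero)
 *	minidle   = -maxpktsize * nsPerByte
 *
 * before the final scaling by 8/nsPerByte, 2^RM_FILTER_GAIN and 1/1000.
 */
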
static int
check_commit_cbq(int dev __unused, int opts __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq;
	int		 root_class, default_class;
	int		 error = 0;

	/*
	 * check if cbq has one root queue and one default queue
	 * for this interface
	 */
	root_class = default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
			root_class++;
		if (altq->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
			default_class++;
	}
	if (root_class != 1) {
		warnx("should have one root queue on %s", pa->ifname);
		error++;
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		error++;
	}
	return (error);
}

static int
print_cbq_opts(const struct pf_altq *a)
{
	const struct cbq_opts	*opts;

	opts = &a->pq_u.cbq_opts;
	if (opts->flags) {
		printf("cbq(");
		if (opts->flags & CBQCLF_RED)
			printf(" red");
		if (opts->flags & CBQCLF_ECN)
			printf(" ecn");
		if (opts->flags & CBQCLF_RIO)
			printf(" rio");
		if (opts->flags & CBQCLF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & CBQCLF_BORROW)
			printf(" borrow");
		if (opts->flags & CBQCLF_WRR)
			printf(" wrr");
		if (opts->flags & CBQCLF_EFFICIENT)
			printf(" efficient");
		if (opts->flags & CBQCLF_ROOTCLASS)
			printf(" root");
		if (opts->flags & CBQCLF_DEFCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * PRIQ support functions
 */
static int
eval_pfqueue_priq(struct pfctl *pf __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq;

	if (pa->priority >= PRIQ_MAXPRI) {
		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
		return (-1);
	}
	/* the priority should be unique for the interface */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) == 0 &&
		    altq->qname[0] != 0 && altq->priority == pa->priority) {
			warnx("%s and %s have the same priority",
			    altq->qname, pa->qname);
			return (-1);
		}
	}

	return (0);
}

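/*
 * Taken together, the two checks above enforce that every PRIQ class on an
 * interface has a priority in the range 0..PRIQ_MAXPRI-1 and that no two
 * classes on that interface share the same level.
 */
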
static int
check_commit_priq(int dev __unused, int opts __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq;
	int		 default_class;
	int		 error = 0;

	/*
	 * check if priq has one default class for this interface
	 */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
			default_class++;
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		error++;
	}
	return (error);
}

static int
print_priq_opts(const struct pf_altq *a)
{
	const struct priq_opts	*opts;

	opts = &a->pq_u.priq_opts;

	if (opts->flags) {
		printf("priq(");
		if (opts->flags & PRCF_RED)
			printf(" red");
		if (opts->flags & PRCF_ECN)
			printf(" ecn");
		if (opts->flags & PRCF_RIO)
			printf(" rio");
		if (opts->flags & PRCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & PRCF_DEFAULTCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * HFSC support functions
 */
static int
eval_pfqueue_hfsc(struct pfctl *pf __unused, struct pf_altq *pa)
{
	struct pf_altq		*altq, *parent;
	struct hfsc_opts	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.hfsc_opts;

	if (pa->parent[0] == 0) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	LIST_INIT(&rtsc);
	LIST_INIT(&lssc);

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
		warnx("m2 is zero for %s", pa->qname);
		return (-1);
	}

	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
		warnx("m1 must be zero for convex curve: %s", pa->qname);
		return (-1);
	}

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the link-sharing service curve, the sum of the child service
	 * curve should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */
	parent = qname_to_pfaltq(pa->parent, pa->ifname);
	if (parent == NULL)
		errx(1, "parent %s not found for %s", pa->parent, pa->qname);

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;

		/* if the class has a real-time service curve, add it. */
		if (opts->rtsc_m2 != 0 && altq->pq_u.hfsc_opts.rtsc_m2 != 0) {
			sc.m1 = altq->pq_u.hfsc_opts.rtsc_m1;
			sc.d = altq->pq_u.hfsc_opts.rtsc_d;
			sc.m2 = altq->pq_u.hfsc_opts.rtsc_m2;
			gsc_add_sc(&rtsc, &sc);
		}

		if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0)
			continue;

		/* if the class has a link-sharing service curve, add it. */
		if (opts->lssc_m2 != 0 && altq->pq_u.hfsc_opts.lssc_m2 != 0) {
			sc.m1 = altq->pq_u.hfsc_opts.lssc_m1;
			sc.d = altq->pq_u.hfsc_opts.lssc_d;
			sc.m2 = altq->pq_u.hfsc_opts.lssc_m2;
			gsc_add_sc(&lssc, &sc);
		}
	}

	/* check the real-time service curve.  reserve 20% of interface bw */
	if (opts->rtsc_m2 != 0) {
		sc.m1 = 0;
		sc.d = 0;
		sc.m2 = pa->ifbandwidth / 100 * 80;
		if (!is_gsc_under_sc(&rtsc, &sc)) {
			warnx("real-time sc exceeds the interface bandwidth");
			goto err_ret;
		}
	}

	/* check the link-sharing service curve. */
	if (opts->lssc_m2 != 0) {
		sc.m1 = parent->pq_u.hfsc_opts.lssc_m1;
		sc.d = parent->pq_u.hfsc_opts.lssc_d;
		sc.m2 = parent->pq_u.hfsc_opts.lssc_m2;
		if (!is_gsc_under_sc(&lssc, &sc)) {
			warnx("link-sharing sc exceeds parent's sc");
			goto err_ret;
		}
	}

	/* check the upper-limit service curve. */
	if (opts->ulsc_m2 != 0) {
		if (opts->ulsc_m1 > pa->ifbandwidth ||
		    opts->ulsc_m2 > pa->ifbandwidth) {
			warnx("upper-limit larger than interface bandwidth");
			goto err_ret;
		}
		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
			warnx("upper-limit sc smaller than real-time sc");
			goto err_ret;
		}
	}

	gsc_destroy(&rtsc);
	gsc_destroy(&lssc);

	return (0);

err_ret:
	gsc_destroy(&rtsc);
	gsc_destroy(&lssc);
	return (-1);
}

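/*
 * Admission-control example for the checks above: on a 100Mb interface the
 * real-time curves of all classes may sum to at most 80Mb (the 20% reserve),
 * so two leaves with real-time m2 values of 30Mb and 60Mb would be rejected
 * by the real-time check even before the link-sharing curves are compared
 * against the parent.
 */
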
static int
check_commit_hfsc(int dev __unused, int opts __unused, struct pf_altq *pa)
{
	struct pf_altq	*altq, *def = NULL;
	int		 default_class;
	int		 error = 0;

	/* check if hfsc has one default queue for this interface */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (altq->parent[0] == 0)	/* dummy root */
			continue;
		if (altq->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
			default_class++;
			def = altq;
		}
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		return (1);
	}
	/* make sure the default queue is a leaf */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)	/* this is for interface */
			continue;
		if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) {
			warnx("default queue is not a leaf");
			error++;
		}
	}
	return (error);
}

static int
print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct hfsc_opts		*opts;
	const struct node_hfsc_sc	*loc_rtsc, *loc_lssc, *ulsc;

	opts = &a->pq_u.hfsc_opts;
	if (qopts == NULL)
		loc_rtsc = loc_lssc = ulsc = NULL;
	else {
		loc_rtsc = &qopts->data.hfsc_opts.realtime;
		loc_lssc = &qopts->data.hfsc_opts.linkshare;
		ulsc = &qopts->data.hfsc_opts.upperlimit;
	}

	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("hfsc(");
		if (opts->flags & HFCF_RED)
			printf(" red");
		if (opts->flags & HFCF_ECN)
			printf(" ecn");
		if (opts->flags & HFCF_RIO)
			printf(" rio");
		if (opts->flags & HFCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & HFCF_DEFAULTCLASS)
			printf(" default");
		if (opts->rtsc_m2 != 0)
			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
			    opts->rtsc_m2, loc_rtsc);
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, loc_lssc);
		if (opts->ulsc_m2 != 0)
			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
			    opts->ulsc_m2, ulsc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * admission control using generalized service curve
 */
#define	INFINITY	HUGE_VAL  /* positive infinity defined in <math.h> */

/* add a new service curve to a generalized service curve */
static void
gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	if (is_sc_null(sc))
		return;
	if (sc->d != 0)
		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
}

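/*
 * A two-piece service curve (m1, d, m2) is thus stored as two segments:
 * slope m1 on [0, d) and slope m2 on [d, INFINITY).  Because gsc_add_seg()
 * adds its slope onto whatever segments already exist, the generalized
 * curve accumulates the sum of every curve added so far.
 */
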
/*
 * check whether all points of a generalized service curve have
 * their y-coordinates no larger than a given two-piece linear
 * service curve.
 */
static int
is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	struct segment	*s, *last, *end;
	double		 y;

	if (is_sc_null(sc)) {
		if (LIST_EMPTY(gsc))
			return (1);
		LIST_FOREACH(s, gsc, _next) {
			if (s->m != 0)
				return (0);
		}
		return (1);
	}
	/*
	 * gsc has a dummy entry at the end with x = INFINITY.
	 * loop through up to this dummy entry.
	 */
	end = gsc_getentry(gsc, INFINITY);
	if (end == NULL)
		return (1);
	last = NULL;
	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
		if (s->y > sc_x2y(sc, s->x))
			return (0);
		last = s;
	}
	/* last now holds the real last segment */
	if (last == NULL)
		return (1);
	if (last->m > sc->m2)
		return (0);
	if (last->x < sc->d && last->m > sc->m1) {
		y = last->y + (sc->d - last->x) * last->m;
		if (y > sc_x2y(sc, sc->d))
			return (0);
	}
	return (1);
}

static void
gsc_destroy(struct gen_sc *gsc)
{
	struct segment	*s;

	while ((s = LIST_FIRST(gsc)) != NULL) {
		LIST_REMOVE(s, _next);
		free(s);
	}
}

/*
 * return a segment entry starting at x.
 * if gsc has no entry starting at x, a new entry is created at x.
 */
static struct segment *
gsc_getentry(struct gen_sc *gsc, double x)
{
	struct segment	*new, *prev, *s;

	prev = NULL;
	LIST_FOREACH(s, gsc, _next) {
		if (s->x == x)
			return (s);	/* matching entry found */
		else if (s->x < x)
			prev = s;
		else
			break;
	}

	/* we have to create a new entry */
	if ((new = calloc(1, sizeof(struct segment))) == NULL)
		return (NULL);

	new->x = x;
	if (x == INFINITY || s == NULL)
		new->d = 0;
	else if (s->x == INFINITY)
		new->d = INFINITY;
	else
		new->d = s->x - x;
	if (prev == NULL) {
		/* insert the new entry at the head of the list */
		new->y = 0;
		new->m = 0;
		LIST_INSERT_HEAD(gsc, new, _next);
	} else {
		/*
		 * the start point intersects with the segment pointed by
		 * prev.  divide prev into 2 segments
		 */
		if (x == INFINITY) {
			prev->d = INFINITY;
			if (prev->m == 0)
				new->y = prev->y;
			else
				new->y = INFINITY;
		} else {
			prev->d = x - prev->x;
			new->y = prev->d * prev->m + prev->y;
		}
		new->m = prev->m;
		LIST_INSERT_AFTER(prev, new, _next);
	}

	return (new);
}

/* add a segment to a generalized service curve */
static int
gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
{
	struct segment	*start, *end, *s;
	double		 x2;

	if (d == INFINITY)
		x2 = INFINITY;
	else
		x2 = x + d;
	start = gsc_getentry(gsc, x);
	end = gsc_getentry(gsc, x2);
	if (start == NULL || end == NULL)
		return (-1);

	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
		s->m += m;
		s->y += y + (s->x - x) * m;
	}

	end = gsc_getentry(gsc, INFINITY);
	for (; s != end; s = LIST_NEXT(s, _next)) {
		s->y += m * d;
	}

	return (0);
}

/* get y-projection of a service curve */
static double
sc_x2y(struct service_curve *sc, double x)
{
	double	y;

	if (x <= (double)sc->d)
		/* y belongs to the 1st segment */
		y = x * (double)sc->m1;
	else
		/* y belongs to the 2nd segment */
		y = (double)sc->d * (double)sc->m1
		    + (x - (double)sc->d) * (double)sc->m2;
	return (y);
}

/*
 * misc utilities
 */
#define	R2S_BUFS	8
#define	RATESTR_MAX	16

char *
rate2str(double rate)
{
	char		*buf;
	static char	 r2sbuf[R2S_BUFS][RATESTR_MAX];	/* ring buffer */
	static int	 idx = 0;
	int		 i;
	static const char unit[] = " KMG";

	buf = r2sbuf[idx++];
	if (idx == R2S_BUFS)
		idx = 0;

	for (i = 0; rate >= 1000 && i <= 3; i++)
		rate /= 1000;

	if ((int)(rate * 100) % 100)
		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
	else
		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);

	return (buf);
}

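/*
 * Example: rate2str(1544000.0) divides by 1000 twice (i == 2, unit 'M')
 * and, since 1.544 has a fractional part, prints "1.54Mb"; an exact
 * multiple such as 2000000.0 comes out as "2Mb".
 */
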
u_int32_t
getifspeed(const char *ifname)
{
	size_t			 datalen;
	int			 idx;
	struct ifmibdata	 data;
	int name[] = {
		CTL_NET,
		PF_LINK,
		NETLINK_GENERIC,
		IFMIB_IFDATA,
		0,			/* interface index, filled in below */
		IFDATA_GENERAL
	};

	if ((idx = (int)if_nametoindex(ifname)) == 0)
		err(1, "getifspeed: if_nametoindex");
	name[4] = idx;

	datalen = sizeof(data);
	if (sysctl(name, 6, &data, &datalen, NULL, 0))
		err(1, "getifspeed: sysctl");

	return (data.ifmd_data.ifi_baudrate);
}

u_long
getifmtu(char *ifname)
{
	int		 s;
	struct ifreq	 ifr;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		err(1, "socket");
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifmtu: strlcpy");
	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
		err(1, "SIOCGIFMTU");
	if (shutdown(s, SHUT_RDWR) == -1)
		err(1, "shutdown");
	if (close(s))
		err(1, "close");
	if (ifr.ifr_mtu > 0)
		return (ifr.ifr_mtu);
	else {
		warnx("could not get mtu for %s, assuming 1500", ifname);
		return (1500);
	}
}

int
eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
    u_int32_t ref_bw)
{
	int	errors = 0;

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		pa->pq_u.cbq_opts = opts->data.cbq_opts;
		break;
	case ALTQT_PRIQ:
		pa->pq_u.priq_opts = opts->data.priq_opts;
		break;
	case ALTQT_HFSC:
		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
		if (opts->data.hfsc_opts.linkshare.used) {
			pa->pq_u.hfsc_opts.lssc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_d =
			    opts->data.hfsc_opts.linkshare.d;
		}
		if (opts->data.hfsc_opts.realtime.used) {
			pa->pq_u.hfsc_opts.rtsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_d =
			    opts->data.hfsc_opts.realtime.d;
		}
		if (opts->data.hfsc_opts.upperlimit.used) {
			pa->pq_u.hfsc_opts.ulsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_d =
			    opts->data.hfsc_opts.upperlimit.d;
		}
		break;
	default:
		warnx("eval_queue_opts: unknown scheduler type %u",
		    opts->qtype);
		errors++;
		break;
	}

	return (errors);
}

u_int32_t
eval_bwspec(struct node_queue_bw *bw, u_int32_t ref_bw)
{
	if (bw->bw_absolute > 0)
		return (bw->bw_absolute);

	if (bw->bw_percent > 0)
		return (ref_bw / 100 * bw->bw_percent);

	return (0);
}

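/*
 * Dividing before multiplying in the percentage case keeps the u_int32_t
 * arithmetic from overflowing for large reference bandwidths, at the cost
 * of first truncating ref_bw to a multiple of 100.  For example, a 25%
 * specification against ref_bw = 100000000 (100Mb) evaluates to 25000000.
 */
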
void
print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
    const struct node_hfsc_sc *sc)
{
	printf(" %s", scname);

	if (d != 0) {
		printf("(");
		if (sc != NULL && sc->m1.bw_percent > 0)
			printf("%u%%", sc->m1.bw_percent);
		else
			printf("%s", rate2str((double)m1));
		printf(" %u", d);
	}

	if (sc != NULL && sc->m2.bw_percent > 0)
		printf(" %u%%", sc->m2.bw_percent);
	else
		printf(" %s", rate2str((double)m2));

	if (d != 0)
		printf(")");
}