/*	$KAME: altq_subr.c,v 1.23 2004/04/20 16:10:06 itojun Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_subr.c,v 1.12 2008/05/14 11:59:23 sephe Exp $	*/

/*
 * Copyright (C) 1997-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/ifq_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>

/* machine dependent clock related includes */
#if defined(__i386__)
#include <machine/clock.h>		/* for tsc_frequency */
#include <machine/md_var.h>		/* for cpu_feature */
#include <machine/specialreg.h>		/* for CPUID_TSC */
#endif /* __i386__ */

/*
 * internal function prototypes
 */
static void	tbr_timeout(void *);
static int	altq_enable_locked(struct ifaltq *);
static int	altq_disable_locked(struct ifaltq *);
static int	altq_detach_locked(struct ifaltq *);
static int	tbr_set_locked(struct ifaltq *, struct tb_profile *);

int (*altq_input)(struct mbuf *, int) = NULL;
static int tbr_timer = 0;		/* token bucket regulator timer */
static struct callout tbr_callout;

int pfaltq_running;			/* keep track of running state */

MALLOC_DEFINE(M_ALTQ, "altq", "ALTQ structures");

/*
 * alternate queueing support routines
 */

/* look up the queue state by the interface name and the queueing type. */
void *
altq_lookup(const char *name, int type)
{
	struct ifnet *ifp;

	if ((ifp = ifunit(name)) != NULL) {
		if (type != ALTQT_NONE && ifp->if_snd.altq_type == type)
			return (ifp->if_snd.altq_disc);
	}

	return (NULL);
}

int
altq_attach(struct ifaltq *ifq, int type, void *discipline,
    int (*enqueue)(struct ifaltq *, struct mbuf *, struct altq_pktattr *),
    struct mbuf *(*dequeue)(struct ifaltq *, struct mbuf *, int),
    int (*request)(struct ifaltq *, int, void *),
    void *clfier,
    void *(*classify)(struct ifaltq *, struct mbuf *,
		      struct altq_pktattr *))
{
	if (!ifq_is_ready(ifq))
		return ENXIO;

	ifq->altq_type = type;
	ifq->altq_disc = discipline;
	ifq->altq_enqueue = enqueue;
	ifq->altq_dequeue = dequeue;
	ifq->altq_request = request;
	ifq->altq_clfier = clfier;
	ifq->altq_classify = classify;
	ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);
	return 0;
}

static int
altq_detach_locked(struct ifaltq *ifq)
{
	if (!ifq_is_ready(ifq))
		return ENXIO;
	if (ifq_is_enabled(ifq))
		return EBUSY;
	if (!ifq_is_attached(ifq))
		return (0);

	ifq_set_classic(ifq);
	ifq->altq_type = ALTQT_NONE;
	ifq->altq_disc = NULL;
	ifq->altq_clfier = NULL;
	ifq->altq_classify = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	return 0;
}

int
altq_detach(struct ifaltq *ifq)
{
	int error;

	ALTQ_LOCK(ifq);
	error = altq_detach_locked(ifq);
	ALTQ_UNLOCK(ifq);
	return error;
}

static int
altq_enable_locked(struct ifaltq *ifq)
{
	if (!ifq_is_ready(ifq))
		return ENXIO;
	if (ifq_is_enabled(ifq))
		return 0;

	ifq_purge_locked(ifq);
	KKASSERT(ifq->ifq_len == 0);

	ifq->altq_flags |= ALTQF_ENABLED;
	if (ifq->altq_clfier != NULL)
		ifq->altq_flags |= ALTQF_CLASSIFY;
	return 0;
}

int
altq_enable(struct ifaltq *ifq)
{
	int error;

	ALTQ_LOCK(ifq);
	error = altq_enable_locked(ifq);
	ALTQ_UNLOCK(ifq);
	return error;
}

static int
altq_disable_locked(struct ifaltq *ifq)
{
	if (!ifq_is_enabled(ifq))
		return 0;

	ifq_purge_locked(ifq);
	KKASSERT(ifq->ifq_len == 0);
	ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY);
	return 0;
}

int
altq_disable(struct ifaltq *ifq)
{
	int error;

	ALTQ_LOCK(ifq);
	error = altq_disable_locked(ifq);
	ALTQ_UNLOCK(ifq);
	return error;
}

/*
 * internal representation of token bucket parameters
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 *	depth:	byte << 32
 */
#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)

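/*
 * Worked example (illustrative figures, not from the original source):
 * for a 10 Mbit/s profile on a 1 GHz machine clock,
 *	bytes/sec = 10,000,000 / 8 = 1,250,000
 *	tbr_rate  = (1,250,000 << 32) / 1,000,000,000 ~= 5,368,709
 * i.e. about 0.00125 byte per machine clock tick in 32-bit fixed point.
 */
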
struct mbuf *
tbr_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
{
	struct tb_regulator *tbr;
	struct mbuf *m;
	int64_t interval;
	uint64_t now;

	crit_enter();
	tbr = ifq->altq_tbr;
	if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
		/* if this is a remove after poll, bypass tbr check */
	} else {
		/* update token only when it is negative */
		if (tbr->tbr_token <= 0) {
			now = read_machclk();
			interval = now - tbr->tbr_last;
			if (interval >= tbr->tbr_filluptime)
				tbr->tbr_token = tbr->tbr_depth;
			else {
				tbr->tbr_token += interval * tbr->tbr_rate;
				if (tbr->tbr_token > tbr->tbr_depth)
					tbr->tbr_token = tbr->tbr_depth;
			}
			tbr->tbr_last = now;
		}
		/* if token is still negative, don't allow dequeue */
		if (tbr->tbr_token <= 0) {
			crit_exit();
			return (NULL);
		}
	}

	if (ifq_is_enabled(ifq)) {
		m = (*ifq->altq_dequeue)(ifq, mpolled, op);
	} else if (op == ALTDQ_POLL) {
		IF_POLL(ifq, m);
	} else {
		IF_DEQUEUE(ifq, m);
		KKASSERT(mpolled == NULL || mpolled == m);
	}

	if (m != NULL && op == ALTDQ_REMOVE)
		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
	tbr->tbr_lastop = op;
	crit_exit();
	return (m);
}

/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
 */
static int
tbr_set_locked(struct ifaltq *ifq, struct tb_profile *profile)
{
	struct tb_regulator *tbr, *otbr;

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0) {
		kprintf("%s: no cpu clock available!\n", __func__);
		return (ENXIO);
	}

	if (profile->rate == 0) {
		/* delete this tbr */
		if ((tbr = ifq->altq_tbr) == NULL)
			return (ENOENT);
		ifq->altq_tbr = NULL;
		kfree(tbr, M_ALTQ);
		return (0);
	}

	tbr = kmalloc(sizeof(*tbr), M_ALTQ, M_WAITOK | M_ZERO);
	tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
	tbr->tbr_depth = TBR_SCALE(profile->depth);
	if (tbr->tbr_rate > 0)
		tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
	else
		tbr->tbr_filluptime = 0xffffffffffffffffLL;
	tbr->tbr_token = tbr->tbr_depth;
	tbr->tbr_last = read_machclk();
	tbr->tbr_lastop = ALTDQ_REMOVE;

	otbr = ifq->altq_tbr;
	ifq->altq_tbr = tbr;	/* set the new tbr */

	if (otbr != NULL)
		kfree(otbr, M_ALTQ);
	else if (tbr_timer == 0) {
		callout_reset(&tbr_callout, 1, tbr_timeout, NULL);
		tbr_timer = 1;
	}
	return (0);
}

int
tbr_set(struct ifaltq *ifq, struct tb_profile *profile)
{
	int error;

	ALTQ_LOCK(ifq);
	error = tbr_set_locked(ifq, profile);
	ALTQ_UNLOCK(ifq);
	return error;
}

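/*
 * Usage sketch (illustrative, not part of the original source): install
 * a regulator on an interface's send queue, then delete it again by
 * setting the rate to zero.
 *
 *	struct tb_profile tb;
 *	int error;
 *
 *	tb.rate = 10000000;	(rate in bits per second, here 10 Mbit/s)
 *	tb.depth = 16384;	(bucket depth in bytes)
 *	error = tbr_set(&ifp->if_snd, &tb);
 *	...
 *	tb.rate = 0;		(rate 0 deletes the regulator)
 *	error = tbr_set(&ifp->if_snd, &tb);
 */
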
/*
 * tbr_timeout goes through the interface list, and kicks the drivers
 * if necessary.
 */
static void
tbr_timeout(void *arg)
{
	struct ifnet *ifp;
	int active;

	active = 0;
	crit_enter();
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) {
		if (ifp->if_snd.altq_tbr == NULL)
			continue;
		active++;
		if (!ifq_is_empty(&ifp->if_snd) && ifp->if_start != NULL) {
			ifnet_serialize_tx(ifp);
			(*ifp->if_start)(ifp);
			ifnet_deserialize_tx(ifp);
		}
	}
	crit_exit();
	if (active > 0)
		callout_reset(&tbr_callout, 1, tbr_timeout, NULL);
	else
		tbr_timer = 0;	/* don't need tbr_timer anymore */
}

/*
 * get token bucket regulator profile
 */
int
tbr_get(struct ifaltq *ifq, struct tb_profile *profile)
{
	struct tb_regulator *tbr;

	if ((tbr = ifq->altq_tbr) == NULL) {
		profile->rate = 0;
		profile->depth = 0;
	} else {
		profile->rate =
		    (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq);
		profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth);
	}
	return (0);
}

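/*
 * Round-trip check (illustrative, continuing the 10 Mbit/s example
 * above): with tbr_rate ~= 5,368,709 and machclk_freq = 10^9,
 *	rate = (5,368,709 * 8 * 10^9) >> 32 ~= 9,999,999 bits/sec,
 * recovering the configured rate up to fixed-point rounding.
 */
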
/*
 * attach a discipline to the interface.  if one already exists, it is
 * overridden.
 */
int
altq_pfattach(struct pf_altq *a)
{
	struct ifaltq *ifq;
	struct ifnet *ifp;
	int error;

	if (a->scheduler == ALTQT_NONE)
		return 0;

	if (a->altq_disc == NULL)
		return EINVAL;

	ifp = ifunit(a->ifname);
	if (ifp == NULL)
		return EINVAL;
	ifq = &ifp->if_snd;

	ALTQ_LOCK(ifq);

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_pfattach(a, ifq);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_pfattach(a, ifq);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_pfattach(a, ifq);
		break;
#endif
#ifdef ALTQ_FAIRQ
	case ALTQT_FAIRQ:
		error = fairq_pfattach(a, ifq);
		break;
#endif
	default:
		error = ENXIO;
		goto back;
	}

	/* if the state is running, enable altq */
	if (error == 0 && pfaltq_running && ifq->altq_type != ALTQT_NONE &&
	    !ifq_is_enabled(ifq))
		error = altq_enable_locked(ifq);

	/* if altq is already enabled, reset the tokenbucket regulator */
	if (error == 0 && ifq_is_enabled(ifq)) {
		struct tb_profile tb;

		tb.rate = a->ifbandwidth;
		tb.depth = a->tbrsize;
		error = tbr_set_locked(ifq, &tb);
	}
back:
	ALTQ_UNLOCK(ifq);
	return (error);
}

/*
 * detach a discipline from the interface.
 * it is possible that the discipline was already overridden by another
 * discipline.
 */
int
altq_pfdetach(struct pf_altq *a)
{
	struct ifnet *ifp;
	struct ifaltq *ifq;
	int error = 0;

	ifp = ifunit(a->ifname);
	if (ifp == NULL)
		return (EINVAL);
	ifq = &ifp->if_snd;

	/* if this discipline is no longer referenced, just return */
	if (a->altq_disc == NULL)
		return (0);

	ALTQ_LOCK(ifq);

	if (a->altq_disc != ifq->altq_disc)
		goto back;

	if (ifq_is_enabled(ifq))
		error = altq_disable_locked(ifq);
	if (error == 0)
		error = altq_detach_locked(ifq);

back:
	ALTQ_UNLOCK(ifq);
	return (error);
}

/*
 * add a discipline or a queue
 */
int
altq_add(struct pf_altq *a)
{
	int error = 0;

	if (a->qname[0] != 0)
		return (altq_add_queue(a));

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0)
		panic("altq_add: no cpu clock");

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_add_altq(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_add_altq(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_add_altq(a);
		break;
#endif
#ifdef ALTQ_FAIRQ
	case ALTQT_FAIRQ:
		error = fairq_add_altq(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * remove a discipline or a queue
 */
int
altq_remove(struct pf_altq *a)
{
	int error = 0;

	if (a->qname[0] != 0)
		return (altq_remove_queue(a));

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_remove_altq(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_remove_altq(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_remove_altq(a);
		break;
#endif
#ifdef ALTQ_FAIRQ
	case ALTQT_FAIRQ:
		error = fairq_remove_altq(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * add a queue to the discipline
 */
int
altq_add_queue(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_add_queue(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_add_queue(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_add_queue(a);
		break;
#endif
#ifdef ALTQ_FAIRQ
	case ALTQT_FAIRQ:
		error = fairq_add_queue(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * remove a queue from the discipline
 */
int
altq_remove_queue(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_remove_queue(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_remove_queue(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_remove_queue(a);
		break;
#endif
#ifdef ALTQ_FAIRQ
	case ALTQT_FAIRQ:
		error = fairq_remove_queue(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * get queue statistics
 */
int
altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_getqstats(a, ubuf, nbytes);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_getqstats(a, ubuf, nbytes);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_getqstats(a, ubuf, nbytes);
		break;
#endif
#ifdef ALTQ_FAIRQ
	case ALTQT_FAIRQ:
		error = fairq_getqstats(a, ubuf, nbytes);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * read and write diffserv field in IPv4 or IPv6 header
 */
uint8_t
read_dsfield(struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct mbuf *m0;
	uint8_t ds_field = 0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return ((uint8_t)0);

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	}
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		kprintf("read_dsfield: can't locate header!\n");
#endif
		return ((uint8_t)0);
	}

	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;

		if (ip->ip_v != 4)
			return ((uint8_t)0);	/* version mismatch! */
		ds_field = ip->ip_tos;
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		uint32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return ((uint8_t)0);	/* version mismatch! */
		ds_field = (flowlabel >> 20) & 0xff;
	}
#endif
	return (ds_field);
}

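/*
 * Layout note (added for clarity): the first 32-bit word of an IPv6
 * header is version (4 bits), traffic class (8 bits), flow label
 * (20 bits).  After ntohl() the version sits at bits 28-31 and the
 * traffic class (the diffserv byte) at bits 20-27, which is what the
 * shifts above extract.
 */
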
void
write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, uint8_t dsfield)
{
	struct mbuf *m0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return;

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	}
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		kprintf("write_dsfield: can't locate header!\n");
#endif
		return;
	}

	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;
		uint8_t old;
		int32_t sum;

		if (ip->ip_v != 4)
			return;		/* version mismatch! */
		old = ip->ip_tos;
		dsfield |= old & 3;	/* leave CU bits */
		if (old == dsfield)
			return;
		ip->ip_tos = dsfield;
		/*
		 * update checksum (from RFC1624)
		 *	   HC' = ~(~HC + ~m + m')
		 */
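		/*
		 * Worked note (added for clarity): m is the old 16-bit
		 * header word holding the TOS byte and m' the new one.
		 * Only the low byte changes, so ~m + m' reduces to
		 * 0xff00 + (~old & 0xff) + dsfield; the folding steps
		 * below add the carries back into the low 16 bits
		 * before the final one's complement.
		 */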
		sum = ~ntohs(ip->ip_sum) & 0xffff;
		sum += 0xff00 + (~old & 0xff) + dsfield;
		sum = (sum >> 16) + (sum & 0xffff);
		sum += (sum >> 16);	/* add carry */

		ip->ip_sum = htons(~sum & 0xffff);
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		uint32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return;		/* version mismatch! */
		flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
		ip6->ip6_flow = htonl(flowlabel);
	}
#endif
}

/*
 * high resolution clock support taking advantage of a machine dependent
 * high resolution time counter (e.g., timestamp counter of intel pentium).
 * we assume
 *  - 64-bit-long monotonically-increasing counter
 *  - frequency range is 100M-4GHz (CPU speed)
 */
/* if pcc is not available or disabled, emulate 256MHz using microtime() */
#define	MACHCLK_SHIFT	8

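/*
 * Arithmetic note (added for clarity): microtime() advances by
 * 1,000,000 microseconds per second, so shifting left by
 * MACHCLK_SHIFT (8) yields 1,000,000 * 256 = 256,000,000 ticks per
 * second, the emulated 256MHz clock mentioned above.
 */
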
int machclk_usepcc;
uint64_t machclk_freq = 0;
uint32_t machclk_per_tick = 0;

void
init_machclk(void)
{
	callout_init(&tbr_callout);

	machclk_usepcc = 1;

#if !defined(__i386__) || defined(ALTQ_NOPCC)
	machclk_usepcc = 0;
#elif defined(__DragonFly__) && defined(SMP)
	machclk_usepcc = 0;
#elif defined(__i386__)
	/* check if TSC is available */
	if (machclk_usepcc == 1 && (cpu_feature & CPUID_TSC) == 0)
		machclk_usepcc = 0;
#endif

	if (machclk_usepcc == 0) {
		/* emulate 256MHz using microtime() */
		machclk_freq = 1000000LLU << MACHCLK_SHIFT;
		machclk_per_tick = machclk_freq / hz;
#ifdef ALTQ_DEBUG
		kprintf("altq: emulate %lluHz cpu clock\n", machclk_freq);
#endif
		return;
	}

	/*
	 * if the clock frequency (of Pentium TSC or Alpha PCC) is
	 * accessible, just use it.
	 */
#ifdef _RDTSC_SUPPORTED_
	if (cpu_feature & CPUID_TSC)
		machclk_freq = (uint64_t)tsc_frequency;
#endif

	/*
	 * if we don't know the clock frequency, measure it.
	 */
	if (machclk_freq == 0) {
		static int wait;
		struct timeval tv_start, tv_end;
		uint64_t start, end, diff;
		int timo;

		microtime(&tv_start);
		start = read_machclk();
		timo = hz;	/* 1 sec */
		tsleep(&wait, PCATCH, "init_machclk", timo);
		microtime(&tv_end);
		end = read_machclk();
		diff = (uint64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
		     + tv_end.tv_usec - tv_start.tv_usec;
		if (diff != 0)
			machclk_freq = (end - start) * 1000000 / diff;
	}

	machclk_per_tick = machclk_freq / hz;

#ifdef ALTQ_DEBUG
	kprintf("altq: CPU clock: %lluHz\n", machclk_freq);
#endif
}

uint64_t
read_machclk(void)
{
	uint64_t val;

	if (machclk_usepcc) {
#ifdef _RDTSC_SUPPORTED_
		val = rdtsc();
#else
		panic("read_machclk");
#endif
	} else {
		struct timeval tv;

		microtime(&tv);
		val = (((uint64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
		    + tv.tv_usec) << MACHCLK_SHIFT);
	}
	return (val);
}

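/*
 * Usage sketch (illustrative, not part of the original source): measure
 * an elapsed interval in machine clock ticks and convert it back to
 * microseconds with machclk_freq, mirroring the calibration loop in
 * init_machclk().
 *
 *	uint64_t t0, t1, usecs;
 *
 *	t0 = read_machclk();
 *	(do work)
 *	t1 = read_machclk();
 *	usecs = (t1 - t0) * 1000000 / machclk_freq;
 */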