linux/cycles.h (jack2)
/*
    Copyright (C) 2001 Paul Davis
    Code derived from various headers from the Linux kernel

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

    $Id: cycles.h,v 1.4.2.1 2006/06/20 14:44:00 letz Exp $
*/
#ifndef __jack_cycles_h__
#define __jack_cycles_h__

/*
 * Standard way to access the cycle counter on i586+ CPUs.
 * Currently only used on SMP.
 *
 * If you really have an SMP machine with i486 chips or older,
 * compile for that, and this will just always return zero.
 * That's ok, it just means that the nicer scheduling heuristics
 * won't work for you.
 *
 * We only use the low 32 bits, and we'd simply better make sure
 * that we reschedule before that wraps. Scheduling at least every
 * four billion cycles just basically sounds like a good idea,
 * regardless of how fast the machine is.
 */
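/*
 * A minimal sketch of the wrap argument above (illustration only, not part
 * of the original header; elapsed_low32 is a hypothetical helper): unsigned
 * 32-bit subtraction is defined modulo 2^32, so a delta taken across a
 * single wrap of the low counter half is still correct as long as fewer
 * than ~4 billion cycles separate the two samples.
 */
#if 0   /* illustration, excluded from compilation */
static inline unsigned int elapsed_low32(unsigned int start, unsigned int end)
{
    /* Correct even if `end` wrapped past zero, provided the true elapsed
       count fits in 32 bits. */
    return end - start;
}
#endif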
#ifdef __x86_64__

typedef unsigned long cycles_t;
extern cycles_t cacheflush_time;

static inline unsigned long get_cycles(void)
{
    unsigned int hi, lo;

    /* rdtsc delivers the 64-bit time-stamp counter in the edx:eax pair */
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return (((unsigned long)hi) << 32) | ((unsigned long)lo);
}

#endif /* __x86_64__ */
#ifdef __sparc_v9__
/* rd is V9 only */
static inline unsigned long long get_cycles(void)
{
    unsigned long long res;

    /* %tick is the V9 cycle counter */
    __asm__ __volatile__("rd %%tick, %0" : "=r"(res));
    return res;
}
#endif /* __sparc_v9__ */
#ifdef __PPC__

/* PowerPC */

#define CPU_FTR_601 0x00000100

typedef unsigned long cycles_t;

/* For the "cycle" counter we use the timebase lower half. */

extern cycles_t cacheflush_time;

static inline cycles_t get_cycles(void)
{
    cycles_t ret = 0;

    /* mftb reads the lower timebase register; the __ftr_fixup entry follows
       the Linux kernel's CPU feature fixup convention, since the 601 lacks
       the timebase (mftb is unavailable there). */
    __asm__ __volatile__(
        "98:    mftb %0\n"
        "99:\n"
        ".section __ftr_fixup,\"a\"\n"
        "    .long %1\n"
        "    .long 0\n"
        "    .long 98b\n"
        "    .long 99b\n"
        ".previous"
        : "=r" (ret) : "i" (CPU_FTR_601));
    return ret;
}

#endif /* __PPC__ */
#ifdef __i386__

typedef unsigned long long cycles_t;

extern cycles_t cacheflush_time;

/* "=A" places the 64-bit rdtsc result in the edx:eax register pair */
#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))

static inline cycles_t get_cycles(void)
{
    unsigned long long ret;

    rdtscll(ret);
    return ret;
}

#endif /* __i386__ */
/* everything else but x86, amd64, sparcv9 or ppc */
#if !defined (__PPC__) && !defined (__x86_64__) && !defined (__i386__) && !defined (__sparc_v9__)

#warning No suitable get_cycles() implementation. Returning 0 instead

typedef unsigned long long cycles_t;

static inline cycles_t get_cycles(void)
{
    return 0;
}

#endif /* everything else but x86, amd64, sparcv9 or ppc */
#endif /* __jack_cycles_h__ */
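/*
 * A hedged usage sketch (not part of the original header; the function name
 * and include path are assumptions): timing a code region with get_cycles().
 * The delta is in raw CPU/timebase ticks, not seconds; converting to time
 * would require knowing the tick rate of the platform in use.
 */
#if 0   /* illustration, excluded from compilation */
#include <stdio.h>
#include "cycles.h"

static void timed_region(void)
{
    unsigned long long start = get_cycles();
    /* ... work to be measured ... */
    unsigned long long end = get_cycles();

    printf("elapsed ticks: %llu\n", end - start);
}
#endif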