ArdourMidi.git: libs/ardour/ardour/cycles.h
/*
    Copyright (C) 2001 Paul Davis
    Code derived from various headers from the Linux kernel

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef __ardour_cycles_h__
#define __ardour_cycles_h__

#include <stdint.h>

#if defined(__i386__) || defined(__x86_64__)

/*
 * Standard way to access the cycle counter on i586+ CPUs.
 * Currently only used on SMP.
 *
 * If you really have a SMP machine with i486 chips or older,
 * compile for that, and this will just always return zero.
 * That's ok, it just means that the nicer scheduling heuristics
 * won't work for you.
 *
 * We only use the low 32 bits, and we'd simply better make sure
 * that we reschedule before that wraps. Scheduling at least every
 * four billion cycles just basically sounds like a good idea,
 * regardless of how fast the machine is.
 */
typedef uint64_t cycles_t;

extern cycles_t cacheflush_time;

#define rdtscll(val) \
__asm__ __volatile__("rdtsc" : "=A" (val))

static inline cycles_t get_cycles (void)
{
	cycles_t ret;

	rdtscll(ret);
	return ret & 0xffffffff;
}
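/*
 * Illustrative note, not part of the original header: since only the low
 * 32 bits of the TSC are kept, elapsed-time measurements should use
 * unsigned wraparound arithmetic and mask the difference back to 32 bits,
 * e.g.
 *
 *     cycles_t before = get_cycles ();
 *     ... code being timed ...
 *     cycles_t elapsed = (get_cycles () - before) & 0xffffffff;
 *
 * which stays correct across a single wrap of the 32-bit counter.
 */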
#elif defined(__powerpc__)

#define CPU_FTR_601 0x00000100

typedef uint32_t cycles_t;

/*
 * For the "cycle" counter we use the timebase lower half.
 * Currently only used on SMP.
 */

extern cycles_t cacheflush_time;

static inline cycles_t get_cycles(void)
{
	cycles_t ret = 0;

	__asm__ __volatile__(
		"98:	mftb %0\n"
		"99:\n"
		".section __ftr_fixup,\"a\"\n"
		"	.long %1\n"
		"	.long 0\n"
		"	.long 98b\n"
		"	.long 99b\n"
		".previous"
		: "=r" (ret) : "i" (CPU_FTR_601));
	return ret;
}

#elif defined(__ia64__)
/* ia64 */

typedef uint32_t cycles_t;
static inline cycles_t
get_cycles (void)
{
	cycles_t ret;
	__asm__ __volatile__ ("mov %0=ar.itc" : "=r"(ret));
	return ret;
}

#elif defined(__alpha__)
/* alpha */

/*
 * Standard way to access the cycle counter.
 * Currently only used on SMP for scheduling.
 *
 * Only the low 32 bits are available as a continuously counting entity.
 * But this only means we'll force a reschedule every 8 seconds or so,
 * which isn't an evil thing.
 */

typedef uint32_t cycles_t;
static inline cycles_t get_cycles (void)
{
	cycles_t ret;
	__asm__ __volatile__ ("rpcc %0" : "=r"(ret));
	return ret;
}
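/*
 * Illustrative arithmetic, not part of the original header: the "every 8
 * seconds or so" figure above assumes a clock around 500 MHz, where a
 * 32-bit counter wraps after roughly 2^32 / 5e8 ~= 8.6 seconds.
 */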
#elif defined(__s390__)
/* s390 */

/* the STCK instruction stores a 64-bit TOD clock value */
typedef uint64_t cycles_t;
static inline cycles_t get_cycles(void)
{
	cycles_t cycles;
	__asm__("stck 0(%0)" : : "a" (&(cycles)) : "memory", "cc");
	return cycles >> 2;
}
#elif defined(__hppa__)
/* hppa/parisc */

#define mfctl(reg) ({			\
	uint32_t cr;			\
	__asm__ __volatile__(		\
		"mfctl " #reg ",%0" :	\
		"=r" (cr)		\
	);				\
	cr;				\
})

typedef uint32_t cycles_t;
static inline cycles_t get_cycles (void)
{
	return mfctl(16);
}

#elif defined(__mips__)
/* mips/mipsel */

/*
 * Standard way to access the cycle counter.
 * Currently only used on SMP for scheduling.
 *
 * Only the low 32 bits are available as a continuously counting entity.
 * But this only means we'll force a reschedule every 8 seconds or so,
 * which isn't an evil thing.
 *
 * We know that all SMP capable CPUs have cycle counters.
 */

#define __read_32bit_c0_register(source, sel)			\
({ int __res;							\
	if (sel == 0)						\
		__asm__ __volatile__(				\
			"mfc0\t%0, " #source "\n\t"		\
			: "=r" (__res));			\
	else							\
		__asm__ __volatile__(				\
			".set\tmips32\n\t"			\
			"mfc0\t%0, " #source ", " #sel "\n\t"	\
			".set\tmips0\n\t"			\
			: "=r" (__res));			\
	__res;							\
})

/* #define CP0_COUNT $9 */
#define read_c0_count() __read_32bit_c0_register($9, 0)

typedef uint32_t cycles_t;
static inline cycles_t get_cycles (void)
{
	return read_c0_count();
}
/* begin mach */
#elif defined(__APPLE__)

#include <CoreAudio/HostTime.h>

typedef UInt64 cycles_t;
static inline cycles_t get_cycles (void)
{
	UInt64 time = AudioGetCurrentHostTime();
	return AudioConvertHostTimeToNanos(time);
}
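/*
 * Note added for clarity, not in the original header: on OS X the value
 * returned above is the CoreAudio host time converted to nanoseconds, so
 * here cycles_t counts nanoseconds rather than CPU cycles.
 */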
/* end mach */
#else

/* debian: sparc, arm, m68k */

#warning You are compiling libardour on a platform for which ardour/cycles.h needs work

#include <sys/time.h>

typedef long cycles_t;

extern cycles_t cacheflush_time;

static inline cycles_t get_cycles(void)
{
	struct timeval tv;
	gettimeofday (&tv, NULL);

	return tv.tv_usec;
}
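/*
 * Illustrative caveat, not part of the original header: tv_usec only
 * ranges over 0..999999, so this fallback wraps every second and is only
 * useful for short relative measurements, e.g.
 *
 *     cycles_t t0 = get_cycles ();
 *     ... short section being timed ...
 *     long elapsed_us = (get_cycles () - t0 + 1000000) % 1000000;
 */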
#endif

#endif /* __ardour_cycles_h__ */