#ifndef __ALPHA_DELAY_H
#define __ALPHA_DELAY_H

/*
 * Copyright (C) 1993 Linus Torvalds
 *
 * Delay routines, using a pre-computed "loops_per_second" value.
 */
/*
 * Busy-wait for `loops` iterations of a one-instruction count-down loop.
 *
 * The ".align 3" puts the loop head on an 8-byte boundary so the
 * subq/bge pair issues efficiently; the "0" input constraint ties the
 * input operand to the same register as the output, and __volatile__
 * stops the compiler from deleting the apparently useless loop.
 */
extern __inline__ void
__delay(unsigned long loops)
{
	__asm__ __volatile__(
		".align 3\n"
		"1:\tsubq %0,1,%0\n\t"
		"bge %0,1b"
		: "=r" (loops)
		: "0" (loops));
}
/*
 * Division by multiplication: you don't have to worry about
 * loss of precision.
 *
 * Use only for very small delays ( < 1 msec). Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays. This is a "reasonable" implementation, though (and the
 * first constant multiplications gets optimized away if the delay is
 * a constant).
 *
 * Optimize small constants further by exposing the second multiplication
 * to the compiler. In addition, mulq is 2 cycles faster than umulh.
 */
/*
 * Delay for approximately `usecs` microseconds, given `lps`
 * (loops per second) for the current CPU.
 *
 * Avoids a division by using 64.64 fixed point: multiplying by
 * 2**64/10**6 turns microseconds into a fraction of a second scaled
 * by 2**64, and umulh then takes the high 64 bits of the product with
 * lps, yielding the loop count for __delay().
 */
extern __inline__ void
__udelay(unsigned long usecs, unsigned long lps)
{
	/* compute (usecs * 2**64 / 10**6) * loops_per_sec / 2**64 */
	usecs *= 0x000010c6f7a0b5edUL;	/* 2**64 / 1000000 */
	__asm__("umulh %1,%2,%0"
		: "=r" (usecs)
		: "r" (usecs), "r" (lps));
	/* NOTE(review): this call was lost in the mangled source; without
	   it the computed loop count is never used and no delay happens. */
	__delay(usecs);
}
/*
 * Delay variant intended for small compile-time-constant `usecs`.
 *
 * Uses 32.32 fixed point instead of 64.64: the 2**32/10**6 scale and
 * the subsequent multiply/shift are plain C, so the compiler can fold
 * the first constant multiplication away and use mulq (2 cycles faster
 * than the umulh needed by __udelay).
 */
extern __inline__ void
__small_const_udelay(unsigned long usecs, unsigned long lps)
{
	/* compute (usecs * 2**32 / 10**6) * loops_per_sec / 2**32 */
	usecs *= 0x10c6;		/* 2^32 / 10^6 */
	/* NOTE(review): the three lines below were truncated in the
	   mangled source; restored to match the formula in the comment
	   above ("... * loops_per_sec / 2**32") and the __udelay()
	   pattern. */
	usecs *= lps;
	usecs >>= 32;
	__delay(usecs);
}
/*
 * udelay(usecs): public microsecond-delay interface.
 *
 * A constant argument below 2**32 takes the cheap __small_const_udelay
 * path (the multiply is exposed to the compiler); anything else takes
 * the full 64-bit __udelay path.  On SMP the per-CPU loops_per_sec
 * value is read from cpu_data; on UP the global loops_per_sec is used.
 *
 * NOTE(review): the mangled source had two back-to-back definitions of
 * udelay (an illegal redefinition) and the SMP variant had lost its
 * ": __udelay(...)" branch; the __SMP__ conditional that selects
 * between them has been restored.
 */
#ifdef __SMP__
#define udelay(usecs) \
	(__builtin_constant_p(usecs) && usecs < 0x100000000UL \
	 ? __small_const_udelay(usecs, \
		cpu_data[smp_processor_id()].loops_per_sec) \
	 : __udelay(usecs, \
		cpu_data[smp_processor_id()].loops_per_sec))
#else
#define udelay(usecs) \
	(__builtin_constant_p(usecs) && usecs < 0x100000000UL \
	 ? __small_const_udelay(usecs, loops_per_sec) \
	 : __udelay(usecs, loops_per_sec))
#endif
70 #endif /* defined(__ALPHA_DELAY_H) */