Import 2.3.52pre1
[davej-history.git] / arch/i386/lib/delay.c
/*
 *	Precise Delay Loops for i386
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors. The additional
 *	jump magic is needed to get the timing stable on all the CPUs
 *	we have to worry about.
 */
#include <linux/sched.h>
#include <linux/delay.h>
#include <asm/delay.h>

#ifdef __SMP__
#include <asm/smp.h>
#endif

int x86_udelay_tsc = 0;		/* Delay via TSC */
/*
 *	Do a udelay using the TSC for any CPU that happens
 *	to have one that we trust. This could be optimised to avoid
 *	the multiply per loop but it's a delay loop so who are we kidding...
 */

static void __rdtsc_delay(unsigned long loops)
{
	unsigned long bclock, now;

	rdtscl(bclock);
	do
	{
		rdtscl(now);
	}
	while ((now - bclock) < loops);
}
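/*
 * Illustrative note (not in the original file): the unsigned
 * subtraction (now - bclock) is computed modulo 2^32, so the
 * comparison stays correct even if the low 32 bits of the TSC
 * wrap around between the two reads.  A minimal sketch of the
 * same idiom, assuming a hypothetical read_cycles() helper that
 * returns a free-running 32-bit counter:
 */
#if 0
static void cycle_delay(unsigned long cycles)
{
	unsigned long start = read_cycles();	/* hypothetical helper */

	while ((read_cycles() - start) < cycles)
		;	/* spin until the requested cycle count has elapsed */
}
#endif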
/*
 *	Non TSC based delay loop for 386, 486, MediaGX
 */

static void __loop_delay(unsigned long loops)
{
	int d0;
	__asm__ __volatile__(
		"\tjmp 1f\n"
		".align 16\n"
		"1:\tjmp 2f\n"
		".align 16\n"
		"2:\tdecl %0\n\tjns 2b"
		:"=&a" (d0)
		:"0" (loops));
}
void __delay(unsigned long loops)
{
	if (x86_udelay_tsc)
		__rdtsc_delay(loops);
	else
		__loop_delay(loops);
}
inline void __const_udelay(unsigned long xloops)
{
	int d0;
	__asm__("mull %0"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0" (current_cpu_data.loops_per_sec));
	__delay(xloops);
}
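/*
 * Illustrative sketch (not in the original file): "mull" multiplies
 * %eax by its operand and leaves the 64-bit product in %edx:%eax; the
 * "=d" output constraint keeps only the high 32 bits, i.e. the product
 * shifted right by 32.  In plain C the scaling above is roughly:
 */
#if 0
static unsigned long const_udelay_scale(unsigned long xloops)
{
	return ((unsigned long long)xloops *
		current_cpu_data.loops_per_sec) >> 32;
}
#endif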
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c6);	/* 2**32 / 1000000 */
}
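/*
 * Illustrative note (not in the original file): 0x000010c6 == 4294 is
 * floor(2^32 / 1000000).  Combined with the >> 32 scaling performed by
 * the mull in __const_udelay this gives
 *
 *	loops ~= usecs * (2^32 / 1000000) * loops_per_sec / 2^32
 *	       = usecs * loops_per_sec / 1000000
 *
 * i.e. the number of delay loops corresponding to `usecs' microseconds.
 */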