[GFS2] Alter direct I/O path
linux-2.6/sactl.git: include/asm-i386/system.h (blob 49928eb33f8bbd830ac1937a7ff5b50de1adec5d)
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))
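/*
 * Illustrative use (a sketch, not part of this header): the context-switch
 * path reloads a user segment register this way; if the saved selector has
 * become invalid, the fixup above quietly loads the null selector instead
 * of faulting:
 *
 *	loadsegment(gs, next->gs);
 */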
/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))

#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))

#define read_cr2() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr2,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr2(x) \
	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))

#define read_cr3() ({ \
	unsigned int __dummy; \
	__asm__ ( \
		"movl %%cr3,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr3(x) \
	__asm__ __volatile__("movl %0,%%cr3": :"r" (x))

#define read_cr4() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define read_cr4_safe() ({ \
	unsigned int __dummy; \
	/* This could fault if %cr4 does not exist */ \
	__asm__("1: movl %%cr4, %0		\n" \
		"2:				\n" \
		".section __ex_table,\"a\"	\n" \
		".long 1b,2b			\n" \
		".previous			\n" \
		: "=r" (__dummy): "0" (0)); \
	__dummy; \
})
#define write_cr4(x) \
	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define stts() write_cr0(8 | read_cr0())

#endif	/* __KERNEL__ */

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")
#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

#ifdef CONFIG_X86_CMPXCHG64
/*
 * The semantics of CMPXCHG8B are a bit strange; this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		:	"D"(ptr),
			"b"(low),
			"c"(high)
		:	"ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
					 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}

#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
			 unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
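/*
 * Illustrative sketch (hypothetical names, not part of this header):
 * a plain 64-bit store on i386 compiles to two 32-bit stores, so a
 * concurrent reader could observe a torn half-old/half-new value;
 * set_64bit() makes the store appear atomic to such a reader.
 *
 *	static unsigned long long shared_stamp;
 *
 *	static inline void publish_stamp(unsigned long long v)
 *	{
 *		set_64bit(&shared_stamp, v);
 *	}
 */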
#endif

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	   although strictly speaking the constraints are invalid, since
 *	   *ptr is really an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
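/*
 * Illustrative sketch (hypothetical names): xchg() is typically used to
 * atomically claim a flag or hand over a pointer; the returned old value
 * tells the caller whether it won the race.
 *
 *	static unsigned long pending;
 *
 *	static inline int claim_pending(void)
 *	{
 *		return xchg(&pending, 1) == 0;	// equivalent: !tas(&pending)
 *	}
 */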
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
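/*
 * Illustrative sketch (hypothetical names): the canonical cmpxchg()
 * retry loop. The update succeeded iff the returned value equals the
 * old value that was passed in.
 *
 *	static unsigned long counter;
 *
 *	static inline void lockfree_inc(void)
 *	{
 *		unsigned long old;
 *
 *		do {
 *			old = counter;
 *		} while (cmpxchg(&counter, old, old + 1) != old);
 *	}
 */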
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary
 * to simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
					(unsigned long)(n), sizeof(*(ptr))); \
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
					(unsigned long)(n), sizeof(*(ptr))); \
	__ret;								\
})
#endif
#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
				      unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}

#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
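/*
 * Illustrative sketch (hypothetical names): the same retry idiom on a
 * 64-bit quantity, using the lock-prefixed cmpxchg8b underneath.
 *
 *	static unsigned long long seq;
 *
 *	static inline void seq_bump(void)
 *	{
 *		unsigned long long old;
 *
 *		do {
 *			old = seq;
 *		} while (cmpxchg64(&seq, old, old + 1) != old);
 *	}
 */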
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores. wmb() ceases
 * to be a nop for these.
 */

/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should already be ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier. All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads. This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies. See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends(). However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b". Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while(0)
#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO-store-capable CPUs for now that do SSE,
   but allow for the possibility already. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
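/*
 * Illustrative pairing (hypothetical variables, both initially 0):
 * smp_wmb() on the producer keeps the store to "data" visible before
 * the store to "flag"; the matching smp_rmb() on the consumer keeps
 * the two reads in the same order. On UP kernels both collapse to a
 * compiler barrier.
 *
 *	// CPU 0 (producer)		// CPU 1 (consumer)
 *	data = 42;			while (!flag)
 *	smp_wmb();				cpu_relax();
 *	flag = 1;			smp_rmb();
 *					BUG_ON(data != 42);
 */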
#include <linux/irqflags.h>

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
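/*
 * Illustrative pairing (hypothetical caller): bracket I/O that cannot
 * tolerate the idle loop executing hlt, and re-enable on every exit
 * path:
 *
 *	disable_hlt();
 *	do_critical_io();	// hypothetical helper
 *	enable_hlt();
 */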
extern int es7000_plat;
void cpu_idle_wait(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif