#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */
#ifdef CONFIG_X86_32

struct task_struct;	/* one of the stranger aspects of C forward declarations */
extern struct task_struct *FASTCALL(__switch_to(struct task_struct *prev,
						struct task_struct *next));
/*
 * Saving eflags is important: it not only switches IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last) do {				\
	unsigned long esi, edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.sp), "=m" (prev->thread.ip),	\
		      "=a" (last), "=S" (esi), "=D" (edi)		\
		     :"m" (next->thread.sp), "m" (next->thread.ip),	\
		      "2" (prev), "d" (next));				\
} while (0)
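/*
 * Illustrative usage sketch (not part of this header): the scheduler's
 * context_switch() invokes the macro roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * so that, once we are running on next's kernel stack, the "last"
 * output still names the task we actually switched away from.
 */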
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else

#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"
/* Save and restore flags to clear and handle a leaking NT flag */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					   \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	   \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	   \
	     "call __switch_to\n\t"					   \
	     ".globl thread_return\n"					   \
	     "thread_return:\n\t"					   \
	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			   \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			   \
	     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"	   \
	     "movq %%rax,%%rdi\n\t"					   \
	     "jc ret_from_fork\n\t"					   \
	     RESTORE_CONTEXT						   \
	     : "=a" (last)						   \
	     : [next] "S" (next), [prev] "D" (prev),			   \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	   \
	       [tif_fork] "i" (TIF_FORK),				   \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif
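/*
 * Illustrative note on the code above (not part of the original comment):
 * after __switch_to returns, %rsi is reloaded with the new current task
 * from the PDA, the LOCK'd "btr" atomically tests and clears TIF_FORK in
 * its thread_info flags, and the resulting carry flag steers freshly
 * forked children to ret_from_fork while ordinary tasks fall through to
 * RESTORE_CONTEXT.
 */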
#ifdef __KERNEL__
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), "m" (*((addr)+4)), "m" (*((addr)+7)), "0" (base)); \
} while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), "m" (*((addr)+6)), "0" (limit)); \
} while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)), (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)), ((limit)-1))
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %k1, %%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		_ASM_ALIGN "\n\t"		\
		_ASM_PTR " 1b,3b\n"		\
		".previous"			\
		: :"r" (value), "r" (0))
/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
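/*
 * Illustrative usage sketch (not part of this header): the 32-bit context
 * switch code saves and reloads the user %gs selector roughly as
 *
 *	savesegment(gs, prev->gs);
 *	...
 *	loadsegment(gs, next->gs);
 *
 * where "prev" and "next" stand for the outgoing and incoming
 * thread_structs; loadsegment() falls back to the null selector via its
 * fixup section if the new value faults.
 */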
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"	/* load the segment limit for the selector */
		:"=r" (__limit):"r" (segment));
	return __limit + 1;
}
static inline void native_clts(void)
{
	asm volatile ("clts");
}
/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads and stores around it, which can hurt performance. The solution is
 * to use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;
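/*
 * Illustrative note (not part of the original comment): each CR read
 * below lists "=m" (__force_order) as an output and each CR write lists
 * "m" (__force_order) as an input, so the compiler sees a dependency
 * through this variable and keeps the accessors in program order without
 * needing a full "memory" clobber.
 */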
static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}
static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
}
static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}
static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
}
static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}
static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
}
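/*
 * Illustrative usage sketch (not part of this header): reloading CR3 with
 * its current value flushes the non-global TLB entries, which is roughly
 * what the native TLB-flush helpers do:
 *
 *	write_cr3(read_cr3());
 */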
static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}
static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, a cr4 always
	 * exists, so it will never fail. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0		\n"
		     "2:			\n"
		     ".section __ex_table,\"a\"	\n"
		     ".long 1b,2b		\n"
		     ".previous			\n"
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}
static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}
static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())
#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif
/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */

#define stts() write_cr0(8 | read_cr0())
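/*
 * Illustrative usage sketch (not part of this header): stts() sets CR0.TS
 * so the next FPU/SSE instruction traps and the kernel can restore FPU
 * state lazily; clts() clears it again before the FPU is actually used,
 * roughly as kernel_fpu_begin()/kernel_fpu_end() do:
 *
 *	clts();
 *	... use FPU/SSE registers ...
 *	stts();
 */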
#endif /* __KERNEL__ */
static inline void clflush(void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
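/*
 * Illustrative usage sketch (not part of this header): a buffer is
 * flushed one cache line at a time, bracketed by mb(), roughly as
 * clflush_cache_range() does elsewhere in the kernel:
 *
 *	mb();
 *	for (p = vaddr; p < vaddr + size; p += boot_cpu_data.x86_clflush_size)
 *		clflush(p);
 *	mb();
 *
 * "vaddr", "size" and "p" are hypothetical locals.
 */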
#define nop() __asm__ __volatile__ ("nop")
void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#ifdef CONFIG_X86_32
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
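/*
 * Illustrative usage sketch (not part of this header): a driver typically
 * uses wmb() to order a descriptor update against the MMIO write that
 * tells the device to look at it, e.g.
 *
 *	desc->addr = dma_addr;
 *	desc->len  = len;
 *	wmb();
 *	writel(tail, ring_doorbell);
 *
 * so the descriptor contents are visible before the doorbell; the
 * identifiers here are hypothetical.
 */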
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
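/*
 * Illustrative usage sketch (not part of this header): set_mb() stores a
 * value and then orders it against later accesses; set_current_state()
 * uses it roughly as
 *
 *	set_mb(current->state, TASK_INTERRUPTIBLE);
 *
 * so the state change is visible before the subsequent condition test,
 * with the xchg() supplying the full barrier on SMP.
 */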