include/asm-x86/system.h
#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>

#include <linux/kernel.h>
#include <linux/irqflags.h>

/* entries in ARCH_DLINFO: */
#ifdef CONFIG_IA32_EMULATION
# define AT_VECTOR_SIZE_ARCH 2
#else
# define AT_VECTOR_SIZE_ARCH 1
#endif
#ifdef CONFIG_X86_32

struct task_struct; /* one of the stranger aspects of C forward declarations */
struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next);

/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev, next, last) do {				\
	unsigned long esi, edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.sp), "=m" (prev->thread.ip),	\
		      "=a" (last), "=S" (esi), "=D" (edi)		\
		     :"m" (next->thread.sp), "m" (next->thread.ip),	\
		      "2" (prev), "d" (next));				\
} while (0)
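/*
 * Illustrative call site (a sketch, not part of this header): the
 * scheduler's context_switch() invokes the macro as
 *
 *	switch_to(prev, next, prev);
 *
 * so that after the stack switch, "prev" still names the task we came
 * from, as seen by the freshly resumed task.
 */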
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
#else
#define __SAVE(reg, offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg, offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER  \
	, "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
	  "r12", "r13", "r14", "r15"
/* Save and restore flags to clear and handle a leaking NT flag */
#define switch_to(prev, next, last) \
	asm volatile(SAVE_CONTEXT					  \
	     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	  \
	     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	  \
	     "call __switch_to\n\t"					  \
	     ".globl thread_return\n"					  \
	     "thread_return:\n\t"					  \
	     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"			  \
	     "movq %P[thread_info](%%rsi),%%r8\n\t"			  \
	     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"	  \
	     "movq %%rax,%%rdi\n\t"					  \
	     "jc ret_from_fork\n\t"					  \
	     RESTORE_CONTEXT						  \
	     : "=a" (last)						  \
	     : [next] "S" (next), [prev] "D" (prev),			  \
	       [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
	       [ti_flags] "i" (offsetof(struct thread_info, flags)),	  \
	       [tif_fork] "i" (TIF_FORK),				  \
	       [thread_info] "i" (offsetof(struct task_struct, stack)),   \
	       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
	     : "memory", "cc" __EXTRA_CLOBBER)
#endif

#ifdef __KERNEL__
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while (0)

#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
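/*
 * Background note (added for context): these macros poke the raw bytes
 * of an x86 segment descriptor.  The 32-bit base is split across bytes
 * 2-3 (bits 0-15), byte 4 (bits 16-23) and byte 7 (bits 24-31); the
 * 20-bit limit lives in bytes 0-1 plus the low nibble of byte 6, which
 * is why _set_limit masks byte 6 with 0xf0 before merging the high
 * limit bits back in.
 */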
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		     "1:\t"			\
		     "movl %k0,%%" #seg "\n"	\
		     "2:\n"			\
		     ".section .fixup,\"ax\"\n"	\
		     "3:\t"			\
		     "movl %k1, %%" #seg "\n\t"	\
		     "jmp 2b\n"			\
		     ".previous\n"		\
		     _ASM_EXTABLE(1b,3b)	\
		     : :"r" (value), "r" (0))
/*
 * Save a segment register away
 */
#define savesegment(seg, value)				\
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"	/* lsl: load the segment limit for this selector */
		:"=r" (__limit):"r" (segment));
	return __limit + 1;
}

static inline void native_clts(void)
{
	asm volatile("clts");	/* clear CR0.TS */
}
/*
 * Volatile isn't enough to prevent the compiler from reordering the
 * read/write functions for the control registers and messing everything up.
 * A memory clobber would solve the problem, but would prevent reordering of
 * all loads/stores around it, which can hurt performance. The solution is to
 * use a variable and mimic reads and writes to it to enforce serialization.
 */
static unsigned long __force_order;
static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0,%%cr0": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("mov %%cr2,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr2(unsigned long val)
{
	asm volatile("mov %0,%%cr2": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("mov %%cr3,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}

static inline void native_write_cr3(unsigned long val)
{
	asm volatile("mov %0,%%cr3": :"r" (val), "m" (__force_order));
}

static inline unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("mov %%cr4,%0\n\t" :"=r" (val), "=m" (__force_order));
	return val;
}
static inline unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist. On x86_64, cr4 always
	 * exists, so it will never fail. */
#ifdef CONFIG_X86_32
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b,2b)
		     : "=r" (val), "=m" (__force_order) : "0" (0));
#else
	val = native_read_cr4();
#endif
	return val;
}

static inline void native_write_cr4(unsigned long val)
{
	asm volatile("mov %0,%%cr4": :"r" (val), "m" (__force_order));
}
#ifdef CONFIG_X86_64
static inline unsigned long native_read_cr8(void)
{
	unsigned long cr8;
	asm volatile("movq %%cr8,%0" : "=r" (cr8));
	return cr8;
}

static inline void native_write_cr8(unsigned long val)
{
	asm volatile("movq %0,%%cr8" :: "r" (val) : "memory");
}
#endif

static inline void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()	(native_read_cr0())
#define write_cr0(x)	(native_write_cr0(x))
#define read_cr2()	(native_read_cr2())
#define write_cr2(x)	(native_write_cr2(x))
#define read_cr3()	(native_read_cr3())
#define write_cr3(x)	(native_write_cr3(x))
#define read_cr4()	(native_read_cr4())
#define read_cr4_safe()	(native_read_cr4_safe())
#define write_cr4(x)	(native_write_cr4(x))
#define wbinvd()	(native_wbinvd())
#ifdef CONFIG_X86_64
#define read_cr8()	(native_read_cr8())
#define write_cr8(x)	(native_write_cr8(x))
#endif

/* Clear the 'TS' bit */
#define clts()		(native_clts())

#endif /* CONFIG_PARAVIRT */
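/*
 * Note (added for context, hedged): when CONFIG_PARAVIRT is enabled,
 * asm/paravirt.h is expected to provide read_cr0(), write_cr3(), clts()
 * and friends as indirect calls through the paravirt ops tables, so a
 * hypervisor can intercept control-register accesses; the native_*
 * helpers above then serve as the bare-metal implementations.
 */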
#define stts() write_cr0(8 | read_cr0())	/* set CR0.TS (bit 3) */

#endif /* __KERNEL__ */
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(volatile char __force *)__p));
}
#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#ifdef CONFIG_X86_32
/*
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order store. wmb() ceases to be a
 * nop for these.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
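/*
 * Explanatory aside (added, hedged): alternative(oldinstr, newinstr,
 * feature) patches the instruction stream at boot, so on CPUs that
 * advertise SSE/SSE2 the cheap serializing "lock; addl" is replaced
 * in place by the real sfence/lfence/mfence instruction, while older
 * CPUs keep the locked add as their barrier.
 */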
#else
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()	do { } while (0)
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()	rmb()
#else
# define smp_rmb()	barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()	wmb()
#else
# define smp_wmb()	barrier()
#endif
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
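/*
 * Why xchg in set_mb() (added note): an x86 xchg with a memory operand
 * is implicitly locked, so the SMP variant both stores the new value
 * and acts as a full memory barrier in one instruction; on UP a plain
 * store followed by a compiler barrier is sufficient.
 */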
/*
 * Stop RDTSC speculation. This is needed when you need to use RDTSC
 * (or get_cycles or vread that possibly accesses the TSC) in a defined
 * code region.
 *
 * (Could use an alternative three way for this if there was one.)
 */
static inline void rdtsc_barrier(void)
{
	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
}
#endif