[PATCH] Fix "value computed is not used" compile warnings with gcc-4.1

include/asm-x86_64/system.h

#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT \
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
		     "call __switch_to\n\t" \
		     ".globl thread_return\n" \
		     "thread_return:\n\t" \
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
		     "movq %P[thread_info](%%rsi),%%r8\n\t" \
		     LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
		     "movq %%rax,%%rdi\n\t" \
		     "jc ret_from_fork\n\t" \
		     RESTORE_CONTEXT \
		     : "=a" (last) \
		     : [next] "S" (next), [prev] "D" (prev), \
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK), \
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value) \
	asm volatile("\n" \
		"1:\t" \
		"movl %k0,%%" #seg "\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:\t" \
		"movl %1,%%" #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		".align 8\n\t" \
		".quad 1b,3b\n" \
		".previous" \
		: :"r" (value), "r" (0))

#define set_debug(value,register) \
	__asm__("movq %0,%%db" #register \
		: /* no output */ \
		:"r" ((unsigned long) value))

#ifdef __KERNEL__
struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;
	__u8 cpuid;		/* cpuid bit set for replacement */
	__u8 instrlen;		/* length of original instruction */
	__u8 replacementlen;	/* length of new instruction, <= instrlen */
	__u8 pad[5];
};
#endif

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length
 * of newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature) \
	asm volatile ("661:\n\t" oldinstr "\n662:\n" \
		      ".section .altinstructions,\"a\"\n" \
		      "  .align 8\n" \
		      "  .quad 661b\n"		/* label */ \
		      "  .quad 663f\n"		/* new instruction */ \
		      "  .byte %c0\n"		/* feature bit */ \
		      "  .byte 662b-661b\n"	/* sourcelen */ \
		      "  .byte 664f-663f\n"	/* replacementlen */ \
		      ".previous\n" \
		      ".section .altinstr_replacement,\"ax\"\n" \
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" :: "i" (feature) : "memory")

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r").
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...) \
	asm volatile ("661:\n\t" oldinstr "\n662:\n" \
		      ".section .altinstructions,\"a\"\n" \
		      "  .align 8\n" \
		      "  .quad 661b\n"		/* label */ \
		      "  .quad 663f\n"		/* new instruction */ \
		      "  .byte %c0\n"		/* feature bit */ \
		      "  .byte 662b-661b\n"	/* sourcelen */ \
		      "  .byte 664f-663f\n"	/* replacementlen */ \
		      ".previous\n" \
		      ".section .altinstr_replacement,\"ax\"\n" \
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" :: "i" (feature), ##input)

/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...) \
	asm volatile ("661:\n\t" oldinstr "\n662:\n" \
		      ".section .altinstructions,\"a\"\n" \
		      "  .align 8\n" \
		      "  .quad 661b\n"		/* label */ \
		      "  .quad 663f\n"		/* new instruction */ \
		      "  .byte %c[feat]\n"	/* feature bit */ \
		      "  .byte 662b-661b\n"	/* sourcelen */ \
		      "  .byte 664f-663f\n"	/* replacementlen */ \
		      ".previous\n" \
		      ".section .altinstr_replacement,\"ax\"\n" \
		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
		      ".previous" : output : [feat] "i" (feature), ##input)

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val));
}

#define stts() write_cr0(8 | read_cr0())

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	   but generally the primitive is invalid; *ptr is the output
 *	   argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}

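/*
 * Usage sketch: a trivial test-and-set spinlock built on xchg().
 * 'lock_word' is a hypothetical variable, not part of this header.
 * xchg() returns the previous value, so the loop spins until it
 * observes a 0 and has atomically replaced it with 1:
 *
 *	static unsigned long lock_word;
 *
 *	while (xchg(&lock_word, 1))
 *		cpu_relax();
 *	... critical section ...
 *	lock_word = 0;
 *
 * A real lock would also need release ordering before the final store;
 * this only illustrates the return-value convention.
 */
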
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
				       (unsigned long)(n),sizeof(*(ptr))))

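/*
 * Usage sketch: a lock-free increment built on cmpxchg().  'counter'
 * is a hypothetical variable, not part of this header.  The loop
 * retries whenever another CPU changed the value between our read and
 * the compare-and-exchange; success is detected by cmpxchg() returning
 * the value we expected:
 *
 *	static unsigned long counter;
 *	unsigned long old, cur = counter;
 *
 *	for (;;) {
 *		old = cmpxchg(&counter, cur, cur + 1);
 *		if (old == cur)
 *			break;
 *		cur = old;
 *	}
 */
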
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

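/*
 * Usage sketch: pairing wmb()/rmb() across a producer and a consumer.
 * 'data' and 'ready' are hypothetical variables, not part of this
 * header.  The write barrier orders the data store before the flag
 * store; the matching read barrier orders the flag load before the
 * data load:
 *
 *	producer:
 *		data = compute();
 *		wmb();
 *		ready = 1;
 *
 *	consumer:
 *		while (!ready)
 *			cpu_relax();
 *		rmb();
 *		consume(data);
 *
 * For CPU-to-CPU ordering alone, the smp_*() variants above suffice;
 * the bare forms are for talking to devices.
 */
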
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

/* interrupt control */
#define local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")

#ifdef CONFIG_X86_VSMP
/*
 * Interrupt control for the VSMP architecture.  EFLAGS bit 9 is IF;
 * bit 18 is AC, which vSMP uses to track the irqs-disabled state.
 */
#define local_irq_disable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
#define local_irq_enable()	do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)

#define irqs_disabled()					\
({							\
	unsigned long flags;				\
	local_save_flags(flags);			\
	(flags & (1<<18)) || !(flags & (1<<9));		\
})

/* For spinlocks etc */
#define local_irq_save(x)	do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
#else /* CONFIG_X86_VSMP */
#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!(flags & (1<<9));		\
})

/* For spinlocks etc */
#define local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#endif /* CONFIG_X86_VSMP */

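/*
 * Usage sketch: the canonical save/disable/restore pattern.  The flags
 * argument must be an unsigned long; warn_if_not_ulong() makes the
 * compiler warn otherwise:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... code that must not be interrupted on this CPU ...
 *	local_irq_restore(flags);
 */
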
/*
 * Used in the idle loop.  sti holds off interrupts until the following
 * instruction has executed, so the hlt is entered without a window in
 * which a wakeup interrupt could be lost.
 */
#define safe_halt()	__asm__ __volatile__("sti; hlt": : :"memory")

/* Used when interrupts are already enabled or to shut down the processor. */
#define halt()		__asm__ __volatile__("hlt": : :"memory")

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);

#endif /* __ASM_SYSTEM_H */