/*
 *  include/asm-s390/system.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *
 *  Derived from "include/asm-i386/system.h"
 */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>

#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *resume(void *, void *);

/*
 * stosm/stnsm store the PSW system mask in the byte they address; with
 * a big-endian unsigned long that is the most significant byte, so
 * irqs_disabled() shifts it back down by __FLAG_SHIFT.
 */
#ifdef __s390x__
#define __FLAG_SHIFT 56
#else /* ! __s390x__ */
#define __FLAG_SHIFT 24
#endif /* ! __s390x__ */
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	/* fprs 0, 2, 4 and 6 exist on every model */
	asm volatile (
		"   std   0,8(%0)\n"
		"   std   2,24(%0)\n"
		"   std   4,40(%0)\n"
		"   std   6,56(%0)"
		: : "a" (fpregs) : "memory" );
	if (!MACHINE_HAS_IEEE)
		return;
	/* the fp control register and the remaining 12 fprs
	   need the IEEE floating point facility */
	asm volatile(
		"   stfpc 0(%0)\n"
		"   std   1,16(%0)\n"
		"   std   3,32(%0)\n"
		"   std   5,48(%0)\n"
		"   std   7,64(%0)\n"
		"   std   8,72(%0)\n"
		"   std   9,80(%0)\n"
		"   std   10,88(%0)\n"
		"   std   11,96(%0)\n"
		"   std   12,104(%0)\n"
		"   std   13,112(%0)\n"
		"   std   14,120(%0)\n"
		"   std   15,128(%0)\n"
		: : "a" (fpregs) : "memory" );
}
static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	/* fprs 0, 2, 4 and 6 exist on every model */
	asm volatile (
		"   ld    0,8(%0)\n"
		"   ld    2,24(%0)\n"
		"   ld    4,40(%0)\n"
		"   ld    6,56(%0)"
		: : "a" (fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	/* the fp control register and the remaining 12 fprs
	   need the IEEE floating point facility */
	asm volatile(
		"   lfpc  0(%0)\n"
		"   ld    1,16(%0)\n"
		"   ld    3,32(%0)\n"
		"   ld    5,48(%0)\n"
		"   ld    7,64(%0)\n"
		"   ld    8,72(%0)\n"
		"   ld    9,80(%0)\n"
		"   ld    10,88(%0)\n"
		"   ld    11,96(%0)\n"
		"   ld    12,104(%0)\n"
		"   ld    13,112(%0)\n"
		"   ld    14,120(%0)\n"
		"   ld    15,128(%0)\n"
		: : "a" (fpregs));
}
#define switch_to(prev,next,last) do {		\
	if (prev == next)			\
		break;				\
	save_fp_regs(&prev->thread.fp_regs);	\
	restore_fp_regs(&next->thread.fp_regs);	\
	prev = resume(prev,next);		\
} while (0)
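
/*
 * Usage sketch (illustrative, not part of the original header):
 * switch_to() is invoked only from the scheduler core with the
 * outgoing and incoming tasks, e.g.
 *
 *	switch_to(prev, next, prev);
 */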

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) \
  ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr))))
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	unsigned long addr, old;
	int shift;

	switch (size) {
	case 1:
		/* No byte-sized atomic exchange: round the address down
		   to the containing word and run a compare-and-swap loop
		   on that word, with the byte shifted into place
		   (big-endian: byte 0 is the leftmost). */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"   l     %0,0(%3)\n"
			"0: lr    0,%0\n"
			"   nr    0,%2\n"
			"   or    0,%1\n"
			"   cs    %0,0,0(%3)\n"
			"   jl    0b\n"
			: "=&d" (old)
			: "d" (x << shift), "d" (~(255 << shift)), "a" (addr)
			: "memory", "cc", "0" );
		x = old >> shift;
		break;
	case 2:
		/* same trick for a halfword within its word */
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"   l     %0,0(%3)\n"
			"0: lr    0,%0\n"
			"   nr    0,%2\n"
			"   or    0,%1\n"
			"   cs    %0,0,0(%3)\n"
			"   jl    0b\n"
			: "=&d" (old)
			: "d" (x << shift), "d" (~(65535 << shift)), "a" (addr)
			: "memory", "cc", "0" );
		x = old >> shift;
		break;
	case 4:
		asm volatile (
			"   l     %0,0(%2)\n"
			"0: cs    %0,%1,0(%2)\n"
			"   jl    0b\n"
			: "=&d" (old) : "d" (x), "a" (ptr)
			: "memory", "cc", "0" );
		x = old;
		break;
#ifdef __s390x__
	case 8:
		asm volatile (
			"   lg    %0,0(%2)\n"
			"0: csg   %0,%1,0(%2)\n"
			"   jl    0b\n"
			: "=&d" (old) : "d" (x), "a" (ptr)
			: "memory", "cc", "0" );
		x = old;
		break;
#endif /* __s390x__ */
	}
	return x;
}
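
/*
 * Usage sketch (illustrative, not part of the original header): xchg()
 * atomically replaces *ptr and hands back the previous contents, e.g.
 * for a hypothetical test-and-set lock word:
 *
 *	static unsigned int lock_word;
 *
 *	static inline int try_lock(void)
 *	{
 *		return xchg(&lock_word, 1) == 0;   (0 means we got it)
 *	}
 */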

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		/* As in __xchg(): emulate a byte-sized compare and swap
		   with a word-sized cs on the containing, word-aligned
		   address.  The loop retries only if a byte other than
		   the target one changed underneath us. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"   l     %0,0(%4)\n"
			"0: nr    %0,%5\n"
			"   lr    %1,%0\n"
			"   or    %0,%2\n"
			"   or    %1,%3\n"
			"   cs    %0,%1,0(%4)\n"
			"   jnl   1f\n"
			"   xr    %1,%0\n"
			"   nr    %1,%5\n"
			"   jnz   0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (addr),
			  "d" (~(255 << shift))
			: "memory", "cc" );
		return prev >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"   l     %0,0(%4)\n"
			"0: nr    %0,%5\n"
			"   lr    %1,%0\n"
			"   or    %0,%2\n"
			"   or    %1,%3\n"
			"   cs    %0,%1,0(%4)\n"
			"   jnl   1f\n"
			"   xr    %1,%0\n"
			"   nr    %1,%5\n"
			"   jnz   0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (addr),
			  "d" (~(65535 << shift))
			: "memory", "cc" );
		return prev >> shift;
	case 4:
		asm volatile (
			"   cs    %0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc" );
		return prev;
#ifdef __s390x__
	case 8:
		asm volatile (
			"   csg   %0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc" );
		return prev;
#endif /* __s390x__ */
	}
	return old;
}
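
/*
 * Usage sketch (illustrative, not part of the original header): a
 * lock-free read-modify-write loop on a hypothetical counter.  The
 * update succeeded iff cmpxchg() returned the value we expected:
 *
 *	static unsigned long counter;
 *
 *	static inline void counter_inc(void)
 *	{
 *		unsigned long old;
 *		do {
 *			old = counter;
 *		} while (cmpxchg(&counter, old, old + 1) != old);
 *	}
 */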

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation & makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()	__asm__ __volatile__ ( "bcr 15,0" : : : "memory" )
# define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while(0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
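
/*
 * Usage sketch (illustrative, not part of the original header): the
 * classic pairing of smp_wmb() in a producer with smp_rmb() in a
 * consumer, so the data is globally visible before the flag is:
 *
 *	producer:			consumer:
 *		data = val;			while (!flag)
 *		smp_wmb();				;
 *		flag = 1;			smp_rmb();
 *						use(data);
 */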

/* interrupt control.. */
#define local_irq_enable() ({ \
	unsigned long __dummy; \
	__asm__ __volatile__ ( \
		"stosm 0(%1),0x03" : "=m" (__dummy) : "a" (&__dummy) ); \
	})

#define local_irq_disable() ({ \
	unsigned long __flags; \
	__asm__ __volatile__ ( \
		"stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
	__flags; \
	})

#define local_save_flags(x) \
	__asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x) )

#define local_irq_restore(x) \
	__asm__ __volatile__("ssm   0(%0)" : : "a" (&x) : "memory")

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!((flags >> __FLAG_SHIFT) & 3);	\
})

#ifdef __s390x__

#define __load_psw(psw) \
	__asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw) : "cc" );

#define __ctl_load(array, low, high) ({ \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   lctlg 0,0,0(%0)\n" \
		"0: ex    %1,0(1)" \
		: : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
	})

#define __ctl_store(array, low, high) ({ \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   stctg 0,0,0(%1)\n" \
		"0: ex    %2,0(1)" \
		: "=m" (array) : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
	})

#define __ctl_set_bit(cr, bit) ({ \
	__u8 __dummy[24]; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n"	/* skip indirect insns */ \
		"   stctg 0,0,0(%1)\n" \
		"   lctlg 0,0,0(%1)\n" \
		"0: ex    %2,0(1)\n"	/* execute stctg */ \
		"   lg    0,0(%1)\n" \
		"   ogr   0,%3\n"	/* set the bit */ \
		"   stg   0,0(%1)\n" \
		"1: ex    %2,6(1)"	/* execute lctlg */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (1L<<(bit)) \
		: "cc", "0", "1" ); \
	})

#define __ctl_clear_bit(cr, bit) ({ \
	__u8 __dummy[16]; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n"	/* skip indirect insns */ \
		"   stctg 0,0,0(%1)\n" \
		"   lctlg 0,0,0(%1)\n" \
		"0: ex    %2,0(1)\n"	/* execute stctg */ \
		"   lg    0,0(%1)\n" \
		"   ngr   0,%3\n"	/* clear the bit */ \
		"   stg   0,0(%1)\n" \
		"1: ex    %2,6(1)"	/* execute lctlg */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (~(1L<<(bit))) \
		: "cc", "0", "1" ); \
	})

#else /* __s390x__ */

#define __load_psw(psw) \
	__asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" );

#define __ctl_load(array, low, high) ({ \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   lctl  0,0,0(%0)\n" \
		"0: ex    %1,0(1)" \
		: : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
	})

#define __ctl_store(array, low, high) ({ \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n" \
		"   stctl 0,0,0(%1)\n" \
		"0: ex    %2,0(1)" \
		: "=m" (array) : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
	})

#define __ctl_set_bit(cr, bit) ({ \
	__u8 __dummy[16]; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n"	/* skip indirect insns */ \
		"   stctl 0,0,0(%1)\n" \
		"   lctl  0,0,0(%1)\n" \
		"0: ex    %2,0(1)\n"	/* execute stctl */ \
		"   l     0,0(%1)\n" \
		"   or    0,%3\n"	/* set the bit */ \
		"   st    0,0(%1)\n" \
		"1: ex    %2,4(1)"	/* execute lctl */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (1<<(bit)) \
		: "cc", "0", "1" ); \
	})

#define __ctl_clear_bit(cr, bit) ({ \
	__u8 __dummy[16]; \
	__asm__ __volatile__ ( \
		"   bras  1,0f\n"	/* skip indirect insns */ \
		"   stctl 0,0,0(%1)\n" \
		"   lctl  0,0,0(%1)\n" \
		"0: ex    %2,0(1)\n"	/* execute stctl */ \
		"   l     0,0(%1)\n" \
		"   nr    0,%3\n"	/* clear the bit */ \
		"   st    0,0(%1)\n" \
		"1: ex    %2,4(1)"	/* execute lctl */ \
		: "=m" (__dummy) \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (~(1<<(bit))) \
		: "cc", "0", "1" ); \
	})

#endif /* __s390x__ */

/* For spinlocks etc */
#define local_irq_save(x)	((x) = local_irq_disable())
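
/*
 * Usage sketch (illustrative, not part of the original header): the
 * usual pattern for a short critical section on the local CPU:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		(disable, remembering the old mask)
 *	... critical section ...
 *	local_irq_restore(flags);	(restore the previous mask)
 */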

#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */
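
/*
 * Usage sketch (illustrative, not part of the original header):
 * ctl_set_bit()/ctl_clear_bit() flip a single control register bit on
 * every CPU under CONFIG_SMP, or only locally on UP.  For a
 * hypothetical facility enabled by bit 20 of control register 0:
 *
 *	ctl_set_bit(0, 20);
 *	...
 *	ctl_clear_bit(0, 20);
 */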

extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#endif /* __KERNEL__ */

#endif