/*
 *  include/asm-s390/system.h
 *
 *  Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *
 *  Derived from "include/asm-i386/system.h"
 */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#ifdef __KERNEL__

extern struct task_struct *resume(void *, void *);

#ifdef __s390x__
#define __FLAG_SHIFT 56
#else /* ! __s390x__ */
#define __FLAG_SHIFT 24
#endif /* ! __s390x__ */
static inline void save_fp_regs(s390_fp_regs *fpregs)
		: : "a" (fpregs) : "memory" );
	if (!MACHINE_HAS_IEEE)
		: : "a" (fpregs) : "memory" );
static inline void restore_fp_regs(s390_fp_regs *fpregs)
	if (!MACHINE_HAS_IEEE)
#define switch_to(prev,next,last) do { \
	save_fp_regs(&prev->thread.fp_regs); \
	restore_fp_regs(&next->thread.fp_regs); \
	prev = resume(prev,next); \
} while (0)
94 #define nop() __asm__ __volatile__ ("nop")
97 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(void *)(ptr),sizeof(*(ptr))))
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	unsigned long addr, old;
	int shift;
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
			: "d" (x << shift), "d" (~(255 << shift)), "a" (addr)
			: "memory", "cc", "0" );
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
			: "d" (x << shift), "d" (~(65535 << shift)), "a" (addr)
			: "memory", "cc", "0" );
140 "0: cs %0,%1,0(%2)\n"
142 : "=&d" (old
) : "d" (x
), "a" (ptr
)
143 : "memory", "cc", "0" );
150 "0: csg %0,%1,0(%2)\n"
152 : "=&d" (old
) : "d" (x
), "a" (ptr
)
153 : "memory", "cc", "0" );
156 #endif /* __s390x__ */
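/*
 * Illustrative sketch (not part of the original header): xchg()
 * atomically stores a new value and returns the previous contents, so
 * a simple test-and-set can be built on top of it.  The helper name is
 * hypothetical.
 */
static inline unsigned long example_test_and_set(unsigned long *word)
{
	/* a non-zero return means the word was already set */
	return xchg(word, 1UL);
}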
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
				       (unsigned long)(n),sizeof(*(ptr))))
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (ptr),
			  "d" (~(255 << shift))
		return prev >> shift;
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (ptr),
			  "d" (~(65535 << shift))
		return prev >> shift;
225 : "=&d" (prev
) : "0" (old
), "d" (new), "a" (ptr
)
232 : "=&d" (prev
) : "0" (old
), "d" (new), "a" (ptr
)
235 #endif /* __s390x__ */
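/*
 * Illustrative sketch (not part of the original header): the usual
 * cmpxchg() retry loop implied by the comment above.  Success is
 * detected by comparing the returned value with the 'old' that was
 * passed in.  The helper name is hypothetical.
 */
static inline unsigned int example_atomic_add(volatile unsigned int *counter,
					      unsigned int inc)
{
	unsigned int old, prev;

	do {
		old = *counter;
		prev = cmpxchg(counter, old, old + inc);
	} while (prev != old);	/* retry if another cpu changed *counter */
	return old;
}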
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */
#define eieio() __asm__ __volatile__ ( "bcr 15,0" : : : "memory" )
# define SYNC_OTHER_CORES(x) eieio()
#define mb() eieio()
#define rmb() eieio()
#define wmb() eieio()
#define read_barrier_depends() do { } while(0)
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() read_barrier_depends()
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()

#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
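/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing of smp_wmb() and smp_rmb().  The helper
 * names and parameters are hypothetical.
 */
static inline void example_publish(int *data, volatile int *flag, int value)
{
	*data = value;
	smp_wmb();	/* order the data store before the flag store */
	*flag = 1;
}

static inline int example_consume(const int *data, const volatile int *flag)
{
	if (!*flag)
		return -1;	/* nothing published yet */
	smp_rmb();		/* order the flag load before the data load */
	return *data;
}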
/* interrupt control.. */
#define local_irq_enable() ({ \
	unsigned long __dummy; \
	__asm__ __volatile__ ( \
		"stosm 0(%1),0x03" : "=m" (__dummy) : "a" (&__dummy) ); \
	})
#define local_irq_disable() ({ \
	unsigned long __flags; \
	__asm__ __volatile__ ( \
		"stnsm 0(%1),0xfc" : "=m" (__flags) : "a" (&__flags) ); \
	__flags; \
	})
#define local_save_flags(x) \
	__asm__ __volatile__("stosm 0(%1),0" : "=m" (x) : "a" (&x) )

#define local_irq_restore(x) \
	__asm__ __volatile__("ssm 0(%0)" : : "a" (&x) : "memory")
#define irqs_disabled() \
({ \
	unsigned long flags; \
	local_save_flags(flags); \
	!((flags >> __FLAG_SHIFT) & 3); \
})
#ifdef __s390x__

#define __load_psw(psw) \
	__asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw) : "cc" );
#define __ctl_load(array, low, high) ({ \
	__asm__ __volatile__ ( \
		"   lctlg 0,0,0(%0)\n" \
		: : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
#define __ctl_store(array, low, high) ({ \
	__asm__ __volatile__ ( \
		"   stctg 0,0,0(%1)\n" \
		: "=m" (array) : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
#define __ctl_set_bit(cr, bit) ({ \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctg 0,0,0(%1)\n" \
		"    lctlg 0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    ogr   0,%3\n"       /* set the bit */ \
		"1:  ex    %2,6(1)"      /* execute lctl */ \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (1L<<(bit)) \
		: "cc", "0", "1" ); \
#define __ctl_clear_bit(cr, bit) ({ \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctg 0,0,0(%1)\n" \
		"    lctlg 0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    ngr   0,%3\n"       /* clear the bit */ \
		"1:  ex    %2,6(1)"      /* execute lctl */ \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (~(1L<<(bit))) \
		: "cc", "0", "1" ); \
#else /* __s390x__ */

#define __load_psw(psw) \
	__asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" );
#define __ctl_load(array, low, high) ({ \
	__asm__ __volatile__ ( \
		"   lctl  0,0,0(%0)\n" \
		: : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
#define __ctl_store(array, low, high) ({ \
	__asm__ __volatile__ ( \
		"   stctl 0,0,0(%1)\n" \
		: "=m" (array) : "a" (&array), "a" (((low)<<4)+(high)) : "1" ); \
#define __ctl_set_bit(cr, bit) ({ \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctl 0,0,0(%1)\n" \
		"    lctl  0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    or    0,%3\n"       /* set the bit */ \
		"1:  ex    %2,4(1)"      /* execute lctl */ \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (1<<(bit)) \
		: "cc", "0", "1" ); \
#define __ctl_clear_bit(cr, bit) ({ \
	__asm__ __volatile__ ( \
		"    bras  1,0f\n"       /* skip indirect insns */ \
		"    stctl 0,0,0(%1)\n" \
		"    lctl  0,0,0(%1)\n" \
		"0:  ex    %2,0(1)\n"    /* execute stctl */ \
		"    nr    0,%3\n"       /* clear the bit */ \
		"1:  ex    %2,4(1)"      /* execute lctl */ \
		: "a" ((((unsigned long) &__dummy) + 7) & ~7UL), \
		  "a" (cr*17), "a" (~(1<<(bit))) \
		: "cc", "0", "1" ); \
#endif /* __s390x__ */

/* For spinlocks etc */
#define local_irq_save(x) ((x) = local_irq_disable())
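/*
 * Illustrative sketch (not part of the original header): the usual way
 * local_irq_save()/local_irq_restore() bracket a short section that
 * must not be interrupted.  The helper name is hypothetical.
 */
static inline void example_irq_protected_inc(unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable interrupts, remember old state */
	(*counter)++;			/* must not race with interrupt handlers */
	local_irq_restore(flags);	/* put the previous interrupt mask back */
}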
#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else /* CONFIG_SMP */

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#endif /* __KERNEL__ */

#endif /* __ASM_SYSTEM_H */