/*
 *  include/asm-s390/system.h
 *
 *  Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/system.h"
 */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#ifdef __KERNEL__

struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	/* Save the four fp registers that exist on all machines. */
	asm volatile(
		"	std	0,8(%1)\n"
		"	std	2,24(%1)\n"
		"	std	4,40(%1)\n"
		"	std	6,56(%1)"
		: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
	if (!MACHINE_HAS_IEEE)
		return;
	/* Save the fp control register and the registers of the IEEE fpu. */
	asm volatile(
		"	stfpc	0(%1)\n"
		"	std	1,16(%1)\n"
		"	std	3,32(%1)\n"
		"	std	5,48(%1)\n"
		"	std	7,64(%1)\n"
		"	std	8,72(%1)\n"
		"	std	9,80(%1)\n"
		"	std	10,88(%1)\n"
		"	std	11,96(%1)\n"
		"	std	12,104(%1)\n"
		"	std	13,112(%1)\n"
		"	std	14,120(%1)\n"
		"	std	15,128(%1)\n"
		: "=m" (*fpregs) : "a" (fpregs), "m" (*fpregs) : "memory");
}
static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	/* Restore the four fp registers that exist on all machines. */
	asm volatile(
		"	ld	0,8(%0)\n"
		"	ld	2,24(%0)\n"
		"	ld	4,40(%0)\n"
		"	ld	6,56(%0)"
		: : "a" (fpregs), "m" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	/* Restore the fp control register and the registers of the IEEE fpu. */
	asm volatile(
		"	lfpc	0(%0)\n"
		"	ld	1,16(%0)\n"
		"	ld	3,32(%0)\n"
		"	ld	5,48(%0)\n"
		"	ld	7,64(%0)\n"
		"	ld	8,72(%0)\n"
		"	ld	9,80(%0)\n"
		"	ld	10,88(%0)\n"
		"	ld	11,96(%0)\n"
		"	ld	12,104(%0)\n"
		"	ld	13,112(%0)\n"
		"	ld	14,120(%0)\n"
		"	ld	15,128(%0)\n"
		: : "a" (fpregs), "m" (*fpregs));
}
static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam 0,15,0(%0)" : : "a" (acrs) : "memory");
}
static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam 0,15,0(%0)" : : "a" (acrs));
}
#define switch_to(prev,next,last) do {					\
	if (prev == next)						\
		break;							\
	save_fp_regs(&prev->thread.fp_regs);				\
	restore_fp_regs(&next->thread.fp_regs);				\
	save_access_regs(&prev->thread.acrs[0]);			\
	restore_access_regs(&next->thread.acrs[0]);			\
	prev = __switch_to(prev,next);					\
} while (0)
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_vtime(struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
#else
#define account_vtime(x) do { /* empty */ } while (0)
#endif
#define finish_arch_switch(prev) do {					\
	set_fs(current->thread.mm_segment);				\
	account_vtime(prev);						\
} while (0)

#define nop() asm volatile("nop")
#define xchg(ptr,x)							  \
({									  \
	__typeof__(*(ptr)) __ret;					  \
	__ret = (__typeof__(*(ptr)))					  \
		__xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
	__ret;								  \
})
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	unsigned long addr, old;
	int shift;

	switch (size) {
	case 1:
		/* Emulate a byte exchange with a compare-and-swap on the
		 * word-aligned word that contains the byte. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,0(%4)\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,0(%4)\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(int *) addr)
			: "d" (x << shift), "d" (~(255 << shift)), "a" (addr),
			  "m" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 2:
		/* Same trick for a halfword exchange. */
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,0(%4)\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,0(%4)\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(int *) addr)
			: "d" (x << shift), "d" (~(65535 << shift)), "a" (addr),
			  "m" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 4:
		asm volatile(
			"	l	%0,0(%3)\n"
			"0:	cs	%0,%2,0(%3)\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(int *) ptr)
			: "d" (x), "a" (ptr), "m" (*(int *) ptr)
			: "memory", "cc");
		return old;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	lg	%0,0(%3)\n"
			"0:	csg	%0,%2,0(%3)\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(long *) ptr)
			: "d" (x), "a" (ptr), "m" (*(long *) ptr)
			: "memory", "cc");
		return old;
#endif /* __s390x__ */
	}
	return x;
}
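/*
 * Usage sketch (illustrative, not part of the original header): xchg()
 * atomically stores a new value and returns the old one, so it can
 * implement a simple test-and-set flag.  "lock_word" is a hypothetical
 * variable:
 *
 *	static unsigned int lock_word;
 *
 *	while (xchg(&lock_word, 1) != 0)
 *		;			// flag was already set: spin
 *	// ... critical section ...
 *	xchg(&lock_word, 0);		// clear the flag again
 */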
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		/* Emulate a byte cmpxchg with a compare-and-swap on the
		 * word-aligned word that contains the byte. */
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,0(%4)\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%2\n"
			"	or	%1,%3\n"
			"	cs	%0,%1,0(%4)\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (addr),
			  "d" (~(255 << shift))
			: "memory", "cc");
		return prev >> shift;
	case 2:
		/* Same trick for a halfword cmpxchg. */
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,0(%4)\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%2\n"
			"	or	%1,%3\n"
			"	cs	%0,%1,0(%4)\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp)
			: "d" (old << shift), "d" (new << shift), "a" (addr),
			  "d" (~(65535 << shift))
			: "memory", "cc");
		return prev >> shift;
	case 4:
		asm volatile(
			"	cs	%0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc");
		return prev;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	csg	%0,%2,0(%3)\n"
			: "=&d" (prev) : "0" (old), "d" (new), "a" (ptr)
			: "memory", "cc");
		return prev;
#endif /* __s390x__ */
	}
	return old;
}
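/*
 * Usage sketch (illustrative, not part of the original header): the
 * common cmpxchg() idiom is a read-modify-write retry loop.  "counter"
 * is a hypothetical shared variable:
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * If another CPU modified counter between the read and the cs, the
 * return value differs from "old" and the loop retries.
 */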
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while(0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

#define set_mb(var, value)	do { var = value; mb(); } while (0)
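/*
 * Usage sketch (illustrative, not part of the original header):
 * smp_wmb() on the producer side pairs with smp_rmb() on the consumer
 * side.  "data", "ready" and use() are hypothetical:
 *
 *	// producer
 *	data = 42;
 *	smp_wmb();	// order the data store before the flag store
 *	ready = 1;
 *
 *	// consumer
 *	while (!ready)
 *		;
 *	smp_rmb();	// order the flag load before the data load
 *	use(data);	// guaranteed to see 42
 */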
#ifdef __s390x__

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctlg	%1,%2,0(%0)\n"			\
		: : "a" (&array), "i" (low), "i" (high),	\
		    "m" (*(addrtype *)(array)));		\
})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctg	%2,%3,0(%1)\n"			\
		: "=m" (*(addrtype *)(array))			\
		: "a" (&array), "i" (low), "i" (high));		\
})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctl	%1,%2,0(%0)\n"			\
		: : "a" (&array), "i" (low), "i" (high),	\
		    "m" (*(addrtype *)(array)));		\
})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctl	%2,%3,0(%1)\n"			\
		: "=m" (*(addrtype *)(array))			\
		: "a" (&array), "i" (low), "i" (high));		\
})

#endif /* __s390x__ */
#define __ctl_set_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy |= 1UL << (bit);	\
	__ctl_load(__dummy, cr, cr);	\
})

#define __ctl_clear_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy &= ~(1UL << (bit));	\
	__ctl_load(__dummy, cr, cr);	\
})
#include <linux/irqflags.h>

/*
 * Set the PSW mask, except for the first byte, which is left
 * unchanged by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	__load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(PSW_KERNEL_BITS)
#define local_mcck_disable() __set_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK)
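/*
 * Usage sketch (illustrative, not part of the original header): machine
 * checks can be masked around a short critical section:
 *
 *	local_mcck_disable();
 *	// ... code that must not be interrupted by a machine check ...
 *	local_mcck_enable();
 */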
#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else /* CONFIG_SMP */

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */
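/*
 * Usage sketch (illustrative, not part of the original header):
 * ctl_set_bit()/ctl_clear_bit() update one bit of a control register,
 * on all CPUs under CONFIG_SMP and on the local CPU otherwise.  The
 * register and bit numbers below are arbitrary:
 *
 *	ctl_set_bit(0, 4);	// set bit 4 of control register 0
 *	// ...
 *	ctl_clear_bit(0, 4);	// and clear it again
 */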
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* __ASM_SYSTEM_H */