/*
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/lowcore.h>

#ifdef __KERNEL__
struct task_struct;

extern struct task_struct *__switch_to(void *, void *);
static inline void save_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	std	0,%O0+8(%R0)\n"
		"	std	2,%O0+24(%R0)\n"
		"	std	4,%O0+40(%R0)\n"
		"	std	6,%O0+56(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	stfpc	%0\n"
		"	std	1,%O0+16(%R0)\n"
		"	std	3,%O0+32(%R0)\n"
		"	std	5,%O0+48(%R0)\n"
		"	std	7,%O0+64(%R0)\n"
		"	std	8,%O0+72(%R0)\n"
		"	std	9,%O0+80(%R0)\n"
		"	std	10,%O0+88(%R0)\n"
		"	std	11,%O0+96(%R0)\n"
		"	std	12,%O0+104(%R0)\n"
		"	std	13,%O0+112(%R0)\n"
		"	std	14,%O0+120(%R0)\n"
		"	std	15,%O0+128(%R0)\n"
		: "=Q" (*fpregs) : "Q" (*fpregs));
}
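
/*
 * Without the IEEE floating point facility the machine provides only the
 * four original S/390 floating point registers 0, 2, 4 and 6, which is why
 * the unconditional block above saves just those. On machines with the
 * facility the second block additionally saves the floating point control
 * register (stfpc) and the remaining twelve registers.
 */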
static inline void restore_fp_regs(s390_fp_regs *fpregs)
{
	asm volatile(
		"	ld	0,%O0+8(%R0)\n"
		"	ld	2,%O0+24(%R0)\n"
		"	ld	4,%O0+40(%R0)\n"
		"	ld	6,%O0+56(%R0)\n"
		: : "Q" (*fpregs));
	if (!MACHINE_HAS_IEEE)
		return;
	asm volatile(
		"	lfpc	%0\n"
		"	ld	1,%O0+16(%R0)\n"
		"	ld	3,%O0+32(%R0)\n"
		"	ld	5,%O0+48(%R0)\n"
		"	ld	7,%O0+64(%R0)\n"
		"	ld	8,%O0+72(%R0)\n"
		"	ld	9,%O0+80(%R0)\n"
		"	ld	10,%O0+88(%R0)\n"
		"	ld	11,%O0+96(%R0)\n"
		"	ld	12,%O0+104(%R0)\n"
		"	ld	13,%O0+112(%R0)\n"
		"	ld	14,%O0+120(%R0)\n"
		"	ld	15,%O0+128(%R0)\n"
		: : "Q" (*fpregs));
}
static inline void save_access_regs(unsigned int *acrs)
{
	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
}
static inline void restore_access_regs(unsigned int *acrs)
{
	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
}
#define switch_to(prev,next,last) do {					\
	if (prev == next)						\
		break;							\
	save_fp_regs(&prev->thread.fp_regs);				\
	restore_fp_regs(&next->thread.fp_regs);				\
	save_access_regs(&prev->thread.acrs[0]);			\
	restore_access_regs(&next->thread.acrs[0]);			\
	prev = __switch_to(prev,next);					\
} while (0)
extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
#ifdef CONFIG_PFAULT
extern void pfault_irq_init(void);
extern int pfault_init(void);
extern void pfault_fini(void);
#else /* CONFIG_PFAULT */
#define pfault_irq_init()	do { } while (0)
#define pfault_init()		({-1;})
#define pfault_fini()		do { } while (0)
#endif /* CONFIG_PFAULT */
extern void cmma_init(void);
extern int memcpy_real(void *, void *, size_t);
#define finish_arch_switch(prev) do {					\
	set_fs(current->thread.mm_segment);				\
	account_vtime(prev, current);					\
} while (0)
#define nop() asm volatile("nop")
#define xchg(ptr,x)							  \
({									  \
	__typeof__(*(ptr)) __ret;					  \
	__ret = (__typeof__(*(ptr)))					  \
		__xchg((unsigned long)(x), (void *)(ptr),sizeof(*(ptr))); \
	__ret;								  \
})

extern void __xchg_called_with_bad_pointer(void);
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	unsigned long addr, old;
	int shift;

	switch (size) {
	case 1:
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,%4\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,%4\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) addr)
			: "d" (x << shift), "d" (~(255 << shift)),
			  "Q" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,%4\n"
			"0:	lr	0,%0\n"
			"	nr	0,%3\n"
			"	or	0,%2\n"
			"	cs	%0,0,%4\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) addr)
			: "d" (x << shift), "d" (~(65535 << shift)),
			  "Q" (*(int *) addr) : "memory", "cc", "0");
		return old >> shift;
	case 4:
		asm volatile(
			"	l	%0,%3\n"
			"0:	cs	%0,%2,%3\n"
			"	jl	0b\n"
			: "=&d" (old), "=Q" (*(int *) ptr)
			: "d" (x), "Q" (*(int *) ptr)
			: "memory", "cc");
		return old;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	lg	%0,%3\n"
			"0:	csg	%0,%2,%3\n"
			"	jl	0b\n"
			: "=&d" (old), "=m" (*(long *) ptr)
			: "d" (x), "Q" (*(long *) ptr)
			: "memory", "cc");
		return old;
#endif /* __s390x__ */
	}
	__xchg_called_with_bad_pointer();
	return x;
}
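
/*
 * The sub-word cases above emulate 1- and 2-byte exchanges with the
 * word-sized "cs" instruction on the aligned word containing the operand.
 * The shift expression positions the operand within that big-endian word:
 * e.g. for a byte at addr & 3 == 1, shift = (3 ^ 1) << 3 = 16, so the byte
 * sits in bits 16..23 of the word (the second byte in big-endian order)
 * and ~(255 << 16) masks exactly that byte out before the new value is
 * or-ed in.
 *
 * Illustrative sketch, not part of the original header: xchg() returns the
 * previous value, so it can serve as a simple test-and-set primitive. The
 * function and variable names below are hypothetical.
 */
static inline void __example_busy_lock(unsigned int *lock)
{
	/* spin until the previous value was 0, i.e. we acquired the lock */
	while (xchg(lock, 1) != 0)
		;
}

static inline void __example_busy_unlock(unsigned int *lock)
{
	xchg(lock, 0);	/* atomically store 0 to release */
}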
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

#define cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
				       (unsigned long)(n), sizeof(*(ptr))))
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	unsigned long addr, prev, tmp;
	int shift;

	switch (size) {
	case 1:
		addr = (unsigned long) ptr;
		shift = (3 ^ (addr & 3)) << 3;
		addr ^= addr & 3;
		asm volatile(
			"	l	%0,%2\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%3\n"
			"	or	%1,%4\n"
			"	cs	%0,%1,%2\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
			: "d" (old << shift), "d" (new << shift),
			  "d" (~(255 << shift)), "Q" (*(int *) ptr)
			: "memory", "cc");
		return prev >> shift;
	case 2:
		addr = (unsigned long) ptr;
		shift = (2 ^ (addr & 2)) << 3;
		addr ^= addr & 2;
		asm volatile(
			"	l	%0,%2\n"
			"0:	nr	%0,%5\n"
			"	lr	%1,%0\n"
			"	or	%0,%3\n"
			"	or	%1,%4\n"
			"	cs	%0,%1,%2\n"
			"	jnl	1f\n"
			"	xr	%1,%0\n"
			"	nr	%1,%5\n"
			"	jnz	0b\n"
			"1:"
			: "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr)
			: "d" (old << shift), "d" (new << shift),
			  "d" (~(65535 << shift)), "Q" (*(int *) ptr)
			: "memory", "cc");
		return prev >> shift;
	case 4:
		asm volatile(
			"	cs	%0,%3,%1\n"
			: "=&d" (prev), "=Q" (*(int *) ptr)
			: "0" (old), "d" (new), "Q" (*(int *) ptr)
			: "memory", "cc");
		return prev;
#ifdef __s390x__
	case 8:
		asm volatile(
			"	csg	%0,%3,%1\n"
			: "=&d" (prev), "=Q" (*(long *) ptr)
			: "0" (old), "d" (new), "Q" (*(long *) ptr)
			: "memory", "cc");
		return prev;
#endif /* __s390x__ */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}
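
/*
 * Illustrative sketch, not part of the original header: the canonical
 * cmpxchg() retry loop. Success is detected by comparing the return value
 * with the expected old value, as described in the comment above cmpxchg().
 * The function name is hypothetical.
 */
static inline void __example_atomic_add(int *ptr, int val)
{
	int old;

	do {
		old = *ptr;	/* snapshot the current value */
		/* retry if another CPU changed *ptr since the snapshot */
	} while (cmpxchg(ptr, old, old + val) != old);
}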
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation and makes sure that
 * all memory ops have completed wrt other CPUs (see 7-15 POP DJB).
 */

#define eieio()	asm volatile("bcr 15,0" : : : "memory")
#define SYNC_OTHER_CORES(x)	eieio()
#define mb()	eieio()
#define rmb()	eieio()
#define wmb()	eieio()
#define read_barrier_depends()	do { } while(0)
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

#define set_mb(var, value)	do { var = value; mb(); } while (0)
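
/*
 * Illustrative sketch, not part of the original header: the classic
 * producer/consumer pairing of smp_wmb() and smp_rmb(). All names are
 * hypothetical; on s390 both barriers expand to "bcr 15,0".
 */
static inline void __example_publish(int *data, int *flag, int value)
{
	*data = value;
	smp_wmb();	/* order the data store before the flag store */
	*flag = 1;
}

static inline int __example_consume(int *data, int *flag)
{
	if (!*flag)
		return -1;
	smp_rmb();	/* order the flag load before the data load */
	return *data;
}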
#ifdef __s390x__

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctlg	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
	})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctg	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#else /* __s390x__ */

#define __ctl_load(array, low, high) ({				\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	lctl	%1,%2,%0\n"			\
		: : "Q" (*(addrtype *)(&array)),		\
		    "i" (low), "i" (high));			\
	})

#define __ctl_store(array, low, high) ({			\
	typedef struct { char _[sizeof(array)]; } addrtype;	\
	asm volatile(						\
		"	stctl	%1,%2,%0\n"			\
		: "=Q" (*(addrtype *)(&array))			\
		: "i" (low), "i" (high));			\
	})

#endif /* __s390x__ */
#define __ctl_set_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy |= 1UL << (bit);	\
	__ctl_load(__dummy, cr, cr);	\
})

#define __ctl_clear_bit(cr, bit) ({	\
	unsigned long __dummy;		\
	__ctl_store(__dummy, cr, cr);	\
	__dummy &= ~(1UL << (bit));	\
	__ctl_load(__dummy, cr, cr);	\
})
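
/*
 * Illustrative sketch, not part of the original header: __ctl_set_bit()
 * and __ctl_clear_bit() read-modify-write one control register on the
 * local CPU only. The register and bit numbers below are placeholders;
 * for machine-wide changes use ctl_set_bit()/ctl_clear_bit() further down,
 * which broadcast to all CPUs on SMP.
 */
static inline void __example_toggle_cr_bit(void)
{
	__ctl_set_bit(0, 17);	/* set bit 17 (as in 1UL << 17) of CR0 */
	__ctl_clear_bit(0, 17);	/* and clear it again */
}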
#include <linux/irqflags.h>

#include <asm-generic/cmpxchg-local.h>
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 1:
	case 2:
	case 4:
#ifdef __s390x__
	case 8:
#endif
		return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}
/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#ifdef __s390x__
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#else /* __s390x__ */
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif /* __s390x__ */
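
/*
 * Illustrative sketch, not part of the original header: cmpxchg_local() is
 * only atomic with respect to code running on the same CPU, e.g. for
 * per-cpu data. The names below are hypothetical.
 */
static inline long __example_local_cas(long *percpu_slot, long old, long new)
{
	/* returns the previous value; it equals old on success */
	return cmpxchg_local(percpu_slot, old, new);
}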
/*
 * Use to set psw mask except for the first byte which
 * won't be changed by this function.
 */
static inline void
__set_psw_mask(unsigned long mask)
{
	__load_psw_mask(mask | (__raw_local_irq_stosm(0x00) & ~(-1UL >> 8)));
}

#define local_mcck_enable()  __set_psw_mask(psw_kernel_bits)
#define local_mcck_disable() __set_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK)
#ifdef CONFIG_SMP

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else /* CONFIG_SMP */

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif /* CONFIG_SMP */
static inline unsigned int stfl(void)
{
	asm volatile(
		"	.insn	s,0xb2b10000,0(0)\n" /* stfl */
		"0:\n"
		EX_TABLE(0b,0b));
	return S390_lowcore.stfl_fac_list;
}
static inline int __stfle(unsigned long long *list, int doublewords)
{
	typedef struct { unsigned long long _[doublewords]; } addrtype;
	register unsigned long __nr asm("0") = doublewords - 1;

	asm volatile(".insn s,0xb2b00000,%0" /* stfle */
		     : "=m" (*(addrtype *) list), "+d" (__nr) : : "cc");
	return __nr + 1;
}
static inline int stfle(unsigned long long *list, int doublewords)
{
	if (!(stfl() & (1UL << 24)))	/* check for stfle */
		return -EOPNOTSUPP;
	return __stfle(list, doublewords);
}
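
/*
 * Illustrative sketch, not part of the original header: querying a
 * facility bit with stfle(). Facility bits are numbered from the leftmost
 * (most significant) bit of the first doubleword, hence the 63 - nr shift.
 * The function name is hypothetical and nr must be < 64 here.
 */
static inline int __example_has_facility(int nr)
{
	unsigned long long facility_list[4];

	if (stfle(facility_list, 4) < 1)
		return 0;	/* stfle unavailable or nothing stored */
	return (facility_list[0] >> (63 - nr)) & 1;
}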
static inline unsigned short stap(void)
{
	unsigned short cpu_address;

	asm volatile("stap %0" : "=m" (cpu_address));
	return cpu_address;
}
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
#define arch_align_stack(x) (x)
static inline int tprot(unsigned long addr)
{
	int rc = -EFAULT;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (rc) : "a" (addr) : "cc");
	return rc;
}
#endif /* __KERNEL__ */