/*
 *  include/asm-s390/system.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *
 *  Derived from "include/asm-i386/system.h"
 */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#ifdef __KERNEL__
#include <asm/lowcore.h>
#endif
#include <linux/kernel.h>
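
/*
 * switch_to() is a no-op when prev and next are the same task.  Otherwise
 * it saves prev's floating point registers, loads next's, and calls
 * resume() to switch kernel stacks; resume() hands back the task that was
 * running before the switch, which is what ends up in 'last'.
 */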
#define prepare_to_switch()	do { } while(0)
#define switch_to(prev,next,last) do {              \
        if (prev == next)                           \
                break;                              \
        save_fp_regs1(&prev->thread.fp_regs);       \
        restore_fp_regs1(&next->thread.fp_regs);    \
        last = resume(&prev->thread,&next->thread); \
} while (0)

struct task_struct;

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) \
  ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
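
/*
 * xchg() atomically replaces *ptr with x and returns the previous value,
 * e.g.  old = xchg(&lock_word, new);  (lock_word and new are made-up names).
 * S/390 only provides a full-word compare and swap (CS), so the 1- and
 * 2-byte cases below are emulated with a CS retry loop on a full word,
 * using BRAS to address a small table of ICM/STCM templates and EX to
 * execute the pair that matches the byte offset within the word.
 */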
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
        switch (size) {
                case 1:
                        asm volatile (
                                "   lhi   1,3\n"
                                "   nr    1,%0\n"     /* isolate last 2 bits */
                                "   xr    1,%0\n"     /* align ptr */
                                "   bras  2,0f\n"
                                "   icm   1,8,%1\n"   /* for ptr&3 == 0 */
                                "   stcm  0,8,%1\n"
                                "   icm   1,4,%1\n"   /* for ptr&3 == 1 */
                                "   stcm  0,4,%1\n"
                                "   icm   1,2,%1\n"   /* for ptr&3 == 2 */
                                "   stcm  0,2,%1\n"
                                "   icm   1,1,%1\n"   /* for ptr&3 == 3 */
                                "   stcm  0,1,%1\n"
                                "0: sll   1,3\n"
                                "   la    2,0(1,2)\n" /* r2 points to an icm */
                                "   l     0,%1\n"     /* get fullword */
                                "1: lr    1,0\n"      /* cs loop */
                                "   ex    0,0(2)\n"   /* insert x */
                                "   cs    0,1,%1\n"
                                "   jl    1b\n"
                                "   ex    0,4(2)"     /* store *ptr to x */
                                : "+a&" (ptr) : "m" (x)
                                : "memory", "0", "1", "2");
                        break;
                case 2:
                        if(((__u32)ptr)&1)
                                panic("misaligned (__u16 *) in __xchg\n");
                        asm volatile (
                                "   lhi   1,2\n"
                                "   nr    1,%0\n"     /* isolate bit 2^1 */
                                "   xr    1,%0\n"     /* align ptr */
                                "   bras  2,0f\n"
                                "   icm   1,12,%1\n"  /* for ptr&2 == 0 */
                                "   stcm  0,12,%1\n"
                                "   icm   1,3,%1\n"   /* for ptr&2 == 1 */
                                "   stcm  0,3,%1\n"
                                "0: sll   1,2\n"
                                "   la    2,0(1,2)\n" /* r2 points to an icm */
                                "   l     0,%1\n"     /* get fullword */
                                "1: lr    1,0\n"      /* cs loop */
                                "   ex    0,0(2)\n"   /* insert x */
                                "   cs    0,1,%1\n"
                                "   jl    1b\n"
                                "   ex    0,4(2)"     /* store *ptr to x */
                                : "+a&" (ptr) : "m" (x)
                                : "memory", "0", "1", "2");
                        break;
                case 4:
                        if(((__u32)ptr)&3)
                                panic("misaligned (__u32 *) in __xchg\n");
                        asm volatile (
                                "   l   0,0(%1)\n"
                                "0: cs  0,%0,0(%1)\n"
                                "   jl  0b\n"
                                "   lr  %0,0\n"
                                : "+d&" (x) : "a" (ptr)
                                : "memory", "0" );
                        break;
                default:
                        abort();
        }
        return x;
}

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * This is very similar to the ppc eieio/sync instruction in that it
 * does a checkpoint synchronisation & makes sure that
 * all memory ops have completed wrt other CPUs ( see 7-15 POP DJB ).
 */
#define eieio()              __asm__ __volatile__ ("BCR 15,0")
# define SYNC_OTHER_CORES(x) eieio()
#define mb()                 eieio()
#define rmb()                eieio()
#define wmb()                eieio()

#define set_mb(var, value)   do { var = value; mb(); } while (0)
#define set_wmb(var, value)  do { var = value; wmb(); } while (0)
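
/*
 * BCR 15,0 is a branch on condition with mask 15 to register 0: it never
 * branches, but it performs the checkpoint-synchronisation function, so
 * mb(), rmb() and wmb() all expand to the same full barrier here.
 * Illustrative use when publishing data to another CPU or a device
 * (buf, slot and head are made-up names):
 *
 *      buf[slot] = data;
 *      wmb();
 *      head = slot;
 *
 * The wmb() makes the payload visible before the index update.
 */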

/* interrupt control.. */
#define __sti() ({ \
        __u8 dummy; \
        __asm__ __volatile__ ( \
                "stosm %0,0x03" : "=m" (dummy) : : "memory"); \
        })

#define __cli() ({ \
        __u32 flags; \
        __asm__ __volatile__ ( \
                "stnsm %0,0xFC" : "=m" (flags) : : "memory"); \
        flags; \
        })

#define __save_flags(x) \
        __asm__ __volatile__("stosm %0,0" : "=m" (x) : : "memory")

#define __restore_flags(x) \
        __asm__ __volatile__("ssm %0" : : "m" (x) : "memory")
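
/*
 * STOSM stores the current PSW system mask and then ORs the immediate
 * into it: 0x03 sets the I/O and external interrupt bits, so __sti()
 * enables interrupts and __save_flags() (mask 0) just reads the mask.
 * STNSM stores the mask and ANDs the immediate: 0xFC clears those two
 * bits, so __cli() disables interrupts and returns the old mask.  SSM in
 * __restore_flags() simply reloads a previously saved mask.
 */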

#define __ctl_set_bit(cr, bit) ({ \
        __u8 dummy[16]; \
        __asm__ __volatile__ ( \
                "    la    1,%0\n"       /* align to 8 byte */ \
                "    ahi   1,7\n" \
                "    srl   1,3\n" \
                "    sll   1,3\n" \
                "    bras  2,0f\n"       /* skip indirect insns */ \
                "    stctl 0,0,0(1)\n" \
                "    lctl  0,0,0(1)\n" \
                "0:  ex    %1,0(2)\n"    /* execute stctl */ \
                "    l     0,0(1)\n" \
                "    or    0,%2\n"       /* set the bit */ \
                "    st    0,0(1)\n" \
                "1:  ex    %1,4(2)"      /* execute lctl */ \
                : "=m" (dummy) : "a" (cr*17), "a" (1<<(bit)) \
                : "0", "1", "2"); \
        })

#define __ctl_clear_bit(cr, bit) ({ \
        __u8 dummy[16]; \
        __asm__ __volatile__ ( \
                "    la    1,%0\n"       /* align to 8 byte */ \
                "    ahi   1,7\n" \
                "    srl   1,3\n" \
                "    sll   1,3\n" \
                "    bras  2,0f\n"       /* skip indirect insns */ \
                "    stctl 0,0,0(1)\n" \
                "    lctl  0,0,0(1)\n" \
                "0:  ex    %1,0(2)\n"    /* execute stctl */ \
                "    l     0,0(1)\n" \
                "    nr    0,%2\n"       /* clear the bit */ \
                "    st    0,0(1)\n" \
                "1:  ex    %1,4(2)"      /* execute lctl */ \
                : "=m" (dummy) : "a" (cr*17), "a" (~(1<<(bit))) \
                : "0", "1", "2"); \
        })

/* For spinlocks etc */
#define local_irq_save(x)      ((x) = __cli())
#define local_irq_restore(x)   __restore_flags(x)
#define local_irq_disable()    __cli()
#define local_irq_enable()     __sti()

#ifdef CONFIG_SMP

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)

extern void smp_ctl_set_bit(int cr, int bit);
extern void smp_ctl_clear_bit(int cr, int bit);
#define ctl_set_bit(cr, bit) smp_ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) smp_ctl_clear_bit(cr, bit)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)

#define ctl_set_bit(cr, bit) __ctl_set_bit(cr, bit)
#define ctl_clear_bit(cr, bit) __ctl_clear_bit(cr, bit)

#endif
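
/*
 * On SMP kernels cli()/sti() and the flag helpers act on the global
 * interrupt lock, and ctl_set_bit()/ctl_clear_bit() go through
 * smp_ctl_set_bit()/smp_ctl_clear_bit() so the control register change
 * can be applied on every CPU; on UP they fall back to the local
 * __cli()/__sti()/__ctl_* versions above.
 */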

#ifdef __KERNEL__
extern struct task_struct *resume(void *, void *);

extern int save_fp_regs1(s390_fp_regs *fpregs);
extern void save_fp_regs(s390_fp_regs *fpregs);
extern int restore_fp_regs1(s390_fp_regs *fpregs);
extern void restore_fp_regs(s390_fp_regs *fpregs);
extern void show_crashed_task_info(void);
#endif /* __KERNEL__ */

#endif /* __ASM_SYSTEM_H */