Linux-2.3.3 and a short hiatus..
[davej-history.git] / include/asm-i386/system.h
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
#define switch_to(prev,next,last) do { \
	asm volatile("pushl %%esi\n\t" \
		     "pushl %%edi\n\t" \
		     "pushl %%ebp\n\t" \
		     "movl %%esp,%0\n\t"	/* save ESP */ \
		     "movl %3,%%esp\n\t"	/* restore ESP */ \
		     "movl $1f,%1\n\t"		/* save EIP */ \
		     "pushl %4\n\t"		/* restore EIP */ \
		     "jmp __switch_to\n" \
		     "1:\t" \
		     "popl %%ebp\n\t" \
		     "popl %%edi\n\t" \
		     "popl %%esi\n\t" \
		     :"=m" (prev->tss.esp),"=m" (prev->tss.eip), \
		      "=b" (last) \
		     :"m" (next->tss.esp),"m" (next->tss.eip), \
		      "a" (prev), "d" (next), \
		      "b" (prev)); \
} while (0)
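/*
 * Usage sketch (illustrative, not part of this header): the scheduler
 * switches from 'prev' to 'next' and, on the stack of the task being
 * resumed, gets back the task it came from in 'last'.  Roughly:
 *
 *	struct task_struct *prev = current;
 *	struct task_struct *next = pick_next_somehow();	- hypothetical selection step
 *	switch_to(prev, next, prev);
 *	- from here on, 'prev' names the task that ran before us
 */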
#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)
#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)
#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
static inline unsigned long _get_base(char * addr)
{
	unsigned long __base;
	__asm__("movb %3,%%dh\n\t"
		"movb %2,%%dl\n\t"
		"shll $16,%%edx\n\t"
		"movw %1,%%dx"
		:"=&d" (__base)
		:"m" (*((addr)+2)),
		 "m" (*((addr)+4)),
		 "m" (*((addr)+7)));
	return __base;
}
#define get_base(ldt) _get_base( ((char *)&(ldt)) )
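/*
 * Usage sketch (illustrative, not part of this header): these helpers patch
 * the base and limit fields of a segment descriptor in place, assuming a
 * 4 KB-granular descriptor.  With a hypothetical local copy of an LDT entry:
 *
 *	struct desc_struct entry;
 *	set_base(entry, 0x00100000);	- base  = 1 MB
 *	set_limit(entry, 0x00010000);	- limit = 64 KB, stored in 4 KB units
 *	unsigned long base = get_base(entry);
 */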
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value) \
	asm volatile("\n" \
		"1:\t" \
		"movl %0,%%" #seg "\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:\t" \
		"pushl $0\n\t" \
		"popl %%" #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		".align 4\n\t" \
		".long 1b,3b\n" \
		".previous" \
		: :"m" (*(unsigned int *)&(value)))
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__("movl %0,%%cr0": :"r" (x));
#define stts() write_cr0(8 | read_cr0())
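/*
 * Usage sketch (illustrative, not part of this header): stts() sets CR0.TS
 * (bit 3, hence the 8) so the next FPU instruction raises a
 * device-not-available trap, and clts() clears it again once the FPU state
 * has been restored - the basis of lazy FPU switching:
 *
 *	stts();		- when switching away from an FPU-using task
 *	...
 *	clts();		- in the trap handler, after restoring FPU state
 */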
#endif	/* __KERNEL__ */
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
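/*
 * Usage sketch (illustrative, not part of this header): lsl yields the
 * segment limit, i.e. the highest valid offset, so get_limit() returns the
 * segment size in the descriptor's granularity:
 *
 *	unsigned long size = get_limit(some_selector);	- hypothetical selector
 */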
#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 */
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
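/*
 * Usage sketch (illustrative, not part of this header): xchg() atomically
 * swaps a value with a memory location and returns the old contents, and
 * tas() stores 1, so a crude test-and-set flag looks like:
 *
 *	static int hypothetical_flag;	- not a real kernel symbol
 *
 *	if (tas(&hypothetical_flag) == 0) {
 *		... we were first to set the flag ...
 *		hypothetical_flag = 0;
 *	}
 */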
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 */
#define mb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("": : :"memory")
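/*
 * Usage sketch (illustrative, not part of this header): publish data, then
 * a ready flag, with a write barrier in between; the consumer pairs it with
 * a read barrier.  'data', 'flag' and use() are hypothetical names:
 *
 *	producer:
 *		data = 42;
 *		wmb();		- order the data write before the flag write
 *		flag = 1;
 *
 *	consumer:
 *		if (flag) {
 *			rmb();	- order the flag read before the data read
 *			use(data);
 *		}
 */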
/* interrupt control.. */
#define __sti() __asm__ __volatile__ ("sti": : :"memory")
#define __cli() __asm__ __volatile__ ("cli": : :"memory")
#define __save_flags(x) \
	__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
#define __restore_flags(x) \
	__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
#ifdef __SMP__

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)

#endif
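/*
 * Usage sketch (illustrative, not part of this header): the classic pattern
 * for a short critical section that must not race with interrupt handlers:
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);
 *	cli();
 *	... touch data shared with an interrupt handler ...
 *	restore_flags(flags);
 */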
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
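/*
 * Usage sketch (illustrative, not part of this header): a driver brackets
 * an hlt-sensitive operation so the idle loop will not execute hlt while
 * it is in progress:
 *
 *	disable_hlt();
 *	... perform the timing-critical I/O ...
 *	enable_hlt();
 */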
#endif