/* include/asm-i386/system.h (davej-history.git, Linux 2.1.116pre2) */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
/*
 * Entry into gdt where to find first TSS. GDT layout:
 *   0 - null
 *   1 - not used
 *   2 - kernel code segment
 *   3 - kernel data segment
 *   4 - user code segment
 *   5 - user data segment
 *   6 - not used
 *   7 - not used
 *   8 - APM BIOS support
 *   9 - APM BIOS support
 *  10 - APM BIOS support
 *  11 - APM BIOS support
 *  12 - TSS #0
 *  13 - LDT #0
 *  14 - TSS #1
 *  15 - LDT #1
 */
#define FIRST_TSS_ENTRY 12
#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
#define load_TR(n) __asm__ __volatile__("ltr %%ax": /* no output */ :"a" (_TSS(n)))
#define load_ldt(n) __asm__ __volatile__("lldt %%ax": /* no output */ :"a" (_LDT(n)))
#define store_TR(n) \
__asm__("str %%ax\n\t" \
	"subl %2,%%eax\n\t" \
	"shrl $4,%%eax" \
	:"=a" (n) \
	:"0" (0),"i" (FIRST_TSS_ENTRY<<3))
#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
/*
 * We do most of the task switching in C, but we need
 * to do the EIP/ESP switch in assembly..
 */
#define switch_to(prev,next) do { \
	unsigned long eax, edx, ecx; \
	asm volatile("pushl %%ebx\n\t" \
		"pushl %%esi\n\t" \
		"pushl %%edi\n\t" \
		"pushl %%ebp\n\t" \
		"movl %%esp,%0\n\t"	/* save ESP */ \
		"movl %5,%%esp\n\t"	/* restore ESP */ \
		"movl $1f,%1\n\t"	/* save EIP */ \
		"pushl %6\n\t"		/* restore EIP */ \
		"jmp __switch_to\n" \
		"1:\t" \
		"popl %%ebp\n\t" \
		"popl %%edi\n\t" \
		"popl %%esi\n\t" \
		"popl %%ebx" \
		:"=m" (prev->tss.esp),"=m" (prev->tss.eip), \
		 "=a" (eax), "=d" (edx), "=c" (ecx) \
		:"m" (next->tss.esp),"m" (next->tss.eip), \
		 "a" (prev), "d" (next)); \
} while (0)
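
/*
 * Usage sketch (roughly what the scheduler core does; pick_next() is a
 * hypothetical stand-in for the goodness() selection loop in
 * kernel/sched.c):
 *
 *	struct task_struct *prev = current;
 *	struct task_struct *next = pick_next();		hypothetical
 *	if (prev != next)
 *		switch_to(prev, next);
 *
 * Control returns past the macro only when some other task later
 * switches back to 'prev'.
 */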
#define _set_base(addr,base) \
__asm__("movw %%dx,%0\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%1\n\t" \
	"movb %%dh,%2" \
	: /* no output */ \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "d" (base) \
	:"dx")
#define _set_limit(addr,limit) \
__asm__("movw %%dx,%0\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %1,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%1" \
	: /* no output */ \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "d" (limit) \
	:"dx")
#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
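
/*
 * Worked example (illustrative): set_limit() converts a byte count into
 * the 4K-granular limit field the descriptor wants, i.e. the number of
 * pages minus one.  For a 64MB segment:
 *
 *	set_limit(desc, 0x4000000);	stores (0x4000000-1)>>12 == 0x3fff
 *
 * where 'desc' is a hypothetical struct desc_struct variable.
 */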
static inline unsigned long _get_base(char * addr)
{
	unsigned long __base;
	__asm__("movb %3,%%dh\n\t"
		"movb %2,%%dl\n\t"
		"shll $16,%%edx\n\t"
		"movw %1,%%dx"
		:"=&d" (__base)
		:"m" (*((addr)+2)),
		 "m" (*((addr)+4)),
		 "m" (*((addr)+7)));
	return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )
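
/*
 * Illustrative round trip ('desc' again hypothetical): _get_base()
 * reassembles the three base fields that _set_base() scattered over
 * bytes 2-3, 4 and 7 of the descriptor.
 *
 *	set_base(desc, 0xc0000000);
 *	base = get_base(desc);		yields 0xc0000000 again
 */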
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value) \
	asm volatile("\n" \
		"1:\t" \
		"movl %0,%%" #seg "\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:\t" \
		"pushl $0\n\t" \
		"popl %%" #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		".align 4\n\t" \
		".long 1b,3b\n" \
		".previous" \
		: :"m" (*(unsigned int *)&(value)))
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define stts() \
__asm__ __volatile__ ( \
	"movl %%cr0,%%eax\n\t" \
	"orl $8,%%eax\n\t" \
	"movl %%eax,%%cr0" \
	: /* no outputs */ \
	: /* no inputs */ \
	:"ax")
#endif	/* __KERNEL__ */
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
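
/*
 * Illustrative use (hypothetical selector value): lsl loads the
 * byte-scaled limit of the segment the selector refers to, so adding
 * one yields the segment's size in bytes.
 *
 *	unsigned long bytes = get_limit(0x0f);
 */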
#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 */
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
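
/*
 * Example (a deliberately crude sketch, not how the kernel's real
 * spinlocks work): because xchg is atomic, tas() is enough to build a
 * test-and-set lock.
 *
 *	static volatile int lock = 0;
 *
 *	while (tas(&lock))
 *		;			spin while the old value was 1
 *	...critical section...
 *	lock = 0;			release
 */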
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
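
/*
 * Typical use (illustrative; 'ring' and DEVICE_OWNED are hypothetical):
 * make the payload globally visible before the flag that tells the
 * device to look at it.
 *
 *	ring->data = value;
 *	mb();
 *	ring->owner = DEVICE_OWNED;
 */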
/* interrupt control.. */
#define __sti() __asm__ __volatile__ ("sti": : :"memory")
#define __cli() __asm__ __volatile__ ("cli": : :"memory")
#define __save_flags(x) \
__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
#define __restore_flags(x) \
__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
#ifdef __SMP__

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)

#endif
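
/*
 * The classic driver idiom built on these (sketch; works on both UP and
 * SMP because the macros above select the right implementation):
 *
 *	unsigned long flags;
 *
 *	save_flags(flags);
 *	cli();
 *	...touch data shared with an interrupt handler...
 *	restore_flags(flags);
 *
 * Using restore_flags() rather than a bare sti() keeps this safe when
 * the caller already runs with interrupts disabled.
 */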
#define _set_gate(gate_addr,type,dpl,addr) \
__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
	"movw %2,%%dx\n\t" \
	"movl %%eax,%0\n\t" \
	"movl %%edx,%1" \
	:"=m" (*((long *) (gate_addr))), \
	 "=m" (*(1+(long *) (gate_addr))) \
	:"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
	 "d" ((char *) (addr)),"a" (__KERNEL_CS << 16) \
	:"ax","dx")
#define set_intr_gate(n,addr) \
	_set_gate(idt+(n),14,0,addr)

#define set_trap_gate(n,addr) \
	_set_gate(idt+(n),15,0,addr)

#define set_system_gate(n,addr) \
	_set_gate(idt+(n),15,3,addr)

#define set_call_gate(a,addr) \
	_set_gate(a,12,3,addr)
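
/*
 * What the types and DPLs mean, with a trap_init()-style sketch (the
 * handler names are the usual arch/i386 ones, but the exact gate type
 * chosen per vector here is illustrative): type 14 is a 32-bit
 * interrupt gate (interrupts disabled on entry), type 15 a 32-bit trap
 * gate (interrupt flag left alone), and DPL 3 lets user mode go through
 * the gate directly.
 *
 *	set_trap_gate(0, &divide_error);
 *	set_intr_gate(14, &page_fault);
 *	set_system_gate(0x80, &system_call);	INT 0x80 from user mode
 */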
#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
	*((gate_addr)+1) = ((base) & 0xff000000) | \
		(((base) & 0x00ff0000)>>16) | \
		((limit) & 0xf0000) | \
		((dpl)<<13) | \
		(0x00408000) | \
		((type)<<8); \
	*(gate_addr) = (((base) & 0x0000ffff)<<16) | \
		((limit) & 0x0ffff); }
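
/*
 * Decoding the magic constant (a reading aid, not extra code): in the
 * high dword, 0x00408000 sets bit 15 (P, segment present) and bit 22
 * (D/B, 32-bit default operand size); the surrounding ORs slot base
 * 31:24 and 23:16, limit 19:16, the DPL and the type into their
 * architectural positions.
 */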
#define _set_tssldt_desc(n,addr,limit,type) \
__asm__ __volatile__ ("movw %3,0(%2)\n\t" \
	"movw %%ax,2(%2)\n\t" \
	"rorl $16,%%eax\n\t" \
	"movb %%al,4(%2)\n\t" \
	"movb %4,5(%2)\n\t" \
	"movb $0,6(%2)\n\t" \
	"movb %%ah,7(%2)\n\t" \
	"rorl $16,%%eax" \
	: "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
#define set_tss_desc(n,addr) \
	_set_tssldt_desc(((char *) (n)),((int)(addr)),235,0x89)
#define set_ldt_desc(n,addr,size) \
	_set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),0x82)
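
/*
 * Setup sketch (roughly what the early init code does; the real call
 * sites live outside this header): type 0x89 marks an available 32-bit
 * TSS, 0x82 an LDT, and 235 is the TSS byte limit this kernel uses.
 *
 *	set_tss_desc(gdt + FIRST_TSS_ENTRY, &init_task.tss);
 *	set_ldt_desc(gdt + FIRST_LDT_ENTRY, &default_ldt, 1);
 */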
/*
 * This is the ldt that every process will get unless we need
 * something other than this.
 */
extern struct desc_struct default_ldt;
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
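
/*
 * Usage sketch (the floppy driver is the classic caller): bracket an
 * operation that cannot tolerate the wakeup latency of the idle loop's
 * hlt instruction.
 *
 *	disable_hlt();
 *	...start the transfer and sleep until the interrupt...
 *	enable_hlt();
 */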
#endif