4 #include <linux/kernel.h>
5 #include <asm/segment.h>
/*
 * Incomplete (forward) declaration: this header only passes pointers to
 * task_struct around, so the full definition is not required here.
 */
struct task_struct
; /* one of the stranger aspects of C forward declarations.. */

/*
 * __switch_to(): out-of-line part of the context switch, invoked from
 * the switch_to() macro below.  FASTCALL marks it as taking 'prev' and
 * 'next' in registers rather than on the stack.
 */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
/*
 * switch_to(prev,next,last): inline part of the i386 context switch.
 * Saves the current ESP and a resume EIP into prev->tss, loads the new
 * stack pointer from next->tss.esp and pushes next->tss.eip as the
 * resume address; 'prev' travels in %eax and 'next' in %edx (see the
 * "a"/"d" input constraints).
 *
 * NOTE(review): this extraction is missing several interior lines of
 * the macro (remaining register pushes/pops, the transfer to
 * __switch_to, the "1:" resume label, the output operand for 'last',
 * and the closing "} while (0)") -- compare against the complete
 * header before relying on this text.
 */
#define switch_to(prev,next,last) do { \
	asm volatile("pushl %%esi\n\t" \
		"movl %%esp,%0\n\t" /* save ESP */ \
		"movl %3,%%esp\n\t" /* restore ESP */ \
		"movl $1f,%1\n\t" /* save EIP */ \
		"pushl %4\n\t" /* restore EIP */ \
		:"=m" (prev->tss.esp),"=m" (prev->tss.eip), \
		:"m" (next->tss.esp),"m" (next->tss.eip), \
		"a" (prev), "d" (next), \
/*
 * _set_base(addr,base): store the 32-bit 'base' into the descriptor
 * bytes at 'addr'.  The base field of a segment descriptor is not
 * contiguous, hence the word store followed by "rorl $16" to reach the
 * high half.
 *
 * NOTE(review): the tail of this macro (the remaining byte stores, the
 * operand lists and the closing "} while (0)") is missing from this
 * extraction.
 */
#define _set_base(addr,base) do { unsigned long __pr; \
	__asm__ __volatile__ ("movw %%dx,%1\n\t" \
		"rorl $16,%%edx\n\t" \
/*
 * _set_limit(addr,limit): store the 20-bit 'limit' into the descriptor
 * bytes at 'addr'.  As with _set_base, the field is split: the word
 * store covers the low 16 bits, and the "andb $0xf0,%%dh" preserves
 * the flag nibble that shares the high limit byte.
 *
 * NOTE(review): the remainder of this macro is missing from this
 * extraction.
 */
#define _set_limit(addr,limit) do { unsigned long __lr; \
	__asm__ __volatile__ ("movw %%dx,%1\n\t" \
		"rorl $16,%%edx\n\t" \
		"andb $0xf0,%%dh\n\t" \
/*
 * Convenience wrappers taking a descriptor lvalue rather than a byte
 * pointer.  set_limit() converts a size in bytes into the page-granular
 * "limit" encoding the descriptor expects: ((limit)-1)>>12.
 */
#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
/*
 * _get_base(addr): read the segment base back out of the descriptor
 * bytes at 'addr' (inverse of _set_base).
 *
 * NOTE(review): most of this function's body is missing from this
 * extraction -- only the first asm instruction of the gather sequence
 * survives; see the full header.
 */
static inline unsigned long _get_base(char * addr)
__asm__("movb %3,%%dh\n\t"

/* get_base(): descriptor-lvalue wrapper around _get_base(). */
#define get_base(ldt) _get_base( ((char *)&(ldt)) )
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
/*
 * loadsegment(seg,value): load 'value' into segment register 'seg'
 * (the register name is pasted in via the # stringize operator), with
 * a .fixup/.  __ex_table entry so that a faulting load falls back to
 * loading the zero segment instead of crashing.
 *
 * NOTE(review): several interior lines (labels, the fixup push, the
 * exception-table entry, section pops) are missing from this
 * extraction.
 */
#define loadsegment(seg,value) \
	"movl %0,%%" #seg "\n" \
	".section .fixup,\"ax\"\n" \
	"popl %%" #seg "\n\t" \
	".section __ex_table,\"a\"\n\t" \
	: :"m" (*(unsigned int *)&(value)))
/*
 * Clear and set 'TS' bit respectively
 */
/* clts(): execute "clts" to clear the TS (task-switched) bit in CR0. */
#define clts() __asm__ __volatile__ ("clts")

/*
 * read_cr0(): statement-expression returning the current value of
 * control register CR0.
 *
 * NOTE(review): the asm statement's opening line and the closing
 * operand list / "__dummy; })" are missing from this extraction.
 */
#define read_cr0() ({ \
	unsigned int __dummy; \
	"movl %%cr0,%0\n\t" \
/*
 * write_cr0(x): load the 32-bit value 'x' into control register CR0.
 *
 * Fix: the original expansion ended in a semicolon, so a call such as
 * "if (cond) write_cr0(v); else ..." expanded to two statements and
 * failed to compile (and bare "stts()" left a stray empty statement).
 * The semicolon is now supplied by the caller, making the macro behave
 * like an ordinary expression statement.
 */
#define write_cr0(x) \
	__asm__("movl %0,%%cr0": :"r" (x))

/*
 * stts(): set the TS bit (bit 3, "task switched") in CR0, so the next
 * FPU instruction traps; pairs with clts() above.
 */
#define stts() write_cr0(8 | read_cr0())
112 #endif /* __KERNEL__ */
/*
 * get_limit(segment): return the limit of the segment selected by
 * 'segment' -- presumably via the "lsl" instruction, but the asm body
 * is missing from this extraction (only the declaration and operand
 * lists survive), so confirm against the full header.
 */
static inline unsigned long get_limit(unsigned long segment)
	unsigned long __limit;
	:"=r" (__limit):"r" (segment));
/* nop(): emit a single no-op instruction. */
#define nop() __asm__ __volatile__ ("nop")

/*
 * xchg(ptr,x): atomically exchange *ptr with x and return the old
 * value, dispatching on sizeof(*ptr) via __xchg() below.  tas(ptr) is
 * test-and-set: atomically store 1 and return the previous contents.
 */
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

/*
 * __xchg_dummy / __xg(): cast trick so the "m" constraint in __xchg()
 * is typed as a large (100-word) object, keeping gcc from making
 * assumptions about the operand's size.
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 */
/*
 * __xchg(x, ptr, size): size-dispatched exchange used by the xchg()
 * macro; returns the previous value of *ptr.  No "lock" prefix is
 * needed because the xchg instruction always implies one (see the
 * comment above).  %b0/%w0 select the byte/word sub-registers for the
 * 1- and 2-byte cases.
 *
 * NOTE(review): the "switch (size)" framework, the "=q"/"=r" output
 * operands, the break statements and the final "return x;" are missing
 * from this extraction -- only the three asm statements survive.
 */
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
	__asm__("xchgb %b0,%1"
		:"m" (*__xg(ptr)), "0" (x)
	__asm__("xchgw %w0,%1"
		:"m" (*__xg(ptr)), "0" (x)
	__asm__("xchgl %0,%1"
		:"m" (*__xg(ptr)), "0" (x)
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 */
/*
 * mb(): full memory barrier.  A locked read-modify-write on the top of
 * the stack serializes prior loads and stores, and the "memory"
 * clobber also stops the compiler from reordering across it.
 */
#define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
/*
 * wmb(): compiler-only barrier.  Per the comment above, these CPUs
 * make writes visible in program order, so only gcc reordering needs
 * to be suppressed -- the asm body is intentionally empty.
 */
#define wmb() __asm__ __volatile__ ("": : :"memory")
/* interrupt control.. */
/* __sti()/__cli(): enable/disable maskable interrupts on this CPU. */
#define __sti() __asm__ __volatile__ ("sti": : :"memory")
#define __cli() __asm__ __volatile__ ("cli": : :"memory")
/* __save_flags(x): capture EFLAGS into 'x' via pushfl/popl. */
#define __save_flags(x) \
__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
/* __restore_flags(x): write 'x' back into EFLAGS via pushl/popfl. */
#define __restore_flags(x) \
__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
/*
 * Global (machine-wide) interrupt control: the __global_* functions
 * are defined out of line, and cli()/sti()/save_flags()/restore_flags()
 * map onto them here.
 *
 * NOTE(review): the "#ifdef __SMP__ ... #else" that selects between
 * this group and the per-CPU definitions below is missing from this
 * extraction; as shown, the two sets of #defines collide.
 */
extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)
/*
 * Per-CPU variants: interrupt control maps straight onto the local
 * __cli()/__sti()/__save_flags()/__restore_flags() primitives.
 *
 * NOTE(review): the preprocessor conditional separating these from the
 * __global_* definitions above is missing from this extraction.
 */
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
/*
 * disable hlt during certain critical i/o operations
 */
/*
 * disable_hlt()/enable_hlt(): disable and re-enable use of the "hlt"
 * instruction during critical i/o operations (see the comment above);
 * defined out of line.  HAVE_DISABLE_HLT advertises the facility to
 * architecture-independent code.
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);