/*
 * include/asm-i386/desc.h
 *
 * Segment descriptor table (GDT/LDT/TSS) definitions for i386.
 */
1 #ifndef __ARCH_DESC_H
2 #define __ARCH_DESC_H
4 #include <asm/ldt.h>
5 #include <asm/segment.h>
7 #define CPU_16BIT_STACK_SIZE 1024
9 #ifndef __ASSEMBLY__
11 #include <linux/preempt.h>
12 #include <linux/smp.h>
13 #include <linux/percpu.h>
15 #include <asm/mmu.h>
17 extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
18 DECLARE_PER_CPU(struct desc_struct, cpu_gdt_table[GDT_ENTRIES]);
20 DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
/*
 * Memory operand layout required by the lgdt/lidt instructions:
 * a 16-bit table limit immediately followed by a 32-bit linear base.
 * Packed so 'address' follows 'size' with no padding in between.
 */
struct Xgt_desc_struct {
	unsigned short size;
	unsigned long address __attribute__((packed));
	unsigned short pad;
} __attribute__ ((packed));
28 extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
30 #define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
31 #define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))
34 * This is the ldt that every process will get unless we need
35 * something other than this.
37 extern struct desc_struct default_ldt[];
38 extern void set_intr_gate(unsigned int irq, void * addr);
/*
 * Build an 8-byte TSS/LDT descriptor in place at *n:
 *   bytes 0-1: limit[15:0]
 *   bytes 2-3: base[15:0]
 *   byte  4  : base[23:16]
 *   byte  5  : type/access byte (e.g. 0x89 = available 32-bit TSS)
 *   byte  6  : 0 (limit[19:16] and granularity flags)
 *   byte  7  : base[31:24]
 * %eax is rotated twice by 16 so 'addr' is restored on exit.
 */
#define _set_tssldt_desc(n,addr,limit,type) \
__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
	"movw %%ax,2(%2)\n\t" \
	"rorl $16,%%eax\n\t" \
	"movb %%al,4(%2)\n\t" \
	"movb %4,5(%2)\n\t" \
	"movb $0,6(%2)\n\t" \
	"movb %%ah,7(%2)\n\t" \
	"rorl $16,%%eax" \
	: "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
51 static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
53 _set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[entry], (int)addr,
54 offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
57 #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
59 static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
61 _set_tssldt_desc(&per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
/* Low dword of a segment descriptor: base[15:0] and limit[15:0]. */
#define LDT_entry_a(info) \
	((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
67 #define LDT_entry_b(info) \
68 (((info)->base_addr & 0xff000000) | \
69 (((info)->base_addr & 0x00ff0000) >> 16) | \
70 ((info)->limit & 0xf0000) | \
71 (((info)->read_exec_only ^ 1) << 9) | \
72 ((info)->contents << 10) | \
73 (((info)->seg_not_present ^ 1) << 15) | \
74 ((info)->seg_32bit << 22) | \
75 ((info)->limit_in_pages << 23) | \
76 ((info)->useable << 20) | \
77 0x7000)
/*
 * True if @info describes a cleared LDT slot — the exact field
 * values user space passes to modify_ldt() to delete an entry.
 */
#define LDT_empty(info) (\
	(info)->base_addr == 0 && \
	(info)->limit == 0 && \
	(info)->contents == 0 && \
	(info)->read_exec_only == 1 && \
	(info)->seg_32bit == 0 && \
	(info)->limit_in_pages == 0 && \
	(info)->seg_not_present == 1 && \
	(info)->useable == 0 )
89 #if TLS_SIZE != 24
90 # error update this code.
91 #endif
93 static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
95 #define C(i) per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
96 C(0); C(1); C(2);
97 #undef C
100 static inline void clear_LDT(void)
102 int cpu = get_cpu();
104 set_ldt_desc(cpu, &default_ldt[0], 5);
105 load_LDT_desc();
106 put_cpu();
110 * load one particular LDT into the current CPU
112 static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
114 void *segments = pc->ldt;
115 int count = pc->size;
117 if (likely(!count)) {
118 segments = &default_ldt[0];
119 count = 5;
122 set_ldt_desc(cpu, segments, count);
123 load_LDT_desc();
126 static inline void load_LDT(mm_context_t *pc)
128 int cpu = get_cpu();
129 load_LDT_nolock(pc, cpu);
130 put_cpu();
/*
 * Reassemble the 32-bit base address scattered across a descriptor:
 * desc[0] bits 16-31 hold base[15:0]; desc[1] bits 0-7 hold
 * base[23:16]; desc[1] bits 24-31 hold base[31:24].
 */
static inline unsigned long get_desc_base(unsigned long *desc)
{
	unsigned long base;
	base = ((desc[0] >> 16) & 0x0000ffff) |
		((desc[1] << 16) & 0x00ff0000) |
		(desc[1] & 0xff000000);
	return base;
}
142 #endif /* !__ASSEMBLY__ */
144 #endif