#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>

/* migration helpers, for KVM - will be removed in 2.6.25: */
#define Xgt_desc_struct	desc_ptr
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/percpu.h>
#include <asm/desc_defs.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/init.h>
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;
	asm volatile("mov $1f, %0; 1:" : "=r" (pc));
	return pc;
}
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */
struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */
	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
#endif
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			x86_clflush_size;
#ifdef CONFIG_SMP
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_NEXGEN	4
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff
/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cleared_cpu_caps[NCAPINTS];

#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	cpu_data(smp_processor_id())
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif
static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}

#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
extern void cpu_detect(struct cpuinfo_x86 *c);

extern void identify_cpu(struct cpuinfo_x86 *);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;
#if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64)
extern void detect_ht(struct cpuinfo_x86 *c);
#else
static inline void detect_ht(struct cpuinfo_x86 *c) {}
#endif
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}
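
/*
 * Usage sketch (editorial addition, not part of the original header):
 * CPU setup code typically points %cr3 back at the kernel's reference
 * page tables with
 *
 *	load_cr3(swapper_pg_dir);
 *
 * swapper_pg_dir being the kernel's initial pgd.
 */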
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;
} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;
} __attribute__((packed)) ____cacheline_aligned;
#endif
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
#define INVALID_IO_BITMAP_OFFSET_LAZY	0x9000
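
/*
 * Worked numbers (editorial addition, not part of the original header):
 * 65536 I/O ports need 65536/8 = 8192 bytes of bitmap, i.e. 2048 longs
 * on a 32-bit kernel or 1024 longs on a 64-bit kernel.
 */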
struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
	/*
	 * Cache the current maximum and the last task that used the bitmap:
	 */
	unsigned long		io_bitmap_max;
	struct thread_struct	*io_bitmap_owner;
	/*
	 * Pad the TSS to be cacheline-aligned (size is 0x100):
	 */
	unsigned long		__cacheline_filler[35];
	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];
} __attribute__((packed));
DECLARE_PER_CPU(struct tss_struct, init_tss);
/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};
#define MXCSR_DEFAULT		0x1f80
struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word */
	u32			swd;	/* FPU Status Word */
	u32			twd;	/* FPU Tag Word */
	u32			fip;	/* FPU IP Offset */
	u32			fcs;	/* FPU IP Selector */
	u32			foo;	/* FPU Operand Pointer Offset */
	u32			fos;	/* FPU Operand Pointer Selector */

	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]: */
	u32			status;
};
struct i387_fxsave_struct {
	u16			cwd; /* Control Word */
	u16			swd; /* Status Word */
	u16			twd; /* Tag Word */
	u16			fop; /* Last Instruction Opcode */
	union {
		struct {
			u64	rip; /* Instruction Pointer */
			u64	rdp; /* Data Pointer */
		};
		struct {
			u32	fip; /* FPU IP Offset */
			u32	fcs; /* FPU IP Selector */
			u32	foo; /* FPU Operand Offset */
			u32	fos; /* FPU Operand Selector */
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask */

	/* 8*16 bytes for each FP-reg = 128 bytes: */
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes: */
	u32			xmm_space[64];

	u32			padding[24];

} __attribute__((aligned(16)));
struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
};

union i387_union {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
};
DECLARE_PER_CPU(struct orig_ist, orig_ist);
struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
	unsigned long		ip;
	unsigned long		fs;
	unsigned long		gs;
	/* Hardware debugging registers: */
	unsigned long		debugreg0;
	unsigned long		debugreg1;
	unsigned long		debugreg2;
	unsigned long		debugreg3;
	unsigned long		debugreg6;
	unsigned long		debugreg7;
	unsigned long		trap_no;
	unsigned long		error_code;
	/* Floating point info: */
	union i387_union	i387 __attribute__((aligned(16)));
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
	/* MSR_IA32_DEBUGCTLMSR value to switch in if TIF_DEBUGCTLMSR is set: */
	unsigned long		debugctlmsr;
	/*
	 * Debug Store - if not 0 points to a DS Save Area configuration;
	 * goes into MSR_IA32_DS_AREA:
	 */
	unsigned long		ds_area_msr;
};
static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" :"=r" (val)); break;
	case 1:
		asm("mov %%db1, %0" :"=r" (val)); break;
	case 2:
		asm("mov %%db2, %0" :"=r" (val)); break;
	case 3:
		asm("mov %%db3, %0" :"=r" (val)); break;
	case 6:
		asm("mov %%db6, %0" :"=r" (val)); break;
	case 7:
		asm("mov %%db7, %0" :"=r" (val)); break;
	default:
		BUG();
	}
	return val;
}
static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" ::"r" (value)); break;
	case 1:
		asm("mov %0, %%db1" ::"r" (value)); break;
	case 2:
		asm("mov %0, %%db2" ::"r" (value)); break;
	case 3:
		asm("mov %0, %%db3" ::"r" (value)); break;
	case 6:
		asm("mov %0, %%db6" ::"r" (value)); break;
	case 7:
		asm("mov %0, %%db7" ::"r" (value)); break;
	default:
		BUG();
	}
}
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	__asm__ __volatile__ ("pushfl; popl %0;"
			      "andl %1, %0; orl %2, %0;"
			      "pushl %0; popfl"
			      : "=&r" (reg)
			      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}

static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)

static inline void
load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask		native_set_iopl_mask
#define SWAPGS			swapgs
#endif /* CONFIG_PARAVIRT */
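
/*
 * Usage sketch (editorial addition, not part of the original header).  It
 * shows the argument order of the get/set_debugreg() macros above; the
 * function name and the DR7 manipulation are purely illustrative.
 */
static inline void __debugreg_usage_sketch(void)
{
	unsigned long dr7;

	get_debugreg(dr7, 7);			/* read %db7 into dr7 */
	set_debugreg(dr7 & ~0xffUL, 7);		/* clear the L0-L3/G0-G3 enable bits */
}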
/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long		mmu_cr4_features;
static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
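
/*
 * Usage sketch (editorial addition, not part of the original header):
 * CR4 feature bits from <asm/processor-flags.h> are switched on or off
 * through these helpers during CPU setup, for example
 * set_in_cr4(X86_CR4_OSFXSR) to enable FXSAVE/FXRSTOR and SSE, or
 * clear_in_cr4(X86_CR4_PCE) to forbid RDPMC from user mode.
 */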
struct microcode_header {
	unsigned int		hdrver;
	unsigned int		rev;
	unsigned int		date;
	unsigned int		sig;
	unsigned int		cksum;
	unsigned int		ldrver;
	unsigned int		pf;
	unsigned int		datasize;
	unsigned int		totalsize;
	unsigned int		reserved[3];
};

struct microcode {
	struct microcode_header	hdr;
	unsigned int		bits[0];
};

typedef struct microcode	microcode_t;
typedef struct microcode_header	microcode_header_t;
/* microcode format is extended from Prescott processors */
struct extended_signature {
	unsigned int		sig;
	unsigned int		pf;
	unsigned int		cksum;
};

struct extended_sigtable {
	unsigned int		count;
	unsigned int		cksum;
	unsigned int		reserved[3];
	struct extended_signature sigs[0];
};
/*
 * Create a kernel thread without removing it from tasklists.
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);
/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
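
/*
 * Usage sketch (editorial addition, not part of the original header):
 * CPUID leaf 4 takes a sub-leaf index in %ecx, so enumerating the cache
 * levels of an Intel CPU looks roughly like this.  The function name is
 * purely illustrative.
 */
static inline int __count_cache_leaves_sketch(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i = 0;

	do {
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		i++;
	} while (eax & 0x1f);		/* cache type field 0 = no more caches */

	return i - 1;
}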
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
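
/*
 * Usage sketch (editorial addition, not part of the original header):
 * pulling the base CPU family out of CPUID leaf 1.  The helper name is
 * purely illustrative.
 */
static inline unsigned int __cpuid_base_family_sketch(void)
{
	unsigned int eax = cpuid_eax(1);	/* leaf 1: version information */

	return (eax >> 8) & 0xf;		/* family field, bits 11:8 */
}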
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}
/* Stop speculative execution: */
static inline void sync_core(void)
{
	int tmp;

	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}
static inline void
__monitor(const void *eax, unsigned long ecx, unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d" (edx));
}
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
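
/*
 * Usage sketch (editorial addition, not part of the original header): the
 * classic MONITOR/MWAIT wait built from the helpers above.  "flag" is a
 * hypothetical memory location the CPU is armed to watch; a store to it
 * (or an interrupt) ends the MWAIT.  Hint 0 in %eax requests C1.
 */
static inline void __mwait_wait_on_sketch(volatile unsigned long *flag)
{
	__monitor((const void *)flag, 0, 0);	/* arm the address-range monitor */
	if (!*flag)				/* re-check after arming */
		__mwait(0, 0);			/* sleep until write or interrupt */
}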
extern int force_mwait;

extern void select_idle_routine(const struct cpuinfo_x86 *c);

extern unsigned long boot_option_idle_override;

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);
/* Defined in head.S */
extern struct desc_ptr early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(void);
extern void cpu_init(void);
extern void init_gdt(int cpu);
/*
 * From system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int machine_id;
extern unsigned int machine_submodel_id;
extern unsigned int BIOS_revision;
/* Boot loader type from the setup header: */
extern int bootloader_type;

extern char ignore_fpu_irq;
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif
/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {						\
	.sp0		= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info	= NULL,					\
	.sysenter_cs	= __KERNEL_CS,				\
	.io_bitmap_ptr	= NULL,					\
	.fs		= __KERNEL_PERCPU,			\
}
/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {						\
	.x86_tss = {						\
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,			\
		.ss1		= __KERNEL_CS,			\
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,	\
	},							\
	.io_bitmap	= { [0 ... IO_BITMAP_LONGS] = ~0 },	\
}
extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})
/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same privilege ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)						\
({									\
	struct pt_regs *__regs__;					\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;							\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)
#else
/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE64	(0x800000000000UL - 4096)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE64)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE64

#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk)		-1 /* sorry. doesn't work for syscall. */
#endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
			 unsigned long new_sp);
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
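
/*
 * Worked example (editorial addition, not part of the original header):
 * with the default 32-bit TASK_SIZE of 3GB (0xc0000000) this places
 * TASK_UNMAPPED_BASE at the page-aligned 1GB mark, 0x40000000.
 */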
#define KSTK_EIP(task)		(task_pt_regs(task)->ip)

#endif /* __ASM_X86_PROCESSOR_H */