#ifndef _ASM_X86_PROCESSOR_H
#define _ASM_X86_PROCESSOR_H

#include <asm/processor-flags.h>
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
#include <asm/vm86.h>
#include <asm/math_emu.h>
#include <asm/segment.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/current.h>
#include <asm/cpufeature.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
#include <asm/msr.h>
#include <asm/desc_defs.h>
#include <asm/nops.h>

#include <linux/personality.h>
#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
#include <linux/init.h>
#include <linux/err.h>
#define HBP_NUM 4

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
static inline void *current_text_addr(void)
{
	void *pc;

	asm volatile("mov $1f, %0; 1:" : "=r" (pc));

	return pc;
}
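/*
 * Illustrative use (editor's sketch, not part of the original header):
 *
 *	pr_info("executing near %p\n", current_text_addr());
 */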
#ifdef CONFIG_X86_VSMP
# define ARCH_MIN_TASKALIGN		(1 << INTERNODE_CACHE_SHIFT)
# define ARCH_MIN_MMSTRUCT_ALIGN	(1 << INTERNODE_CACHE_SHIFT)
#else
# define ARCH_MIN_TASKALIGN		16
# define ARCH_MIN_MMSTRUCT_ALIGN	0
#endif
/*
 * CPU type and hardware bug flags. Kept separately for each CPU.
 * Members of this structure are referenced in head.S, so think twice
 * before touching them. [mj]
 */

struct cpuinfo_x86 {
	__u8			x86;		/* CPU family */
	__u8			x86_vendor;	/* CPU vendor */
	__u8			x86_model;
	__u8			x86_mask;
#ifdef CONFIG_X86_32
	char			wp_works_ok;	/* It doesn't on 386's */

	/* Problems on some 486Dx4's and old 386's: */
	char			hlt_works_ok;
	char			hard_math;
	char			rfu;
	char			fdiv_bug;
	char			f00f_bug;
	char			coma_bug;
	char			pad0;
#else
	/* Number of 4K pages in DTLB/ITLB combined (in pages): */
	int			x86_tlbsize;
#endif
	__u8			x86_virt_bits;
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];
	char			x86_vendor_id[16];
	char			x86_model_id[64];
	/* in KB - valid for CPUs which support this call: */
	int			x86_cache_size;
	int			x86_cache_alignment;	/* In bytes */
	int			x86_power;
	unsigned long		loops_per_jiffy;
#ifdef CONFIG_SMP
	/* cpus sharing the last level cache: */
	cpumask_var_t		llc_shared_map;
#endif
	/* cpuid returned max cores value: */
	u16			x86_max_cores;
	u16			apicid;
	u16			initial_apicid;
	u16			x86_clflush_size;
#ifdef CONFIG_SMP
	/* number of cores as seen by the OS: */
	u16			booted_cores;
	/* Physical processor id: */
	u16			phys_proc_id;
	/* Core id: */
	u16			cpu_core_id;
	/* Index into per_cpu list: */
	u16			cpu_index;
#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL	0
#define X86_VENDOR_CYRIX	1
#define X86_VENDOR_AMD		2
#define X86_VENDOR_UMC		3
#define X86_VENDOR_CENTAUR	5
#define X86_VENDOR_TRANSMETA	7
#define X86_VENDOR_NSC		8
#define X86_VENDOR_NUM		9

#define X86_VENDOR_UNKNOWN	0xff
/*
 * capabilities of CPUs
 */
extern struct cpuinfo_x86	boot_cpu_data;
extern struct cpuinfo_x86	new_cpu_data;

extern struct tss_struct	doublefault_tss;
extern __u32			cpu_caps_cleared[NCAPINTS];
extern __u32			cpu_caps_set[NCAPINTS];
#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	__get_cpu_var(cpu_info)
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif
extern const struct seq_operations cpuinfo_op;

static inline int hlt_works(int cpu)
{
#ifdef CONFIG_X86_32
	return cpu_data(cpu).hlt_works_ok;
#else
	return 1;
#endif
}
#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
extern void cpu_detect(struct cpuinfo_x86 *c);

extern struct pt_regs *idle_regs(struct pt_regs *);

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
extern void print_cpu_info(struct cpuinfo_x86 *);
extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern unsigned short num_cache_leaves;

extern void detect_extended_topology(struct cpuinfo_x86 *c);
extern void detect_ht(struct cpuinfo_x86 *c);
static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
				unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile("cpuid"
	    : "=a" (*eax),
	      "=b" (*ebx),
	      "=c" (*ecx),
	      "=d" (*edx)
	    : "0" (*eax), "2" (*ecx));
}
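/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * CPUID leaf 0 returns the vendor string in ebx:edx:ecx. The hypothetical
 * helper below shows how native_cpuid() is typically driven.
 */
#if 0	/* example only */
static inline void example_cpuid_vendor(char vendor[13])
{
	unsigned int eax = 0, ebx, ecx, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);
	memcpy(vendor + 0, &ebx, 4);	/* "Genu" / "Auth" ... */
	memcpy(vendor + 4, &edx, 4);	/* "ineI" / "enti" ... */
	memcpy(vendor + 8, &ecx, 4);	/* "ntel" / "cAMD" ... */
	vendor[12] = '\0';
}
#endif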
static inline void load_cr3(pgd_t *pgdir)
{
	write_cr3(__pa(pgdir));
}
#ifdef CONFIG_X86_32
/* This is the TSS defined by the hardware. */
struct x86_hw_tss {
	unsigned short		back_link, __blh;
	unsigned long		sp0;
	unsigned short		ss0, __ss0h;
	unsigned long		sp1;
	/* ss1 caches MSR_IA32_SYSENTER_CS: */
	unsigned short		ss1, __ss1h;
	unsigned long		sp2;
	unsigned short		ss2, __ss2h;
	unsigned long		__cr3;
	unsigned long		ip;
	unsigned long		flags;
	unsigned long		ax;
	unsigned long		cx;
	unsigned long		dx;
	unsigned long		bx;
	unsigned long		sp;
	unsigned long		bp;
	unsigned long		si;
	unsigned long		di;
	unsigned short		es, __esh;
	unsigned short		cs, __csh;
	unsigned short		ss, __ssh;
	unsigned short		ds, __dsh;
	unsigned short		fs, __fsh;
	unsigned short		gs, __gsh;
	unsigned short		ldt, __ldth;
	unsigned short		trace;
	unsigned short		io_bitmap_base;

} __attribute__((packed));
#else
struct x86_hw_tss {
	u32			reserved1;
	u64			sp0;
	u64			sp1;
	u64			sp2;
	u64			reserved2;
	u64			ist[7];
	u32			reserved3;
	u32			reserved4;
	u16			reserved5;
	u16			io_bitmap_base;

} __attribute__((packed)) ____cacheline_aligned;
#endif
#define IO_BITMAP_BITS			65536
#define IO_BITMAP_BYTES			(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS			(IO_BITMAP_BYTES/sizeof(long))
#define IO_BITMAP_OFFSET		offsetof(struct tss_struct, io_bitmap)
#define INVALID_IO_BITMAP_OFFSET	0x8000
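/*
 * Editor's note (illustrative): 65536 ports / 8 = 8192 bytes, i.e. 1024
 * longs on 64-bit and 2048 on 32-bit. A port a task may access via
 * ioperm() has its bit *cleared* in the bitmap; set bits trap to #GP.
 */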
struct tss_struct {
	/*
	 * The hardware state:
	 */
	struct x86_hw_tss	x86_tss;

	/*
	 * The extra 1 is there because the CPU will access an
	 * additional byte beyond the end of the IO permission
	 * bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];

	/*
	 * .. and then another 0x100 bytes for the emergency kernel stack:
	 */
	unsigned long		stack[64];

} ____cacheline_aligned;
DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
/*
 * Save the original ist values for checking stack pointers during debugging
 */
struct orig_ist {
	unsigned long		ist[7];
};
#define MXCSR_DEFAULT		0x1f80
struct i387_fsave_struct {
	u32			cwd;	/* FPU Control Word		*/
	u32			swd;	/* FPU Status Word		*/
	u32			twd;	/* FPU Tag Word			*/
	u32			fip;	/* FPU IP Offset		*/
	u32			fcs;	/* FPU IP Selector		*/
	u32			foo;	/* FPU Operand Pointer Offset	*/
	u32			fos;	/* FPU Operand Pointer Selector	*/

	/* 8*10 bytes for each FP-reg = 80 bytes:			*/
	u32			st_space[20];

	/* Software status information [not touched by FSAVE]:		*/
	u32			status;
};
struct i387_fxsave_struct {
	u16			cwd; /* Control Word			*/
	u16			swd; /* Status Word			*/
	u16			twd; /* Tag Word			*/
	u16			fop; /* Last Instruction Opcode		*/
	union {
		struct {
			u64	rip; /* Instruction Pointer		*/
			u64	rdp; /* Data Pointer			*/
		};
		struct {
			u32	fip; /* FPU IP Offset			*/
			u32	fcs; /* FPU IP Selector			*/
			u32	foo; /* FPU Operand Offset		*/
			u32	fos; /* FPU Operand Selector		*/
		};
	};
	u32			mxcsr;		/* MXCSR Register State */
	u32			mxcsr_mask;	/* MXCSR Mask		*/

	/* 8*16 bytes for each FP-reg = 128 bytes:			*/
	u32			st_space[32];

	/* 16*16 bytes for each XMM-reg = 256 bytes:			*/
	u32			xmm_space[64];

	u32			padding[12];

	union {
		u32		padding1[12];
		u32		sw_reserved[12];
	};

} __attribute__((aligned(16)));
struct i387_soft_struct {
	u32			cwd;
	u32			swd;
	u32			twd;
	u32			fip;
	u32			fcs;
	u32			foo;
	u32			fos;
	/* 8*10 bytes for each FP-reg = 80 bytes: */
	u32			st_space[20];
	u8			ftop;
	u8			changed;
	u8			lookahead;
	u8			no_update;
	u8			rm;
	u8			alimit;
	struct math_emu_info	*info;
	u32			entry_eip;
};
struct ymmh_struct {
	/* 16 * 16 bytes for each YMMH-reg = 256 bytes */
	u32 ymmh_space[64];
};
struct xsave_hdr_struct {
	u64 xstate_bv;
	u64 reserved1[2];
	u64 reserved2[5];
} __attribute__((packed));
struct xsave_struct {
	struct i387_fxsave_struct i387;
	struct xsave_hdr_struct xsave_hdr;
	struct ymmh_struct ymmh;
	/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));
union thread_xstate {
	struct i387_fsave_struct	fsave;
	struct i387_fxsave_struct	fxsave;
	struct i387_soft_struct		soft;
	struct xsave_struct		xsave;
};

struct fpu {
	union thread_xstate *state;
};
#ifdef CONFIG_X86_64
DECLARE_PER_CPU(struct orig_ist, orig_ist);

union irq_stack_union {
	char irq_stack[IRQ_STACK_SIZE];
	/*
	 * GCC hardcodes the stack canary as %gs:40. Since the
	 * irq_stack is the object at %gs:0, we reserve the bottom
	 * 48 bytes of the irq stack for the canary.
	 */
	struct {
		char gs_base[40];
		unsigned long stack_canary;
	};
};

DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
DECLARE_INIT_PER_CPU(irq_stack_union);

DECLARE_PER_CPU(char *, irq_stack_ptr);
DECLARE_PER_CPU(unsigned int, irq_count);
extern unsigned long kernel_eflags;
extern asmlinkage void ignore_sysret(void);
#else	/* X86_64 */
#ifdef CONFIG_CC_STACKPROTECTOR
/*
 * Make sure the stack canary segment base is cache-aligned:
 *   "For Intel Atom processors, avoid non zero segment base address
 *    that is not aligned to cache line boundary at all cost."
 * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
 */
struct stack_canary {
	char __pad[20];		/* canary at %gs:20 */
	unsigned long canary;
};
DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
#endif
#endif	/* X86_64 */
extern unsigned int xstate_size;
extern void free_thread_xstate(struct task_struct *);
extern struct kmem_cache *task_xstate_cachep;

struct perf_event;
struct thread_struct {
	/* Cached TLS descriptors: */
	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
	unsigned long		sp0;
	unsigned long		sp;
#ifdef CONFIG_X86_32
	unsigned long		sysenter_cs;
#else
	unsigned long		usersp;	/* Copy from PDA */
	unsigned short		es;
	unsigned short		ds;
	unsigned short		fsindex;
	unsigned short		gsindex;
#endif
#ifdef CONFIG_X86_32
	unsigned long		ip;
#endif
#ifdef CONFIG_X86_64
	unsigned long		fs;
#endif
	unsigned long		gs;
	/* Save middle states of ptrace breakpoints */
	struct perf_event	*ptrace_bps[HBP_NUM];
	/* Debug status used for traps, single steps, etc... */
	unsigned long		debugreg6;
	/* Keep track of the exact dr7 value set by the user */
	unsigned long		ptrace_dr7;
	/* Fault info: */
	unsigned long		cr2;
	unsigned long		trap_no;
	unsigned long		error_code;
	/* floating point and extended processor state */
	struct fpu		fpu;
#ifdef CONFIG_X86_32
	/* Virtual 86 mode info */
	struct vm86_struct __user *vm86_info;
	unsigned long		screen_bitmap;
	unsigned long		v86flags;
	unsigned long		v86mask;
	unsigned long		saved_sp0;
	unsigned int		saved_fs;
	unsigned int		saved_gs;
#endif
	/* IO permissions: */
	unsigned long		*io_bitmap_ptr;
	unsigned long		iopl;
	/* Max allowed port in the bitmap, in bytes: */
	unsigned		io_bitmap_max;
};
static inline unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0;	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("mov %%db0, %0" : "=r" (val));
		break;
	case 1:
		asm("mov %%db1, %0" : "=r" (val));
		break;
	case 2:
		asm("mov %%db2, %0" : "=r" (val));
		break;
	case 3:
		asm("mov %%db3, %0" : "=r" (val));
		break;
	case 6:
		asm("mov %%db6, %0" : "=r" (val));
		break;
	case 7:
		asm("mov %%db7, %0" : "=r" (val));
		break;
	default:
		BUG();
	}
	return val;
}
static inline void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("mov %0, %%db0" :: "r" (value));
		break;
	case 1:
		asm("mov %0, %%db1" :: "r" (value));
		break;
	case 2:
		asm("mov %0, %%db2" :: "r" (value));
		break;
	case 3:
		asm("mov %0, %%db3" :: "r" (value));
		break;
	case 6:
		asm("mov %0, %%db6" :: "r" (value));
		break;
	case 7:
		asm("mov %0, %%db7" :: "r" (value));
		break;
	default:
		BUG();
	}
}
/*
 * Set IOPL bits in EFLAGS from given mask
 */
static inline void native_set_iopl_mask(unsigned mask)
{
#ifdef CONFIG_X86_32
	unsigned int reg;

	asm volatile ("pushfl;"
		      "popl %0;"
		      "andl %1, %0;"
		      "orl %2, %0;"
		      "pushl %0;"
		      "popfl"
		      : "=&r" (reg)
		      : "i" (~X86_EFLAGS_IOPL), "r" (mask));
#endif
}
static inline void
native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
{
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}
static inline void native_swapgs(void)
{
#ifdef CONFIG_X86_64
	asm volatile("swapgs" ::: "memory");
#endif
}
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __cpuid			native_cpuid
#define paravirt_enabled()	0

/*
 * These special macros can be used to get or set a debugging register
 */
#define get_debugreg(var, register)				\
	(var) = native_get_debugreg(register)
#define set_debugreg(value, register)				\
	native_set_debugreg(register, value)
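/*
 * Illustrative sketch (editor's addition): reading DR6 and arming DR0
 * through the accessors; "addr" is a hypothetical breakpoint address.
 */
#if 0	/* example only */
	unsigned long dr6;

	get_debugreg(dr6, 6);	/* dr6 = %db6 (debug status) */
	set_debugreg(addr, 0);	/* %db0 = addr (breakpoint address) */
#endif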
static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	native_load_sp0(tss, thread);
}

#define set_iopl_mask		native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
/*
 * Save the cr4 feature set we're using (i.e. Pentium 4MB enable and
 * PPro Global page enable), so that any CPUs that boot up after us
 * can get the correct flags.
 */
extern unsigned long mmu_cr4_features;

static inline void set_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features |= mask;
	cr4 = read_cr4();
	cr4 |= mask;
	write_cr4(cr4);
}

static inline void clear_in_cr4(unsigned long mask)
{
	unsigned long cr4;

	mmu_cr4_features &= ~mask;
	cr4 = read_cr4();
	cr4 &= ~mask;
	write_cr4(cr4);
}
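/*
 * Illustrative (editor's note): the boot CPU enables a feature with e.g.
 * set_in_cr4(X86_CR4_PGE); secondary CPUs then inherit the accumulated
 * flag set from mmu_cr4_features when they come up.
 */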
/*
 * create a kernel thread without removing it from tasklists
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy state */
extern void prepare_to_copy(struct task_struct *tsk);

unsigned long get_wchan(struct task_struct *p);
/*
 * Generic CPUID function
 * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
 * resulting in stale register contents being returned.
 */
static inline void cpuid(unsigned int op,
			 unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = 0;
	__cpuid(eax, ebx, ecx, edx);
}
/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return eax;
}

static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ebx;
}

static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return ecx;
}

static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid(op, &eax, &ebx, &ecx, &edx);

	return edx;
}
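/*
 * Illustrative sketch (editor's addition): leaf 1 packs the base family
 * in EAX[11:8] and the base model in EAX[7:4] (extended family/model
 * bits ignored here).
 */
#if 0	/* example only */
	unsigned int sig    = cpuid_eax(1);
	unsigned int family = (sig >> 8) & 0xf;
	unsigned int model  = (sig >> 4) & 0xf;
#endif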
/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}
/* Stop speculative execution and prefetching of modified code. */
static inline void sync_core(void)
{
	int tmp;

#if defined(CONFIG_M386) || defined(CONFIG_M486)
	if (boot_cpu_data.x86 < 5)
		/* There is no speculative execution.
		 * jmp is a barrier to prefetching. */
		asm volatile("jmp 1f\n1:\n" ::: "memory");
	else
#endif
		/* cpuid is a barrier to speculative execution.
		 * Prefetched instructions are automatically
		 * invalidated when modified. */
		asm volatile("cpuid" : "=a" (tmp) : "0" (1)
			     : "ebx", "ecx", "edx", "memory");
}
static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax, %ecx, %edx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc8;"
		     :: "a" (eax), "c" (ecx), "d"(edx));
}
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax, %ecx;" */
	asm volatile(".byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
	trace_hardirqs_on();
	/* "mwait %eax, %ecx;" */
	asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
		     :: "a" (eax), "c" (ecx));
}
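/*
 * Illustrative sketch (editor's addition): the canonical MONITOR/MWAIT
 * wait pattern -- arm the monitor on a flag's cache line, re-check the
 * flag to close the race, then MWAIT until the line is written or an
 * interrupt arrives. "flag" is hypothetical.
 */
#if 0	/* example only */
	__monitor(&flag, 0, 0);	/* arm the address-range monitor */
	if (!flag)		/* re-check before sleeping */
		__mwait(0, 0);	/* hints: C1, no extensions */
#endif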
extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);

extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_c1e_mask(void);
extern unsigned long		boot_option_idle_override;
extern unsigned long		idle_halt;
extern unsigned long		idle_nomwait;
extern bool			c1e_detected;

extern void enable_sep_cpu(void);
extern int sysenter_setup(void);

extern void early_trap_init(void);
/* Defined in head.S */
extern struct desc_ptr		early_gdt_descr;

extern void cpu_set_gdt(int);
extern void switch_to_new_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
static inline unsigned long get_debugctlmsr(void)
{
	unsigned long debugctlmsr = 0;

#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return 0;
#endif
	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);

	return debugctlmsr;
}
static inline void update_debugctlmsr(unsigned long debugctlmsr)
{
#ifndef CONFIG_X86_DEBUGCTLMSR
	if (boot_cpu_data.x86 < 6)
		return;
#endif
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr);
}
/*
 * from system description table in BIOS. Mostly for MCA use, but
 * others may find it useful:
 */
extern unsigned int		machine_id;
extern unsigned int		machine_submodel_id;
extern unsigned int		BIOS_revision;
/* Boot loader type from the setup header: */
extern int			bootloader_type;
extern int			bootloader_version;

extern char			ignore_fpu_irq;
#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
# define BASE_PREFETCH		ASM_NOP4
# define ARCH_HAS_PREFETCH
#else
# define BASE_PREFETCH		"prefetcht0 (%1)"
#endif
/*
 * Prefetch instructions for Pentium III (+) and AMD Athlon (+)
 *
 * It's not worth caring about 3dnow prefetches for the K6
 * because they are microcoded there and very slow.
 */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
/*
 * 3dnow prefetch to get an exclusive cache line.
 * Useful for spinlocks to avoid one state transition in the
 * cache coherency protocol:
 */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}
#ifdef CONFIG_X86_32
/*
 * User space process size: 3GB (default).
 */
#define TASK_SIZE		PAGE_OFFSET
#define TASK_SIZE_MAX		TASK_SIZE
#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		STACK_TOP

#define INIT_THREAD  {							  \
	.sp0			= sizeof(init_stack) + (long)&init_stack, \
	.vm86_info		= NULL,					  \
	.sysenter_cs		= __KERNEL_CS,				  \
	.io_bitmap_ptr		= NULL,					  \
}
/*
 * Note that the .io_bitmap member must be extra-big. This is because
 * the CPU will access an additional byte beyond the end of the IO
 * permission bitmap. The extra byte must be all 1 bits, and must
 * be within the limit.
 */
#define INIT_TSS  {							  \
	.x86_tss = {							  \
		.sp0		= sizeof(init_stack) + (long)&init_stack, \
		.ss0		= __KERNEL_DS,				  \
		.ss1		= __KERNEL_CS,				  \
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,		  \
	 },								  \
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },	  \
}
extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
#define KSTK_TOP(info)						\
({								\
	unsigned long *__ptr = (unsigned long *)(info);		\
	(unsigned long)(&__ptr[THREAD_SIZE_LONGS]);		\
})
/*
 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
 * This is necessary to guarantee that the entire "struct pt_regs"
 * is accessible even if the CPU hasn't stored the SS/ESP registers
 * on the stack (interrupt gate does not save these registers
 * when switching to the same priv ring).
 * Therefore beware: accessing the ss/esp fields of the
 * "struct pt_regs" is possible, but they may contain the
 * completely wrong values.
 */
#define task_pt_regs(task)					\
({								\
	struct pt_regs *__regs__;				\
	__regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
	__regs__ - 1;						\
})

#define KSTK_ESP(task)		(task_pt_regs(task)->sp)
#else
/*
 * User space process size. 47 bits minus one guard page.
 */
#define TASK_SIZE_MAX	((1UL << 47) - PAGE_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET	((current->personality & ADDR_LIMIT_3GB) ? \
					0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? \
					IA32_PAGE_OFFSET : TASK_SIZE_MAX)

#define STACK_TOP		TASK_SIZE
#define STACK_TOP_MAX		TASK_SIZE_MAX
#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t)	(*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
#endif /* CONFIG_X86_64 */
extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
					       unsigned long new_sp);
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))

#define KSTK_EIP(task)		(task_pt_regs(task)->ip)
/* Get/set a process' ability to use the timestamp counter instruction */
#define GET_TSC_CTL(adr)	get_tsc_mode((adr))
#define SET_TSC_CTL(val)	set_tsc_mode((val))

extern int get_tsc_mode(unsigned long adr);
extern int set_tsc_mode(unsigned int val);

extern int amd_get_nb_id(int cpu);
struct aperfmperf {
	u64 aperf, mperf;
};

static inline void get_aperfmperf(struct aperfmperf *am)
{
	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));

	rdmsrl(MSR_IA32_APERF, am->aperf);
	rdmsrl(MSR_IA32_MPERF, am->mperf);
}
#define APERFMPERF_SHIFT 10

static inline
unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
				    struct aperfmperf *new)
{
	u64 aperf = new->aperf - old->aperf;
	u64 mperf = new->mperf - old->mperf;
	unsigned long ratio = aperf;

	mperf >>= APERFMPERF_SHIFT;
	if (mperf)
		ratio = div64_u64(aperf, mperf);

	return ratio;
}
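/*
 * Editor's note (illustrative): the result is the APERF/MPERF ratio
 * scaled by 2^APERFMPERF_SHIFT. E.g. deltas aperf = 3 << 20 and
 * mperf = 2 << 20 give div64_u64(3 << 20, (2 << 20) >> 10) = 1536,
 * i.e. 1.5 * 1024.
 */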
/*
 * AMD errata checking
 */
#ifdef CONFIG_CPU_SUP_AMD
extern const int amd_erratum_383[];
extern const int amd_erratum_400[];
extern bool cpu_has_amd_erratum(const int *);

#define AMD_LEGACY_ERRATUM(...)		{ -1, __VA_ARGS__, 0 }
#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
	((f << 24) | (m_start << 16) | (s_start << 12) | (m_end << 4) | (s_end))
#define AMD_MODEL_RANGE_FAMILY(range)	(((range) >> 24) & 0xff)
#define AMD_MODEL_RANGE_START(range)	(((range) >> 12) & 0xfff)
#define AMD_MODEL_RANGE_END(range)	((range) & 0xfff)
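/*
 * Illustrative sketch (editor's addition, hypothetical values): an
 * OSVW-id-1 erratum affecting family 0x10, models 0x02..0xff, all
 * steppings, would be declared as:
 */
#if 0	/* example only */
const int amd_erratum_example[] =
	AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x10, 0x2, 0x0, 0xff, 0xf));
#endif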
#else
#define cpu_has_amd_erratum(x)	(false)
#endif /* CONFIG_CPU_SUP_AMD */
#endif /* _ASM_X86_PROCESSOR_H */