x86: unify mm_segment_t definition
include/asm-x86/processor_64.h (linux-2.6/x86.git, blob 1a258749b7cab3a8a1201f6d26fef68ff5f26037)
/*
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef __ASM_X86_64_PROCESSOR_H
#define __ASM_X86_64_PROCESSOR_H

#include <asm/segment.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/sigcontext.h>
#include <asm/cpufeature.h>
#include <linux/threads.h>
#include <asm/msr.h>
#include <asm/current.h>
#include <asm/system.h>
#include <linux/personality.h>
#include <asm/desc_defs.h>
extern void identify_cpu(struct cpuinfo_x86 *);

/*
 * User space process size: 47 bits minus one guard page.
 */
#define TASK_SIZE64	(0x800000000000UL - 4096)

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)

#define TASK_SIZE		(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
#define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
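
/*
 * The struct below mirrors the 512-byte FXSAVE/FXRSTOR save area written
 * by the CPU (8 + 16 + 8 + 128 + 256 + 96 = 512 bytes); fxsave/fxrstor
 * require a 16-byte aligned buffer, hence the aligned attribute.
 */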
struct i387_fxsave_struct {
	u16	cwd;
	u16	swd;
	u16	twd;
	u16	fop;
	u64	rip;
	u64	rdp;
	u32	mxcsr;
	u32	mxcsr_mask;
	u32	st_space[32];	/* 8*16 bytes for each FP-reg = 128 bytes */
	u32	xmm_space[64];	/* 16*16 bytes for each XMM-reg = 256 bytes */
	u32	padding[24];
} __attribute__ ((aligned (16)));
union i387_union {
	struct i387_fxsave_struct	fxsave;
};
/* Save the original ist values for checking stack pointers during debugging */
struct orig_ist {
	unsigned long ist[7];
};
DECLARE_PER_CPU(struct orig_ist, orig_ist);
#define INIT_THREAD  { \
	.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}

#define INIT_TSS  { \
	.x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
}
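
/*
 * start_thread() primes the register frame for a freshly exec'ed 64-bit
 * task: flat user segments, entry point and stack pointer taken from the
 * binary, and flags set to 0x200 (IF), i.e. interrupts enabled.
 */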
#define start_thread(regs,new_rip,new_rsp) do { \
	asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
	load_gs_index(0); \
	(regs)->ip = (new_rip); \
	(regs)->sp = (new_rsp); \
	write_pda(oldrsp, (new_rsp)); \
	(regs)->cs = __USER_CS; \
	(regs)->ss = __USER_DS; \
	(regs)->flags = 0x200; \
	set_fs(USER_DS); \
} while(0)
/*
 * Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
 */
#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))

#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
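
/*
 * NOP selection for padding in the alternatives code: Intel-oriented
 * configurations (CONFIG_MPSC, CONFIG_MCORE2) use the P6-style long NOPs,
 * everything else falls back to the K8-style prefixed NOPs.
 */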
#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2)
#define ASM_NOP1 P6_NOP1
#define ASM_NOP2 P6_NOP2
#define ASM_NOP3 P6_NOP3
#define ASM_NOP4 P6_NOP4
#define ASM_NOP5 P6_NOP5
#define ASM_NOP6 P6_NOP6
#define ASM_NOP7 P6_NOP7
#define ASM_NOP8 P6_NOP8
#else
#define ASM_NOP1 K8_NOP1
#define ASM_NOP2 K8_NOP2
#define ASM_NOP3 K8_NOP3
#define ASM_NOP4 K8_NOP4
#define ASM_NOP5 K8_NOP5
#define ASM_NOP6 K8_NOP6
#define ASM_NOP7 K8_NOP7
#define ASM_NOP8 K8_NOP8
#endif
/* Opteron nops */
#define K8_NOP1 ".byte 0x90\n"
#define K8_NOP2 ".byte 0x66,0x90\n"
#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
#define K8_NOP5 K8_NOP3 K8_NOP2
#define K8_NOP6 K8_NOP3 K8_NOP3
#define K8_NOP7 K8_NOP4 K8_NOP3
#define K8_NOP8 K8_NOP4 K8_NOP4
/* P6 nops */
/* uses eax dependencies (Intel-recommended choice) */
#define P6_NOP1 ".byte 0x90\n"
#define P6_NOP2 ".byte 0x66,0x90\n"
#define P6_NOP3 ".byte 0x0f,0x1f,0x00\n"
#define P6_NOP4 ".byte 0x0f,0x1f,0x40,0\n"
#define P6_NOP5 ".byte 0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP6 ".byte 0x66,0x0f,0x1f,0x44,0x00,0\n"
#define P6_NOP7 ".byte 0x0f,0x1f,0x80,0,0,0,0\n"
#define P6_NOP8 ".byte 0x0f,0x1f,0x84,0x00,0,0,0,0\n"
#define ASM_NOP_MAX 8
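
/*
 * prefetchw() is patched at boot by the alternatives code: the generic
 * prefetcht0 is replaced with prefetchw on CPUs advertising the 3DNOW
 * feature, where the write-intent prefetch instruction originated.
 */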
static inline void prefetchw(void *x)
{
	alternative_input("prefetcht0 (%1)",
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#endif /* __ASM_X86_64_PROCESSOR_H */