/*
 * X86-64 specific CPU setup.
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
 * See setup.c for older changelog.
 * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/bitops.h>
#include <asm/atomic.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/i387.h>
#include <asm/percpu.h>
#include <asm/mtrr.h>
#include <asm/proto.h>
#include <asm/mman.h>
#include <asm/numa.h>
char x86_boot_params[2048] __initdata = {0,};

unsigned long cpu_initialized __initdata = 0;

struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;

extern struct task_struct init_task;

extern unsigned char __per_cpu_start[], __per_cpu_end[];

extern struct desc_ptr cpu_gdt_descr[];
struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };
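/*
 * Each 64bit IDT entry is a 16 byte gate, so the limit above spans all
 * 256 vectors. Strictly the lidt limit is inclusive, so 256 * 16 - 1
 * would be the exact value; the extra byte is harmless in practice.
 */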
char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));

unsigned long __supported_pte_mask = ~0UL;
static int do_not_nx __initdata = 0;
unsigned long vm_stack_flags = __VM_STACK_FLAGS;
unsigned long vm_stack_flags32 = __VM_STACK_FLAGS;
unsigned long vm_data_default_flags = __VM_DATA_DEFAULT_FLAGS;
unsigned long vm_data_default_flags32 = __VM_DATA_DEFAULT_FLAGS;
unsigned long vm_force_exec32 = PROT_EXEC;
/* noexec=on|noforce|off
Control non-executable mappings for 64bit processes.

on      Enable
off     Disable
noforce (default) Don't enable by default for heap/stack/data,
        but allow PROT_EXEC to be effective
*/
static int __init nonx_setup(char *str)
{
        if (!strcmp(str, "on")) {
                __supported_pte_mask |= _PAGE_NX;
                do_not_nx = 0;
                vm_data_default_flags &= ~VM_EXEC;
                vm_stack_flags &= ~VM_EXEC;
        } else if (!strcmp(str, "noforce") || !strcmp(str, "off")) {
                do_not_nx = (str[0] == 'o');
                if (do_not_nx)
                        __supported_pte_mask &= ~_PAGE_NX;
                vm_data_default_flags |= VM_EXEC;
                vm_stack_flags |= VM_EXEC;
        }
        return 1;
}
__setup("noexec=", nonx_setup);
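/*
 * Example: "noexec=off" takes the second branch above with str[0] == 'o',
 * so do_not_nx is set and _PAGE_NX is cleared from __supported_pte_mask;
 * NX is disabled completely. "noexec=noforce" hits the same branch but
 * keeps _PAGE_NX supported: default mappings stay executable, while an
 * explicit request for non-executable memory is still honoured.
 */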
/* noexec32=opt{,opt}

Control the no exec default for 32bit processes. Can also be overridden
per executable using ELF header flags (e.g. needed for the X server).
Requires noexec=on or noexec=noforce to be effective.

Valid options:
   all,on  Heap, stack and data are non-executable.
   off     (default) Heap, stack and data are executable.
   stack   Stack is non-executable, heap/data are.
   force   Don't imply PROT_EXEC for PROT_READ.
   compat  (default) Imply PROT_EXEC for PROT_READ.
*/
static int __init nonx32_setup(char *s)
{
        while (*s) {
                if (!strncmp(s, "all", 3) || !strncmp(s, "on", 2)) {
                        vm_data_default_flags32 &= ~VM_EXEC;
                        vm_stack_flags32 &= ~VM_EXEC;
                } else if (!strncmp(s, "off", 3)) {
                        vm_data_default_flags32 |= VM_EXEC;
                        vm_stack_flags32 |= VM_EXEC;
                } else if (!strncmp(s, "stack", 5)) {
                        vm_data_default_flags32 |= VM_EXEC;
                        vm_stack_flags32 &= ~VM_EXEC;
                } else if (!strncmp(s, "force", 5)) {
                        vm_force_exec32 = 0;
                } else if (!strncmp(s, "compat", 6)) {
                        vm_force_exec32 = PROT_EXEC;
                }
                s += strcspn(s, ",");
                if (*s == ',')
                        ++s;
        }
        return 1;
}
__setup("noexec32=", nonx32_setup);
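/*
 * Example: "noexec32=stack,force" runs the loop twice. The first token
 * clears VM_EXEC from the 32bit stack default while keeping heap/data
 * executable; the second clears vm_force_exec32, so PROT_READ no longer
 * implies PROT_EXEC for 32bit mappings.
 */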
/*
 * Great future plan:
 * Declare the PDA itself and its support (irqstack, tss, pml4) as per-CPU
 * data. Always point %gs to its beginning.
 */
void __init setup_per_cpu_areas(void)
{
        int i;
        unsigned long size;

        /* Copy section for each CPU (we discard the original) */
        size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
        if (size < PERCPU_ENOUGH_ROOM)
                size = PERCPU_ENOUGH_ROOM;
#endif

        for (i = 0; i < NR_CPUS; i++) {
                unsigned char *ptr;
                /* If possible allocate on the node of the CPU.
                   In case it doesn't exist round-robin nodes. */
                if (!NODE_DATA(i % numnodes)) {
                        printk("cpu with no node %d, numnodes %d\n", i, numnodes);
                        ptr = alloc_bootmem(size);
                } else {
                        ptr = alloc_bootmem_node(NODE_DATA(i % numnodes), size);
                }
                if (!ptr)
                        panic("Cannot allocate cpu data for CPU %d\n", i);
                cpu_pda[i].data_offset = ptr - __per_cpu_start;
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
        }
}
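/*
 * The per_cpu() accessors later add cpu_pda[i].data_offset to a
 * variable's link-time address in the percpu section, so each CPU
 * transparently sees its own copy of the data block copied above.
 */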
void pda_init(int cpu)
{
        pml4_t *level4;
        struct x8664_pda *pda = &cpu_pda[cpu];

        /* Set up data that may be needed in __get_free_pages early */
        asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
        wrmsrl(MSR_GS_BASE, cpu_pda + cpu);

        pda->me = pda;
        pda->cpunumber = cpu;
        pda->irqcount = -1;
        pda->kernelstack =
                (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
        pda->active_mm = &init_mm;
        pda->mmu_state = 0;

        if (cpu == 0) {
                /* others are initialized in smpboot.c */
                pda->pcurrent = &init_task;
                pda->irqstackptr = boot_cpu_stack;
                level4 = init_level4_pgt;
        } else {
                level4 = (pml4_t *)__get_free_pages(GFP_ATOMIC, 0);
                if (!level4)
                        panic("Cannot allocate top level page for cpu %d", cpu);
                pda->irqstackptr = (char *)
                        __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
                if (!pda->irqstackptr)
                        panic("cannot allocate irqstack for cpu %d", cpu);
        }

        pda->level4_pgt = (unsigned long *)level4;
        if (level4 != init_level4_pgt)
                memcpy(level4, &init_level4_pgt, PAGE_SIZE);
        set_pml4(level4 + 510, mk_kernel_pml4(__pa_symbol(boot_vmalloc_pgt)));
        asm volatile("movq %0,%%cr3" :: "r" (__pa(level4)));

        pda->irqstackptr += IRQSTACKSIZE-64;
}
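/*
 * Note on the bias above: pda->irqstackptr ends up 64 bytes below the
 * true top of the irq stack. The stack grows down, and the slack
 * presumably gives the interrupt entry path a little scratch headroom
 * at the very top.
 */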
char boot_exception_stacks[N_EXCEPTION_STACKS * EXCEPTION_STKSZ]
__attribute__((section(".bss.page_aligned")));
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
        /*
         * LSTAR and STAR live in a somewhat strange symbiosis: they both
         * write to the same internal register. STAR sets the CS/DS
         * selectors, but only a 32bit target; LSTAR sets the 64bit rip.
         */
        wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
        wrmsrl(MSR_LSTAR, system_call);

#ifdef CONFIG_IA32_EMULATION
        syscall32_cpu_init();
#endif

        /* Flags to clear on syscall */
        wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
}
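/*
 * MSR_STAR layout as programmed above: bits 47..32 hold the kernel CS/SS
 * selector base loaded by syscall, bits 63..48 the base that sysret
 * derives the user selectors from (__USER32_CS here), and the low 32
 * bits are the legacy 32bit syscall EIP, left zero in long mode. The
 * SYSCALL_MASK value clears TF, DF, IF and the two IOPL bits (0x3000)
 * on every kernel entry.
 */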
void __init check_efer(void)
{
        unsigned long efer;

        rdmsrl(MSR_EFER, efer);
        if (!(efer & EFER_NX) || do_not_nx) {
                __supported_pte_mask &= ~_PAGE_NX;
        }
}
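/*
 * check_efer() runs on each CPU from cpu_init() below: if the CPU does
 * not report EFER.NX, or noexec= asked for NX to stay off, the NX bit
 * is masked out of __supported_pte_mask so it can never leak into a PTE.
 */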
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless; this function acts as a
 * 'CPU state barrier': nothing should get across.
 * A lot of state is already set up in pda_init().
 */
void __init cpu_init(void)
{
#ifdef CONFIG_SMP
        int cpu = stack_smp_processor_id();
#else
        int cpu = smp_processor_id();
#endif
        struct tss_struct *t = &per_cpu(init_tss, cpu);
        unsigned long v;
        char *estacks = NULL;
        struct task_struct *me;
        int i;

        /* CPU 0 is initialised in head64.c */
        if (cpu != 0) {
                pda_init(cpu);
        } else
                estacks = boot_exception_stacks;

        me = current;

        if (test_and_set_bit(cpu, &cpu_initialized))
                panic("CPU#%d already initialized!\n", cpu);

        printk("Initializing CPU#%d\n", cpu);

        clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

        /*
         * Initialize the per-CPU GDT with the boot GDT,
         * and set up the GDT descriptor:
         */
        if (cpu) {
                memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
        }

        cpu_gdt_descr[cpu].size = GDT_SIZE;
        cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
        __asm__ __volatile__("lgdt %0": : "m" (cpu_gdt_descr[cpu]));
        __asm__ __volatile__("lidt %0": : "m" (idt_descr));

        memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);

        /*
         * Delete NT
         */
        asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");
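        /*
         * The sequence above pops rflags into rax, clears bit 14 (NT,
         * the nested task flag) with btr and writes the result back; a
         * stale NT inherited from the boot environment would otherwise
         * trip up later iret instructions, since hardware task switching
         * does not exist in long mode.
         */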
        if (cpu == 0)
                early_identify_cpu(&boot_cpu_data);

        syscall_init();

        wrmsrl(MSR_FS_BASE, 0);
        wrmsrl(MSR_KERNEL_GS_BASE, 0);
        barrier();

        check_efer();

        /*
         * set up and load the per-CPU TSS
         */
        for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                if (cpu) {
                        estacks = (char *)__get_free_pages(GFP_ATOMIC,
                                                           EXCEPTION_STACK_ORDER);
                        if (!estacks)
                                panic("Cannot allocate exception stack %ld %d\n",
                                      v, cpu);
                }
                estacks += EXCEPTION_STKSZ;
                t->ist[v] = (unsigned long)estacks;
        }

        t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
        /*
         * <= is required because the CPU will access up to
         * 8 bits beyond the end of the IO permission bitmap.
         */
        for (i = 0; i <= IO_BITMAP_LONGS; i++)
                t->io_bitmap[i] = ~0UL;
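        /*
         * Each IST entry stores the *top* of its exception stack
         * (estacks is advanced by EXCEPTION_STKSZ before being saved)
         * since stacks grow down. The all-ones io_bitmap denies every
         * I/O port; the <= above writes one extra long so the byte the
         * CPU reads past the bitmap end is covered too.
         */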
        atomic_inc(&init_mm.mm_count);
        me->active_mm = &init_mm;
        if (me->mm)
                BUG();
        enter_lazy_tlb(&init_mm, me);

        set_tss_desc(cpu, t);
        load_TR_desc();
        load_LDT(&init_mm.context);

        /*
         * Clear all 6 debug registers:
         */
        set_debug(0UL, 0);
        set_debug(0UL, 1);
        set_debug(0UL, 2);
        set_debug(0UL, 3);
        set_debug(0UL, 6);
        set_debug(0UL, 7);

        fpu_init();

#ifdef CONFIG_NUMA
        numa_add_cpu(cpu);
#endif
}