x86: make Voyager use x86 per-cpu setup.
arch/x86/kernel/setup_percpu.c
blob 599dc1cc1da84dab44cd7bae1c50b9de1f62fbc1
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
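
/*
 * Initial per-cpu offset, in effect until setup_per_cpu_areas() gives
 * each CPU its own copy of the per-cpu section.  On x86_64 the per-cpu
 * symbols are zero-based, so the boot-time offset is the load address
 * of the initial per-cpu section; on 32-bit the symbols are ordinary
 * addresses and the boot-time offset is 0.
 */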
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);
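
/*
 * __per_cpu_offset[cpu] is what the per_cpu() accessors add to a
 * per-cpu symbol's address; every entry starts at the boot offset and
 * is rewritten below once the CPU's own area has been allocated.
 */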
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
        ssize_t size;
        char *ptr;
        int cpu;

        /* Copy section for each CPU (we discard the original) */
        size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE);

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
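
        /*
         * For every possible CPU: allocate a page-aligned area (node-local
         * when NUMA topology is known), copy the initial per-cpu data into
         * it and record the resulting offset.
         */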
        for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
                ptr = alloc_bootmem_pages(size);
#else
                int node = early_cpu_to_node(cpu);
                if (!node_online(node) || !NODE_DATA(node)) {
                        ptr = alloc_bootmem_pages(size);
                        pr_info("cpu %d has no node %d or node-local memory\n",
                                cpu, node);
                        pr_debug("per cpu data for cpu%d at %016lx\n",
                                 cpu, __pa(ptr));
                } else {
                        ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
                        pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
                                cpu, node, __pa(ptr));
                }
#endif
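
                /*
                 * Populate the new area from the initial per-cpu section
                 * and record this CPU's offset for the per_cpu() accessors.
                 */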
                memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
                per_cpu_offset(cpu) = ptr - __per_cpu_start;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                /*
                 * Copy data used in early init routines from the initial
                 * arrays to the per cpu data areas.  These arrays then
                 * become expendable and the *_early_ptr's are zeroed
                 * indicating that the static arrays are gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                                early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                                early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
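                /*
                 * Point this CPU's irq_stack_ptr 64 bytes below the top
                 * of its per-cpu IRQ stack.
                 */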
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                                early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
                /*
                 * Up to this point, the boot CPU has been using .data.init
                 * area.  Reload %gs offset for the boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        load_gs_base(cpu);
#endif

                DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
        }

        /* indicate the early static arrays will soon be gone */
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}