#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

/*
 * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
 * voyager wants cpu_number too.
 */
#ifdef CONFIG_SMP
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
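
/*
 * Sketch of what DEFINE_EARLY_PER_CPU() expands to (approximation;
 * see the real macro in asm/percpu.h):
 *
 *	DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
 *	u16 x86_cpu_to_apicid_early_map[NR_CPUS] __initdata =
 *		{ [0 ... NR_CPUS-1] = BAD_APICID };
 *	u16 *x86_cpu_to_apicid_early_ptr = x86_cpu_to_apicid_early_map;
 *
 * The static array covers boot-time use before the per-cpu areas
 * exist; setup_per_cpu_maps() later migrates the contents and NULLs
 * the early pointer.
 */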

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1	/* (used later) */

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif
#ifdef CONFIG_X86_64
/*
 * Define load_pda_offset() and per-cpu __pda for x86_64.
 * load_pda_offset() is responsible for loading the offset of pda into
 * %gs.
 *
 * On SMP, pda offset also duals as percpu base address and thus it
 * should be at the start of per-cpu area.  To achieve this, it's
 * preallocated in vmlinux_64.lds.S directly instead of using
 * DEFINE_PER_CPU().
 */
void __cpuinit load_pda_offset(int cpu)
{
	/* Memory clobbers used to order pda/percpu accesses */
	mb();
	wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
	mb();
}
#ifndef CONFIG_SMP
DEFINE_PER_CPU(struct x8664_pda, __pda);
#endif
EXPORT_PER_CPU_SYMBOL(__pda);
#endif /* CONFIG_X86_64 */
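
/*
 * Layout sketch (implied by the comment above; the authoritative
 * layout lives in vmlinux_64.lds.S):
 *
 *	CPUn per-cpu area:  [ struct x8664_pda | ...percpu variables... ]
 *	                    ^ MSR_GS_BASE = cpu_pda(n)
 *
 * With the pda at offset 0, one %gs base serves both pda fields and
 * %gs-relative per-cpu variable accesses.
 */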

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */
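
/*
 * Background note (generic cpumask behaviour, summarized here): with
 * CONFIG_CPUMASK_OFFSTACK=n, cpumask_var_t is a full array and
 * alloc_bootmem_cpumask_var() is effectively a no-op; with
 * CONFIG_CPUMASK_OFFSTACK=y it allocates just enough bootmem bits for
 * nr_cpu_ids, which is why these masks are sized at boot instead of
 * being declared as static cpumask_t.
 */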

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA

/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
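
/*
 * For reference, the early accessor selects between the two backing
 * stores roughly like this (sketch; see asm/percpu.h for the real
 * early_per_cpu() definition):
 *
 *	early_per_cpu(name, cpu) ==
 *		early_per_cpu_ptr(name) ? name##_early_map[cpu]
 *					: per_cpu(name, cpu)
 *
 * so NULLing the early pointers above redirects all later lookups to
 * the freshly copied per-cpu data.
 */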

#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);
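
/*
 * The [0] initializer lets the boot CPU use per-cpu accesses against
 * the initial .data.percpu image (loaded at __per_cpu_load) before
 * setup_per_cpu_areas() runs; a per-cpu reference is effectively
 * (sketch):
 *
 *	*(type *)((char *)&var + __per_cpu_offset[cpu])
 *
 * so only __per_cpu_offset[] must be fixed up per CPU below.
 */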

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				    __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					 __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
							__pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				cpu, node, __pa(ptr));
		}
#endif

		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
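		/*
		 * Offset arithmetic at this point (follows from the two
		 * assignments above): for any per-cpu variable v,
		 *
		 *	&per_cpu(v, cpu) == &v + per_cpu_offset(cpu)
		 *	                 == ptr + (&v - __per_cpu_start)
		 *
		 * i.e. every CPU now addresses its own copy of the
		 * section just memcpy'd from the initial image.
		 */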
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			(char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64;
		/*
		 * CPU0 modified pda in the init data area, reload pda
		 * offset for CPU0 and clear the area for others.
		 */
		if (cpu == 0)
			load_pda_offset(0);
		else
			memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
#endif

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
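
/*
 * Sizing note: the loop above leaves nr_node_ids as (highest possible
 * node + 1), so the alloc_bootmem_low() block holds exactly one
 * cpumask_t per valid node, and node_to_cpumask_map may be indexed by
 * any node id below nr_node_ids from here on.
 */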

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);
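
/*
 * Usage sketch, illustrating the side note above:
 *
 *	const cpumask_t *p = cpumask_of_node(node);	// pointer, no copy
 *	cpumask_t m = node_to_cpumask(node);		// NR_CPUS-bit copy
 *
 * The by-value form copies sizeof(cpumask_t) bytes (512 bytes with
 * NR_CPUS=4096), so the pointer form is preferred in hot or
 * stack-constrained paths.
 */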

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */