#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1	/* (used later) */

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
static inline void setup_cpu_pda_map(void) { }

#elif !defined(CONFIG_SMP)
static inline void setup_cpu_pda_map(void) { }

#else /* CONFIG_SMP && CONFIG_X86_64 */

/*
 * Allocate cpu_pda pointer table and array via alloc_bootmem.
 */
static void __init setup_cpu_pda_map(void)
{
	char *pda;
	unsigned long size;
	int cpu;

	size = roundup(sizeof(struct x8664_pda), cache_line_size());

	/* allocate cpu_pda array and pointer table */
	{
		unsigned long asize = size * (nr_cpu_ids - 1);

		pda = alloc_bootmem(asize);
	}

	/* initialize pointer table to static pda's */
	for_each_possible_cpu(cpu) {
		if (cpu == 0) {
			/* leave boot cpu pda in place */
			continue;
		}
		cpu_pda(cpu) = (struct x8664_pda *)pda;
		cpu_pda(cpu)->in_bootmem = 1;
		pda += size;
	}
}

#endif /* CONFIG_SMP && CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 *   Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Setup cpu_pda map */
	setup_cpu_pda_map();

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);
182 pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
183 NR_CPUS
, nr_cpumask_bits
, nr_cpu_ids
, nr_node_ids
);
185 pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size
);
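
	/*
	 * Allocate a per cpu area for each possible cpu and copy the
	 * initial percpu data into it, preferring node-local memory.
	 */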
	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				 __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					 __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						__pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				cpu, node, __pa(ptr));
		}
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
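
/*
 * Record the cpu -> node mapping: in the early static map while the
 * per-cpu areas do not exist yet, afterwards in the percpu variable
 * (and in the cpu's pda).
 */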
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}
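
/* Drop the cpu -> node binding established by numa_set_node() */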
void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS
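
/* Non-debug versions: update the node's cpumask with no validity checks */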
void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
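
/*
 * Set or clear 'cpu' in its node's cpumask, checking that
 * node_to_cpumask_map has been allocated, and log the resulting mask.
 */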
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
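
/* Debug version of cpu_to_node(): warns when used before the percpu areas exist */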
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */