percpu: add @align to pcpu_fc_alloc_fn_t
arch/x86/kernel/setup_percpu.c

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
        [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE      PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE      0
#endif
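
/*
 * For illustration (foo is a hypothetical example, not a variable in
 * this tree): a module-side declaration such as
 *
 *      DEFINE_PER_CPU(int, foo);
 *      ...
 *      per_cpu(foo, cpu)++;
 *
 * is served from the reserved region above, so the symbol stays near
 * the percpu segment base and remains reachable with 32bit
 * relocations on x86_64.
 */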

/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        pg_data_t *last = NULL;
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                int node = early_cpu_to_node(cpu);

                if (node_online(node) && NODE_DATA(node) &&
                    last && last != NODE_DATA(node))
                        return true;

                last = NODE_DATA(node);
        }
#endif
        return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
                                        unsigned long align)
{
        const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int node = early_cpu_to_node(cpu);
        void *ptr;

        if (!node_online(node) || !NODE_DATA(node)) {
                ptr = __alloc_bootmem_nopanic(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
                ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
                                                   size, align, goal);
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
                         "%016lx\n", cpu, size, node, __pa(ptr));
        }
        return ptr;
#else
        return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
        return pcpu_alloc_bootmem(cpu, size, align);
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
        free_bootmem(__pa(ptr), size);
}
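
/*
 * The two helpers above are passed as callbacks to the first chunk
 * setup functions below.  Per this patch's subject, the allocation
 * callback type now carries an explicit @align argument; as a rough
 * sketch (the authoritative typedefs live in the percpu header, not
 * in this file):
 *
 *      typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu,
 *                                           size_t size, size_t align);
 *      typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
 *
 * pcpu_fc_alloc() and pcpu_fc_free() match these signatures.
 */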

/*
 * Large page remapping allocator
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
static void __init pcpul_map(void *ptr, size_t size, void *addr)
{
        pmd_t *pmd, pmd_v;

        /* map the allocated area at @addr with a single large (PMD) page */
        pmd = populate_extra_pmd((unsigned long)addr);
        pmd_v = pfn_pmd(page_to_pfn(virt_to_page(ptr)), PAGE_KERNEL_LARGE);
        set_pmd(pmd, pmd_v);
}

static int pcpu_lpage_cpu_distance(unsigned int from, unsigned int to)
{
        if (early_cpu_to_node(from) == early_cpu_to_node(to))
                return LOCAL_DISTANCE;
        else
                return REMOTE_DISTANCE;
}
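
/*
 * Note: the distance callback above is handed to
 * pcpu_lpage_build_unit_map() below, which (as far as this file is
 * concerned) uses it to group CPUs on the same node into units that
 * can share a large page.
 */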

static ssize_t __init setup_pcpu_lpage(bool chosen)
{
        size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
        size_t dyn_size = reserve - PERCPU_FIRST_CHUNK_RESERVE;
        size_t unit_map_size, unit_size;
        int *unit_map;
        int nr_units;
        ssize_t ret;

        /* on non-NUMA, embedding is better */
        if (!chosen && !pcpu_need_numa())
                return -EINVAL;

        /* need PSE */
        if (!cpu_has_pse) {
                pr_warning("PERCPU: lpage allocator requires PSE\n");
                return -EINVAL;
        }

        /* allocate and build unit_map */
        unit_map_size = nr_cpu_ids * sizeof(int);
        unit_map = alloc_bootmem_nopanic(unit_map_size);
        if (!unit_map) {
                pr_warning("PERCPU: failed to allocate unit_map\n");
                return -ENOMEM;
        }

        ret = pcpu_lpage_build_unit_map(PERCPU_FIRST_CHUNK_RESERVE,
                                        &dyn_size, &unit_size, PMD_SIZE,
                                        unit_map, pcpu_lpage_cpu_distance);
        if (ret < 0) {
                pr_warning("PERCPU: failed to build unit_map\n");
                goto out_free;
        }
        nr_units = ret;

        /* do the parameters look okay? */
        if (!chosen) {
                size_t vm_size = VMALLOC_END - VMALLOC_START;
                size_t tot_size = nr_units * unit_size;

                /* don't consume more than 20% of vmalloc area */
                if (tot_size > vm_size / 5) {
                        pr_info("PERCPU: too large chunk size %zuMB for "
                                "large page remap\n", tot_size >> 20);
                        ret = -EINVAL;
                        goto out_free;
                }
        }

        ret = pcpu_lpage_first_chunk(PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
                                     unit_size, PMD_SIZE, unit_map, nr_units,
                                     pcpu_fc_alloc, pcpu_fc_free, pcpul_map);
out_free:
        if (ret < 0)
                free_bootmem(__pa(unit_map), unit_map_size);
        return ret;
}
#else
static ssize_t __init setup_pcpu_lpage(bool chosen)
{
        return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to contain just the static area plus the
 * module and dynamic reserves, and is embedded in the linear physical
 * mapping so that it can use PMD mappings without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(bool chosen)
{
        size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

        /*
         * If large pages aren't supported, there's no benefit in doing
         * this.  Also, embedding allocation doesn't play well with
         * NUMA.
         */
        if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
                return -EINVAL;

        return pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                      reserve - PERCPU_FIRST_CHUNK_RESERVE);
}

/*
 * Page allocator
 *
 * Boring fallback 4k page allocator.  This allocator puts more
 * pressure on PTE TLBs but other than that behaves nicely on both UMA
 * and NUMA.
 */
static void __init pcpup_populate_pte(unsigned long addr)
{
        populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_page(void)
{
        return pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
                                     pcpu_fc_alloc, pcpu_fc_free,
                                     pcpup_populate_pte);
}
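
/*
 * On 32-bit, percpu variables are accessed through the %fs segment;
 * give each cpu a GDT entry whose base is that cpu's percpu offset.
 * 64-bit accesses percpu data through %gs with the base set via MSR
 * instead, so no GDT entry is needed and the body compiles away.
 */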
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
        struct desc_struct gdt;

        pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
                        0x2 | DESCTYPE_S, 0x8);
        gdt.s = 1;
        write_gdt_entry(get_cpu_gdt_table(cpu),
                        GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}

void __init setup_per_cpu_areas(void)
{
        unsigned int cpu;
        unsigned long delta;
        size_t pcpu_unit_size;
        ssize_t ret;

        pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
                NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

        /*
         * Allocate percpu area.  If PSE is supported, try to make use
         * of large page mappings.  Please read comments on top of
         * each allocator for details.
         */
        ret = -EINVAL;
        if (pcpu_chosen_fc != PCPU_FC_AUTO) {
                if (pcpu_chosen_fc != PCPU_FC_PAGE) {
                        if (pcpu_chosen_fc == PCPU_FC_LPAGE)
                                ret = setup_pcpu_lpage(true);
                        else
                                ret = setup_pcpu_embed(true);

                        if (ret < 0)
                                pr_warning("PERCPU: %s allocator failed (%zd), "
                                           "falling back to page size\n",
                                           pcpu_fc_names[pcpu_chosen_fc], ret);
                }
        } else {
                ret = setup_pcpu_lpage(false);
                if (ret < 0)
                        ret = setup_pcpu_embed(false);
        }
        if (ret < 0)
                ret = setup_pcpu_page();
        if (ret < 0)
                panic("cannot initialize percpu area (err=%zd)", ret);
        pcpu_unit_size = ret;
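
        /*
         * A worked example with assumed numbers (not taken from this
         * file): if pcpu_base_addr sits 1MB above __per_cpu_start,
         * unit_size is 2MB and pcpu_unit_map maps cpu2 to unit 2, then
         * per_cpu_offset(2) = 1MB + 2 * 2MB = 5MB; adding that offset
         * to any static percpu symbol yields cpu2's copy of it.
         */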

        /* alrighty, percpu areas up and running */
        delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
        for_each_possible_cpu(cpu) {
                per_cpu_offset(cpu) =
                        delta + pcpu_unit_map[cpu] * pcpu_unit_size;
                per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
                per_cpu(cpu_number, cpu) = cpu;
                setup_percpu_segment(cpu);
                setup_stack_canary_segment(cpu);
                /*
                 * Copy data used in early init routines from the
                 * initial arrays to the per cpu data areas.  These
                 * arrays then become expendable and the *_early_ptr's
                 * are zeroed indicating that the static arrays are
                 * gone.
                 */
#ifdef CONFIG_X86_LOCAL_APIC
                per_cpu(x86_cpu_to_apicid, cpu) =
                        early_per_cpu_map(x86_cpu_to_apicid, cpu);
                per_cpu(x86_bios_cpu_apicid, cpu) =
                        early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
                per_cpu(irq_stack_ptr, cpu) =
                        per_cpu(irq_stack_union.irq_stack, cpu) +
                        IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
                per_cpu(x86_cpu_to_node_map, cpu) =
                        early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
                /*
                 * Up to this point, the boot CPU has been using .data.init
                 * area.  Reload any changed state for the boot CPU.
                 */
                if (cpu == boot_cpu_id)
                        switch_to_new_gdt(cpu);
        }

        /* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
        early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
        early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
        /*
         * Make sure the boot cpu's node_number is correct when the
         * boot cpu is on a node that doesn't have memory installed.
         */
        per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

        /* Setup node to cpumask map */
        setup_node_to_cpumask_map();

        /* Setup cpu initialized, callin, callout masks */
        setup_cpu_local_masks();
}