#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
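
/*
 * Initial percpu offset used before the real percpu areas are set up.
 * On 64-bit, percpu symbols are zero-based, so the boot-time offset must
 * point at the static copy linked at __per_cpu_load; on 32-bit, percpu
 * symbols carry their own addresses and no offset is needed.
 */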
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);

/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif

/**
 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}

/*
 * Large page remap allocator
 *
 * This allocator uses PMD page as unit.  A PMD page is allocated for
 * each cpu and each is remapped into vmalloc area using PMD mapping.
 * As PMD page is quite large, only part of it is used for the first
 * chunk.  Unused part is returned to the bootmem allocator.
 *
 * So, the PMD pages are mapped twice - once to the physical mapping
 * and to the vmalloc area for the first percpu chunk.  The double
 * mapping does add one more PMD TLB entry pressure but still is much
 * better than only using 4k mappings while still being NUMA friendly.
 */
#ifdef CONFIG_NEED_MULTIPLE_NODES
struct pcpul_ent {
	unsigned int	cpu;
	void		*ptr;
};

static size_t pcpul_size;
static struct pcpul_ent *pcpul_map;
static struct vm_struct pcpul_vm;

static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
{
	size_t off = (size_t)pageno << PAGE_SHIFT;

	if (off >= pcpul_size)
		return NULL;

	return virt_to_page(pcpul_map[cpu].ptr + off);
}

static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
	size_t map_size, dyn_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	if (!chosen) {
		size_t vm_size = VMALLOC_END - VMALLOC_START;
		size_t tot_size = nr_cpu_ids * PMD_SIZE;

		/* on non-NUMA, embedding is better */
		if (!pcpu_need_numa())
			return -EINVAL;

		/* don't consume more than 20% of vmalloc area */
		if (tot_size > vm_size / 5) {
			pr_info("PERCPU: too large chunk size %zuMB for "
				"large page remap\n", tot_size >> 20);
			return -EINVAL;
		}
	}

	/* need PSE */
	if (!cpu_has_pse) {
		pr_warning("PERCPU: lpage allocator requires PSE\n");
		return -EINVAL;
	}

	/*
	 * Currently supports only single page.  Supporting multiple
	 * pages won't be too difficult if it ever becomes necessary.
	 */
	pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
			       PERCPU_DYNAMIC_RESERVE);
	if (pcpul_size > PMD_SIZE) {
		pr_warning("PERCPU: static data is larger than large page, "
			   "can't use large page\n");
		return -EINVAL;
	}
	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;

	/* allocate pointer array and alloc large pages */
	map_size = PFN_ALIGN(nr_cpu_ids * sizeof(pcpul_map[0]));
	pcpul_map = alloc_bootmem(map_size);

	for_each_possible_cpu(cpu) {
		pcpul_map[cpu].cpu = cpu;
		pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
							PMD_SIZE);
		if (!pcpul_map[cpu].ptr) {
			pr_warning("PERCPU: failed to allocate large page "
				   "for cpu%u\n", cpu);
			goto enomem;
		}

		/*
		 * Only use pcpul_size bytes and give back the rest.
		 *
		 * Ingo: The 2MB up-rounding bootmem is needed to make
		 * sure the partial 2MB page is still fully RAM - it's
		 * not well-specified to have a PAT-incompatible area
		 * (unmapped RAM, device memory, etc.) in that hole.
		 */
		free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
			     PMD_SIZE - pcpul_size);

		memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
	}

	/* allocate address and map */
	pcpul_vm.flags = VM_ALLOC;
	pcpul_vm.size = nr_cpu_ids * PMD_SIZE;
	vm_area_register_early(&pcpul_vm, PMD_SIZE);

	for_each_possible_cpu(cpu) {
		pmd_t *pmd, pmd_v;

		pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
					 cpu * PMD_SIZE);
		pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
				PAGE_KERNEL_LARGE);
		set_pmd(pmd, pmd_v);
	}

	/* we're ready, commit */
	pr_info("PERCPU: Remapped at %p with large pages, static data "
		"%zu bytes\n", pcpul_vm.addr, static_size);

	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
				     PMD_SIZE, pcpul_vm.addr, NULL);

	/* sort pcpul_map array for pcpu_lpage_remapped() */
	for (i = 0; i < nr_cpu_ids - 1; i++)
		for (j = i + 1; j < nr_cpu_ids; j++)
			if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
				struct pcpul_ent tmp = pcpul_map[i];
				pcpul_map[i] = pcpul_map[j];
				pcpul_map[j] = tmp;
			}

	return ret;

enomem:
	for_each_possible_cpu(cpu)
		if (pcpul_map[cpu].ptr)
			free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
	free_bootmem(__pa(pcpul_map), map_size);
	return -ENOMEM;
}

/**
 * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
 * @kaddr: the kernel address in question
 *
 * Determine whether @kaddr falls in the pcpul recycled area.  This is
 * used by pageattr to detect VM aliases and break up the pcpu PMD
 * mapping such that the same physical page is not mapped under
 * different attributes.
 *
 * The recycled area is always at the tail of a partially used PMD
 * page.
 *
 * RETURNS:
 * Address of corresponding remapped pcpu address if match is found;
 * otherwise, NULL.
 */
void *pcpu_lpage_remapped(void *kaddr)
{
	void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
	unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
	int left = 0, right = nr_cpu_ids - 1;
	int pos;

	/* pcpul in use at all? */
	if (!pcpul_map)
		return NULL;

	/* okay, perform binary search */
	while (left <= right) {
		pos = (left + right) / 2;

		if (pcpul_map[pos].ptr < pmd_addr)
			left = pos + 1;
		else if (pcpul_map[pos].ptr > pmd_addr)
			right = pos - 1;
		else {
			/* it shouldn't be in the area for the first chunk */
			WARN_ON(offset < pcpul_size);

			return pcpul_vm.addr +
				pcpul_map[pos].cpu * PMD_SIZE + offset;
		}
	}

	return NULL;
}
#else
static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
{
	return -EINVAL;
}
#endif

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * module and dynamic reserves and embedded into linear physical
 * mapping so that it can use PMD mapping without additional TLB
 * pressure.
 */
static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
{
	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
		return -EINVAL;

	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
				      reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
}

/*
 * 4k page allocator
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page and most of initialization is done by the generic
 * setup function.
 */
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_nr_static_pages)
		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
	return NULL;
}

static void __init pcpu4k_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * nr_cpu_ids
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate "
					   "4k page for cpu%u\n", cpu);
				goto enomem;
			}

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
				     PERCPU_FIRST_CHUNK_RESERVE, -1,
				     -1, NULL, pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}

/* for explicit first chunk allocator selection */
static char pcpu_chosen_alloc[16] __initdata;

static int __init percpu_alloc_setup(char *str)
{
	strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
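
/*
 * On 32-bit, percpu data is reached through the %fs segment, so each CPU
 * gets a GDT entry whose base is its percpu offset.  64-bit instead loads
 * the percpu base into the GS base MSR during CPU setup, so there is
 * nothing to do here.
 */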
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}
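
/*
 * Populate the first percpu chunk with one of the allocators above
 * (honouring the percpu_alloc= early parameter and falling back to the
 * 4k allocator if needed), then point each possible CPU at its own copy
 * and migrate early boot data into it.
 */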
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/*
	 * Allocate percpu area.  If PSE is supported, try to make use
	 * of large page mappings.  Please read comments on top of
	 * each allocator for details.
	 */
	ret = -EINVAL;
	if (strlen(pcpu_chosen_alloc)) {
		if (strcmp(pcpu_chosen_alloc, "4k")) {
			if (!strcmp(pcpu_chosen_alloc, "lpage"))
				ret = setup_pcpu_lpage(static_size, true);
			else if (!strcmp(pcpu_chosen_alloc, "embed"))
				ret = setup_pcpu_embed(static_size, true);
			else
				pr_warning("PERCPU: unknown allocator %s "
					   "specified\n", pcpu_chosen_alloc);
			if (ret < 0)
				pr_warning("PERCPU: %s allocator failed (%zd), "
					   "falling back to 4k\n",
					   pcpu_chosen_alloc, ret);
		}
	} else {
		ret = setup_pcpu_lpage(static_size, false);
		if (ret < 0)
			ret = setup_pcpu_embed(static_size, false);
	}
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * make sure boot cpu node_number is right, when boot cpu is on the
	 * node that doesn't have mem installed
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}