#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
# define DBG(fmt, ...) do { if (0) pr_debug(fmt, ##__VA_ARGS__); } while (0)
#endif
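/*
 * Note: the dead "if (0)" arm keeps the DBG() format string and
 * arguments visible to the compiler for type checking even when
 * CONFIG_DEBUG_PER_CPU_MAPS is off, while the branch itself is
 * discarded at compile time.
 */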
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif
DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
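/*
 * Note: the "[0 ... NR_CPUS-1]" range initializer is a GCC extension.
 * Until setup_per_cpu_areas() rewrites this table, every CPU's offset
 * is BOOT_PERCPU_OFFSET, so early percpu accesses resolve into the
 * initial percpu section laid out by the linker.
 */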
/*
 * On x86_64 symbols referenced from code should be reachable using
 * 32bit relocations.  Reserve space for static percpu variables in
 * modules so that they are always served from the first chunk which
 * is located at the percpu segment base.  On x86_32, anything can
 * address anywhere.  No need to reserve space in the first chunk.
 */
#ifdef CONFIG_X86_64
#define PERCPU_FIRST_CHUNK_RESERVE	PERCPU_MODULE_RESERVE
#else
#define PERCPU_FIRST_CHUNK_RESERVE	0
#endif
#ifdef CONFIG_X86_32
/**
 * pcpu_need_numa - determine percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}
#endif
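/*
 * In effect pcpu_need_numa() reports true only when the possible CPUs
 * span at least two distinct online nodes with initialized NODE_DATA;
 * with a single populated node, NUMA-aware percpu placement would gain
 * nothing over the simple embedding allocator.
 */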
/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size allocation in bytes
 * @align: alignment
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
			 cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}
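/*
 * Note: the __pa(MAX_DMA_ADDRESS) goal asks bootmem to place these
 * allocations above the DMA zone when possible, so early percpu data
 * does not consume memory that ISA-style DMA may need later.
 */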
/*
 * Helpers for first chunk memory allocation
 */
static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return pcpu_alloc_bootmem(cpu, size, align);
}
static void __init pcpu_fc_free(void *ptr, size_t size)
{
#ifdef CONFIG_NO_BOOTMEM
	u64 start = __pa(ptr);
	u64 end = start + size;
	free_early_partial(start, end);
#else
	free_bootmem(__pa(ptr), size);
#endif
}
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
#else
	return LOCAL_DISTANCE;
#endif
}
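/*
 * This distance callback is handed to pcpu_embed_first_chunk(), which
 * uses it to group CPUs into allocation units: CPUs reporting
 * LOCAL_DISTANCE to each other share a group and so end up with percpu
 * memory carved from the same node-local block.
 */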
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}
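/*
 * Note: 32-bit reaches percpu data through the %fs segment, hence the
 * per-CPU GDT_ENTRY_PERCPU descriptor based at that CPU's offset.
 * 64-bit uses the %gs base MSR instead, which is why this helper
 * compiles to nothing there.
 */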
void __init setup_per_cpu_areas(void)
{
	unsigned int cpu;
	unsigned long delta;
	int rc;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
	/*
	 * Allocate percpu area.  Embedding allocator is our favorite;
	 * however, on NUMA configurations, it can result in very
	 * sparse unit mapping and vmalloc area isn't spacious enough
	 * on 32bit.  Use page in that case.
	 */
#ifdef CONFIG_X86_32
	if (pcpu_chosen_fc == PCPU_FC_AUTO && pcpu_need_numa())
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	rc = -EINVAL;
	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
		const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
		const size_t dyn_size = PERCPU_MODULE_RESERVE +
			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;

		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					    dyn_size, atom_size,
					    pcpu_cpu_distance,
					    pcpu_fc_alloc, pcpu_fc_free);
		if (rc < 0)
			pr_warning("%s allocator failed (%d), falling back to page size\n",
				   pcpu_fc_names[pcpu_chosen_fc], rc);
	}
	if (rc < 0)
		rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
					   pcpu_fc_alloc, pcpu_fc_free,
					   pcpup_populate_pte);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);
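	/*
	 * Note the fallback chain above: embed -> page -> panic.  The
	 * embed allocator keeps percpu memory in the linear mapping
	 * (large pages, cheap TLB), while the page allocator maps each
	 * 4k page through the vmalloc area via pcpup_populate_pte().
	 */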
	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
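	/*
	 * Each CPU's final offset is this load-address delta plus the
	 * CPU's unit offset within the first chunk; adding it to the
	 * address of a percpu symbol yields that CPU's private copy.
	 */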
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);
	}
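	/*
	 * Note: switch_to_new_gdt() reloads the GDT and the percpu
	 * segment register, so from here on the boot CPU's percpu
	 * accesses hit its permanent area instead of the initial
	 * linker-provided one.
	 */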
	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	/*
	 * make sure boot cpu node_number is right, when boot cpu is on the
	 * node that doesn't have mem installed
	 */
	per_cpu(node_number, boot_cpu_id) = cpu_to_node(boot_cpu_id);
#endif
	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}