1 /* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
2 /*
3 * mm/percpu.c - percpu memory allocator
5 * Copyright (C) 2009 SUSE Linux Products GmbH
6 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
8 * This file is released under the GPLv2.
10 * This is the percpu allocator which can handle both static and dynamic
11 * areas. Percpu areas are allocated in chunks. Each chunk consists
12 * of a boot-time determined number of units and the first
13 * chunk is used for static percpu variables in the kernel image
14 * (special boot time alloc/init handling necessary as these areas
15 * need to be brought up before allocation services are running).
16 * Unit grows as necessary and all units grow or shrink in unison.
17 * When a chunk is filled up, another chunk is allocated.
19 * c0 c1 c2
20 * ------------------- ------------------- ------------
21 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
22 * ------------------- ...... ------------------- .... ------------
24 * Allocation is done in offset-size areas of a single unit space. That is,
25 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
26 * c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly to
27 * cpus. On NUMA, the mapping can be non-linear and even sparse.
28 * Percpu access can be done by configuring percpu base registers
29 * according to cpu to unit mapping and pcpu_unit_size.
31 * There are usually many small percpu allocations, many of them being
32 * as small as 4 bytes. The allocator organizes chunks into lists
33 * according to free size and tries to allocate from the fullest one.
34 * Each chunk keeps the maximum contiguous area size hint which is
35 * guaranteed to be equal to or larger than the maximum contiguous
36 * area in the chunk. This helps the allocator not to iterate the
37 * chunk maps unnecessarily.
39 * Allocation state in each chunk is kept using an array of integers
40 * on chunk->map. A positive value in the map represents a free
41 * region and a negative value an allocated one. Allocation inside a chunk is done
42 * by scanning this map sequentially and serving the first matching
43 * entry. This is mostly copied from the percpu_modalloc() allocator.
44 * Chunks can be determined from the address using the index field
45 * in the page struct. The index field contains a pointer to the chunk.
47 * To use this allocator, arch code should do the following.
49 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
50 * regular address to percpu pointer and back if they need to be
51 * different from the default
53 * - use pcpu_setup_first_chunk() during percpu area initialization to
54 * set up the first chunk containing the kernel static percpu area
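/*
 * Illustrative caller-side sketch (hypothetical code, not taken from this
 * file): how the dynamic interface described above is typically consumed.
 * The struct and field names are made up for the example.
 */
#if 0
struct my_counter {
	u64 hits;
};

static int example_use_percpu(void)
{
	struct my_counter __percpu *ctr;
	unsigned int cpu;
	u64 total = 0;

	/* served by pcpu_alloc() below through the alloc_percpu() wrapper */
	ctr = alloc_percpu(struct my_counter);
	if (!ctr)
		return -ENOMEM;

	/* each CPU reaches its own copy through its unit's offset */
	preempt_disable();
	this_cpu_ptr(ctr)->hits++;
	preempt_enable();

	/* walking all copies uses the cpu -> unit mapping described above */
	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(ctr, cpu)->hits;

	pr_info("example: %llu total hits\n", (unsigned long long)total);

	free_percpu(ctr);
	return 0;
}
#endif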
57 #include <linux/bitmap.h>
58 #include <linux/bootmem.h>
59 #include <linux/err.h>
60 #include <linux/list.h>
61 #include <linux/log2.h>
62 #include <linux/mm.h>
63 #include <linux/module.h>
64 #include <linux/mutex.h>
65 #include <linux/percpu.h>
66 #include <linux/pfn.h>
67 #include <linux/slab.h>
68 #include <linux/spinlock.h>
69 #include <linux/vmalloc.h>
70 #include <linux/workqueue.h>
72 #include <asm/cacheflush.h>
73 #include <asm/sections.h>
74 #include <asm/tlbflush.h>
75 #include <asm/io.h>
77 #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */
78 #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
80 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
81 #ifndef __addr_to_pcpu_ptr
82 #define __addr_to_pcpu_ptr(addr) \
83 (void __percpu *)((unsigned long)(addr) - \
84 (unsigned long)pcpu_base_addr + \
85 (unsigned long)__per_cpu_start)
86 #endif
87 #ifndef __pcpu_ptr_to_addr
88 #define __pcpu_ptr_to_addr(ptr) \
89 (void __force *)((unsigned long)(ptr) + \
90 (unsigned long)pcpu_base_addr - \
91 (unsigned long)__per_cpu_start)
92 #endif
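/*
 * Illustrative sketch (hypothetical helper): with the default definitions
 * above the two macros are exact inverses, so translating an address to a
 * percpu pointer and back must return the original address.
 */
#if 0
static void __maybe_unused pcpu_ptr_roundtrip_example(void *addr)
{
	void __percpu *ptr = __addr_to_pcpu_ptr(addr);

	BUG_ON(__pcpu_ptr_to_addr(ptr) != addr);
}
#endif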
94 struct pcpu_chunk {
95 struct list_head list; /* linked to pcpu_slot lists */
96 int free_size; /* free bytes in the chunk */
97 int contig_hint; /* max contiguous size hint */
98 void *base_addr; /* base address of this chunk */
99 int map_used; /* # of map entries used */
100 int map_alloc; /* # of map entries allocated */
101 int *map; /* allocation map */
102 void *data; /* chunk data */
103 bool immutable; /* no [de]population allowed */
104 unsigned long populated[]; /* populated bitmap */
107 static int pcpu_unit_pages __read_mostly;
108 static int pcpu_unit_size __read_mostly;
109 static int pcpu_nr_units __read_mostly;
110 static int pcpu_atom_size __read_mostly;
111 static int pcpu_nr_slots __read_mostly;
112 static size_t pcpu_chunk_struct_size __read_mostly;
114 /* cpus with the lowest and highest unit numbers */
115 static unsigned int pcpu_first_unit_cpu __read_mostly;
116 static unsigned int pcpu_last_unit_cpu __read_mostly;
118 /* the address of the first chunk which starts with the kernel static area */
119 void *pcpu_base_addr __read_mostly;
120 EXPORT_SYMBOL_GPL(pcpu_base_addr);
122 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
123 const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
125 /* group information, used for vm allocation */
126 static int pcpu_nr_groups __read_mostly;
127 static const unsigned long *pcpu_group_offsets __read_mostly;
128 static const size_t *pcpu_group_sizes __read_mostly;
131 * The first chunk which always exists. Note that unlike other
132 * chunks, this one can be allocated and mapped in several different
133 * ways and thus often doesn't live in the vmalloc area.
135 static struct pcpu_chunk *pcpu_first_chunk;
138 * Optional reserved chunk. This chunk reserves part of the first
139 * chunk and serves it for reserved allocations. The amount of
140 * reserved offset is in pcpu_reserved_chunk_limit. When reserved
141 * area doesn't exist, the following variables contain NULL and 0
142 * respectively.
144 static struct pcpu_chunk *pcpu_reserved_chunk;
145 static int pcpu_reserved_chunk_limit;
148 * Synchronization rules.
150 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
151 * protects allocation/reclaim paths, chunks, populated bitmap and
152 * vmalloc mapping. The latter is a spinlock and protects the index
153 * data structures - chunk slots, chunks and area maps in chunks.
155 * During allocation, pcpu_alloc_mutex is kept locked all the time and
156 * pcpu_lock is grabbed and released as necessary. All actual memory
157 * allocations are done using GFP_KERNEL with pcpu_lock released. In
158 * general, percpu memory can't be allocated with irq off but
159 * irqsave/restore are still used in alloc path so that it can be used
160 * from early init path - sched_init() specifically.
162 * Free path accesses and alters only the index data structures, so it
163 * can be safely called from atomic context. When memory needs to be
164 * returned to the system, free path schedules reclaim_work which
165 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
166 * reclaimed, releases both locks and frees the chunks. Note that it's
167 * necessary to grab both locks to remove a chunk from circulation as
168 * allocation path might be referencing the chunk with only
169 * pcpu_alloc_mutex locked.
171 static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
172 static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
174 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
176 /* reclaim work to release fully free chunks, scheduled from free path */
177 static void pcpu_reclaim(struct work_struct *work);
178 static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
180 static bool pcpu_addr_in_first_chunk(void *addr)
182 void *first_start = pcpu_first_chunk->base_addr;
184 return addr >= first_start && addr < first_start + pcpu_unit_size;
187 static bool pcpu_addr_in_reserved_chunk(void *addr)
189 void *first_start = pcpu_first_chunk->base_addr;
191 return addr >= first_start &&
192 addr < first_start + pcpu_reserved_chunk_limit;
195 static int __pcpu_size_to_slot(int size)
197 int highbit = fls(size); /* size is in bytes */
198 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
201 static int pcpu_size_to_slot(int size)
203 if (size == pcpu_unit_size)
204 return pcpu_nr_slots - 1;
205 return __pcpu_size_to_slot(size);
208 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
210 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
211 return 0;
213 return pcpu_size_to_slot(chunk->free_size);
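/*
 * Worked example of the slot math above (illustrative): with
 * PCPU_SLOT_BASE_SHIFT == 5, a 64 byte free area gives fls(64) == 7 and
 * thus slot max(7 - 5 + 2, 1) == 4, while 1024 bytes gives fls(1024) == 11
 * and slot 8.  A chunk whose free_size equals pcpu_unit_size is always put
 * in the last slot, pcpu_nr_slots - 1.
 */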
216 /* set the pointer to a chunk in a page struct */
217 static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
219 page->index = (unsigned long)pcpu;
222 /* obtain pointer to a chunk from a page struct */
223 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
225 return (struct pcpu_chunk *)page->index;
228 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
230 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
233 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
234 unsigned int cpu, int page_idx)
236 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
237 (page_idx << PAGE_SHIFT);
240 static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
241 int *rs, int *re, int end)
243 *rs = find_next_zero_bit(chunk->populated, end, *rs);
244 *re = find_next_bit(chunk->populated, end, *rs + 1);
247 static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
248 int *rs, int *re, int end)
250 *rs = find_next_bit(chunk->populated, end, *rs);
251 *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
255 * (Un)populated page region iterators. Iterate over (un)populated
256 * page regions between @start and @end in @chunk. @rs and @re should
257 * be integer variables and will be set to start and end page index of
258 * the current region.
260 #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
261 for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
262 (rs) < (re); \
263 (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
265 #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
266 for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
267 (rs) < (re); \
268 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
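/*
 * Illustrative sketch (hypothetical helper): counting the unpopulated pages
 * of a chunk with the iterator defined above.
 */
#if 0
static int __maybe_unused pcpu_count_unpop_pages(struct pcpu_chunk *chunk)
{
	int rs, re, nr = 0;

	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
		nr += re - rs;	/* [rs, re) is one unpopulated region */
	return nr;
}
#endif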
271 * pcpu_mem_alloc - allocate memory
272 * @size: bytes to allocate
274 * Allocate @size bytes. If @size is PAGE_SIZE or smaller,
275 * kzalloc() is used; otherwise, vmalloc() is used. The returned
276 * memory is always zeroed.
278 * CONTEXT:
279 * Does GFP_KERNEL allocation.
281 * RETURNS:
282 * Pointer to the allocated area on success, NULL on failure.
284 static void *pcpu_mem_alloc(size_t size)
286 if (WARN_ON_ONCE(!slab_is_available()))
287 return NULL;
289 if (size <= PAGE_SIZE)
290 return kzalloc(size, GFP_KERNEL);
291 else {
292 void *ptr = vmalloc(size);
293 if (ptr)
294 memset(ptr, 0, size);
295 return ptr;
300 * pcpu_mem_free - free memory
301 * @ptr: memory to free
302 * @size: size of the area
304 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
306 static void pcpu_mem_free(void *ptr, size_t size)
308 if (size <= PAGE_SIZE)
309 kfree(ptr);
310 else
311 vfree(ptr);
315 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
316 * @chunk: chunk of interest
317 * @oslot: the previous slot it was on
319 * This function is called after an allocation or free changed @chunk.
320 * New slot according to the changed state is determined and @chunk is
321 * moved to the slot. Note that the reserved chunk is never put on
322 * chunk slots.
324 * CONTEXT:
325 * pcpu_lock.
327 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
329 int nslot = pcpu_chunk_slot(chunk);
331 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
332 if (oslot < nslot)
333 list_move(&chunk->list, &pcpu_slot[nslot]);
334 else
335 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
340 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
341 * @chunk: chunk of interest
343 * Determine whether area map of @chunk needs to be extended to
344 * accommodate a new allocation.
346 * CONTEXT:
347 * pcpu_lock.
349 * RETURNS:
350 * New target map allocation length if extension is necessary, 0
351 * otherwise.
353 static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
355 int new_alloc;
357 if (chunk->map_alloc >= chunk->map_used + 2)
358 return 0;
360 new_alloc = PCPU_DFL_MAP_ALLOC;
361 while (new_alloc < chunk->map_used + 2)
362 new_alloc *= 2;
364 return new_alloc;
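/*
 * Worked example (illustrative): a chunk with map_used == 15 and
 * map_alloc == 16 cannot guarantee room for the two extra entries a split
 * may need (16 < 15 + 2), so the map must grow.  Doubling from
 * PCPU_DFL_MAP_ALLOC (16) until map_used + 2 is covered yields a new
 * target of 32 entries.
 */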
368 * pcpu_extend_area_map - extend area map of a chunk
369 * @chunk: chunk of interest
370 * @new_alloc: new target allocation length of the area map
372 * Extend area map of @chunk to have @new_alloc entries.
374 * CONTEXT:
375 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
377 * RETURNS:
378 * 0 on success, -errno on failure.
380 static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
382 int *old = NULL, *new = NULL;
383 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
384 unsigned long flags;
386 new = pcpu_mem_alloc(new_size);
387 if (!new)
388 return -ENOMEM;
390 /* acquire pcpu_lock and switch to new area map */
391 spin_lock_irqsave(&pcpu_lock, flags);
393 if (new_alloc <= chunk->map_alloc)
394 goto out_unlock;
396 old_size = chunk->map_alloc * sizeof(chunk->map[0]);
397 old = chunk->map;
399 memcpy(new, old, old_size);
401 chunk->map_alloc = new_alloc;
402 chunk->map = new;
403 new = NULL;
405 out_unlock:
406 spin_unlock_irqrestore(&pcpu_lock, flags);
409 * pcpu_mem_free() might end up calling vfree() which uses
410 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
412 pcpu_mem_free(old, old_size);
413 pcpu_mem_free(new, new_size);
415 return 0;
419 * pcpu_split_block - split a map block
420 * @chunk: chunk of interest
421 * @i: index of map block to split
422 * @head: head size in bytes (can be 0)
423 * @tail: tail size in bytes (can be 0)
425 * Split the @i'th map block into two or three blocks. If @head is
426 * non-zero, @head bytes block is inserted before block @i moving it
427 * to @i+1 and reducing its size by @head bytes.
429 * If @tail is non-zero, the target block, which can be @i or @i+1
430 * depending on @head, is reduced by @tail bytes and @tail byte block
431 * is inserted after the target block.
433 * @chunk->map must have enough free slots to accommodate the split.
435 * CONTEXT:
436 * pcpu_lock.
438 static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
439 int head, int tail)
441 int nr_extra = !!head + !!tail;
443 BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
445 /* insert new subblocks */
446 memmove(&chunk->map[i + nr_extra], &chunk->map[i],
447 sizeof(chunk->map[0]) * (chunk->map_used - i));
448 chunk->map_used += nr_extra;
450 if (head) {
451 chunk->map[i + 1] = chunk->map[i] - head;
452 chunk->map[i++] = head;
454 if (tail) {
455 chunk->map[i++] -= tail;
456 chunk->map[i] = tail;
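/*
 * Worked example (illustrative): splitting a 512 byte free entry at index
 * @i with @head == 8 and @tail == 24 first shifts the following entries
 * right by two, then rewrites the area as three entries: 8 (new head),
 * 480 (the block that will actually be allocated) and 24 (new tail).
 */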
461 * pcpu_alloc_area - allocate area from a pcpu_chunk
462 * @chunk: chunk of interest
463 * @size: wanted size in bytes
464 * @align: wanted align
466 * Try to allocate @size bytes area aligned at @align from @chunk.
467 * Note that this function only allocates the offset. It doesn't
468 * populate or map the area.
470 * @chunk->map must have at least two free slots.
472 * CONTEXT:
473 * pcpu_lock.
475 * RETURNS:
476 * Allocated offset in @chunk on success, -1 if no matching area is
477 * found.
479 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
481 int oslot = pcpu_chunk_slot(chunk);
482 int max_contig = 0;
483 int i, off;
485 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
486 bool is_last = i + 1 == chunk->map_used;
487 int head, tail;
489 /* extra for alignment requirement */
490 head = ALIGN(off, align) - off;
491 BUG_ON(i == 0 && head != 0);
493 if (chunk->map[i] < 0)
494 continue;
495 if (chunk->map[i] < head + size) {
496 max_contig = max(chunk->map[i], max_contig);
497 continue;
501 * If head is small or the previous block is free,
502 * merge them. Note that 'small' is defined as smaller
503 * than sizeof(int), which is very small but isn't too
504 * uncommon for percpu allocations.
506 if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
507 if (chunk->map[i - 1] > 0)
508 chunk->map[i - 1] += head;
509 else {
510 chunk->map[i - 1] -= head;
511 chunk->free_size -= head;
513 chunk->map[i] -= head;
514 off += head;
515 head = 0;
518 /* if tail is small, just keep it around */
519 tail = chunk->map[i] - head - size;
520 if (tail < sizeof(int))
521 tail = 0;
523 /* split if warranted */
524 if (head || tail) {
525 pcpu_split_block(chunk, i, head, tail);
526 if (head) {
527 i++;
528 off += head;
529 max_contig = max(chunk->map[i - 1], max_contig);
531 if (tail)
532 max_contig = max(chunk->map[i + 1], max_contig);
535 /* update hint and mark allocated */
536 if (is_last)
537 chunk->contig_hint = max_contig; /* fully scanned */
538 else
539 chunk->contig_hint = max(chunk->contig_hint,
540 max_contig);
542 chunk->free_size -= chunk->map[i];
543 chunk->map[i] = -chunk->map[i];
545 pcpu_chunk_relocate(chunk, oslot);
546 return off;
549 chunk->contig_hint = max_contig; /* fully scanned */
550 pcpu_chunk_relocate(chunk, oslot);
552 /* tell the upper layer that this chunk has no matching area */
553 return -1;
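/*
 * Worked example (illustrative): with a map of { -64, 128, -32, 256 }
 * (negative entries are allocated), a request for 100 bytes aligned to 4
 * skips the allocated first entry, finds the free 128 byte entry at offset
 * 64 (head == 0), splits off the 28 byte remainder as a tail and marks the
 * area allocated, leaving { -64, -100, 28, -32, 256 } and returning 64.
 */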
557 * pcpu_free_area - free area to a pcpu_chunk
558 * @chunk: chunk of interest
559 * @freeme: offset of area to free
561 * Free area starting from @freeme to @chunk. Note that this function
562 * only modifies the allocation map. It doesn't depopulate or unmap
563 * the area.
565 * CONTEXT:
566 * pcpu_lock.
568 static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
570 int oslot = pcpu_chunk_slot(chunk);
571 int i, off;
573 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
574 if (off == freeme)
575 break;
576 BUG_ON(off != freeme);
577 BUG_ON(chunk->map[i] > 0);
579 chunk->map[i] = -chunk->map[i];
580 chunk->free_size += chunk->map[i];
582 /* merge with previous? */
583 if (i > 0 && chunk->map[i - 1] >= 0) {
584 chunk->map[i - 1] += chunk->map[i];
585 chunk->map_used--;
586 memmove(&chunk->map[i], &chunk->map[i + 1],
587 (chunk->map_used - i) * sizeof(chunk->map[0]));
588 i--;
590 /* merge with next? */
591 if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
592 chunk->map[i] += chunk->map[i + 1];
593 chunk->map_used--;
594 memmove(&chunk->map[i + 1], &chunk->map[i + 2],
595 (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
598 chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
599 pcpu_chunk_relocate(chunk, oslot);
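/*
 * Worked example (illustrative): freeing offset 64 in the map
 * { -64, -100, 28, -32, 256 } flips the -100 entry back to 100 and, as the
 * following 28 byte entry is also free, merges the two into a single 128
 * byte entry, restoring { -64, 128, -32, 256 }.
 */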
602 static struct pcpu_chunk *pcpu_alloc_chunk(void)
604 struct pcpu_chunk *chunk;
606 chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
607 if (!chunk)
608 return NULL;
610 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
611 if (!chunk->map) {
612 kfree(chunk);
613 return NULL;
616 chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
617 chunk->map[chunk->map_used++] = pcpu_unit_size;
619 INIT_LIST_HEAD(&chunk->list);
620 chunk->free_size = pcpu_unit_size;
621 chunk->contig_hint = pcpu_unit_size;
623 return chunk;
626 static void pcpu_free_chunk(struct pcpu_chunk *chunk)
628 if (!chunk)
629 return;
630 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
631 kfree(chunk);
635 * Chunk management implementation.
637 * To allow different implementations, chunk alloc/free and
638 * [de]population are implemented in a separate file which is pulled
639 * into this file and compiled together. The following functions
640 * should be implemented.
642 * pcpu_populate_chunk - populate the specified range of a chunk
643 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
644 * pcpu_create_chunk - create a new chunk
645 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
646 * pcpu_addr_to_page - translate address to the corresponding struct page
647 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
649 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
650 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
651 static struct pcpu_chunk *pcpu_create_chunk(void);
652 static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
653 static struct page *pcpu_addr_to_page(void *addr);
654 static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
656 #ifdef CONFIG_NEED_PER_CPU_KM
657 #include "percpu-km.c"
658 #else
659 #include "percpu-vm.c"
660 #endif
663 * pcpu_chunk_addr_search - determine chunk containing specified address
664 * @addr: address for which the chunk needs to be determined.
666 * RETURNS:
667 * The address of the found chunk.
669 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
671 /* is it in the first chunk? */
672 if (pcpu_addr_in_first_chunk(addr)) {
673 /* is it in the reserved area? */
674 if (pcpu_addr_in_reserved_chunk(addr))
675 return pcpu_reserved_chunk;
676 return pcpu_first_chunk;
680 * The address is relative to unit0 which might be unused and
681 * thus unmapped. Offset the address to the unit space of the
682 * current processor before looking it up in the vmalloc
683 * space. Note that any possible cpu id can be used here, so
684 * there's no need to worry about preemption or cpu hotplug.
686 addr += pcpu_unit_offsets[raw_smp_processor_id()];
687 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
691 * pcpu_alloc - the percpu allocator
692 * @size: size of area to allocate in bytes
693 * @align: alignment of area (max PAGE_SIZE)
694 * @reserved: allocate from the reserved chunk if available
696 * Allocate percpu area of @size bytes aligned at @align.
698 * CONTEXT:
699 * Does GFP_KERNEL allocation.
701 * RETURNS:
702 * Percpu pointer to the allocated area on success, NULL on failure.
704 static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
706 static int warn_limit = 10;
707 struct pcpu_chunk *chunk;
708 const char *err;
709 int slot, off, new_alloc;
710 unsigned long flags;
712 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
713 WARN(true, "illegal size (%zu) or align (%zu) for "
714 "percpu allocation\n", size, align);
715 return NULL;
718 mutex_lock(&pcpu_alloc_mutex);
719 spin_lock_irqsave(&pcpu_lock, flags);
721 /* serve reserved allocations from the reserved chunk if available */
722 if (reserved && pcpu_reserved_chunk) {
723 chunk = pcpu_reserved_chunk;
725 if (size > chunk->contig_hint) {
726 err = "alloc from reserved chunk failed";
727 goto fail_unlock;
730 while ((new_alloc = pcpu_need_to_extend(chunk))) {
731 spin_unlock_irqrestore(&pcpu_lock, flags);
732 if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
733 err = "failed to extend area map of reserved chunk";
734 goto fail_unlock_mutex;
736 spin_lock_irqsave(&pcpu_lock, flags);
739 off = pcpu_alloc_area(chunk, size, align);
740 if (off >= 0)
741 goto area_found;
743 err = "alloc from reserved chunk failed";
744 goto fail_unlock;
747 restart:
748 /* search through normal chunks */
749 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
750 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
751 if (size > chunk->contig_hint)
752 continue;
754 new_alloc = pcpu_need_to_extend(chunk);
755 if (new_alloc) {
756 spin_unlock_irqrestore(&pcpu_lock, flags);
757 if (pcpu_extend_area_map(chunk,
758 new_alloc) < 0) {
759 err = "failed to extend area map";
760 goto fail_unlock_mutex;
762 spin_lock_irqsave(&pcpu_lock, flags);
764 * pcpu_lock has been dropped, need to
765 * restart pcpu_slot list walking.
767 goto restart;
770 off = pcpu_alloc_area(chunk, size, align);
771 if (off >= 0)
772 goto area_found;
776 /* hmmm... no space left, create a new chunk */
777 spin_unlock_irqrestore(&pcpu_lock, flags);
779 chunk = pcpu_create_chunk();
780 if (!chunk) {
781 err = "failed to allocate new chunk";
782 goto fail_unlock_mutex;
785 spin_lock_irqsave(&pcpu_lock, flags);
786 pcpu_chunk_relocate(chunk, -1);
787 goto restart;
789 area_found:
790 spin_unlock_irqrestore(&pcpu_lock, flags);
792 /* populate, map and clear the area */
793 if (pcpu_populate_chunk(chunk, off, size)) {
794 spin_lock_irqsave(&pcpu_lock, flags);
795 pcpu_free_area(chunk, off);
796 err = "failed to populate";
797 goto fail_unlock;
800 mutex_unlock(&pcpu_alloc_mutex);
802 /* return address relative to base address */
803 return __addr_to_pcpu_ptr(chunk->base_addr + off);
805 fail_unlock:
806 spin_unlock_irqrestore(&pcpu_lock, flags);
807 fail_unlock_mutex:
808 mutex_unlock(&pcpu_alloc_mutex);
809 if (warn_limit) {
810 pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
811 "%s\n", size, align, err);
812 dump_stack();
813 if (!--warn_limit)
814 pr_info("PERCPU: limit reached, disable warning\n");
816 return NULL;
820 * __alloc_percpu - allocate dynamic percpu area
821 * @size: size of area to allocate in bytes
822 * @align: alignment of area (max PAGE_SIZE)
824 * Allocate percpu area of @size bytes aligned at @align. Might
825 * sleep. Might trigger writeouts.
827 * CONTEXT:
828 * Does GFP_KERNEL allocation.
830 * RETURNS:
831 * Percpu pointer to the allocated area on success, NULL on failure.
833 void __percpu *__alloc_percpu(size_t size, size_t align)
835 return pcpu_alloc(size, align, false);
837 EXPORT_SYMBOL_GPL(__alloc_percpu);
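/*
 * Illustrative sketch (hypothetical helper): __alloc_percpu() is also used
 * directly when the size is only known at runtime, e.g. for a percpu array
 * of @n counters.
 */
#if 0
static u64 __percpu *example_alloc_counter_array(unsigned int n)
{
	return __alloc_percpu(n * sizeof(u64), __alignof__(u64));
}
#endif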
840 * __alloc_reserved_percpu - allocate reserved percpu area
841 * @size: size of area to allocate in bytes
842 * @align: alignment of area (max PAGE_SIZE)
844 * Allocate percpu area of @size bytes aligned at @align from reserved
845 * percpu area if arch has set it up; otherwise, allocation is served
846 * from the same dynamic area. Might sleep. Might trigger writeouts.
848 * CONTEXT:
849 * Does GFP_KERNEL allocation.
851 * RETURNS:
852 * Percpu pointer to the allocated area on success, NULL on failure.
854 void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
856 return pcpu_alloc(size, align, true);
860 * pcpu_reclaim - reclaim fully free chunks, workqueue function
861 * @work: unused
863 * Reclaim all fully free chunks except for the first one.
865 * CONTEXT:
866 * workqueue context.
868 static void pcpu_reclaim(struct work_struct *work)
870 LIST_HEAD(todo);
871 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
872 struct pcpu_chunk *chunk, *next;
874 mutex_lock(&pcpu_alloc_mutex);
875 spin_lock_irq(&pcpu_lock);
877 list_for_each_entry_safe(chunk, next, head, list) {
878 WARN_ON(chunk->immutable);
880 /* spare the first one */
881 if (chunk == list_first_entry(head, struct pcpu_chunk, list))
882 continue;
884 list_move(&chunk->list, &todo);
887 spin_unlock_irq(&pcpu_lock);
889 list_for_each_entry_safe(chunk, next, &todo, list) {
890 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
891 pcpu_destroy_chunk(chunk);
894 mutex_unlock(&pcpu_alloc_mutex);
898 * free_percpu - free percpu area
899 * @ptr: pointer to area to free
901 * Free percpu area @ptr.
903 * CONTEXT:
904 * Can be called from atomic context.
906 void free_percpu(void __percpu *ptr)
908 void *addr;
909 struct pcpu_chunk *chunk;
910 unsigned long flags;
911 int off;
913 if (!ptr)
914 return;
916 addr = __pcpu_ptr_to_addr(ptr);
918 spin_lock_irqsave(&pcpu_lock, flags);
920 chunk = pcpu_chunk_addr_search(addr);
921 off = addr - chunk->base_addr;
923 pcpu_free_area(chunk, off);
925 /* if there is more than one fully free chunk, wake up grim reaper */
926 if (chunk->free_size == pcpu_unit_size) {
927 struct pcpu_chunk *pos;
929 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
930 if (pos != chunk) {
931 schedule_work(&pcpu_reclaim_work);
932 break;
936 spin_unlock_irqrestore(&pcpu_lock, flags);
938 EXPORT_SYMBOL_GPL(free_percpu);
941 * is_kernel_percpu_address - test whether address is from static percpu area
942 * @addr: address to test
944 * Test whether @addr belongs to in-kernel static percpu area. Module
945 * static percpu areas are not considered. For those, use
946 * is_module_percpu_address().
948 * RETURNS:
949 * %true if @addr is from in-kernel static percpu area, %false otherwise.
951 bool is_kernel_percpu_address(unsigned long addr)
953 const size_t static_size = __per_cpu_end - __per_cpu_start;
954 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
955 unsigned int cpu;
957 for_each_possible_cpu(cpu) {
958 void *start = per_cpu_ptr(base, cpu);
960 if ((void *)addr >= start && (void *)addr < start + static_size)
961 return true;
963 return false;
967 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
968 * @addr: the address to be converted to physical address
970 * Given @addr, a dereferenceable address obtained via one of the
971 * percpu access macros, this function translates it into its physical
972 * address. The caller is responsible for ensuring @addr stays valid
973 * until this function finishes.
975 * RETURNS:
976 * The physical address for @addr.
978 phys_addr_t per_cpu_ptr_to_phys(void *addr)
980 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
981 bool in_first_chunk = false;
982 unsigned long first_start, first_end;
983 unsigned int cpu;
986 * The following test on first_start/end isn't strictly
987 * necessary but will speed up lookups of addresses which
988 * aren't in the first chunk.
990 first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
991 first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
992 pcpu_unit_pages);
993 if ((unsigned long)addr >= first_start &&
994 (unsigned long)addr < first_end) {
995 for_each_possible_cpu(cpu) {
996 void *start = per_cpu_ptr(base, cpu);
998 if (addr >= start && addr < start + pcpu_unit_size) {
999 in_first_chunk = true;
1000 break;
1005 if (in_first_chunk) {
1006 if ((unsigned long)addr < VMALLOC_START ||
1007 (unsigned long)addr >= VMALLOC_END)
1008 return __pa(addr);
1009 else
1010 return page_to_phys(vmalloc_to_page(addr));
1011 } else
1012 return page_to_phys(pcpu_addr_to_page(addr));
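/*
 * Illustrative sketch (hypothetical helper): a caller first materializes a
 * per-cpu pointer with per_cpu_ptr() and only then asks for its physical
 * address, e.g. to hand a per-cpu buffer to hardware.
 */
#if 0
static phys_addr_t __maybe_unused example_percpu_phys(void __percpu *buf,
						      unsigned int cpu)
{
	return per_cpu_ptr_to_phys(per_cpu_ptr(buf, cpu));
}
#endif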
1016 * pcpu_alloc_alloc_info - allocate percpu allocation info
1017 * @nr_groups: the number of groups
1018 * @nr_units: the number of units
1020 * Allocate ai which is large enough for @nr_groups groups containing
1021 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1022 * cpu_map array which is long enough for @nr_units and filled with
1023 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
1024 * pointer of other groups.
1026 * RETURNS:
1027 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1028 * failure.
1030 struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1031 int nr_units)
1033 struct pcpu_alloc_info *ai;
1034 size_t base_size, ai_size;
1035 void *ptr;
1036 int unit;
1038 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1039 __alignof__(ai->groups[0].cpu_map[0]));
1040 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1042 ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1043 if (!ptr)
1044 return NULL;
1045 ai = ptr;
1046 ptr += base_size;
1048 ai->groups[0].cpu_map = ptr;
1050 for (unit = 0; unit < nr_units; unit++)
1051 ai->groups[0].cpu_map[unit] = NR_CPUS;
1053 ai->nr_groups = nr_groups;
1054 ai->__ai_size = PFN_ALIGN(ai_size);
1056 return ai;
1060 * pcpu_free_alloc_info - free percpu allocation info
1061 * @ai: pcpu_alloc_info to free
1063 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1065 void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1067 free_bootmem(__pa(ai), ai->__ai_size);
1071 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1072 * @reserved_size: the size of reserved percpu area in bytes
1073 * @dyn_size: minimum free size for dynamic allocation in bytes
1074 * @atom_size: allocation atom size
1075 * @cpu_distance_fn: callback to determine distance between cpus, optional
1077 * This function determines grouping of units, their mappings to cpus
1078 * and other parameters considering needed percpu size, allocation
1079 * atom size and distances between CPUs.
1081 * Groups are always multiples of atom size and CPUs which are of
1082 * LOCAL_DISTANCE both ways are grouped together and share space for
1083 * units in the same group. The returned configuration is guaranteed
1084 * to have CPUs on different nodes on different groups and >=75% usage
1085 * of allocated virtual address space.
1087 * RETURNS:
1088 * On success, pointer to the new allocation_info is returned. On
1089 * failure, ERR_PTR value is returned.
1091 static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1092 size_t reserved_size, size_t dyn_size,
1093 size_t atom_size,
1094 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1096 static int group_map[NR_CPUS] __initdata;
1097 static int group_cnt[NR_CPUS] __initdata;
1098 const size_t static_size = __per_cpu_end - __per_cpu_start;
1099 int nr_groups = 1, nr_units = 0;
1100 size_t size_sum, min_unit_size, alloc_size;
1101 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1102 int last_allocs, group, unit;
1103 unsigned int cpu, tcpu;
1104 struct pcpu_alloc_info *ai;
1105 unsigned int *cpu_map;
1107 /* this function may be called multiple times */
1108 memset(group_map, 0, sizeof(group_map));
1109 memset(group_cnt, 0, sizeof(group_cnt));
1111 /* calculate size_sum and ensure dyn_size is enough for early alloc */
1112 size_sum = PFN_ALIGN(static_size + reserved_size +
1113 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1114 dyn_size = size_sum - static_size - reserved_size;
1117 * Determine min_unit_size, alloc_size and max_upa such that
1118 * alloc_size is multiple of atom_size and is the smallest
1119 * which can accommodate 4k aligned segments which are equal to
1120 * or larger than min_unit_size.
1122 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1124 alloc_size = roundup(min_unit_size, atom_size);
1125 upa = alloc_size / min_unit_size;
1126 while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1127 upa--;
1128 max_upa = upa;
1130 /* group cpus according to their proximity */
1131 for_each_possible_cpu(cpu) {
1132 group = 0;
1133 next_group:
1134 for_each_possible_cpu(tcpu) {
1135 if (cpu == tcpu)
1136 break;
1137 if (group_map[tcpu] == group && cpu_distance_fn &&
1138 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1139 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1140 group++;
1141 nr_groups = max(nr_groups, group + 1);
1142 goto next_group;
1145 group_map[cpu] = group;
1146 group_cnt[group]++;
1150 * Expand unit size until address space usage goes over 75%
1151 * and then as much as possible without using more address
1152 * space.
1154 last_allocs = INT_MAX;
1155 for (upa = max_upa; upa; upa--) {
1156 int allocs = 0, wasted = 0;
1158 if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1159 continue;
1161 for (group = 0; group < nr_groups; group++) {
1162 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1163 allocs += this_allocs;
1164 wasted += this_allocs * upa - group_cnt[group];
1168 * Don't accept if wastage is over 1/3. The
1169 * greater-than comparison ensures upa==1 always
1170 * passes the following check.
1172 if (wasted > num_possible_cpus() / 3)
1173 continue;
1175 /* and then don't consume more memory */
1176 if (allocs > last_allocs)
1177 break;
1178 last_allocs = allocs;
1179 best_upa = upa;
1181 upa = best_upa;
1183 /* allocate and fill alloc_info */
1184 for (group = 0; group < nr_groups; group++)
1185 nr_units += roundup(group_cnt[group], upa);
1187 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1188 if (!ai)
1189 return ERR_PTR(-ENOMEM);
1190 cpu_map = ai->groups[0].cpu_map;
1192 for (group = 0; group < nr_groups; group++) {
1193 ai->groups[group].cpu_map = cpu_map;
1194 cpu_map += roundup(group_cnt[group], upa);
1197 ai->static_size = static_size;
1198 ai->reserved_size = reserved_size;
1199 ai->dyn_size = dyn_size;
1200 ai->unit_size = alloc_size / upa;
1201 ai->atom_size = atom_size;
1202 ai->alloc_size = alloc_size;
1204 for (group = 0, unit = 0; group_cnt[group]; group++) {
1205 struct pcpu_group_info *gi = &ai->groups[group];
1208 * Initialize base_offset as if all groups are located
1209 * back-to-back. The caller should update this to
1210 * reflect actual allocation.
1212 gi->base_offset = unit * ai->unit_size;
1214 for_each_possible_cpu(cpu)
1215 if (group_map[cpu] == group)
1216 gi->cpu_map[gi->nr_units++] = cpu;
1217 gi->nr_units = roundup(gi->nr_units, upa);
1218 unit += gi->nr_units;
1220 BUG_ON(unit != nr_units);
1222 return ai;
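/*
 * Worked example (illustrative, hypothetical numbers): with 6 possible CPUs
 * in a single group, a 64KB minimum unit size and a 2MB allocation atom,
 * alloc_size becomes 2MB and max_upa 32.  upa == 32 and upa == 16 are
 * rejected because 26 resp. 10 units would be wasted (more than
 * num_possible_cpus() / 3 == 2); upa == 8 wastes exactly 2 and is accepted,
 * giving 8 units of 256KB each for the group.
 */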
1226 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1227 * @lvl: loglevel
1228 * @ai: allocation info to dump
1230 * Print out information about @ai using loglevel @lvl.
1232 static void pcpu_dump_alloc_info(const char *lvl,
1233 const struct pcpu_alloc_info *ai)
1235 int group_width = 1, cpu_width = 1, width;
1236 char empty_str[] = "--------";
1237 int alloc = 0, alloc_end = 0;
1238 int group, v;
1239 int upa, apl; /* units per alloc, allocs per line */
1241 v = ai->nr_groups;
1242 while (v /= 10)
1243 group_width++;
1245 v = num_possible_cpus();
1246 while (v /= 10)
1247 cpu_width++;
1248 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1250 upa = ai->alloc_size / ai->unit_size;
1251 width = upa * (cpu_width + 1) + group_width + 3;
1252 apl = rounddown_pow_of_two(max(60 / width, 1));
1254 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1255 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1256 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1258 for (group = 0; group < ai->nr_groups; group++) {
1259 const struct pcpu_group_info *gi = &ai->groups[group];
1260 int unit = 0, unit_end = 0;
1262 BUG_ON(gi->nr_units % upa);
1263 for (alloc_end += gi->nr_units / upa;
1264 alloc < alloc_end; alloc++) {
1265 if (!(alloc % apl)) {
1266 printk("\n");
1267 printk("%spcpu-alloc: ", lvl);
1269 printk("[%0*d] ", group_width, group);
1271 for (unit_end += upa; unit < unit_end; unit++)
1272 if (gi->cpu_map[unit] != NR_CPUS)
1273 printk("%0*d ", cpu_width,
1274 gi->cpu_map[unit]);
1275 else
1276 printk("%s ", empty_str);
1279 printk("\n");
1283 * pcpu_setup_first_chunk - initialize the first percpu chunk
1284 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1285 * @base_addr: mapped address
1287 * Initialize the first percpu chunk which contains the kernel static
1288 * percpu area. This function is to be called from the arch percpu area
1289 * setup path.
1291 * @ai contains all information necessary to initialize the first
1292 * chunk and prime the dynamic percpu allocator.
1294 * @ai->static_size is the size of static percpu area.
1296 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1297 * reserve after the static area in the first chunk. This reserves
1298 * the first chunk such that it's available only through reserved
1299 * percpu allocation. This is primarily used to serve module percpu
1300 * static areas on architectures where the addressing model has
1301 * limited offset range for symbol relocations to guarantee module
1302 * percpu symbols fall inside the relocatable range.
1304 * @ai->dyn_size determines the number of bytes available for dynamic
1305 * allocation in the first chunk. The area between @ai->static_size +
1306 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1308 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1309 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1310 * @ai->dyn_size.
1312 * @ai->atom_size is the allocation atom size and used as alignment
1313 * for vm areas.
1315 * @ai->alloc_size is the allocation size and always multiple of
1316 * @ai->atom_size. This is larger than @ai->atom_size if
1317 * @ai->unit_size is larger than @ai->atom_size.
1319 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1320 * percpu areas. Units which should be colocated are put into the
1321 * same group. Dynamic VM areas will be allocated according to these
1322 * groupings. If @ai->nr_groups is zero, a single group containing
1323 * all units is assumed.
1325 * The caller should have mapped the first chunk at @base_addr and
1326 * copied static data to each unit.
1328 * If the first chunk ends up with both reserved and dynamic areas, it
1329 * is served by two chunks - one to serve the core static and reserved
1330 * areas and the other for the dynamic area. They share the same vm
1331 * and page map but use different area allocation maps to stay away
1332 * from each other. The latter chunk is circulated in the chunk slots
1333 * and available for dynamic allocation like any other chunks.
1335 * RETURNS:
1336 * 0 on success, -errno on failure.
1338 int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1339 void *base_addr)
1341 static char cpus_buf[4096] __initdata;
1342 static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1343 static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1344 size_t dyn_size = ai->dyn_size;
1345 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1346 struct pcpu_chunk *schunk, *dchunk = NULL;
1347 unsigned long *group_offsets;
1348 size_t *group_sizes;
1349 unsigned long *unit_off;
1350 unsigned int cpu;
1351 int *unit_map;
1352 int group, unit, i;
1354 cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1356 #define PCPU_SETUP_BUG_ON(cond) do { \
1357 if (unlikely(cond)) { \
1358 pr_emerg("PERCPU: failed to initialize, %s", #cond); \
1359 pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf); \
1360 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1361 BUG(); \
1363 } while (0)
1365 /* sanity checks */
1366 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1367 PCPU_SETUP_BUG_ON(!ai->static_size);
1368 PCPU_SETUP_BUG_ON(!base_addr);
1369 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1370 PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1371 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1372 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1373 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1375 /* process group information and build config tables accordingly */
1376 group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1377 group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1378 unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1379 unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1381 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1382 unit_map[cpu] = UINT_MAX;
1383 pcpu_first_unit_cpu = NR_CPUS;
1385 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1386 const struct pcpu_group_info *gi = &ai->groups[group];
1388 group_offsets[group] = gi->base_offset;
1389 group_sizes[group] = gi->nr_units * ai->unit_size;
1391 for (i = 0; i < gi->nr_units; i++) {
1392 cpu = gi->cpu_map[i];
1393 if (cpu == NR_CPUS)
1394 continue;
1396 PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1397 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1398 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1400 unit_map[cpu] = unit + i;
1401 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1403 if (pcpu_first_unit_cpu == NR_CPUS)
1404 pcpu_first_unit_cpu = cpu;
1405 pcpu_last_unit_cpu = cpu;
1408 pcpu_nr_units = unit;
1410 for_each_possible_cpu(cpu)
1411 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1413 /* we're done parsing the input, undefine BUG macro and dump config */
1414 #undef PCPU_SETUP_BUG_ON
1415 pcpu_dump_alloc_info(KERN_INFO, ai);
1417 pcpu_nr_groups = ai->nr_groups;
1418 pcpu_group_offsets = group_offsets;
1419 pcpu_group_sizes = group_sizes;
1420 pcpu_unit_map = unit_map;
1421 pcpu_unit_offsets = unit_off;
1423 /* determine basic parameters */
1424 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1425 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1426 pcpu_atom_size = ai->atom_size;
1427 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1428 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1431 * Allocate chunk slots. The additional last slot is for
1432 * empty chunks.
1434 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1435 pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1436 for (i = 0; i < pcpu_nr_slots; i++)
1437 INIT_LIST_HEAD(&pcpu_slot[i]);
1440 * Initialize static chunk. If reserved_size is zero, the
1441 * static chunk covers static area + dynamic allocation area
1442 * in the first chunk. If reserved_size is not zero, it
1443 * covers static area + reserved area (mostly used for module
1444 * static percpu allocation).
1446 schunk = alloc_bootmem(pcpu_chunk_struct_size);
1447 INIT_LIST_HEAD(&schunk->list);
1448 schunk->base_addr = base_addr;
1449 schunk->map = smap;
1450 schunk->map_alloc = ARRAY_SIZE(smap);
1451 schunk->immutable = true;
1452 bitmap_fill(schunk->populated, pcpu_unit_pages);
1454 if (ai->reserved_size) {
1455 schunk->free_size = ai->reserved_size;
1456 pcpu_reserved_chunk = schunk;
1457 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1458 } else {
1459 schunk->free_size = dyn_size;
1460 dyn_size = 0; /* dynamic area covered */
1462 schunk->contig_hint = schunk->free_size;
1464 schunk->map[schunk->map_used++] = -ai->static_size;
1465 if (schunk->free_size)
1466 schunk->map[schunk->map_used++] = schunk->free_size;
1468 /* init dynamic chunk if necessary */
1469 if (dyn_size) {
1470 dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1471 INIT_LIST_HEAD(&dchunk->list);
1472 dchunk->base_addr = base_addr;
1473 dchunk->map = dmap;
1474 dchunk->map_alloc = ARRAY_SIZE(dmap);
1475 dchunk->immutable = true;
1476 bitmap_fill(dchunk->populated, pcpu_unit_pages);
1478 dchunk->contig_hint = dchunk->free_size = dyn_size;
1479 dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1480 dchunk->map[dchunk->map_used++] = dchunk->free_size;
1483 /* link the first chunk in */
1484 pcpu_first_chunk = dchunk ?: schunk;
1485 pcpu_chunk_relocate(pcpu_first_chunk, -1);
1487 /* we're done */
1488 pcpu_base_addr = base_addr;
1489 return 0;
1492 const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1493 [PCPU_FC_AUTO] = "auto",
1494 [PCPU_FC_EMBED] = "embed",
1495 [PCPU_FC_PAGE] = "page",
1498 enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1500 static int __init percpu_alloc_setup(char *str)
1502 if (0)
1503 /* nada */;
1504 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1505 else if (!strcmp(str, "embed"))
1506 pcpu_chosen_fc = PCPU_FC_EMBED;
1507 #endif
1508 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1509 else if (!strcmp(str, "page"))
1510 pcpu_chosen_fc = PCPU_FC_PAGE;
1511 #endif
1512 else
1513 pr_warning("PERCPU: unknown allocator %s specified\n", str);
1515 return 0;
1517 early_param("percpu_alloc", percpu_alloc_setup);
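/*
 * Illustrative example: the first chunk allocator can be chosen on the
 * kernel command line, e.g. booting with "percpu_alloc=page" forces the
 * page-by-page first chunk on configurations that support it.
 */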
1519 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1520 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1522 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1523 * @reserved_size: the size of reserved percpu area in bytes
1524 * @dyn_size: minimum free size for dynamic allocation in bytes
1525 * @atom_size: allocation atom size
1526 * @cpu_distance_fn: callback to determine distance between cpus, optional
1527 * @alloc_fn: function to allocate percpu page
1528 * @free_fn: function to free percpu page
1530 * This is a helper to ease setting up embedded first percpu chunk and
1531 * can be called where pcpu_setup_first_chunk() is expected.
1533 * If this function is used to setup the first chunk, it is allocated
1534 * by calling @alloc_fn and used as-is without being mapped into
1535 * vmalloc area. Allocations are always whole multiples of @atom_size
1536 * aligned to @atom_size.
1538 * This enables the first chunk to piggy back on the linear physical
1539 * mapping which often uses larger page size. Please note that this
1540 * can result in very sparse cpu->unit mapping on NUMA machines thus
1541 * requiring large vmalloc address space. Don't use this allocator if
1542 * vmalloc space is not orders of magnitude larger than distances
1543 * between node memory addresses (i.e. 32-bit NUMA machines).
1545 * @dyn_size specifies the minimum dynamic area size.
1547 * If the needed size is smaller than the minimum or specified unit
1548 * size, the leftover is returned using @free_fn.
1550 * RETURNS:
1551 * 0 on success, -errno on failure.
1553 int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1554 size_t atom_size,
1555 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1556 pcpu_fc_alloc_fn_t alloc_fn,
1557 pcpu_fc_free_fn_t free_fn)
1559 void *base = (void *)ULONG_MAX;
1560 void **areas = NULL;
1561 struct pcpu_alloc_info *ai;
1562 size_t size_sum, areas_size, max_distance;
1563 int group, i, rc;
1565 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1566 cpu_distance_fn);
1567 if (IS_ERR(ai))
1568 return PTR_ERR(ai);
1570 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1571 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1573 areas = alloc_bootmem_nopanic(areas_size);
1574 if (!areas) {
1575 rc = -ENOMEM;
1576 goto out_free;
1579 /* allocate, copy and determine base address */
1580 for (group = 0; group < ai->nr_groups; group++) {
1581 struct pcpu_group_info *gi = &ai->groups[group];
1582 unsigned int cpu = NR_CPUS;
1583 void *ptr;
1585 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1586 cpu = gi->cpu_map[i];
1587 BUG_ON(cpu == NR_CPUS);
1589 /* allocate space for the whole group */
1590 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1591 if (!ptr) {
1592 rc = -ENOMEM;
1593 goto out_free_areas;
1595 areas[group] = ptr;
1597 base = min(ptr, base);
1599 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1600 if (gi->cpu_map[i] == NR_CPUS) {
1601 /* unused unit, free whole */
1602 free_fn(ptr, ai->unit_size);
1603 continue;
1605 /* copy and return the unused part */
1606 memcpy(ptr, __per_cpu_load, ai->static_size);
1607 free_fn(ptr + size_sum, ai->unit_size - size_sum);
1611 /* base address is now known, determine group base offsets */
1612 max_distance = 0;
1613 for (group = 0; group < ai->nr_groups; group++) {
1614 ai->groups[group].base_offset = areas[group] - base;
1615 max_distance = max_t(size_t, max_distance,
1616 ai->groups[group].base_offset);
1618 max_distance += ai->unit_size;
1620 /* warn if maximum distance is further than 75% of vmalloc space */
1621 if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1622 pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1623 "space 0x%lx\n",
1624 max_distance, VMALLOC_END - VMALLOC_START);
1625 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1626 /* and fail if we have fallback */
1627 rc = -EINVAL;
1628 goto out_free;
1629 #endif
1632 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1633 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1634 ai->dyn_size, ai->unit_size);
1636 rc = pcpu_setup_first_chunk(ai, base);
1637 goto out_free;
1639 out_free_areas:
1640 for (group = 0; group < ai->nr_groups; group++)
1641 free_fn(areas[group],
1642 ai->groups[group].nr_units * ai->unit_size);
1643 out_free:
1644 pcpu_free_alloc_info(ai);
1645 if (areas)
1646 free_bootmem(__pa(areas), areas_size);
1647 return rc;
1649 #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1650 !CONFIG_HAVE_SETUP_PER_CPU_AREA */
1652 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1654 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1655 * @reserved_size: the size of reserved percpu area in bytes
1656 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1657 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1658 * @populate_pte_fn: function to populate pte
1660 * This is a helper to ease setting up page-remapped first percpu
1661 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1663 * This is the basic allocator. Static percpu area is allocated
1664 * page-by-page into vmalloc area.
1666 * RETURNS:
1667 * 0 on success, -errno on failure.
1669 int __init pcpu_page_first_chunk(size_t reserved_size,
1670 pcpu_fc_alloc_fn_t alloc_fn,
1671 pcpu_fc_free_fn_t free_fn,
1672 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1674 static struct vm_struct vm;
1675 struct pcpu_alloc_info *ai;
1676 char psize_str[16];
1677 int unit_pages;
1678 size_t pages_size;
1679 struct page **pages;
1680 int unit, i, j, rc;
1682 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1684 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1685 if (IS_ERR(ai))
1686 return PTR_ERR(ai);
1687 BUG_ON(ai->nr_groups != 1);
1688 BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1690 unit_pages = ai->unit_size >> PAGE_SHIFT;
1692 /* unaligned allocations can't be freed, round up to page size */
1693 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1694 sizeof(pages[0]));
1695 pages = alloc_bootmem(pages_size);
1697 /* allocate pages */
1698 j = 0;
1699 for (unit = 0; unit < num_possible_cpus(); unit++)
1700 for (i = 0; i < unit_pages; i++) {
1701 unsigned int cpu = ai->groups[0].cpu_map[unit];
1702 void *ptr;
1704 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1705 if (!ptr) {
1706 pr_warning("PERCPU: failed to allocate %s page "
1707 "for cpu%u\n", psize_str, cpu);
1708 goto enomem;
1710 pages[j++] = virt_to_page(ptr);
1713 /* allocate vm area, map the pages and copy static data */
1714 vm.flags = VM_ALLOC;
1715 vm.size = num_possible_cpus() * ai->unit_size;
1716 vm_area_register_early(&vm, PAGE_SIZE);
1718 for (unit = 0; unit < num_possible_cpus(); unit++) {
1719 unsigned long unit_addr =
1720 (unsigned long)vm.addr + unit * ai->unit_size;
1722 for (i = 0; i < unit_pages; i++)
1723 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1725 /* pte already populated, the following shouldn't fail */
1726 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1727 unit_pages);
1728 if (rc < 0)
1729 panic("failed to map percpu area, err=%d\n", rc);
1732 /* copy static data */
1733 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1736 /* we're ready, commit */
1737 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1738 unit_pages, psize_str, vm.addr, ai->static_size,
1739 ai->reserved_size, ai->dyn_size);
1741 rc = pcpu_setup_first_chunk(ai, vm.addr);
1742 goto out_free_ar;
1744 enomem:
1745 while (--j >= 0)
1746 free_fn(page_address(pages[j]), PAGE_SIZE);
1747 rc = -ENOMEM;
1748 out_free_ar:
1749 free_bootmem(__pa(pages), pages_size);
1750 pcpu_free_alloc_info(ai);
1751 return rc;
1753 #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
1756 * Generic percpu area setup.
1758 * The embedding helper is used because its behavior closely resembles
1759 * the original non-dynamic generic percpu area setup. This is
1760 * important because many archs have addressing restrictions and might
1761 * fail if the percpu area is located far away from the previous
1762 * location. As an added bonus, in non-NUMA cases, embedding is
1763 * generally a good idea TLB-wise because percpu area can piggy back
1764 * on the physical linear memory mapping which uses large page
1765 * mappings on applicable archs.
1767 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
1768 unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1769 EXPORT_SYMBOL(__per_cpu_offset);
1771 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1772 size_t align)
1774 return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1777 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1779 free_bootmem(__pa(ptr), size);
1782 void __init setup_per_cpu_areas(void)
1784 unsigned long delta;
1785 unsigned int cpu;
1786 int rc;
1789 * Always reserve area for module percpu variables. That's
1790 * what the legacy allocator did.
1792 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1793 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1794 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1795 if (rc < 0)
1796 panic("Failed to initialized percpu areas.");
1798 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1799 for_each_possible_cpu(cpu)
1800 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1802 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1805 * First and reserved chunks are initialized with temporary allocation
1806 * map in initdata so that they can be used before slab is online.
1807 * This function is called after slab is brought up and replaces those
1808 * with properly allocated maps.
1810 void __init percpu_init_late(void)
1812 struct pcpu_chunk *target_chunks[] =
1813 { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
1814 struct pcpu_chunk *chunk;
1815 unsigned long flags;
1816 int i;
1818 for (i = 0; (chunk = target_chunks[i]); i++) {
1819 int *map;
1820 const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
1822 BUILD_BUG_ON(size > PAGE_SIZE);
1824 map = pcpu_mem_alloc(size);
1825 BUG_ON(!map);
1827 spin_lock_irqsave(&pcpu_lock, flags);
1828 memcpy(map, chunk->map, size);
1829 chunk->map = map;
1830 spin_unlock_irqrestore(&pcpu_lock, flags);