percpu: finer grained locking to break deadlock and allow atomic free
1 /*
2 * linux/mm/percpu.c - percpu memory allocator
4 * Copyright (C) 2009 SUSE Linux Products GmbH
5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
7 * This file is released under the GPLv2.
9 * This is the percpu allocator which can handle both static and dynamic
10 * areas. Percpu areas are allocated in chunks in the vmalloc area. Each
11 * chunk consists of num_possible_cpus() units and the first chunk
12 * is used for static percpu variables in the kernel image (special
13 * boot time alloc/init handling is necessary as these areas need to be
14 * brought up before allocation services are running). Units grow as
15 * necessary and all units grow or shrink in unison. When a chunk is
16 * filled up, another chunk is allocated, i.e. in the vmalloc area:
18 * c0 c1 c2
19 * ------------------- ------------------- ------------
20 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
21 * ------------------- ...... ------------------- .... ------------
23 * Allocation is done in offset-size areas of a single unit's space. I.e.,
24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
25 * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring
26 * percpu base registers UNIT_SIZE apart.
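 * As a purely illustrative example (the 64k unit size here is made
 * up), with UNIT_SIZE == 64k an area at offset 6k would be reached at
 * chunk_base + 0*64k + 6k on cpu0, chunk_base + 1*64k + 6k on cpu1
 * and so on; this is exactly what pcpu_chunk_addr(chunk, cpu, 0) + off
 * computes further down in this file.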
28 * There are usually many small percpu allocations, many of them as
29 * small as 4 bytes. The allocator organizes chunks into lists
30 * according to free size and tries to allocate from the fullest one.
31 * Each chunk keeps the maximum contiguous area size hint which is
32 * guaranteed to be equal to or larger than the maximum contiguous
33 * area in the chunk. This helps the allocator avoid iterating the
34 * chunk maps unnecessarily.
36 * Allocation state in each chunk is kept using an array of integers
37 * on chunk->map. A positive value in the map represents a free
38 * region and a negative value an allocated one. Allocation inside a chunk is done
39 * by scanning this map sequentially and serving the first matching
40 * entry. This is mostly copied from the percpu_modalloc() allocator.
41 * Chunks are also linked into an rb tree to ease address to chunk
42 * mapping during free.
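 * As a sketch (all sizes below are invented), a chunk with a 64k unit
 * that has a 1024 byte allocation at offset 0 and a 128 byte
 * allocation right after it would have map == { -1024, -128, 64384 }
 * and map_used == 3: the negative entries are the two allocated
 * regions and the trailing positive entry is the remaining free space.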
44 * To use this allocator, arch code should do the following (an illustrative sketch of the translation hooks follows this comment).
46 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 * a regular address to a percpu pointer and back
51 * - use pcpu_setup_first_chunk() during percpu area initialization to
52 * setup the first chunk containing the kernel static percpu area
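/*
 * Purely as an illustrative sketch (not part of this file), the
 * address translation hooks an arch might provide could look like the
 * following, assuming percpu pointers are kept relative to the start
 * of the static percpu section (__per_cpu_start is the usual linker
 * symbol; other schemes are possible):
 *
 *	#define __addr_to_pcpu_ptr(addr)				\
 *		(void *)((unsigned long)(addr)				\
 *			 - (unsigned long)pcpu_base_addr		\
 *			 + (unsigned long)__per_cpu_start)
 *	#define __pcpu_ptr_to_addr(ptr)					\
 *		(void *)((unsigned long)(ptr)				\
 *			 + (unsigned long)pcpu_base_addr		\
 *			 - (unsigned long)__per_cpu_start)
 */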
55 #include <linux/bitmap.h>
56 #include <linux/bootmem.h>
57 #include <linux/list.h>
58 #include <linux/mm.h>
59 #include <linux/module.h>
60 #include <linux/mutex.h>
61 #include <linux/percpu.h>
62 #include <linux/pfn.h>
63 #include <linux/rbtree.h>
64 #include <linux/slab.h>
65 #include <linux/spinlock.h>
66 #include <linux/vmalloc.h>
67 #include <linux/workqueue.h>
69 #include <asm/cacheflush.h>
70 #include <asm/tlbflush.h>
72 #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 share the same slot */
73 #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
75 struct pcpu_chunk {
76 struct list_head list; /* linked to pcpu_slot lists */
77 struct rb_node rb_node; /* key is chunk->vm->addr */
78 int free_size; /* free bytes in the chunk */
79 int contig_hint; /* max contiguous size hint */
80 struct vm_struct *vm; /* mapped vmalloc region */
81 int map_used; /* # of map entries used */
82 int map_alloc; /* # of map entries allocated */
83 int *map; /* allocation map */
84 bool immutable; /* no [de]population allowed */
85 struct page **page; /* points to page array */
86 struct page *page_ar[]; /* #cpus * UNIT_PAGES */
89 static int pcpu_unit_pages __read_mostly;
90 static int pcpu_unit_size __read_mostly;
91 static int pcpu_chunk_size __read_mostly;
92 static int pcpu_nr_slots __read_mostly;
93 static size_t pcpu_chunk_struct_size __read_mostly;
95 /* the address of the first chunk which starts with the kernel static area */
96 void *pcpu_base_addr __read_mostly;
97 EXPORT_SYMBOL_GPL(pcpu_base_addr);
99 /* optional reserved chunk, only accessible for reserved allocations */
100 static struct pcpu_chunk *pcpu_reserved_chunk;
101 /* offset limit of the reserved chunk */
102 static int pcpu_reserved_chunk_limit;
105 * Synchronization rules.
107 * There are two locks - pcpu_alloc_mutex and pcpu_lock. The former
108 * protects allocation/reclaim paths, chunks and chunk->page arrays.
109 * The latter is a spinlock and protects the index data structures -
110 * chunk slots, rbtree, chunks and area maps in chunks.
112 * During allocation, pcpu_alloc_mutex is kept locked all the time and
113 * pcpu_lock is grabbed and released as necessary. All actual memory
114 * allocations are done using GFP_KERNEL with pcpu_lock released.
116 * Free path accesses and alters only the index data structures, so it
117 * can be safely called from atomic context. When memory needs to be
118 * returned to the system, free path schedules reclaim_work which
119 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
120 * reclaimed, releases both locks and frees the chunks. Note that it's
121 * necessary to grab both locks to remove a chunk from circulation as
122 * allocation path might be referencing the chunk with only
123 * pcpu_alloc_mutex locked.
125 static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
126 static DEFINE_SPINLOCK(pcpu_lock); /* protects index data structures */
128 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
129 static struct rb_root pcpu_addr_root = RB_ROOT; /* chunks by address */
131 /* reclaim work to release fully free chunks, scheduled from free path */
132 static void pcpu_reclaim(struct work_struct *work);
133 static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
135 static int __pcpu_size_to_slot(int size)
137 int highbit = fls(size); /* size is in bytes */
138 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
141 static int pcpu_size_to_slot(int size)
143 if (size == pcpu_unit_size)
144 return pcpu_nr_slots - 1;
145 return __pcpu_size_to_slot(size);
148 static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
150 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
151 return 0;
153 return pcpu_size_to_slot(chunk->free_size);
156 static int pcpu_page_idx(unsigned int cpu, int page_idx)
158 return cpu * pcpu_unit_pages + page_idx;
161 static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
162 unsigned int cpu, int page_idx)
164 return &chunk->page[pcpu_page_idx(cpu, page_idx)];
167 static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
168 unsigned int cpu, int page_idx)
170 return (unsigned long)chunk->vm->addr +
171 (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
174 static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
175 int page_idx)
177 return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
181 * pcpu_mem_alloc - allocate memory
182 * @size: bytes to allocate
184 * Allocate @size bytes. If @size is no larger than PAGE_SIZE,
185 * kzalloc() is used; otherwise, vmalloc() is used. The returned
186 * memory is always zeroed.
188 * CONTEXT:
189 * Does GFP_KERNEL allocation.
191 * RETURNS:
192 * Pointer to the allocated area on success, NULL on failure.
194 static void *pcpu_mem_alloc(size_t size)
196 if (size <= PAGE_SIZE)
197 return kzalloc(size, GFP_KERNEL);
198 else {
199 void *ptr = vmalloc(size);
200 if (ptr)
201 memset(ptr, 0, size);
202 return ptr;
207 * pcpu_mem_free - free memory
208 * @ptr: memory to free
209 * @size: size of the area
211 * Free @ptr. @ptr should have been allocated using pcpu_mem_alloc().
213 static void pcpu_mem_free(void *ptr, size_t size)
215 if (size <= PAGE_SIZE)
216 kfree(ptr);
217 else
218 vfree(ptr);
222 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
223 * @chunk: chunk of interest
224 * @oslot: the previous slot it was on
226 * This function is called after an allocation or free has changed @chunk.
227 * The new slot is determined from the changed state and @chunk is
228 * moved to that slot. Note that the reserved chunk is never put on
229 * chunk slots.
231 * CONTEXT:
232 * pcpu_lock.
234 static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
236 int nslot = pcpu_chunk_slot(chunk);
238 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
239 if (oslot < nslot)
240 list_move(&chunk->list, &pcpu_slot[nslot]);
241 else
242 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
246 static struct rb_node **pcpu_chunk_rb_search(void *addr,
247 struct rb_node **parentp)
249 struct rb_node **p = &pcpu_addr_root.rb_node;
250 struct rb_node *parent = NULL;
251 struct pcpu_chunk *chunk;
253 while (*p) {
254 parent = *p;
255 chunk = rb_entry(parent, struct pcpu_chunk, rb_node);
257 if (addr < chunk->vm->addr)
258 p = &(*p)->rb_left;
259 else if (addr > chunk->vm->addr)
260 p = &(*p)->rb_right;
261 else
262 break;
265 if (parentp)
266 *parentp = parent;
267 return p;
271 * pcpu_chunk_addr_search - search for chunk containing specified address
272 * @addr: address to search for
274 * Look for the chunk which might contain @addr. More specifically, it
275 * searches for the chunk with the highest start address which isn't
276 * beyond @addr.
278 * CONTEXT:
279 * pcpu_lock.
281 * RETURNS:
282 * The address of the found chunk.
284 static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
286 struct rb_node *n, *parent;
287 struct pcpu_chunk *chunk;
289 /* is it in the reserved chunk? */
290 if (pcpu_reserved_chunk) {
291 void *start = pcpu_reserved_chunk->vm->addr;
293 if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
294 return pcpu_reserved_chunk;
297 /* nah... search the regular ones */
298 n = *pcpu_chunk_rb_search(addr, &parent);
299 if (!n) {
300 /* no exactly matching chunk, the parent is the closest */
301 n = parent;
302 BUG_ON(!n);
304 chunk = rb_entry(n, struct pcpu_chunk, rb_node);
306 if (addr < chunk->vm->addr) {
307 /* the parent was the next one, look for the previous one */
308 n = rb_prev(n);
309 BUG_ON(!n);
310 chunk = rb_entry(n, struct pcpu_chunk, rb_node);
313 return chunk;
317 * pcpu_chunk_addr_insert - insert chunk into address rb tree
318 * @new: chunk to insert
320 * Insert @new into address rb tree.
322 * CONTEXT:
323 * pcpu_lock.
325 static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
327 struct rb_node **p, *parent;
329 p = pcpu_chunk_rb_search(new->vm->addr, &parent);
330 BUG_ON(*p);
331 rb_link_node(&new->rb_node, parent, p);
332 rb_insert_color(&new->rb_node, &pcpu_addr_root);
336 * pcpu_extend_area_map - extend area map for allocation
337 * @chunk: target chunk
339 * Extend the area map of @chunk so that it can accommodate an allocation.
340 * A single allocation can split an area into three areas, so this
341 * function makes sure that @chunk->map has at least two extra slots.
343 * CONTEXT:
344 * pcpu_alloc_mutex, pcpu_lock. pcpu_lock is released and reacquired
345 * if area map is extended.
347 * RETURNS:
348 * 0 if noop, 1 if successfully extended, -errno on failure.
350 static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
352 int new_alloc;
353 int *new;
354 size_t size;
356 /* has enough? */
357 if (chunk->map_alloc >= chunk->map_used + 2)
358 return 0;
360 spin_unlock_irq(&pcpu_lock);
362 new_alloc = PCPU_DFL_MAP_ALLOC;
363 while (new_alloc < chunk->map_used + 2)
364 new_alloc *= 2;
366 new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
367 if (!new) {
368 spin_lock_irq(&pcpu_lock);
369 return -ENOMEM;
373 * Acquire pcpu_lock and switch to the new area map. Only frees
374 * could have happened in between, so map_used couldn't have
375 * grown.
377 spin_lock_irq(&pcpu_lock);
378 BUG_ON(new_alloc < chunk->map_used + 2);
380 size = chunk->map_alloc * sizeof(chunk->map[0]);
381 memcpy(new, chunk->map, size);
384 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
385 * one of the first chunks and is still using the static map.
387 if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
388 pcpu_mem_free(chunk->map, size);
390 chunk->map_alloc = new_alloc;
391 chunk->map = new;
392 return 0;
396 * pcpu_split_block - split a map block
397 * @chunk: chunk of interest
398 * @i: index of map block to split
399 * @head: head size in bytes (can be 0)
400 * @tail: tail size in bytes (can be 0)
402 * Split the @i'th map block into two or three blocks. If @head is
403 * non-zero, a block of @head bytes is inserted before block @i, moving it
404 * to @i+1 and reducing its size by @head bytes.
406 * If @tail is non-zero, the target block, which can be @i or @i+1
407 * depending on @head, is reduced by @tail bytes and a block of @tail bytes
408 * is inserted after the target block.
410 * @chunk->map must have enough free slots to accommodate the split.
412 * CONTEXT:
413 * pcpu_lock.
415 static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
416 int head, int tail)
418 int nr_extra = !!head + !!tail;
420 BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
422 /* insert new subblocks */
423 memmove(&chunk->map[i + nr_extra], &chunk->map[i],
424 sizeof(chunk->map[0]) * (chunk->map_used - i));
425 chunk->map_used += nr_extra;
427 if (head) {
428 chunk->map[i + 1] = chunk->map[i] - head;
429 chunk->map[i++] = head;
431 if (tail) {
432 chunk->map[i++] -= tail;
433 chunk->map[i] = tail;
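/*
 * For illustration (the numbers are invented): splitting a 2048 byte
 * free block at index i with head == 64 and tail == 512 turns
 * map[i] == 2048 into map[i] == 64, map[i+1] == 1472 and
 * map[i+2] == 512; the caller then marks the middle entry allocated.
 */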
438 * pcpu_alloc_area - allocate area from a pcpu_chunk
439 * @chunk: chunk of interest
440 * @size: wanted size in bytes
441 * @align: wanted align
443 * Try to allocate @size bytes area aligned at @align from @chunk.
444 * Note that this function only allocates the offset. It doesn't
445 * populate or map the area.
447 * @chunk->map must have at least two free slots.
449 * CONTEXT:
450 * pcpu_lock.
452 * RETURNS:
453 * Allocated offset in @chunk on success, -1 if no matching area is
454 * found.
456 static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
458 int oslot = pcpu_chunk_slot(chunk);
459 int max_contig = 0;
460 int i, off;
462 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
463 bool is_last = i + 1 == chunk->map_used;
464 int head, tail;
466 /* extra for alignment requirement */
467 head = ALIGN(off, align) - off;
468 BUG_ON(i == 0 && head != 0);
470 if (chunk->map[i] < 0)
471 continue;
472 if (chunk->map[i] < head + size) {
473 max_contig = max(chunk->map[i], max_contig);
474 continue;
478 * If head is small or the previous block is free,
479 * merge them. Note that 'small' is defined as smaller
480 * than sizeof(int), which is very small but isn't too
481 * uncommon for percpu allocations.
483 if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
484 if (chunk->map[i - 1] > 0)
485 chunk->map[i - 1] += head;
486 else {
487 chunk->map[i - 1] -= head;
488 chunk->free_size -= head;
490 chunk->map[i] -= head;
491 off += head;
492 head = 0;
495 /* if tail is small, just keep it around */
496 tail = chunk->map[i] - head - size;
497 if (tail < sizeof(int))
498 tail = 0;
500 /* split if warranted */
501 if (head || tail) {
502 pcpu_split_block(chunk, i, head, tail);
503 if (head) {
504 i++;
505 off += head;
506 max_contig = max(chunk->map[i - 1], max_contig);
508 if (tail)
509 max_contig = max(chunk->map[i + 1], max_contig);
512 /* update hint and mark allocated */
513 if (is_last)
514 chunk->contig_hint = max_contig; /* fully scanned */
515 else
516 chunk->contig_hint = max(chunk->contig_hint,
517 max_contig);
519 chunk->free_size -= chunk->map[i];
520 chunk->map[i] = -chunk->map[i];
522 pcpu_chunk_relocate(chunk, oslot);
523 return off;
526 chunk->contig_hint = max_contig; /* fully scanned */
527 pcpu_chunk_relocate(chunk, oslot);
529 /* tell the upper layer that this chunk has no matching area */
530 return -1;
534 * pcpu_free_area - free area to a pcpu_chunk
535 * @chunk: chunk of interest
536 * @freeme: offset of area to free
538 * Free the area starting at @freeme in @chunk. Note that this function
539 * only modifies the allocation map. It doesn't depopulate or unmap
540 * the area.
542 * CONTEXT:
543 * pcpu_lock.
545 static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
547 int oslot = pcpu_chunk_slot(chunk);
548 int i, off;
550 for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
551 if (off == freeme)
552 break;
553 BUG_ON(off != freeme);
554 BUG_ON(chunk->map[i] > 0);
556 chunk->map[i] = -chunk->map[i];
557 chunk->free_size += chunk->map[i];
559 /* merge with previous? */
560 if (i > 0 && chunk->map[i - 1] >= 0) {
561 chunk->map[i - 1] += chunk->map[i];
562 chunk->map_used--;
563 memmove(&chunk->map[i], &chunk->map[i + 1],
564 (chunk->map_used - i) * sizeof(chunk->map[0]));
565 i--;
567 /* merge with next? */
568 if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
569 chunk->map[i] += chunk->map[i + 1];
570 chunk->map_used--;
571 memmove(&chunk->map[i + 1], &chunk->map[i + 2],
572 (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
575 chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
576 pcpu_chunk_relocate(chunk, oslot);
580 * pcpu_unmap - unmap pages out of a pcpu_chunk
581 * @chunk: chunk of interest
582 * @page_start: page index of the first page to unmap
583 * @page_end: page index of the last page to unmap + 1
584 * @flush: whether to flush cache and tlb or not
586 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
587 * If @flush is true, the vcache is flushed before unmapping and the
588 * TLB afterwards.
590 static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
591 bool flush)
593 unsigned int last = num_possible_cpus() - 1;
594 unsigned int cpu;
596 /* unmap must not be done on immutable chunk */
597 WARN_ON(chunk->immutable);
600 * Each flush can be very expensive, so issue the flush on
601 * the whole region at once rather than doing it for each cpu.
602 * This could be overkill but is more scalable.
604 if (flush)
605 flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
606 pcpu_chunk_addr(chunk, last, page_end));
608 for_each_possible_cpu(cpu)
609 unmap_kernel_range_noflush(
610 pcpu_chunk_addr(chunk, cpu, page_start),
611 (page_end - page_start) << PAGE_SHIFT);
613 /* ditto as flush_cache_vunmap() */
614 if (flush)
615 flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
616 pcpu_chunk_addr(chunk, last, page_end));
620 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
621 * @chunk: chunk to depopulate
622 * @off: offset to the area to depopulate
623 * @size: size of the area to depopulate in bytes
624 * @flush: whether to flush cache and tlb or not
626 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
627 * from @chunk. If @flush is true, the vcache is flushed before
628 * unmapping and the TLB afterwards.
630 * CONTEXT:
631 * pcpu_alloc_mutex.
633 static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
634 bool flush)
636 int page_start = PFN_DOWN(off);
637 int page_end = PFN_UP(off + size);
638 int unmap_start = -1;
639 int uninitialized_var(unmap_end);
640 unsigned int cpu;
641 int i;
643 for (i = page_start; i < page_end; i++) {
644 for_each_possible_cpu(cpu) {
645 struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
647 if (!*pagep)
648 continue;
650 __free_page(*pagep);
653 * If it's partial depopulation, it might get
654 * populated or depopulated again. Mark the
655 * page gone.
657 *pagep = NULL;
659 unmap_start = unmap_start < 0 ? i : unmap_start;
660 unmap_end = i + 1;
664 if (unmap_start >= 0)
665 pcpu_unmap(chunk, unmap_start, unmap_end, flush);
669 * pcpu_map - map pages into a pcpu_chunk
670 * @chunk: chunk of interest
671 * @page_start: page index of the first page to map
672 * @page_end: page index of the last page to map + 1
674 * For each cpu, map pages [@page_start,@page_end) into @chunk.
675 * vcache is flushed afterwards.
677 static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
679 unsigned int last = num_possible_cpus() - 1;
680 unsigned int cpu;
681 int err;
683 /* map must not be done on immutable chunk */
684 WARN_ON(chunk->immutable);
686 for_each_possible_cpu(cpu) {
687 err = map_kernel_range_noflush(
688 pcpu_chunk_addr(chunk, cpu, page_start),
689 (page_end - page_start) << PAGE_SHIFT,
690 PAGE_KERNEL,
691 pcpu_chunk_pagep(chunk, cpu, page_start));
692 if (err < 0)
693 return err;
696 /* flush at once, please read comments in pcpu_unmap() */
697 flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
698 pcpu_chunk_addr(chunk, last, page_end));
699 return 0;
703 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
704 * @chunk: chunk of interest
705 * @off: offset to the area to populate
706 * @size: size of the area to populate in bytes
708 * For each cpu, populate and map pages [@page_start,@page_end) into
709 * @chunk. The area is cleared on return.
711 * CONTEXT:
712 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
714 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
716 const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
717 int page_start = PFN_DOWN(off);
718 int page_end = PFN_UP(off + size);
719 int map_start = -1;
720 int uninitialized_var(map_end);
721 unsigned int cpu;
722 int i;
724 for (i = page_start; i < page_end; i++) {
725 if (pcpu_chunk_page_occupied(chunk, i)) {
726 if (map_start >= 0) {
727 if (pcpu_map(chunk, map_start, map_end))
728 goto err;
729 map_start = -1;
731 continue;
734 map_start = map_start < 0 ? i : map_start;
735 map_end = i + 1;
737 for_each_possible_cpu(cpu) {
738 struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
740 *pagep = alloc_pages_node(cpu_to_node(cpu),
741 alloc_mask, 0);
742 if (!*pagep)
743 goto err;
747 if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
748 goto err;
750 for_each_possible_cpu(cpu)
751 memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
752 size);
754 return 0;
755 err:
756 /* likely under heavy memory pressure, give memory back */
757 pcpu_depopulate_chunk(chunk, off, size, true);
758 return -ENOMEM;
761 static void free_pcpu_chunk(struct pcpu_chunk *chunk)
763 if (!chunk)
764 return;
765 if (chunk->vm)
766 free_vm_area(chunk->vm);
767 pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
768 kfree(chunk);
771 static struct pcpu_chunk *alloc_pcpu_chunk(void)
773 struct pcpu_chunk *chunk;
775 chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
776 if (!chunk)
777 return NULL;
779 chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
780 chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
781 chunk->map[chunk->map_used++] = pcpu_unit_size;
782 chunk->page = chunk->page_ar;
784 chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
785 if (!chunk->vm) {
786 free_pcpu_chunk(chunk);
787 return NULL;
790 INIT_LIST_HEAD(&chunk->list);
791 chunk->free_size = pcpu_unit_size;
792 chunk->contig_hint = pcpu_unit_size;
794 return chunk;
798 * pcpu_alloc - the percpu allocator
799 * @size: size of area to allocate in bytes
800 * @align: alignment of area (max PAGE_SIZE)
801 * @reserved: allocate from the reserved chunk if available
803 * Allocate percpu area of @size bytes aligned at @align.
805 * CONTEXT:
806 * Does GFP_KERNEL allocation.
808 * RETURNS:
809 * Percpu pointer to the allocated area on success, NULL on failure.
811 static void *pcpu_alloc(size_t size, size_t align, bool reserved)
813 struct pcpu_chunk *chunk;
814 int slot, off;
816 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
817 WARN(true, "illegal size (%zu) or align (%zu) for "
818 "percpu allocation\n", size, align);
819 return NULL;
822 mutex_lock(&pcpu_alloc_mutex);
823 spin_lock_irq(&pcpu_lock);
825 /* serve reserved allocations from the reserved chunk if available */
826 if (reserved && pcpu_reserved_chunk) {
827 chunk = pcpu_reserved_chunk;
828 if (size > chunk->contig_hint ||
829 pcpu_extend_area_map(chunk) < 0)
830 goto fail_unlock;
831 off = pcpu_alloc_area(chunk, size, align);
832 if (off >= 0)
833 goto area_found;
834 goto fail_unlock;
837 restart:
838 /* search through normal chunks */
839 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
840 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
841 if (size > chunk->contig_hint)
842 continue;
844 switch (pcpu_extend_area_map(chunk)) {
845 case 0:
846 break;
847 case 1:
848 goto restart; /* pcpu_lock dropped, restart */
849 default:
850 goto fail_unlock;
853 off = pcpu_alloc_area(chunk, size, align);
854 if (off >= 0)
855 goto area_found;
859 /* hmmm... no space left, create a new chunk */
860 spin_unlock_irq(&pcpu_lock);
862 chunk = alloc_pcpu_chunk();
863 if (!chunk)
864 goto fail_unlock_mutex;
866 spin_lock_irq(&pcpu_lock);
867 pcpu_chunk_relocate(chunk, -1);
868 pcpu_chunk_addr_insert(chunk);
869 goto restart;
871 area_found:
872 spin_unlock_irq(&pcpu_lock);
874 /* populate, map and clear the area */
875 if (pcpu_populate_chunk(chunk, off, size)) {
876 spin_lock_irq(&pcpu_lock);
877 pcpu_free_area(chunk, off);
878 goto fail_unlock;
881 mutex_unlock(&pcpu_alloc_mutex);
883 return __addr_to_pcpu_ptr(chunk->vm->addr + off);
885 fail_unlock:
886 spin_unlock_irq(&pcpu_lock);
887 fail_unlock_mutex:
888 mutex_unlock(&pcpu_alloc_mutex);
889 return NULL;
893 * __alloc_percpu - allocate dynamic percpu area
894 * @size: size of area to allocate in bytes
895 * @align: alignment of area (max PAGE_SIZE)
897 * Allocate percpu area of @size bytes aligned at @align. Might
898 * sleep. Might trigger writeouts.
900 * CONTEXT:
901 * Does GFP_KERNEL allocation.
903 * RETURNS:
904 * Percpu pointer to the allocated area on success, NULL on failure.
906 void *__alloc_percpu(size_t size, size_t align)
908 return pcpu_alloc(size, align, false);
910 EXPORT_SYMBOL_GPL(__alloc_percpu);
913 * __alloc_reserved_percpu - allocate reserved percpu area
914 * @size: size of area to allocate in bytes
915 * @align: alignment of area (max PAGE_SIZE)
917 * Allocate percpu area of @size bytes aligned at @align from reserved
918 * percpu area if arch has set it up; otherwise, allocation is served
919 * from the same dynamic area. Might sleep. Might trigger writeouts.
921 * CONTEXT:
922 * Does GFP_KERNEL allocation.
924 * RETURNS:
925 * Percpu pointer to the allocated area on success, NULL on failure.
927 void *__alloc_reserved_percpu(size_t size, size_t align)
929 return pcpu_alloc(size, align, true);
933 * pcpu_reclaim - reclaim fully free chunks, workqueue function
934 * @work: unused
936 * Reclaim all fully free chunks except for the first one.
938 * CONTEXT:
939 * workqueue context.
941 static void pcpu_reclaim(struct work_struct *work)
943 LIST_HEAD(todo);
944 struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
945 struct pcpu_chunk *chunk, *next;
947 mutex_lock(&pcpu_alloc_mutex);
948 spin_lock_irq(&pcpu_lock);
950 list_for_each_entry_safe(chunk, next, head, list) {
951 WARN_ON(chunk->immutable);
953 /* spare the first one */
954 if (chunk == list_first_entry(head, struct pcpu_chunk, list))
955 continue;
957 rb_erase(&chunk->rb_node, &pcpu_addr_root);
958 list_move(&chunk->list, &todo);
961 spin_unlock_irq(&pcpu_lock);
962 mutex_unlock(&pcpu_alloc_mutex);
964 list_for_each_entry_safe(chunk, next, &todo, list) {
965 pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
966 free_pcpu_chunk(chunk);
971 * free_percpu - free percpu area
972 * @ptr: pointer to area to free
974 * Free percpu area @ptr.
976 * CONTEXT:
977 * Can be called from atomic context.
979 void free_percpu(void *ptr)
981 void *addr = __pcpu_ptr_to_addr(ptr);
982 struct pcpu_chunk *chunk;
983 unsigned long flags;
984 int off;
986 if (!ptr)
987 return;
989 spin_lock_irqsave(&pcpu_lock, flags);
991 chunk = pcpu_chunk_addr_search(addr);
992 off = addr - chunk->vm->addr;
994 pcpu_free_area(chunk, off);
996 /* if there is more than one fully free chunk, wake up the grim reaper */
997 if (chunk->free_size == pcpu_unit_size) {
998 struct pcpu_chunk *pos;
1000 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1001 if (pos != chunk) {
1002 schedule_work(&pcpu_reclaim_work);
1003 break;
1007 spin_unlock_irqrestore(&pcpu_lock, flags);
1009 EXPORT_SYMBOL_GPL(free_percpu);
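/*
 * A minimal usage sketch of the dynamic interface above (error
 * handling and surrounding context are omitted; summing the counters
 * is just an example). Note that the final free_percpu() would also
 * be legal from atomic context:
 *
 *	int *ctrs = __alloc_percpu(sizeof(int), __alignof__(int));
 *	int cpu, sum = 0;
 *
 *	if (!ctrs)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(ctrs, cpu);
 *	free_percpu(ctrs);
 */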
1012 * pcpu_setup_first_chunk - initialize the first percpu chunk
1013 * @get_page_fn: callback to fetch page pointer
1014 * @static_size: the size of static percpu area in bytes
1015 * @reserved_size: the size of reserved percpu area in bytes
1016 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
1017 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1018 * @base_addr: mapped address, NULL for auto
1019 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
1021 * Initialize the first percpu chunk which contains the kernel static
1022 * percpu area. This function is to be called from the arch percpu area
1023 * setup path. The first two parameters are mandatory. The rest are
1024 * optional.
1026 * @get_page_fn() should return a pointer to the percpu page given a cpu
1027 * number and a page number. It should at least return enough pages to
1028 * cover the static area. The returned pages for static area should
1029 * have been initialized with valid data. If @unit_size is specified,
1030 * it can also return pages after the static area. NULL return
1031 * indicates end of pages for the cpu. Note that @get_page_fn() must
1032 * return the same number of pages for all cpus.
1034 * @reserved_size, if non-zero, specifies the amount of bytes to
1035 * reserve after the static area in the first chunk. This reserves
1036 * the first chunk such that it's available only through reserved
1037 * percpu allocation. This is primarily used to serve module percpu
1038 * static areas on architectures where the addressing model has
1039 * limited offset range for symbol relocations to guarantee module
1040 * percpu symbols fall inside the relocatable range.
1042 * @unit_size, if non-negative, specifies unit size and must be
1043 * aligned to PAGE_SIZE and equal to or larger than @static_size +
1044 * @reserved_size + @dyn_size.
1046 * @dyn_size, if non-negative, limits the number of bytes available
1047 * for dynamic allocation in the first chunk. Specifying a non-negative
1048 * value makes percpu leave the area beyond @static_size +
1049 * @reserved_size + @dyn_size alone.
1051 * Non-null @base_addr means that the caller already allocated virtual
1052 * region for the first chunk and mapped it. percpu must not mess
1053 * with the chunk. Note that @base_addr with 0 @unit_size or non-NULL
1054 * @populate_pte_fn doesn't make any sense.
1056 * @populate_pte_fn is used to populate the pagetable. NULL means the
1057 * caller already populated the pagetable.
1059 * If the first chunk ends up with both reserved and dynamic areas, it
1060 * is served by two chunks - one to serve the core static and reserved
1061 * areas and the other for the dynamic area. They share the same vm
1062 * and page map but use different area allocation maps to stay away
1063 * from each other. The latter chunk is circulated in the chunk slots
1064 * and is available for dynamic allocation like any other chunk.
1066 * RETURNS:
1067 * The determined pcpu_unit_size which can be used to initialize
1068 * percpu access.
1070 size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
1071 size_t static_size, size_t reserved_size,
1072 ssize_t unit_size, ssize_t dyn_size,
1073 void *base_addr,
1074 pcpu_populate_pte_fn_t populate_pte_fn)
1076 static struct vm_struct first_vm;
1077 static int smap[2], dmap[2];
1078 struct pcpu_chunk *schunk, *dchunk = NULL;
1079 unsigned int cpu;
1080 int nr_pages;
1081 int err, i;
1083 /* sanity checks */
1084 BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
1085 ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
1086 BUG_ON(!static_size);
1087 if (unit_size >= 0) {
1088 BUG_ON(unit_size < static_size + reserved_size +
1089 (dyn_size >= 0 ? dyn_size : 0));
1090 BUG_ON(unit_size & ~PAGE_MASK);
1091 } else {
1092 BUG_ON(dyn_size >= 0);
1093 BUG_ON(base_addr);
1095 BUG_ON(base_addr && populate_pte_fn);
1097 if (unit_size >= 0)
1098 pcpu_unit_pages = unit_size >> PAGE_SHIFT;
1099 else
1100 pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
1101 PFN_UP(static_size + reserved_size));
1103 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1104 pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
1105 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
1106 + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);
1108 if (dyn_size < 0)
1109 dyn_size = pcpu_unit_size - static_size - reserved_size;
1112 * Allocate chunk slots. The additional last slot is for
1113 * empty chunks.
1115 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1116 pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1117 for (i = 0; i < pcpu_nr_slots; i++)
1118 INIT_LIST_HEAD(&pcpu_slot[i]);
1121 * Initialize static chunk. If reserved_size is zero, the
1122 * static chunk covers static area + dynamic allocation area
1123 * in the first chunk. If reserved_size is not zero, it
1124 * covers static area + reserved area (mostly used for module
1125 * static percpu allocation).
1127 schunk = alloc_bootmem(pcpu_chunk_struct_size);
1128 INIT_LIST_HEAD(&schunk->list);
1129 schunk->vm = &first_vm;
1130 schunk->map = smap;
1131 schunk->map_alloc = ARRAY_SIZE(smap);
1132 schunk->page = schunk->page_ar;
1134 if (reserved_size) {
1135 schunk->free_size = reserved_size;
1136 pcpu_reserved_chunk = schunk; /* not for dynamic alloc */
1137 } else {
1138 schunk->free_size = dyn_size;
1139 dyn_size = 0; /* dynamic area covered */
1141 schunk->contig_hint = schunk->free_size;
1143 schunk->map[schunk->map_used++] = -static_size;
1144 if (schunk->free_size)
1145 schunk->map[schunk->map_used++] = schunk->free_size;
1147 pcpu_reserved_chunk_limit = static_size + schunk->free_size;
1149 /* init dynamic chunk if necessary */
1150 if (dyn_size) {
1151 dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
1152 INIT_LIST_HEAD(&dchunk->list);
1153 dchunk->vm = &first_vm;
1154 dchunk->map = dmap;
1155 dchunk->map_alloc = ARRAY_SIZE(dmap);
1156 dchunk->page = schunk->page_ar; /* share page map with schunk */
1158 dchunk->contig_hint = dchunk->free_size = dyn_size;
1159 dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1160 dchunk->map[dchunk->map_used++] = dchunk->free_size;
1163 /* allocate vm address */
1164 first_vm.flags = VM_ALLOC;
1165 first_vm.size = pcpu_chunk_size;
1167 if (!base_addr)
1168 vm_area_register_early(&first_vm, PAGE_SIZE);
1169 else {
1171 * Pages already mapped. No need to remap into
1172 * vmalloc area. In this case the first chunks can't
1173 * be mapped or unmapped by percpu and are marked
1174 * immutable.
1176 first_vm.addr = base_addr;
1177 schunk->immutable = true;
1178 if (dchunk)
1179 dchunk->immutable = true;
1182 /* assign pages */
1183 nr_pages = -1;
1184 for_each_possible_cpu(cpu) {
1185 for (i = 0; i < pcpu_unit_pages; i++) {
1186 struct page *page = get_page_fn(cpu, i);
1188 if (!page)
1189 break;
1190 *pcpu_chunk_pagep(schunk, cpu, i) = page;
1193 BUG_ON(i < PFN_UP(static_size));
1195 if (nr_pages < 0)
1196 nr_pages = i;
1197 else
1198 BUG_ON(nr_pages != i);
1201 /* map them */
1202 if (populate_pte_fn) {
1203 for_each_possible_cpu(cpu)
1204 for (i = 0; i < nr_pages; i++)
1205 populate_pte_fn(pcpu_chunk_addr(schunk,
1206 cpu, i));
1208 err = pcpu_map(schunk, 0, nr_pages);
1209 if (err)
1210 panic("failed to setup static percpu area, err=%d\n",
1211 err);
1214 /* link the first chunk in */
1215 if (!dchunk) {
1216 pcpu_chunk_relocate(schunk, -1);
1217 pcpu_chunk_addr_insert(schunk);
1218 } else {
1219 pcpu_chunk_relocate(dchunk, -1);
1220 pcpu_chunk_addr_insert(dchunk);
1223 /* we're done */
1224 pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
1225 return pcpu_unit_size;
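/*
 * To tie the pieces together, a deliberately simplified sketch of an
 * arch setup path; nr_static_pages, static_pages[] and
 * pcpu_populate_pte() are hypothetical stand-ins for whatever the
 * arch actually uses, and the returned unit size would then be used
 * to program each cpu's percpu base offset:
 *
 *	static struct page *pcpu_get_page(unsigned int cpu, int pageno)
 *	{
 *		if (pageno >= nr_static_pages)
 *			return NULL;
 *		return static_pages[cpu * nr_static_pages + pageno];
 *	}
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		size_t unit_size;
 *
 *		unit_size = pcpu_setup_first_chunk(pcpu_get_page,
 *				__per_cpu_end - __per_cpu_start,
 *				PERCPU_MODULE_RESERVE, -1, -1, NULL,
 *				pcpu_populate_pte);
 *	}
 *
 * The call above requests an automatically sized unit (-1 for both
 * @unit_size and @dyn_size) and lets percpu allocate and map the
 * vmalloc region itself (NULL @base_addr), which is why a
 * @populate_pte_fn is supplied.
 */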