/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is a percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks in vmalloc area.  Each
 * chunk consists of num_possible_cpus() units and the first chunk is
 * used for static percpu variables in the kernel image (special boot
 * time alloc/init handling is necessary as these areas need to be
 * brought up before allocation services are running).  Units grow as
 * necessary and all units grow or shrink in unison.  When a chunk is
 * filled up, another chunk is allocated, i.e. in vmalloc area:
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of a single unit space, i.e.
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
 * percpu base registers UNIT_SIZE apart.
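 *
 * Put concretely (illustrative formula, not quoted from elsewhere in
 * this file): cpu k's copy of an area at offset @off in a chunk lives
 * at chunk_base + k * UNIT_SIZE + off, so offsetting cpu k's base
 * register by k * UNIT_SIZE redirects every percpu access of that
 * cpu at once.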
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks are also linked into an rb
 * tree to ease address-to-chunk mapping during free.
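 *
 * For example (illustrative values), a map of { -128, 512, -64 }
 * describes a unit whose first 128 bytes are allocated, followed by
 * 512 free bytes and a final allocated area of 64 bytes.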
 *
 * To use this allocator, arch code should do the following (see the
 * sketch after this comment for an illustration):
 *
 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
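
/*
 * A minimal arch-side sketch of the above, assuming hypothetical
 * names (my_get_page() and pcpu_static_pages[][] are made up; real
 * arch code differs):
 *
 *	static struct page *my_get_page(unsigned int cpu, int pageno)
 *	{
 *		return pcpu_static_pages[cpu][pageno];
 *	}
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		size_t static_size = __per_cpu_end - __per_cpu_start;
 *		size_t unit_size;
 *
 *		unit_size = pcpu_setup_first_chunk(my_get_page, static_size,
 *						   0, -1, -1, NULL, NULL);
 *		... configure percpu base registers unit_size apart ...
 *	}
 */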

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	struct rb_node		rb_node;	/* key is chunk->vm->addr */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		**page;		/* points to page array */
	struct page		*page_ar[];	/* #cpus * UNIT_PAGES */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/* optional reserved chunk, only accessible for reserved allocations */
static struct pcpu_chunk *pcpu_reserved_chunk;

/* offset limit of the reserved chunk */
static int pcpu_reserved_chunk_limit;

/*
 * One mutex to rule them all.
 *
 * The following mutex is grabbed in the outermost public alloc/free
 * interface functions and released only when the operation is
 * complete.  As such, every function in this file other than the
 * outermost ones is called under pcpu_mutex.
 *
 * It can easily be switched to use spinlock such that only the area
 * allocation and page population commit are protected with it, doing
 * actual [de]allocation without holding any lock.  However, given
 * what this allocator does, I think it's better to let them run
 * sequentially.
 */
static DEFINE_MUTEX(pcpu_mutex);

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}
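
/*
 * For example (illustrative arithmetic only): a 64 byte area has
 * fls(64) == 7 and therefore lands in slot max(7 - 5 + 2, 1) == 4,
 * while a 2048 byte area has fls(2048) == 12 and lands in slot 9.
 */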

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}

static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}

/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

static struct rb_node **pcpu_chunk_rb_search(void *addr,
					     struct rb_node **parentp)
{
	struct rb_node **p = &pcpu_addr_root.rb_node;
	struct rb_node *parent = NULL;
	struct pcpu_chunk *chunk;

	while (*p) {
		parent = *p;
		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);

		if (addr < chunk->vm->addr)
			p = &(*p)->rb_left;
		else if (addr > chunk->vm->addr)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * pcpu_chunk_addr_search - search for chunk containing specified address
 * @addr: address to search for
 *
 * Look for chunk which might contain @addr.  More specifically, it
 * searches for the chunk with the highest start address which isn't
 * beyond @addr.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	struct rb_node *n, *parent;
	struct pcpu_chunk *chunk;

	/* is it in the reserved chunk? */
	if (pcpu_reserved_chunk) {
		void *start = pcpu_reserved_chunk->vm->addr;

		if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
	}

	/* nah... search the regular ones */
	n = *pcpu_chunk_rb_search(addr, &parent);
	if (!n) {
		/* no exactly matching chunk, the parent is the closest */
		n = parent;
		BUG_ON(!n);
	}
	chunk = rb_entry(n, struct pcpu_chunk, rb_node);

	if (addr < chunk->vm->addr) {
		/* the parent was the next one, look for the previous one */
		n = rb_prev(n);
		BUG_ON(!n);
		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
	}

	return chunk;
}

/**
 * pcpu_chunk_addr_insert - insert chunk into address rb tree
 * @new: chunk to insert
 *
 * Insert @new into address rb tree.
 */
static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
{
	struct rb_node **p, *parent;

	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
	BUG_ON(*p);
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &pcpu_addr_root);
}

/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend area map of @chunk so that it can accommodate an allocation.
 * A single allocation can split an area into three areas, so this
 * function makes sure that @chunk->map has at least two extra slots.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new)
		return -ENOMEM;

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	return 1;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
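
/*
 * For example (illustrative values): splitting a 512 byte free block
 * at index @i with @head == 64 and @tail == 192 leaves { 64, 256, 192 }
 * in its place; the middle 256 byte block is the one the caller then
 * marks allocated.
 */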

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting at @freeme in @chunk.  Note that this
 * function only modifies the allocation map.  It doesn't depopulate
 * or unmap the area.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
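
/*
 * For example (illustrative values): freeing offset 64 in a unit whose
 * map is { -64, -256, 128 } flips the middle entry to 256 and then
 * merges it with the free 128 byte neighbor, leaving { -64, 384 }.
 */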

/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * If @flush is true, vcache is flushed before unmapping and tlb
 * after.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	if (flush)
		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
				   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			if (!*pagep)
				continue;

			__free_page(*pagep);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}

/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = map_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT,
				PAGE_KERNEL,
				pcpu_chunk_pagep(chunk, cpu, page_start));
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;
	chunk->page = chunk->page_ar;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	void *ptr = NULL;
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_mutex);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;
		if (size > chunk->contig_hint ||
		    pcpu_extend_area_map(chunk) < 0)
			goto out_unlock;
		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;
		goto out_unlock;
	}

	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;
			if (pcpu_extend_area_map(chunk) < 0)
				goto out_unlock;
			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto out_unlock;
	pcpu_chunk_relocate(chunk, -1);
	pcpu_chunk_addr_insert(chunk);

	off = pcpu_alloc_area(chunk, size, align);
	if (off < 0)
		goto out_unlock;

area_found:
	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		pcpu_free_area(chunk, off);
		goto out_unlock;
	}

	ptr = __addr_to_pcpu_ptr(chunk->vm->addr + off);

out_unlock:
	mutex_unlock(&pcpu_mutex);
	return ptr;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
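
/*
 * Usage sketch (illustrative only; struct foo and its field are made
 * up for the example):
 *
 *	struct foo *p = __alloc_percpu(sizeof(struct foo),
 *				       __alignof__(struct foo));
 *	if (p)
 *		per_cpu_ptr(p, raw_smp_processor_id())->field++;
 *	...
 *	free_percpu(p);
 */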

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align from reserved
 * percpu area if arch has set it up; otherwise, allocation is served
 * from the same dynamic area.  Might sleep.  Might trigger writeouts.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_mutex);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		rb_erase(&chunk->rb_node, &pcpu_addr_root);
		list_move(&chunk->list, &todo);
	}

	mutex_unlock(&pcpu_mutex);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
		free_pcpu_chunk(chunk);
	}
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.  Might sleep.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	int off;

	if (!ptr)
		return;

	mutex_lock(&pcpu_mutex);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up the grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	mutex_unlock(&pcpu_mutex);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @reserved_size: the size of reserved percpu area in bytes
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
 * @base_addr: mapped address, NULL for auto
 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.  The first two parameters are mandatory.  The rest are
 * optional.
 *
 * @get_page_fn() should return pointer to percpu page given cpu
 * number and page number.  It should at least return enough pages to
 * cover the static area.  The returned pages for static area should
 * have been initialized with valid data.  If @unit_size is specified,
 * it can also return pages after the static area.  NULL return
 * indicates end of pages for the cpu.  Note that @get_page_fn() must
 * return the same number of pages for all cpus.
 *
 * @reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @unit_size, if non-negative, specifies unit size and must be
 * aligned to PAGE_SIZE and equal to or larger than @static_size +
 * @reserved_size + @dyn_size.
 *
 * @dyn_size, if non-negative, limits the number of bytes available
 * for dynamic allocation in the first chunk.  Specifying a
 * non-negative value makes percpu leave the area beyond @static_size
 * + @reserved_size + @dyn_size alone.
 *
 * Non-null @base_addr means that the caller already allocated virtual
 * region for the first chunk and mapped it.  percpu must not mess
 * with the chunk.  Note that @base_addr with 0 @unit_size or non-NULL
 * @populate_pte_fn doesn't make any sense.
 *
 * @populate_pte_fn is used to populate the pagetable.  NULL means the
 * caller already populated the pagetable.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunks.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				     size_t static_size, size_t reserved_size,
				     ssize_t unit_size, ssize_t dyn_size,
				     void *base_addr,
				     pcpu_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct first_vm;
	static int smap[2], dmap[2];
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned int cpu;
	int nr_pages;
	int err, i;

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
	BUG_ON(!static_size);
	if (unit_size >= 0) {
		BUG_ON(unit_size < static_size + reserved_size +
		       (dyn_size >= 0 ? dyn_size : 0));
		BUG_ON(unit_size & ~PAGE_MASK);
	} else
		BUG_ON(dyn_size >= 0);
	BUG_ON(base_addr && populate_pte_fn);

	if (unit_size >= 0)
		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	else
		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
					PFN_UP(static_size + reserved_size));

	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	if (dyn_size < 0)
		dyn_size = pcpu_unit_size - static_size - reserved_size;

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->vm = &first_vm;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->page = schunk->page_ar;

	if (reserved_size) {
		schunk->free_size = reserved_size;
		pcpu_reserved_chunk = schunk;	/* not for dynamic alloc */
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	pcpu_reserved_chunk_limit = static_size + schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->vm = &first_vm;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->page = schunk->page_ar;	/* share page map with schunk */

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* allocate vm address */
	first_vm.flags = VM_ALLOC;
	first_vm.size = pcpu_chunk_size;

	if (!base_addr)
		vm_area_register_early(&first_vm, PAGE_SIZE);
	else {
		/*
		 * Pages already mapped.  No need to remap into
		 * vmalloc area.  In this case the first chunks can't
		 * be mapped or unmapped by percpu and are marked
		 * immutable.
		 */
		first_vm.addr = base_addr;
		schunk->immutable = true;
		if (dchunk)
			dchunk->immutable = true;
	}

	/* assign pages */
	nr_pages = -1;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < pcpu_unit_pages; i++) {
			struct page *page = get_page_fn(cpu, i);

			if (!page)
				break;
			*pcpu_chunk_pagep(schunk, cpu, i) = page;
		}

		BUG_ON(i < PFN_UP(static_size));

		if (nr_pages < 0)
			nr_pages = i;
		else
			BUG_ON(nr_pages != i);
	}

	/* map them */
	if (populate_pte_fn) {
		for_each_possible_cpu(cpu)
			for (i = 0; i < nr_pages; i++)
				populate_pte_fn(pcpu_chunk_addr(schunk,
								cpu, i));

		err = pcpu_map(schunk, 0, nr_pages);
		if (err)
			panic("failed to setup static percpu area, err=%d\n",
			      err);
	}

	/* link the first chunk in */
	if (!dchunk) {
		pcpu_chunk_relocate(schunk, -1);
		pcpu_chunk_addr_insert(schunk);
	} else {
		pcpu_chunk_relocate(dchunk, -1);
		pcpu_chunk_addr_insert(dchunk);
	}

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
	return pcpu_unit_size;
}