/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in vmalloc
 * area.  Each chunk consists of num_possible_cpus() units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling is necessary as these areas
 * need to be brought up before allocation services are running).
 * Each unit grows as necessary and all units grow or shrink in
 * unison.  When a chunk is filled up, another chunk is allocated.
 * ie. in vmalloc area
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |       | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of a single unit's space.
 * Ie, an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
 * c1:u0, c1:u1, c1:u2 and c1:u3.  Percpu access can be done by
 * configuring percpu base registers UNIT_SIZE apart.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps a maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * in chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks are also linked into an rb
 * tree to ease address to chunk mapping during free.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   a regular address to a percpu pointer and back
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   set up the first chunk containing the kernel static percpu area
 */

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	struct rb_node		rb_node;	/* key is chunk->vm->addr */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		*page[];	/* #cpus * UNIT_PAGES */
};

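/*
 * Illustrative example (values are made up, not from the original
 * file): a chunk->map of { -64, 256, -128, 3648 } describes a 64 byte
 * allocation at offset 0, 256 free bytes at offset 64, a 128 byte
 * allocation at offset 320 and 3648 free bytes at offset 448.
 * Offsets are implicit: each is the sum of the absolute values of the
 * preceding entries, and the entries always add up to the unit size
 * (4096 in this made-up example).
 */
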
static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/* the size of kernel static area */
static int pcpu_static_size __read_mostly;

/*
 * One mutex to rule them all.
 *
 * The following mutex is grabbed in the outermost public alloc/free
 * interface functions and released only when the operation is
 * complete.  As such, every function in this file other than the
 * outermost functions is called under pcpu_mutex.
 *
 * It can easily be switched to a spinlock such that only the area
 * allocation and page population commit are protected with it, doing
 * the actual [de]allocation without holding any lock.  However, given
 * what this allocator does, I think it's better to let them run
 * sequentially.
 */
static DEFINE_MUTEX(pcpu_mutex);

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

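/*
 * For illustration (not part of the original file): with
 * PCPU_SLOT_BASE_SHIFT == 5, a 64 byte request maps to slot 4
 * (fls(64) == 7), a 1024 byte request to slot 8 (fls(1024) == 11) and
 * a 4096 byte request to slot 10 (fls(4096) == 13), so each slot
 * roughly covers one power-of-two size class.
 */
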
static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}

static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}

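/*
 * Worked example (illustrative only, numbers are not from this file):
 * with pcpu_unit_pages == 8, cpu 2's unit starts 2 * 8 pages into the
 * chunk's vmalloc region, so pcpu_chunk_addr(chunk, 2, 3) is
 * chunk->vm->addr + (2 * 8 + 3) * PAGE_SIZE, i.e. page 3 of cpu 2's
 * unit.  This is the UNIT_SIZE-apart layout described at the top of
 * the file.
 */
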
static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}

/**
 * pcpu_realloc - versatile realloc
 * @p: the current pointer (can be NULL for new allocations)
 * @size: the current size in bytes (can be 0 for new allocations)
 * @new_size: the wanted new size in bytes (can be 0 for free)
 *
 * More robust realloc which can be used to allocate, resize or free a
 * memory area of arbitrary size.  If the needed size goes over
 * PAGE_SIZE, kernel VM is used.
 *
 * RETURNS:
 * The new pointer on success, NULL on failure.
 */
static void *pcpu_realloc(void *p, size_t size, size_t new_size)
{
	void *new;

	if (new_size <= PAGE_SIZE)
		new = kmalloc(new_size, GFP_KERNEL);
	else
		new = vmalloc(new_size);
	if (new_size && !new)
		return NULL;

	memcpy(new, p, min(size, new_size));
	if (new_size > size)
		memset(new + size, 0, new_size - size);

	if (size <= PAGE_SIZE)
		kfree(p);
	else
		vfree(p);

	return new;
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * The new slot according to the changed state is determined and
 * @chunk is moved to the slot.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

static struct rb_node **pcpu_chunk_rb_search(void *addr,
					     struct rb_node **parentp)
{
	struct rb_node **p = &pcpu_addr_root.rb_node;
	struct rb_node *parent = NULL;
	struct pcpu_chunk *chunk;

	while (*p) {
		parent = *p;
		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);

		if (addr < chunk->vm->addr)
			p = &(*p)->rb_left;
		else if (addr > chunk->vm->addr)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}

/**
 * pcpu_chunk_addr_search - search for chunk containing specified address
 * @addr: address to search for
 *
 * Look for the chunk which might contain @addr.  More specifically, it
 * searches for the chunk with the highest start address which isn't
 * beyond @addr.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	struct rb_node *n, *parent;
	struct pcpu_chunk *chunk;

	n = *pcpu_chunk_rb_search(addr, &parent);
	if (!n) {
		/* no exactly matching chunk, the parent is the closest */
		n = parent;
		BUG_ON(!n);
	}
	chunk = rb_entry(n, struct pcpu_chunk, rb_node);

	if (addr < chunk->vm->addr) {
		/* the parent was the next one, look for the previous one */
		n = rb_prev(n);
		BUG_ON(!n);
		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
	}

	return chunk;
}

/**
 * pcpu_chunk_addr_insert - insert chunk into address rb tree
 * @new: chunk to insert
 *
 * Insert @new into the address rb tree.
 */
static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
{
	struct rb_node **p, *parent;

	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
	BUG_ON(*p);
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &pcpu_addr_root);
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, a @head bytes block is inserted before block @i, moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and a @tail byte block
 * is inserted after the target block.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail)
{
	int nr_extra = !!head + !!tail;
	int target = chunk->map_used + nr_extra;

	/* reallocation required? */
	if (chunk->map_alloc < target) {
		int new_alloc = chunk->map_alloc;
		int *new;

		while (new_alloc < target)
			new_alloc *= 2;

		new = pcpu_realloc(chunk->map,
				   chunk->map_alloc * sizeof(new[0]),
				   new_alloc * sizeof(new[0]));
		if (!new)
			return -ENOMEM;

		chunk->map_alloc = new_alloc;
		chunk->map = new;
	}

	/* insert a new subblock */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
	return 0;
}

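/*
 * Example for illustration (values are made up, not from this file):
 * splitting a 1024 byte free block at index i with head == 64 and
 * tail == 448 turns map[i] == 1024 into the three entries
 * { 64, 512, 448 }, with the 512 byte middle block at index i + 1
 * ready to be marked allocated by the caller.
 */
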
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -errno on failure.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	/*
	 * The static chunk initially doesn't have map attached
	 * because kmalloc wasn't available during init.  Give it one.
	 */
	if (unlikely(!chunk->map)) {
		chunk->map = pcpu_realloc(NULL, 0,
				PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
		if (!chunk->map)
			return -ENOMEM;

		chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
		chunk->map[chunk->map_used++] = -pcpu_static_size;
		if (chunk->free_size)
			chunk->map[chunk->map_used++] = chunk->free_size;
	}

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			if (pcpu_split_block(chunk, i, head, tail))
				return -ENOMEM;
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/*
	 * Tell the upper layer that this chunk has no area left.
	 * Note that this is not an error condition but a notification
	 * to upper layer that it needs to look at other chunks.
	 * -ENOSPC is chosen as it isn't used in memory subsystem and
	 * matches the meaning in a way.
	 */
	return -ENOSPC;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}

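/*
 * Illustrative example (made-up values, not from this file): freeing
 * the allocation at offset 0 in a map of { -64, 256, -128, ... }
 * first flips the entry to 64, then merges it with the following free
 * 256 byte block, leaving { 320, -128, ... } and a free_size that has
 * grown by 64 bytes.
 */
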
/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * If @flush is true, vcache is flushed before unmapping and tlb
 * after.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	if (flush)
		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
				   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}

/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
			struct page *page = *pagep;

			if (!page)
				continue;

			__free_page(page);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}

/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = map_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT,
				PAGE_KERNEL,
				pcpu_chunk_pagep(chunk, cpu, page_start));
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}

/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}

static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_realloc(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]), 0);
	kfree(chunk);
}

static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_realloc(NULL, 0,
				  PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

/**
 * __alloc_percpu - allocate percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	void *ptr = NULL;
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_mutex);

	/* allocate area */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;
			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
			if (off != -ENOSPC)
				goto out_unlock;
		}
	}

	/* hmmm... no space left, create a new chunk */
	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto out_unlock;
	pcpu_chunk_relocate(chunk, -1);
	pcpu_chunk_addr_insert(chunk);

	off = pcpu_alloc_area(chunk, size, align);
	if (off < 0)
		goto out_unlock;

area_found:
	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		pcpu_free_area(chunk, off);
		goto out_unlock;
	}

	ptr = __addr_to_pcpu_ptr(chunk->vm->addr + off);
out_unlock:
	mutex_unlock(&pcpu_mutex);
	return ptr;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

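/*
 * Minimal usage sketch (illustrative only, not part of this file):
 * a caller would do something like
 *
 *	int *counters = __alloc_percpu(sizeof(int), __alignof__(int));
 *	if (counters)
 *		for_each_possible_cpu(cpu)
 *			*per_cpu_ptr(counters, cpu) = 0;
 *	...
 *	free_percpu(counters);
 *
 * where per_cpu_ptr() translates the returned percpu pointer to the
 * address of a given cpu's copy.
 */
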
static void pcpu_kill_chunk(struct pcpu_chunk *chunk)
{
	WARN_ON(chunk->immutable);
	pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
	list_del(&chunk->list);
	rb_erase(&chunk->rb_node, &pcpu_addr_root);
	free_pcpu_chunk(chunk);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.  Might sleep.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	int off;

	if (!ptr)
		return;

	mutex_lock(&pcpu_mutex);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* the chunk became fully free, kill one if there are other free ones */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos,
				    &pcpu_slot[pcpu_chunk_slot(chunk)], list)
			if (pos != chunk) {
				pcpu_kill_chunk(pos);
				break;
			}
	}

	mutex_unlock(&pcpu_mutex);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, 0 for auto
 * @free_size: free size in bytes, 0 for auto
 * @base_addr: mapped address, NULL for auto
 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.  The first two parameters are mandatory.  The rest are
 * optional.
 *
 * @get_page_fn() should return pointer to percpu page given cpu
 * number and page number.  It should at least return enough pages to
 * cover the static area.  The returned pages for static area should
 * have been initialized with valid data.  If @unit_size is specified,
 * it can also return pages after the static area.  NULL return
 * indicates end of pages for the cpu.  Note that @get_page_fn() must
 * return the same number of pages for all cpus.
 *
 * @unit_size, if non-zero, determines unit size and must be aligned
 * to PAGE_SIZE and equal to or larger than @static_size + @free_size.
 *
 * @free_size determines the number of free bytes after the static
 * area in the first chunk.  If zero, whatever is left is available.
 * Specifying a non-zero value makes percpu leave the area after
 * @static_size + @free_size alone.
 *
 * Non-null @base_addr means that the caller already allocated the
 * virtual region for the first chunk and mapped it.  percpu must not
 * mess with the chunk.  Note that @base_addr with 0 @unit_size or
 * non-NULL @populate_pte_fn doesn't make any sense.
 *
 * @populate_pte_fn is used to populate the pagetable.  NULL means the
 * caller already populated the pagetable.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				     size_t static_size, size_t unit_size,
				     size_t free_size, void *base_addr,
				     pcpu_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct static_vm;
	struct pcpu_chunk *static_chunk;
	unsigned int cpu;
	int nr_pages;
	int err, i;

	BUG_ON(!static_size);
	BUG_ON(!unit_size && free_size);
	BUG_ON(unit_size && unit_size < static_size + free_size);
	BUG_ON(unit_size & ~PAGE_MASK);
	BUG_ON(base_addr && !unit_size);
	BUG_ON(base_addr && populate_pte_fn);

	if (unit_size)
		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	else
		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
					PFN_UP(static_size));

	pcpu_static_size = static_size;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/* init static_chunk */
	static_chunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&static_chunk->list);
	static_chunk->vm = &static_vm;

	if (free_size)
		static_chunk->free_size = free_size;
	else
		static_chunk->free_size = pcpu_unit_size - pcpu_static_size;

	static_chunk->contig_hint = static_chunk->free_size;

	/* allocate vm address */
	static_vm.flags = VM_ALLOC;
	static_vm.size = pcpu_chunk_size;

	if (!base_addr)
		vm_area_register_early(&static_vm, PAGE_SIZE);
	else {
		/*
		 * Pages already mapped.  No need to remap into
		 * vmalloc area.  In this case the static chunk can't
		 * be mapped or unmapped by percpu and is marked
		 * immutable.
		 */
		static_vm.addr = base_addr;
		static_chunk->immutable = true;
	}

	/* assign pages */
	nr_pages = -1;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < pcpu_unit_pages; i++) {
			struct page *page = get_page_fn(cpu, i);

			if (!page)
				break;
			*pcpu_chunk_pagep(static_chunk, cpu, i) = page;
		}

		BUG_ON(i < PFN_UP(pcpu_static_size));

		if (nr_pages < 0)
			nr_pages = i;
		else
			BUG_ON(nr_pages != i);
	}

	/* map them */
	if (populate_pte_fn) {
		for_each_possible_cpu(cpu)
			for (i = 0; i < nr_pages; i++)
				populate_pte_fn(pcpu_chunk_addr(static_chunk,
								cpu, i));
	}

	err = pcpu_map(static_chunk, 0, nr_pages);
	if (err)
		panic("failed to setup static percpu area, err=%d\n",
		      err);

	/* link static_chunk in */
	pcpu_chunk_relocate(static_chunk, -1);
	pcpu_chunk_addr_insert(static_chunk);

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(static_chunk, 0, 0);
	return pcpu_unit_size;
}

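/*
 * Rough sketch of the arch-side boot sequence described in the header
 * comment.  This is illustrative only and not part of the original
 * file; helper names such as my_get_page() and my_populate_pte() are
 * hypothetical stand-ins for whatever the arch provides.
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		unsigned int cpu;
 *		size_t unit_size;
 *
 *		unit_size = pcpu_setup_first_chunk(my_get_page,
 *				__per_cpu_end - __per_cpu_start,
 *				0, 0, NULL, my_populate_pte);
 *
 *		for_each_possible_cpu(cpu)
 *			__per_cpu_offset[cpu] = (unsigned long)pcpu_base_addr
 *				+ cpu * unit_size
 *				- (unsigned long)__per_cpu_start;
 *	}
 *
 * i.e. the arch feeds the pages backing the static area through
 * @get_page_fn and then points each cpu's percpu offset at its unit
 * within the first chunk, pcpu_unit_size apart.
 */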