/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in vmalloc
 * area.  Each chunk consists of num_possible_cpus() units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling necessary as these areas
 * need to be brought up before allocation services are running).
 * Units grow as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.  ie. in
 * vmalloc area
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
 * percpu base registers UNIT_SIZE apart.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks are also linked into an rb
 * tree to ease address to chunk mapping during free.
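 *
 * For example (illustrative values only), a map of
 *
 *	{ -64, 256, -128 }
 *
 * describes a unit whose first 64 bytes are allocated, followed by
 * 256 free bytes and then a 128 byte allocation.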
 *
 * To use this allocator, arch code should do the following:
 *
 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back (see the sketch below
 *   this comment)
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
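/*
 * A minimal sketch of the two translation hooks, assuming percpu
 * pointers are simply chunk addresses rebased onto the static percpu
 * symbols (actual arch definitions may differ):
 *
 *	#define __addr_to_pcpu_ptr(addr)				\
 *		(void *)((unsigned long)(addr)				\
 *			 - (unsigned long)pcpu_base_addr		\
 *			 + (unsigned long)__per_cpu_start)
 *	#define __pcpu_ptr_to_addr(ptr)					\
 *		(void *)((unsigned long)(ptr)				\
 *			 + (unsigned long)pcpu_base_addr		\
 *			 - (unsigned long)__per_cpu_start)
 */
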
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	struct rb_node		rb_node;	/* key is chunk->vm->addr */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	struct page		*page[];	/* #cpus * UNIT_PAGES */
};
static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);
/*
 * One mutex to rule them all.
 *
 * The following mutex is grabbed in the outermost public alloc/free
 * interface functions and released only when the operation is
 * complete.  As such, every function in this file other than the
 * outermost ones is called under pcpu_mutex.
 *
 * It can easily be switched to use spinlock such that only the area
 * allocation and page population commit are protected with it doing
 * actual [de]allocation without holding any lock.  However, given
 * what this allocator does, I think it's better to let them run
 * sequentially.
 */
static DEFINE_MUTEX(pcpu_mutex);

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */
static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}
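/*
 * Example (illustrative): for a 1024 byte area, fls(1024) == 11, so
 * the area is filed under slot max(11 - PCPU_SLOT_BASE_SHIFT + 2, 1)
 * == 8.
 */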
static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return cpu * pcpu_unit_pages + page_idx;
}

static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}
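/*
 * Example (illustrative): with pcpu_unit_pages == 8, cpu 2's page 3
 * has page index 2 * 8 + 3 == 19 and thus lives 19 << PAGE_SHIFT
 * bytes into the chunk's vmalloc area.
 */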
static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}
/**
 * pcpu_realloc - versatile realloc
 * @p: the current pointer (can be NULL for new allocations)
 * @size: the current size in bytes (can be 0 for new allocations)
 * @new_size: the wanted new size in bytes (can be 0 for free)
 *
 * More robust realloc which can be used to allocate, resize or free a
 * memory area of arbitrary size.  If the needed size goes over
 * PAGE_SIZE, kernel VM is used.
 *
 * RETURNS:
 * The new pointer on success, NULL on failure.
 */
static void *pcpu_realloc(void *p, size_t size, size_t new_size)
{
	void *new;

	if (new_size <= PAGE_SIZE)
		new = kmalloc(new_size, GFP_KERNEL);
	else
		new = vmalloc(new_size);
	if (new_size && !new)
		return NULL;

	memcpy(new, p, min(size, new_size));
	if (new_size > size)
		memset(new + size, 0, new_size - size);

	if (size <= PAGE_SIZE)
		kfree(p);
	else
		vfree(p);

	return new;
}
/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}
static struct rb_node **pcpu_chunk_rb_search(void *addr,
					     struct rb_node **parentp)
{
	struct rb_node **p = &pcpu_addr_root.rb_node;
	struct rb_node *parent = NULL;
	struct pcpu_chunk *chunk;

	while (*p) {
		parent = *p;
		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);

		if (addr < chunk->vm->addr)
			p = &(*p)->rb_left;
		else if (addr > chunk->vm->addr)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}
/**
 * pcpu_chunk_addr_search - search for chunk containing specified address
 * @addr: address to search for
 *
 * Look for chunk which might contain @addr.  More specifically, it
 * searches for the chunk with the highest start address which isn't
 * beyond @addr.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	struct rb_node *n, *parent;
	struct pcpu_chunk *chunk;

	n = *pcpu_chunk_rb_search(addr, &parent);
	if (!n) {
		/* no exactly matching chunk, the parent is the closest */
		n = parent;
		BUG_ON(!n);
	}
	chunk = rb_entry(n, struct pcpu_chunk, rb_node);

	if (addr < chunk->vm->addr) {
		/* the parent was the next one, look for the previous one */
		n = rb_prev(n);
		BUG_ON(!n);
		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
	}

	return chunk;
}
/**
 * pcpu_chunk_addr_insert - insert chunk into address rb tree
 * @new: chunk to insert
 *
 * Insert @new into address rb tree.
 */
static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
{
	struct rb_node **p, *parent;

	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
	BUG_ON(*p);
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &pcpu_addr_root);
}
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail)
{
	int nr_extra = !!head + !!tail;
	int target = chunk->map_used + nr_extra;

	/* reallocation required? */
	if (chunk->map_alloc < target) {
		int new_alloc;
		int *new;

		new_alloc = PCPU_DFL_MAP_ALLOC;
		while (new_alloc < target)
			new_alloc *= 2;

		if (chunk->map_alloc < PCPU_DFL_MAP_ALLOC) {
			/*
			 * map_alloc smaller than the default size
			 * indicates that the chunk is one of the
			 * first chunks and still using static map.
			 * Allocate a dynamic one and copy.
			 */
			new = pcpu_realloc(NULL, 0, new_alloc * sizeof(new[0]));
			if (new)
				memcpy(new, chunk->map,
				       chunk->map_alloc * sizeof(new[0]));
		} else
			new = pcpu_realloc(chunk->map,
					   chunk->map_alloc * sizeof(new[0]),
					   new_alloc * sizeof(new[0]));
		if (!new)
			return -ENOMEM;

		chunk->map_alloc = new_alloc;
		chunk->map = new;
	}

	/* insert a new subblock */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
	return 0;
}
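/*
 * Example (illustrative): splitting a 1024 byte free block with
 * head == 8 and tail == 4 turns the single map entry { 1024 } into
 * { 8, 1012, 4 }; the caller then marks the middle block allocated.
 */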
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -errno on failure.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			if (pcpu_split_block(chunk, i, head, tail))
				return -ENOMEM;
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/*
	 * Tell the upper layer that this chunk has no area left.
	 * Note that this is not an error condition but a notification
	 * to upper layer that it needs to look at other chunks.
	 * -ENOSPC is chosen as it isn't used in memory subsystem and
	 * matches the meaning in a way.
	 */
	return -ENOSPC;
}
/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
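/*
 * Example (illustrative): with map == { -128, -256, 64 }, freeing the
 * 256 byte area at offset 128 flips its entry to 256 and merges it
 * with the following free block, leaving { -128, 320 }.
 */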
/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * If @flush is true, vcache is flushed before unmapping and tlb
 * after.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/* unmap must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	if (flush)
		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
				   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			if (!*pagep)
				continue;

			__free_page(*pagep);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}
/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	/* map must not be done on immutable chunk */
	WARN_ON(chunk->immutable);

	for_each_possible_cpu(cpu) {
		err = map_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT,
				PAGE_KERNEL,
				pcpu_chunk_pagep(chunk, cpu, page_start));
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}
static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_realloc(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]), 0);
	kfree(chunk);
}
static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_realloc(NULL, 0,
				  PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}
/**
 * __alloc_percpu - allocate percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	void *ptr = NULL;
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_mutex);

	/* allocate area */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;
			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
			if (off != -ENOSPC)
				goto out_unlock;
		}
	}

	/* hmmm... no space left, create a new chunk */
	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto out_unlock;
	pcpu_chunk_relocate(chunk, -1);
	pcpu_chunk_addr_insert(chunk);

	off = pcpu_alloc_area(chunk, size, align);
	if (off < 0)
		goto out_unlock;

area_found:
	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		pcpu_free_area(chunk, off);
		goto out_unlock;
	}

	ptr = __addr_to_pcpu_ptr(chunk->vm->addr + off);
out_unlock:
	mutex_unlock(&pcpu_mutex);
	return ptr;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
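/*
 * Illustrative usage (assuming the usual per_cpu_ptr() accessor):
 *
 *	int *ref = __alloc_percpu(sizeof(int), __alignof__(int));
 *	unsigned int cpu;
 *
 *	if (ref)
 *		for_each_possible_cpu(cpu)
 *			*per_cpu_ptr(ref, cpu) = -1;
 *	...
 *	free_percpu(ref);
 */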
static void pcpu_kill_chunk(struct pcpu_chunk *chunk)
{
	WARN_ON(chunk->immutable);
	pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
	list_del(&chunk->list);
	rb_erase(&chunk->rb_node, &pcpu_addr_root);
	free_pcpu_chunk(chunk);
}
/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.  Might sleep.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	int off;

	if (!ptr)
		return;

	mutex_lock(&pcpu_mutex);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* the chunk became fully free, kill one if there are other free ones */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos,
				    &pcpu_slot[pcpu_chunk_slot(chunk)], list)
			if (pos != chunk) {
				pcpu_kill_chunk(pos);
				break;
			}
	}

	mutex_unlock(&pcpu_mutex);
}
EXPORT_SYMBOL_GPL(free_percpu);
/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @get_page_fn: callback to fetch page pointer
 * @static_size: the size of static percpu area in bytes
 * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, 0 for auto
 * @dyn_size: free size for dynamic allocation in bytes, 0 for auto
 * @base_addr: mapped address, NULL for auto
 * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.  The first two parameters are mandatory.  The rest are
 * optional.
 *
 * @get_page_fn() should return pointer to percpu page given cpu
 * number and page number.  It should at least return enough pages to
 * cover the static area.  The returned pages for static area should
 * have been initialized with valid data.  If @unit_size is specified,
 * it can also return pages after the static area.  NULL return
 * indicates end of pages for the cpu.  Note that @get_page_fn() must
 * return the same number of pages for all cpus.
 *
 * @unit_size, if non-zero, determines unit size and must be aligned
 * to PAGE_SIZE and equal to or larger than @static_size + @dyn_size.
 *
 * @dyn_size determines the number of free bytes after the static
 * area in the first chunk.  If zero, whatever is left over is made
 * available.  Specifying a non-zero value makes percpu leave the area
 * after @static_size + @dyn_size alone.
 *
 * Non-null @base_addr means that the caller already allocated virtual
 * region for the first chunk and mapped it.  percpu must not mess
 * with the chunk.  Note that @base_addr with 0 @unit_size or non-NULL
 * @populate_pte_fn doesn't make any sense.
 *
 * @populate_pte_fn is used to populate the pagetable.  NULL means the
 * caller already populated the pagetable.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */
size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
				     size_t static_size, size_t unit_size,
				     size_t dyn_size, void *base_addr,
				     pcpu_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct first_vm;
	static int smap[2];
	struct pcpu_chunk *schunk;
	unsigned int cpu;
	int nr_pages;
	int err, i;

	/* sanity checks */
	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC);
	BUG_ON(!static_size);
	BUG_ON(!unit_size && dyn_size);
	BUG_ON(unit_size && unit_size < static_size + dyn_size);
	BUG_ON(unit_size & ~PAGE_MASK);
	BUG_ON(base_addr && !unit_size);
	BUG_ON(base_addr && populate_pte_fn);

	if (unit_size)
		pcpu_unit_pages = unit_size >> PAGE_SHIFT;
	else
		pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
					PFN_UP(static_size));

	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/* init static chunk */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->vm = &first_vm;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);

	if (dyn_size)
		schunk->free_size = dyn_size;
	else
		schunk->free_size = pcpu_unit_size - static_size;

	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* allocate vm address */
	first_vm.flags = VM_ALLOC;
	first_vm.size = pcpu_chunk_size;

	if (!base_addr)
		vm_area_register_early(&first_vm, PAGE_SIZE);
	else {
		/*
		 * Pages already mapped.  No need to remap into
		 * vmalloc area.  In this case the static chunk can't
		 * be mapped or unmapped by percpu and is marked
		 * immutable.
		 */
		first_vm.addr = base_addr;
		schunk->immutable = true;
	}

	/* assign pages */
	nr_pages = -1;
	for_each_possible_cpu(cpu) {
		for (i = 0; i < pcpu_unit_pages; i++) {
			struct page *page = get_page_fn(cpu, i);

			if (!page)
				break;
			*pcpu_chunk_pagep(schunk, cpu, i) = page;
		}

		BUG_ON(i < PFN_UP(static_size));

		if (nr_pages < 0)
			nr_pages = i;
		else
			BUG_ON(nr_pages != i);
	}

	/* map them */
	if (populate_pte_fn) {
		for_each_possible_cpu(cpu)
			for (i = 0; i < nr_pages; i++)
				populate_pte_fn(pcpu_chunk_addr(schunk,
								cpu, i));
	}

	err = pcpu_map(schunk, 0, nr_pages);
	if (err)
		panic("failed to setup static percpu area, err=%d\n", err);

	/* link the first chunk in */
	pcpu_chunk_relocate(schunk, -1);
	pcpu_chunk_addr_insert(schunk);

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
	return pcpu_unit_size;
}
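/*
 * Illustrative, hypothetical arch usage sketch (my_get_page,
 * my_percpu_pages and my_populate_pte are made-up names; the arch is
 * assumed to have pre-allocated and copied the static percpu pages):
 *
 *	static struct page * __init my_get_page(unsigned int cpu, int pageno)
 *	{
 *		return my_percpu_pages[cpu][pageno]; (NULL past the last page)
 *	}
 *
 *	void __init setup_per_cpu_areas(void)
 *	{
 *		size_t static_size = __per_cpu_end - __per_cpu_start;
 *		size_t unit_size;
 *
 *		unit_size = pcpu_setup_first_chunk(my_get_page, static_size,
 *						   0, 0, NULL, my_populate_pte);
 *		(use unit_size and pcpu_base_addr to set each cpu's
 *		 percpu base offset)
 *	}
 */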