/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in vmalloc
 * area.  Each chunk consists of num_possible_cpus() units and the
 * first chunk is used for static percpu variables in the kernel image
 * (special boot time alloc/init handling necessary as these areas
 * need to be brought up before allocation services are running).
 * Unit grows as necessary and all units grow or shrink in unison.
 * When a chunk is filled up, another chunk is allocated.  ie. in
 * vmalloc area
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
 * percpu base registers UNIT_SIZE apart.
 *
 * There are usually many small percpu allocations, many of them as
 * small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks are also linked into a rb tree
 * to ease address to chunk mapping during free.
 *
 * To use this allocator, arch code should do the following.
 *
 * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back
 *
 * - use pcpu_setup_static() during percpu area initialization to
 *   setup kernel static percpu area
 */
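/*
 * Usage sketch (illustrative, not part of the allocator): how a
 * client could obtain a dynamic percpu area from this allocator and
 * touch this cpu's instance.  per_cpu_ptr(), get_cpu() and put_cpu()
 * come from the generic percpu/smp headers, not from this file, and
 * the variable names are made up for the example.
 *
 *	int *cnt = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (!cnt)
 *		return -ENOMEM;
 *	*per_cpu_ptr(cnt, get_cpu()) += 1;	(bump this cpu's counter)
 *	put_cpu();
 *	free_percpu(cnt);
 */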
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#define PCPU_MIN_UNIT_PAGES_SHIFT	4	/* also max alloc size */
#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
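/*
 * Illustration (assuming 4k pages, i.e. PAGE_SHIFT == 12): each unit
 * spans at least 1 << PCPU_MIN_UNIT_PAGES_SHIFT = 16 pages = 64k, and
 * __alloc_percpu() below rejects requests larger than
 * PAGE_SIZE << PCPU_MIN_UNIT_PAGES_SHIFT = 64k, so a single
 * allocation can never exceed one unit.
 */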
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	struct rb_node		rb_node;	/* key is chunk->vm->addr */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	struct page		*page[];	/* #cpus * UNIT_PAGES */
};
static int pcpu_unit_pages_shift;
static int pcpu_unit_pages;
static int pcpu_unit_shift;
static int pcpu_unit_size;
static int pcpu_chunk_size;
static int pcpu_nr_slots;
static size_t pcpu_chunk_struct_size;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

/* the size of kernel static area */
static int pcpu_static_size;
/*
 * One mutex to rule them all.
 *
 * The following mutex is grabbed in the outermost public alloc/free
 * interface functions and released only when the operation is
 * complete.  As such, every function in this file other than the
 * outermost functions is called under pcpu_mutex.
 *
 * It can easily be switched to use spinlock such that only the area
 * allocation and page population commit are protected with it doing
 * actual [de]allocation without holding any lock.  However, given
 * what this allocator does, I think it's better to let them run
 * sequentially.
 */
static DEFINE_MUTEX(pcpu_mutex);
static struct list_head *pcpu_slot;		/* chunk list slots */
static struct rb_root pcpu_addr_root = RB_ROOT;	/* chunks by address */
static int pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}
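/*
 * Worked example (illustrative): a chunk with 1024 bytes free sits in
 * slot fls(1024) - 5 + 2 = 8, while a request for 192 bytes starts
 * searching at slot fls(192) - 5 + 2 = 5.  Chunks in slot 5 and above
 * have at least 128 bytes free, so the slot scan quickly skips chunks
 * that cannot possibly satisfy the request; contig_hint then filters
 * out chunks whose free space is too fragmented.
 */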
static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return (cpu << pcpu_unit_pages_shift) + page_idx;
}
static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
				      unsigned int cpu, int page_idx)
{
	return &chunk->page[pcpu_page_idx(cpu, page_idx)];
}
static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr +
		(pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
}
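/*
 * Worked example (illustrative, assuming 4k pages and
 * pcpu_unit_pages_shift == 4, i.e. 16-page / 64k units): for cpu 2,
 * page 3, pcpu_page_idx() yields (2 << 4) + 3 = 35, so
 * pcpu_chunk_addr() returns chunk->vm->addr + (35 << 12) =
 * chunk->vm->addr + 0x23000, i.e. 3 pages into cpu 2's unit.
 */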
static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
				     int page_idx)
{
	return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
}
/**
 * pcpu_realloc - versatile realloc
 * @p: the current pointer (can be NULL for new allocations)
 * @size: the current size in bytes (can be 0 for new allocations)
 * @new_size: the wanted new size in bytes (can be 0 for free)
 *
 * More robust realloc which can be used to allocate, resize or free a
 * memory area of arbitrary size.  If the needed size goes over
 * PAGE_SIZE, kernel VM is used.
 *
 * RETURNS:
 * The new pointer on success, NULL on failure.
 */
static void *pcpu_realloc(void *p, size_t size, size_t new_size)
{
	void *new;

	if (new_size <= PAGE_SIZE)
		new = kmalloc(new_size, GFP_KERNEL);
	else
		new = vmalloc(new_size);
	if (new_size && !new)
		return NULL;

	memcpy(new, p, min(size, new_size));
	if (new_size > size)
		memset(new + size, 0, new_size - size);

	if (size <= PAGE_SIZE)
		kfree(p);
	else
		vfree(p);

	return new;
}
/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}
static struct rb_node **pcpu_chunk_rb_search(void *addr,
					     struct rb_node **parentp)
{
	struct rb_node **p = &pcpu_addr_root.rb_node;
	struct rb_node *parent = NULL;
	struct pcpu_chunk *chunk;

	while (*p) {
		parent = *p;
		chunk = rb_entry(parent, struct pcpu_chunk, rb_node);

		if (addr < chunk->vm->addr)
			p = &(*p)->rb_left;
		else if (addr > chunk->vm->addr)
			p = &(*p)->rb_right;
		else
			break;
	}

	if (parentp)
		*parentp = parent;
	return p;
}
/**
 * pcpu_chunk_addr_search - search for chunk containing specified address
 * @addr: address to search for
 *
 * Look for chunk which might contain @addr.  More specifically, it
 * searches for the chunk with the highest start address which isn't
 * beyond @addr.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	struct rb_node *n, *parent;
	struct pcpu_chunk *chunk;

	n = *pcpu_chunk_rb_search(addr, &parent);
	if (!n) {
		/* no exactly matching chunk, the parent is the closest */
		n = parent;
		BUG_ON(!n);
	}
	chunk = rb_entry(n, struct pcpu_chunk, rb_node);

	if (addr < chunk->vm->addr) {
		/* the parent was the next one, look for the previous one */
		n = rb_prev(n);
		BUG_ON(!n);
		chunk = rb_entry(n, struct pcpu_chunk, rb_node);
	}

	return chunk;
}
/**
 * pcpu_chunk_addr_insert - insert chunk into address rb tree
 * @new: chunk to insert
 *
 * Insert @new into address rb tree.
 */
static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
{
	struct rb_node **p, *parent;

	p = pcpu_chunk_rb_search(new->vm->addr, &parent);
	BUG_ON(*p);
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, &pcpu_addr_root);
}
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail)
{
	int nr_extra = !!head + !!tail;
	int target = chunk->map_used + nr_extra;

	/* reallocation required? */
	if (chunk->map_alloc < target) {
		int new_alloc = chunk->map_alloc;
		int *new;

		while (new_alloc < target)
			new_alloc *= 2;

		new = pcpu_realloc(chunk->map,
				   chunk->map_alloc * sizeof(new[0]),
				   new_alloc * sizeof(new[0]));
		if (!new)
			return -ENOMEM;

		chunk->map_alloc = new_alloc;
		chunk->map = new;
	}

	/* insert a new subblock */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
	return 0;
}
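/*
 * Worked example (illustrative): suppose chunk->map is { -300, 1000 },
 * i.e. 300 bytes allocated at offset 0 followed by 1000 free bytes at
 * offset 300.  pcpu_split_block(chunk, 1, 212, 660) carves the free
 * block into a 212 byte head (alignment padding), a 128 byte middle
 * and a 660 byte tail, leaving { -300, 212, 128, 660 }.  The caller
 * (pcpu_alloc_area() below) then marks the middle block allocated.
 */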
/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -errno on failure.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	/*
	 * The static chunk initially doesn't have map attached
	 * because kmalloc wasn't available during init.  Give it one.
	 */
	if (unlikely(!chunk->map)) {
		chunk->map = pcpu_realloc(NULL, 0,
				PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
		if (!chunk->map)
			return -ENOMEM;

		chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
		chunk->map[chunk->map_used++] = -pcpu_static_size;
		if (chunk->free_size)
			chunk->map[chunk->map_used++] = chunk->free_size;
	}

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			if (pcpu_split_block(chunk, i, head, tail))
				return -ENOMEM;
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/*
	 * Tell the upper layer that this chunk has no area left.
	 * Note that this is not an error condition but a notification
	 * to upper layer that it needs to look at other chunks.
	 * -ENOSPC is chosen as it isn't used in memory subsystem and
	 * matches the meaning in a way.
	 */
	return -ENOSPC;
}
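/*
 * Worked example (illustrative), continuing the map { -300, 1000 }
 * from above: pcpu_alloc_area(chunk, 128, 256) scans to the free
 * block at offset 300, computes head = ALIGN(300, 256) - 300 = 212
 * and tail = 1000 - 212 - 128 = 660, splits the block and then flips
 * the middle entry, leaving { -300, 212, -128, 660 }.  It returns
 * offset 512 and reduces free_size by 128; the 212 and 660 byte
 * entries stay free for later requests.
 */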
/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free area starting from @freeme to @chunk.  Note that this function
 * only modifies the allocation map.  It doesn't depopulate or unmap
 * the area.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
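/*
 * Worked example (illustrative), continuing { -300, 212, -128, 660 }
 * from above: pcpu_free_area(chunk, 512) flips the -128 entry back to
 * 128, merges it with the free 212 byte block before it and the free
 * 660 byte block after it, and the map collapses back to
 * { -300, 1000 }.
 */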
/**
 * pcpu_unmap - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
 * If @flush is true, vcache is flushed before unmapping and tlb
 * after.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
		       bool flush)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;

	/*
	 * Each flushing trial can be very expensive, issue flush on
	 * the whole region at once rather than doing it for each cpu.
	 * This could be an overkill but is more scalable.
	 */
	if (flush)
		flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
				   pcpu_chunk_addr(chunk, last, page_end));

	for_each_possible_cpu(cpu)
		unmap_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT);

	/* ditto as flush_cache_vunmap() */
	if (flush)
		flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
				       pcpu_chunk_addr(chunk, last, page_end));
}
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 * @flush: whether to flush cache and tlb or not
 *
 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
 * from @chunk.  If @flush is true, vcache is flushed before unmapping
 * and tlb after.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
				  bool flush)
{
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int unmap_start = -1;
	int uninitialized_var(unmap_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			if (!*pagep)
				continue;

			__free_page(*pagep);

			/*
			 * If it's partial depopulation, it might get
			 * populated or depopulated again.  Mark the
			 * page gone.
			 */
			*pagep = NULL;

			unmap_start = unmap_start < 0 ? i : unmap_start;
			unmap_end = i + 1;
		}
	}

	if (unmap_start >= 0)
		pcpu_unmap(chunk, unmap_start, unmap_end, flush);
}
/**
 * pcpu_map - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start,@page_end) into @chunk.
 * vcache is flushed afterwards.
 */
static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
{
	unsigned int last = num_possible_cpus() - 1;
	unsigned int cpu;
	int err;

	for_each_possible_cpu(cpu) {
		err = map_kernel_range_noflush(
				pcpu_chunk_addr(chunk, cpu, page_start),
				(page_end - page_start) << PAGE_SHIFT,
				PAGE_KERNEL,
				pcpu_chunk_pagep(chunk, cpu, page_start));
		if (err < 0)
			return err;
	}

	/* flush at once, please read comments in pcpu_unmap() */
	flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
			 pcpu_chunk_addr(chunk, last, page_end));
	return 0;
}
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start,@page_end) into
 * @chunk.  The area is cleared on return.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
	const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
	int page_start = PFN_DOWN(off);
	int page_end = PFN_UP(off + size);
	int map_start = -1;
	int uninitialized_var(map_end);
	unsigned int cpu;
	int i;

	for (i = page_start; i < page_end; i++) {
		if (pcpu_chunk_page_occupied(chunk, i)) {
			if (map_start >= 0) {
				if (pcpu_map(chunk, map_start, map_end))
					goto err;
				map_start = -1;
			}
			continue;
		}

		map_start = map_start < 0 ? i : map_start;
		map_end = i + 1;

		for_each_possible_cpu(cpu) {
			struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);

			*pagep = alloc_pages_node(cpu_to_node(cpu),
						  alloc_mask, 0);
			if (!*pagep)
				goto err;
		}
	}

	if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
		goto err;

	for_each_possible_cpu(cpu)
		memset(chunk->vm->addr + (cpu << pcpu_unit_shift) + off, 0,
		       size);

	return 0;
err:
	/* likely under heavy memory pressure, give memory back */
	pcpu_depopulate_chunk(chunk, off, size, true);
	return -ENOMEM;
}
static void free_pcpu_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	if (chunk->vm)
		free_vm_area(chunk->vm);
	pcpu_realloc(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]), 0);
	kfree(chunk);
}
static struct pcpu_chunk *alloc_pcpu_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_realloc(NULL, 0,
				  PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
	if (!chunk->vm) {
		free_pcpu_chunk(chunk);
		return NULL;
	}

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}
/**
 * __alloc_percpu - allocate percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate percpu area of @size bytes aligned at @align.  Might
 * sleep.  Might trigger writeouts.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void *__alloc_percpu(size_t size, size_t align)
{
	void *ptr = NULL;
	struct pcpu_chunk *chunk;
	int slot, off;

	if (unlikely(!size || size > PAGE_SIZE << PCPU_MIN_UNIT_PAGES_SHIFT ||
		     align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_mutex);

	/* allocate area */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;
			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
			if (off != -ENOSPC)
				goto out_unlock;
		}
	}

	/* hmmm... no space left, create a new chunk */
	chunk = alloc_pcpu_chunk();
	if (!chunk)
		goto out_unlock;
	pcpu_chunk_relocate(chunk, -1);
	pcpu_chunk_addr_insert(chunk);

	off = pcpu_alloc_area(chunk, size, align);
	if (off < 0)
		goto out_unlock;

area_found:
	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		pcpu_free_area(chunk, off);
		goto out_unlock;
	}

	ptr = __addr_to_pcpu_ptr(chunk->vm->addr + off);
out_unlock:
	mutex_unlock(&pcpu_mutex);
	return ptr;
}
EXPORT_SYMBOL_GPL(__alloc_percpu);
static void pcpu_kill_chunk(struct pcpu_chunk *chunk)
{
	pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
	list_del(&chunk->list);
	rb_erase(&chunk->rb_node, &pcpu_addr_root);
	free_pcpu_chunk(chunk);
}
/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.  Might sleep.
 */
void free_percpu(void *ptr)
{
	void *addr = __pcpu_ptr_to_addr(ptr);
	struct pcpu_chunk *chunk;
	int off;

	if (!ptr)
		return;

	mutex_lock(&pcpu_mutex);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->vm->addr;

	pcpu_free_area(chunk, off);

	/* the chunk became fully free, kill one if there are other free ones */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos,
				    &pcpu_slot[pcpu_chunk_slot(chunk)], list)
			if (pos != chunk) {
				pcpu_kill_chunk(pos);
				break;
			}
	}

	mutex_unlock(&pcpu_mutex);
}
EXPORT_SYMBOL_GPL(free_percpu);
/**
 * pcpu_setup_static - initialize kernel static percpu area
 * @populate_pte_fn: callback to allocate pagetable
 * @pages: num_possible_cpus() * PFN_UP(cpu_size) pages
 * @cpu_size: the size of static percpu area in bytes
 *
 * Initialize kernel static percpu area.  The caller should allocate
 * all the necessary pages and pass them in @pages.
 * @populate_pte_fn() is called on each page to be used for percpu
 * mapping and is responsible for making sure all the necessary page
 * tables for the page are allocated.
 *
 * RETURNS:
 * The determined pcpu_unit_size which can be used to initialize
 * percpu access.
 */
size_t __init pcpu_setup_static(pcpu_populate_pte_fn_t populate_pte_fn,
				struct page **pages, size_t cpu_size)
{
	static struct vm_struct static_vm;
	struct pcpu_chunk *static_chunk;
	int nr_cpu_pages = DIV_ROUND_UP(cpu_size, PAGE_SIZE);
	unsigned int cpu;
	int err, i;

	pcpu_unit_pages_shift = max_t(int, PCPU_MIN_UNIT_PAGES_SHIFT,
				      order_base_2(cpu_size) - PAGE_SHIFT);

	pcpu_static_size = cpu_size;
	pcpu_unit_pages = 1 << pcpu_unit_pages_shift;
	pcpu_unit_shift = PAGE_SHIFT + pcpu_unit_pages_shift;
	pcpu_unit_size = 1 << pcpu_unit_shift;
	pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
	pcpu_nr_slots = pcpu_size_to_slot(pcpu_unit_size) + 1;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
		+ num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);

	/* allocate chunk slots */
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/* init and register vm area */
	static_vm.flags = VM_ALLOC;
	static_vm.size = pcpu_chunk_size;
	vm_area_register_early(&static_vm);

	/* init static_chunk */
	static_chunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&static_chunk->list);
	static_chunk->vm = &static_vm;
	static_chunk->free_size = pcpu_unit_size - pcpu_static_size;
	static_chunk->contig_hint = static_chunk->free_size;

	/* assign pages and map them */
	for_each_possible_cpu(cpu) {
		for (i = 0; i < nr_cpu_pages; i++) {
			*pcpu_chunk_pagep(static_chunk, cpu, i) = *pages++;
			populate_pte_fn(pcpu_chunk_addr(static_chunk, cpu, i));
		}
	}

	err = pcpu_map(static_chunk, 0, nr_cpu_pages);
	if (err)
		panic("failed to setup static percpu area, err=%d\n", err);

	/* link static_chunk in */
	pcpu_chunk_relocate(static_chunk, -1);
	pcpu_chunk_addr_insert(static_chunk);

	/* we're done */
	pcpu_base_addr = (void *)pcpu_chunk_addr(static_chunk, 0, 0);
	return pcpu_unit_size;
}