/*
 * linux/mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and
 * dynamic areas.  Percpu areas are allocated in chunks in vmalloc
 * area.  Each chunk consists of a boot-time determined number of
 * units and the first chunk is used for static percpu variables in
 * the kernel image (special boot time alloc/init handling is
 * necessary as these areas need to be brought up before allocation
 * services are running).  Units grow as necessary and all units grow
 * or shrink in unison.  When a chunk is filled up, another chunk is
 * allocated.  ie. in vmalloc area
 *
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  Ie,
 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
 *
 * To use this allocator, arch code should do the following:
 *
 * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */
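/*
 * Purely illustrative example of the chunk->map encoding described
 * above (sizes made up): in a hypothetical 1024 byte unit, a map of
 * { -128, 64, -32, 800 } means the first 128 bytes are allocated, the
 * next 64 bytes are free, the following 32 bytes are allocated and
 * the last 800 bytes are free.  The offset of an entry is the sum of
 * the absolute values of the preceding entries, e.g. the 800 byte
 * free area starts at 128 + 64 + 32 = 224.
 */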
#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
		 + (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
		 - (unsigned long)__per_cpu_start)
#endif
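/*
 * Illustrative sketch of the default translation above (addresses
 * made up): with __per_cpu_start at 0xffffffff80a00000 and
 * pcpu_base_addr at 0xffffc90000000000, an allocation at
 * 0xffffc90000000840 in unit0 translates to the percpu pointer
 * 0xffffffff80a00840, and __pcpu_ptr_to_addr() performs the exact
 * inverse.  Per-cpu accessors then add the accessing cpu's
 * __per_cpu_offset[] on top of such pointers.
 */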
struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	struct vm_struct	*vm;		/* mapped vmalloc region */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};
static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_chunk_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit numbers */
static unsigned int pcpu_first_unit_cpu __read_mostly;
static unsigned int pcpu_last_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;
/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.
 *
 * Free path accesses and alters only the index data structures, so it
 * can be safely called from atomic context.  When memory needs to be
 * returned to the system, the free path schedules reclaim_work which
 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 * reclaimed, releases both locks and frees the chunks.  Note that it's
 * necessary to grab both locks to remove a chunk from circulation as
 * the allocation path might be referencing the chunk with only
 * pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
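/*
 * Sketch of the resulting lock usage in the allocation path (an
 * illustration of the rules above, mirroring pcpu_alloc() below):
 *
 *	mutex_lock(&pcpu_alloc_mutex);
 *	spin_lock_irq(&pcpu_lock);
 *	... find a chunk and carve an area out of its map ...
 *	spin_unlock_irq(&pcpu_lock);
 *	... GFP_KERNEL page allocation and vmalloc mapping ...
 *	mutex_unlock(&pcpu_alloc_mutex);
 *
 * The free path only takes pcpu_lock (with interrupts disabled) and
 * defers giving memory back to pcpu_reclaim() in workqueue context.
 */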
static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
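/*
 * Illustrative slot arithmetic (values follow from the code above,
 * not from any particular machine): with PCPU_SLOT_BASE_SHIFT == 5,
 * __pcpu_size_to_slot() puts 64..127 byte areas in slot 4 and
 * 1024..2047 byte areas in slot 8.  A completely free chunk
 * (free_size == pcpu_unit_size) always goes to the last slot and a
 * chunk with less than sizeof(int) bytes free goes to slot 0.
 */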
static int pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->vm->addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
				    unsigned int cpu, int page_idx)
{
	/* must not be used on pre-mapped chunk */
	WARN_ON(chunk->immutable);

	return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}
static void pcpu_next_unpop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void pcpu_next_pop(struct pcpu_chunk *chunk, int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
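/*
 * Illustrative example (values made up): with pcpu_unit_pages == 8
 * and only pages 2-5 of a chunk populated,
 * pcpu_for_each_pop_region(chunk, rs, re, 0, 8) visits the single
 * region rs = 2, re = 6 while pcpu_for_each_unpop_region() visits
 * rs = 0, re = 2 and then rs = 6, re = 8.
 */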
/**
 * pcpu_mem_alloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_alloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	else {
		void *ptr = vmalloc(size);
		if (ptr)
			memset(ptr, 0, size);
		return ptr;
	}
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}
/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}
/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	void *first_start = pcpu_first_chunk->vm->addr;

	/* is it in the first chunk? */
	if (addr >= first_start && addr < first_start + pcpu_unit_size) {
		/* is it in the reserved area? */
		if (addr < first_start + pcpu_reserved_chunk_limit)
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[smp_processor_id()];
	return pcpu_get_page_chunk(vmalloc_to_page(addr));
}
/**
 * pcpu_extend_area_map - extend area map for allocation
 * @chunk: target chunk
 *
 * Extend area map of @chunk so that it can accommodate an allocation.
 * A single allocation can split an area into three areas, so this
 * function makes sure that @chunk->map has at least two extra slots.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
 * if area map is extended.
 *
 * RETURNS:
 * 0 if noop, 1 if successfully extended, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
{
	int new_alloc;
	int *new;
	size_t size;

	/* has enough? */
	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	spin_unlock_irq(&pcpu_lock);

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
	if (!new) {
		spin_lock_irq(&pcpu_lock);
		return -ENOMEM;
	}

	/*
	 * Acquire pcpu_lock and switch to new area map.  Only free
	 * could have happened in between, so map_used couldn't have
	 * increased.
	 */
	spin_lock_irq(&pcpu_lock);
	BUG_ON(new_alloc < chunk->map_used + 2);

	size = chunk->map_alloc * sizeof(chunk->map[0]);
	memcpy(new, chunk->map, size);

	/*
	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
	 * one of the first chunks and still using static map.
	 */
	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
		pcpu_mem_free(chunk->map, size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	return 1;
}
/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
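/*
 * Purely illustrative example of a split (numbers made up): if
 * chunk->map[i] is a 1024 byte free block and pcpu_alloc_area() needs
 * 512 bytes with a 64 byte head and therefore a 448 byte tail,
 * pcpu_split_block(chunk, i, 64, 448) turns the single entry into
 * { 64, 512, 448 } - the middle entry is then negated by the caller
 * to mark it allocated.
 */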
451 * pcpu_alloc_area - allocate area from a pcpu_chunk
452 * @chunk: chunk of interest
453 * @size: wanted size in bytes
454 * @align: wanted align
456 * Try to allocate @size bytes area aligned at @align from @chunk.
457 * Note that this function only allocates the offset. It doesn't
458 * populate or map the area.
460 * @chunk->map must have at least two free slots.
466 * Allocated offset in @chunk on success, -1 if no matching area is
469 static int pcpu_alloc_area(struct pcpu_chunk
*chunk
, int size
, int align
)
471 int oslot
= pcpu_chunk_slot(chunk
);
475 for (i
= 0, off
= 0; i
< chunk
->map_used
; off
+= abs(chunk
->map
[i
++])) {
476 bool is_last
= i
+ 1 == chunk
->map_used
;
479 /* extra for alignment requirement */
480 head
= ALIGN(off
, align
) - off
;
481 BUG_ON(i
== 0 && head
!= 0);
483 if (chunk
->map
[i
] < 0)
485 if (chunk
->map
[i
] < head
+ size
) {
486 max_contig
= max(chunk
->map
[i
], max_contig
);
491 * If head is small or the previous block is free,
492 * merge'em. Note that 'small' is defined as smaller
493 * than sizeof(int), which is very small but isn't too
494 * uncommon for percpu allocations.
496 if (head
&& (head
< sizeof(int) || chunk
->map
[i
- 1] > 0)) {
497 if (chunk
->map
[i
- 1] > 0)
498 chunk
->map
[i
- 1] += head
;
500 chunk
->map
[i
- 1] -= head
;
501 chunk
->free_size
-= head
;
503 chunk
->map
[i
] -= head
;
508 /* if tail is small, just keep it around */
509 tail
= chunk
->map
[i
] - head
- size
;
510 if (tail
< sizeof(int))
513 /* split if warranted */
515 pcpu_split_block(chunk
, i
, head
, tail
);
519 max_contig
= max(chunk
->map
[i
- 1], max_contig
);
522 max_contig
= max(chunk
->map
[i
+ 1], max_contig
);
525 /* update hint and mark allocated */
527 chunk
->contig_hint
= max_contig
; /* fully scanned */
529 chunk
->contig_hint
= max(chunk
->contig_hint
,
532 chunk
->free_size
-= chunk
->map
[i
];
533 chunk
->map
[i
] = -chunk
->map
[i
];
535 pcpu_chunk_relocate(chunk
, oslot
);
539 chunk
->contig_hint
= max_contig
; /* fully scanned */
540 pcpu_chunk_relocate(chunk
, oslot
);
542 /* tell the upper layer that this chunk has no matching area */
547 * pcpu_free_area - free area to a pcpu_chunk
548 * @chunk: chunk of interest
549 * @freeme: offset of area to free
551 * Free area starting from @freeme to @chunk. Note that this function
552 * only modifies the allocation map. It doesn't depopulate or unmap
558 static void pcpu_free_area(struct pcpu_chunk
*chunk
, int freeme
)
560 int oslot
= pcpu_chunk_slot(chunk
);
563 for (i
= 0, off
= 0; i
< chunk
->map_used
; off
+= abs(chunk
->map
[i
++]))
566 BUG_ON(off
!= freeme
);
567 BUG_ON(chunk
->map
[i
] > 0);
569 chunk
->map
[i
] = -chunk
->map
[i
];
570 chunk
->free_size
+= chunk
->map
[i
];
572 /* merge with previous? */
573 if (i
> 0 && chunk
->map
[i
- 1] >= 0) {
574 chunk
->map
[i
- 1] += chunk
->map
[i
];
576 memmove(&chunk
->map
[i
], &chunk
->map
[i
+ 1],
577 (chunk
->map_used
- i
) * sizeof(chunk
->map
[0]));
580 /* merge with next? */
581 if (i
+ 1 < chunk
->map_used
&& chunk
->map
[i
+ 1] >= 0) {
582 chunk
->map
[i
] += chunk
->map
[i
+ 1];
584 memmove(&chunk
->map
[i
+ 1], &chunk
->map
[i
+ 2],
585 (chunk
->map_used
- (i
+ 1)) * sizeof(chunk
->map
[0]));
588 chunk
->contig_hint
= max(chunk
->map
[i
], chunk
->contig_hint
);
589 pcpu_chunk_relocate(chunk
, oslot
);
593 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
594 * @chunk: chunk of interest
595 * @bitmapp: output parameter for bitmap
596 * @may_alloc: may allocate the array
598 * Returns pointer to array of pointers to struct page and bitmap,
599 * both of which can be indexed with pcpu_page_idx(). The returned
600 * array is cleared to zero and *@bitmapp is copied from
601 * @chunk->populated. Note that there is only one array and bitmap
602 * and access exclusion is the caller's responsibility.
605 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
606 * Otherwise, don't care.
609 * Pointer to temp pages array on success, NULL on failure.
611 static struct page
**pcpu_get_pages_and_bitmap(struct pcpu_chunk
*chunk
,
612 unsigned long **bitmapp
,
615 static struct page
**pages
;
616 static unsigned long *bitmap
;
617 size_t pages_size
= pcpu_nr_units
* pcpu_unit_pages
* sizeof(pages
[0]);
618 size_t bitmap_size
= BITS_TO_LONGS(pcpu_unit_pages
) *
619 sizeof(unsigned long);
621 if (!pages
|| !bitmap
) {
622 if (may_alloc
&& !pages
)
623 pages
= pcpu_mem_alloc(pages_size
);
624 if (may_alloc
&& !bitmap
)
625 bitmap
= pcpu_mem_alloc(bitmap_size
);
626 if (!pages
|| !bitmap
)
630 memset(pages
, 0, pages_size
);
631 bitmap_copy(bitmap
, chunk
->populated
, pcpu_unit_pages
);
638 * pcpu_free_pages - free pages which were allocated for @chunk
639 * @chunk: chunk pages were allocated for
640 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
641 * @populated: populated bitmap
642 * @page_start: page index of the first page to be freed
643 * @page_end: page index of the last page to be freed + 1
 * Free pages [@page_start, @page_end) in @pages for all units.
646 * The pages were allocated for @chunk.
648 static void pcpu_free_pages(struct pcpu_chunk
*chunk
,
649 struct page
**pages
, unsigned long *populated
,
650 int page_start
, int page_end
)
655 for_each_possible_cpu(cpu
) {
656 for (i
= page_start
; i
< page_end
; i
++) {
657 struct page
*page
= pages
[pcpu_page_idx(cpu
, i
)];
666 * pcpu_alloc_pages - allocates pages for @chunk
667 * @chunk: target chunk
668 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
669 * @populated: populated bitmap
670 * @page_start: page index of the first page to be allocated
671 * @page_end: page index of the last page to be allocated + 1
673 * Allocate pages [@page_start,@page_end) into @pages for all units.
674 * The allocation is for @chunk. Percpu core doesn't care about the
675 * content of @pages and will pass it verbatim to pcpu_map_pages().
677 static int pcpu_alloc_pages(struct pcpu_chunk
*chunk
,
678 struct page
**pages
, unsigned long *populated
,
679 int page_start
, int page_end
)
681 const gfp_t gfp
= GFP_KERNEL
| __GFP_HIGHMEM
| __GFP_COLD
;
685 for_each_possible_cpu(cpu
) {
686 for (i
= page_start
; i
< page_end
; i
++) {
687 struct page
**pagep
= &pages
[pcpu_page_idx(cpu
, i
)];
689 *pagep
= alloc_pages_node(cpu_to_node(cpu
), gfp
, 0);
691 pcpu_free_pages(chunk
, pages
, populated
,
692 page_start
, page_end
);
701 * pcpu_pre_unmap_flush - flush cache prior to unmapping
702 * @chunk: chunk the regions to be flushed belongs to
703 * @page_start: page index of the first page to be flushed
704 * @page_end: page index of the last page to be flushed + 1
706 * Pages in [@page_start,@page_end) of @chunk are about to be
707 * unmapped. Flush cache. As each flushing trial can be very
708 * expensive, issue flush on the whole region at once rather than
709 * doing it for each cpu. This could be an overkill but is more
712 static void pcpu_pre_unmap_flush(struct pcpu_chunk
*chunk
,
713 int page_start
, int page_end
)
716 pcpu_chunk_addr(chunk
, pcpu_first_unit_cpu
, page_start
),
717 pcpu_chunk_addr(chunk
, pcpu_last_unit_cpu
, page_end
));
720 static void __pcpu_unmap_pages(unsigned long addr
, int nr_pages
)
722 unmap_kernel_range_noflush(addr
, nr_pages
<< PAGE_SHIFT
);
726 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
727 * @chunk: chunk of interest
728 * @pages: pages array which can be used to pass information to free
729 * @populated: populated bitmap
730 * @page_start: page index of the first page to unmap
731 * @page_end: page index of the last page to unmap + 1
733 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
734 * Corresponding elements in @pages were cleared by the caller and can
735 * be used to carry information to pcpu_free_pages() which will be
736 * called after all unmaps are finished. The caller should call
737 * proper pre/post flush functions.
739 static void pcpu_unmap_pages(struct pcpu_chunk
*chunk
,
740 struct page
**pages
, unsigned long *populated
,
741 int page_start
, int page_end
)
746 for_each_possible_cpu(cpu
) {
747 for (i
= page_start
; i
< page_end
; i
++) {
750 page
= pcpu_chunk_page(chunk
, cpu
, i
);
752 pages
[pcpu_page_idx(cpu
, i
)] = page
;
754 __pcpu_unmap_pages(pcpu_chunk_addr(chunk
, cpu
, page_start
),
755 page_end
- page_start
);
758 for (i
= page_start
; i
< page_end
; i
++)
759 __clear_bit(i
, populated
);
763 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
764 * @chunk: pcpu_chunk the regions to be flushed belong to
765 * @page_start: page index of the first page to be flushed
766 * @page_end: page index of the last page to be flushed + 1
768 * Pages [@page_start,@page_end) of @chunk have been unmapped. Flush
769 * TLB for the regions. This can be skipped if the area is to be
770 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
772 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
773 * for the whole region.
775 static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk
*chunk
,
776 int page_start
, int page_end
)
778 flush_tlb_kernel_range(
779 pcpu_chunk_addr(chunk
, pcpu_first_unit_cpu
, page_start
),
780 pcpu_chunk_addr(chunk
, pcpu_last_unit_cpu
, page_end
));
783 static int __pcpu_map_pages(unsigned long addr
, struct page
**pages
,
786 return map_kernel_range_noflush(addr
, nr_pages
<< PAGE_SHIFT
,
791 * pcpu_map_pages - map pages into a pcpu_chunk
792 * @chunk: chunk of interest
793 * @pages: pages array containing pages to be mapped
794 * @populated: populated bitmap
795 * @page_start: page index of the first page to map
796 * @page_end: page index of the last page to map + 1
798 * For each cpu, map pages [@page_start,@page_end) into @chunk. The
799 * caller is responsible for calling pcpu_post_map_flush() after all
800 * mappings are complete.
802 * This function is responsible for setting corresponding bits in
803 * @chunk->populated bitmap and whatever is necessary for reverse
804 * lookup (addr -> chunk).
806 static int pcpu_map_pages(struct pcpu_chunk
*chunk
,
807 struct page
**pages
, unsigned long *populated
,
808 int page_start
, int page_end
)
810 unsigned int cpu
, tcpu
;
813 for_each_possible_cpu(cpu
) {
814 err
= __pcpu_map_pages(pcpu_chunk_addr(chunk
, cpu
, page_start
),
815 &pages
[pcpu_page_idx(cpu
, page_start
)],
816 page_end
- page_start
);
821 /* mapping successful, link chunk and mark populated */
822 for (i
= page_start
; i
< page_end
; i
++) {
823 for_each_possible_cpu(cpu
)
824 pcpu_set_page_chunk(pages
[pcpu_page_idx(cpu
, i
)],
826 __set_bit(i
, populated
);
832 for_each_possible_cpu(tcpu
) {
835 __pcpu_unmap_pages(pcpu_chunk_addr(chunk
, tcpu
, page_start
),
836 page_end
- page_start
);
842 * pcpu_post_map_flush - flush cache after mapping
843 * @chunk: pcpu_chunk the regions to be flushed belong to
844 * @page_start: page index of the first page to be flushed
845 * @page_end: page index of the last page to be flushed + 1
847 * Pages [@page_start,@page_end) of @chunk have been mapped. Flush
850 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
851 * for the whole region.
853 static void pcpu_post_map_flush(struct pcpu_chunk
*chunk
,
854 int page_start
, int page_end
)
857 pcpu_chunk_addr(chunk
, pcpu_first_unit_cpu
, page_start
),
858 pcpu_chunk_addr(chunk
, pcpu_last_unit_cpu
, page_end
));
862 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
863 * @chunk: chunk to depopulate
864 * @off: offset to the area to depopulate
865 * @size: size of the area to depopulate in bytes
866 * @flush: whether to flush cache and tlb or not
868 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
869 * from @chunk. If @flush is true, vcache is flushed before unmapping
875 static void pcpu_depopulate_chunk(struct pcpu_chunk
*chunk
, int off
, int size
)
877 int page_start
= PFN_DOWN(off
);
878 int page_end
= PFN_UP(off
+ size
);
880 unsigned long *populated
;
883 /* quick path, check whether it's empty already */
884 pcpu_for_each_unpop_region(chunk
, rs
, re
, page_start
, page_end
) {
885 if (rs
== page_start
&& re
== page_end
)
890 /* immutable chunks can't be depopulated */
891 WARN_ON(chunk
->immutable
);
894 * If control reaches here, there must have been at least one
895 * successful population attempt so the temp pages array must
898 pages
= pcpu_get_pages_and_bitmap(chunk
, &populated
, false);
902 pcpu_pre_unmap_flush(chunk
, page_start
, page_end
);
904 pcpu_for_each_pop_region(chunk
, rs
, re
, page_start
, page_end
)
905 pcpu_unmap_pages(chunk
, pages
, populated
, rs
, re
);
907 /* no need to flush tlb, vmalloc will handle it lazily */
909 pcpu_for_each_pop_region(chunk
, rs
, re
, page_start
, page_end
)
910 pcpu_free_pages(chunk
, pages
, populated
, rs
, re
);
912 /* commit new bitmap */
913 bitmap_copy(chunk
->populated
, populated
, pcpu_unit_pages
);
917 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
918 * @chunk: chunk of interest
919 * @off: offset to the area to populate
920 * @size: size of the area to populate in bytes
922 * For each cpu, populate and map pages [@page_start,@page_end) into
923 * @chunk. The area is cleared on return.
926 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
928 static int pcpu_populate_chunk(struct pcpu_chunk
*chunk
, int off
, int size
)
930 int page_start
= PFN_DOWN(off
);
931 int page_end
= PFN_UP(off
+ size
);
932 int free_end
= page_start
, unmap_end
= page_start
;
934 unsigned long *populated
;
938 /* quick path, check whether all pages are already there */
939 pcpu_for_each_pop_region(chunk
, rs
, re
, page_start
, page_end
) {
940 if (rs
== page_start
&& re
== page_end
)
945 /* need to allocate and map pages, this chunk can't be immutable */
946 WARN_ON(chunk
->immutable
);
948 pages
= pcpu_get_pages_and_bitmap(chunk
, &populated
, true);
953 pcpu_for_each_unpop_region(chunk
, rs
, re
, page_start
, page_end
) {
954 rc
= pcpu_alloc_pages(chunk
, pages
, populated
, rs
, re
);
960 pcpu_for_each_unpop_region(chunk
, rs
, re
, page_start
, page_end
) {
961 rc
= pcpu_map_pages(chunk
, pages
, populated
, rs
, re
);
966 pcpu_post_map_flush(chunk
, page_start
, page_end
);
968 /* commit new bitmap */
969 bitmap_copy(chunk
->populated
, populated
, pcpu_unit_pages
);
971 for_each_possible_cpu(cpu
)
972 memset((void *)pcpu_chunk_addr(chunk
, cpu
, 0) + off
, 0, size
);
976 pcpu_pre_unmap_flush(chunk
, page_start
, unmap_end
);
977 pcpu_for_each_unpop_region(chunk
, rs
, re
, page_start
, unmap_end
)
978 pcpu_unmap_pages(chunk
, pages
, populated
, rs
, re
);
979 pcpu_post_unmap_tlb_flush(chunk
, page_start
, unmap_end
);
981 pcpu_for_each_unpop_region(chunk
, rs
, re
, page_start
, free_end
)
982 pcpu_free_pages(chunk
, pages
, populated
, rs
, re
);
986 static void free_pcpu_chunk(struct pcpu_chunk
*chunk
)
991 free_vm_area(chunk
->vm
);
992 pcpu_mem_free(chunk
->map
, chunk
->map_alloc
* sizeof(chunk
->map
[0]));
996 static struct pcpu_chunk
*alloc_pcpu_chunk(void)
998 struct pcpu_chunk
*chunk
;
1000 chunk
= kzalloc(pcpu_chunk_struct_size
, GFP_KERNEL
);
1004 chunk
->map
= pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC
* sizeof(chunk
->map
[0]));
1005 chunk
->map_alloc
= PCPU_DFL_MAP_ALLOC
;
1006 chunk
->map
[chunk
->map_used
++] = pcpu_unit_size
;
1008 chunk
->vm
= get_vm_area(pcpu_chunk_size
, VM_ALLOC
);
1010 free_pcpu_chunk(chunk
);
1014 INIT_LIST_HEAD(&chunk
->list
);
1015 chunk
->free_size
= pcpu_unit_size
;
1016 chunk
->contig_hint
= pcpu_unit_size
;
1022 * pcpu_alloc - the percpu allocator
1023 * @size: size of area to allocate in bytes
1024 * @align: alignment of area (max PAGE_SIZE)
1025 * @reserved: allocate from the reserved chunk if available
1027 * Allocate percpu area of @size bytes aligned at @align.
1030 * Does GFP_KERNEL allocation.
1033 * Percpu pointer to the allocated area on success, NULL on failure.
1035 static void *pcpu_alloc(size_t size
, size_t align
, bool reserved
)
1037 struct pcpu_chunk
*chunk
;
1040 if (unlikely(!size
|| size
> PCPU_MIN_UNIT_SIZE
|| align
> PAGE_SIZE
)) {
1041 WARN(true, "illegal size (%zu) or align (%zu) for "
1042 "percpu allocation\n", size
, align
);
1046 mutex_lock(&pcpu_alloc_mutex
);
1047 spin_lock_irq(&pcpu_lock
);
1049 /* serve reserved allocations from the reserved chunk if available */
1050 if (reserved
&& pcpu_reserved_chunk
) {
1051 chunk
= pcpu_reserved_chunk
;
1052 if (size
> chunk
->contig_hint
||
1053 pcpu_extend_area_map(chunk
) < 0)
1055 off
= pcpu_alloc_area(chunk
, size
, align
);
1062 /* search through normal chunks */
1063 for (slot
= pcpu_size_to_slot(size
); slot
< pcpu_nr_slots
; slot
++) {
1064 list_for_each_entry(chunk
, &pcpu_slot
[slot
], list
) {
1065 if (size
> chunk
->contig_hint
)
1068 switch (pcpu_extend_area_map(chunk
)) {
1072 goto restart
; /* pcpu_lock dropped, restart */
1077 off
= pcpu_alloc_area(chunk
, size
, align
);
1083 /* hmmm... no space left, create a new chunk */
1084 spin_unlock_irq(&pcpu_lock
);
1086 chunk
= alloc_pcpu_chunk();
1088 goto fail_unlock_mutex
;
1090 spin_lock_irq(&pcpu_lock
);
1091 pcpu_chunk_relocate(chunk
, -1);
1095 spin_unlock_irq(&pcpu_lock
);
1097 /* populate, map and clear the area */
1098 if (pcpu_populate_chunk(chunk
, off
, size
)) {
1099 spin_lock_irq(&pcpu_lock
);
1100 pcpu_free_area(chunk
, off
);
1104 mutex_unlock(&pcpu_alloc_mutex
);
1106 /* return address relative to unit0 */
1107 return __addr_to_pcpu_ptr(chunk
->vm
->addr
+ off
);
1110 spin_unlock_irq(&pcpu_lock
);
1112 mutex_unlock(&pcpu_alloc_mutex
);
1117 * __alloc_percpu - allocate dynamic percpu area
1118 * @size: size of area to allocate in bytes
1119 * @align: alignment of area (max PAGE_SIZE)
1121 * Allocate percpu area of @size bytes aligned at @align. Might
1122 * sleep. Might trigger writeouts.
1125 * Does GFP_KERNEL allocation.
1128 * Percpu pointer to the allocated area on success, NULL on failure.
1130 void *__alloc_percpu(size_t size
, size_t align
)
1132 return pcpu_alloc(size
, align
, false);
1134 EXPORT_SYMBOL_GPL(__alloc_percpu
);
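/*
 * Example usage (illustrative sketch only, not part of this file;
 * struct foo_stats is a made-up type):
 *
 *	struct foo_stats { unsigned long packets, bytes; };
 *	struct foo_stats *stats;
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	stats = __alloc_percpu(sizeof(struct foo_stats),
 *			       __alignof__(struct foo_stats));
 *	if (!stats)
 *		return -ENOMEM;
 *	...
 *	for_each_possible_cpu(cpu)
 *		sum += per_cpu_ptr(stats, cpu)->packets;
 *	free_percpu(stats);
 */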
1137 * __alloc_reserved_percpu - allocate reserved percpu area
1138 * @size: size of area to allocate in bytes
1139 * @align: alignment of area (max PAGE_SIZE)
1141 * Allocate percpu area of @size bytes aligned at @align from reserved
1142 * percpu area if arch has set it up; otherwise, allocation is served
1143 * from the same dynamic area. Might sleep. Might trigger writeouts.
1146 * Does GFP_KERNEL allocation.
1149 * Percpu pointer to the allocated area on success, NULL on failure.
1151 void *__alloc_reserved_percpu(size_t size
, size_t align
)
1153 return pcpu_alloc(size
, align
, true);
1157 * pcpu_reclaim - reclaim fully free chunks, workqueue function
1160 * Reclaim all fully free chunks except for the first one.
1163 * workqueue context.
1165 static void pcpu_reclaim(struct work_struct
*work
)
1168 struct list_head
*head
= &pcpu_slot
[pcpu_nr_slots
- 1];
1169 struct pcpu_chunk
*chunk
, *next
;
1171 mutex_lock(&pcpu_alloc_mutex
);
1172 spin_lock_irq(&pcpu_lock
);
1174 list_for_each_entry_safe(chunk
, next
, head
, list
) {
1175 WARN_ON(chunk
->immutable
);
1177 /* spare the first one */
1178 if (chunk
== list_first_entry(head
, struct pcpu_chunk
, list
))
1181 list_move(&chunk
->list
, &todo
);
1184 spin_unlock_irq(&pcpu_lock
);
1186 list_for_each_entry_safe(chunk
, next
, &todo
, list
) {
1187 pcpu_depopulate_chunk(chunk
, 0, pcpu_unit_size
);
1188 free_pcpu_chunk(chunk
);
1191 mutex_unlock(&pcpu_alloc_mutex
);
1195 * free_percpu - free percpu area
1196 * @ptr: pointer to area to free
1198 * Free percpu area @ptr.
1201 * Can be called from atomic context.
1203 void free_percpu(void *ptr
)
1205 void *addr
= __pcpu_ptr_to_addr(ptr
);
1206 struct pcpu_chunk
*chunk
;
1207 unsigned long flags
;
1213 spin_lock_irqsave(&pcpu_lock
, flags
);
1215 chunk
= pcpu_chunk_addr_search(addr
);
1216 off
= addr
- chunk
->vm
->addr
;
1218 pcpu_free_area(chunk
, off
);
	/* if there is more than one fully free chunk, wake up the grim reaper */
1221 if (chunk
->free_size
== pcpu_unit_size
) {
1222 struct pcpu_chunk
*pos
;
1224 list_for_each_entry(pos
, &pcpu_slot
[pcpu_nr_slots
- 1], list
)
1226 schedule_work(&pcpu_reclaim_work
);
1231 spin_unlock_irqrestore(&pcpu_lock
, flags
);
1233 EXPORT_SYMBOL_GPL(free_percpu
);
1235 static inline size_t pcpu_calc_fc_sizes(size_t static_size
,
1236 size_t reserved_size
,
1241 size_sum
= PFN_ALIGN(static_size
+ reserved_size
+
1242 (*dyn_sizep
>= 0 ? *dyn_sizep
: 0));
1243 if (*dyn_sizep
!= 0)
1244 *dyn_sizep
= size_sum
- static_size
- reserved_size
;
1250 * pcpu_alloc_alloc_info - allocate percpu allocation info
1251 * @nr_groups: the number of groups
1252 * @nr_units: the number of units
1254 * Allocate ai which is large enough for @nr_groups groups containing
1255 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1256 * cpu_map array which is long enough for @nr_units and filled with
1257 * NR_CPUS. It's the caller's responsibility to initialize cpu_map
1258 * pointer of other groups.
1261 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1264 struct pcpu_alloc_info
* __init
pcpu_alloc_alloc_info(int nr_groups
,
1267 struct pcpu_alloc_info
*ai
;
1268 size_t base_size
, ai_size
;
1272 base_size
= ALIGN(sizeof(*ai
) + nr_groups
* sizeof(ai
->groups
[0]),
1273 __alignof__(ai
->groups
[0].cpu_map
[0]));
1274 ai_size
= base_size
+ nr_units
* sizeof(ai
->groups
[0].cpu_map
[0]);
1276 ptr
= alloc_bootmem_nopanic(PFN_ALIGN(ai_size
));
1282 ai
->groups
[0].cpu_map
= ptr
;
1284 for (unit
= 0; unit
< nr_units
; unit
++)
1285 ai
->groups
[0].cpu_map
[unit
] = NR_CPUS
;
1287 ai
->nr_groups
= nr_groups
;
1288 ai
->__ai_size
= PFN_ALIGN(ai_size
);
1294 * pcpu_free_alloc_info - free percpu allocation info
1295 * @ai: pcpu_alloc_info to free
1297 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1299 void __init
pcpu_free_alloc_info(struct pcpu_alloc_info
*ai
)
1301 free_bootmem(__pa(ai
), ai
->__ai_size
);
1305 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1306 * @reserved_size: the size of reserved percpu area in bytes
1307 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1308 * @atom_size: allocation atom size
1309 * @cpu_distance_fn: callback to determine distance between cpus, optional
1311 * This function determines grouping of units, their mappings to cpus
1312 * and other parameters considering needed percpu size, allocation
1313 * atom size and distances between CPUs.
 * Groups are always multiples of atom size and CPUs which are of
1316 * LOCAL_DISTANCE both ways are grouped together and share space for
1317 * units in the same group. The returned configuration is guaranteed
1318 * to have CPUs on different nodes on different groups and >=75% usage
1319 * of allocated virtual address space.
1322 * On success, pointer to the new allocation_info is returned. On
1323 * failure, ERR_PTR value is returned.
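 *
 * For illustration only (configuration made up): on a machine with 6
 * possible CPUs split over two NUMA nodes (cpus 0-2 and 3-5), a 2MB
 * @atom_size and roughly 64k needed per unit, the walk below forms
 * two groups of three cpus and settles on 4 units per 2MB allocation,
 * giving a 512k unit_size with two groups of four units each and only
 * one unit per group left unused.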
1325 struct pcpu_alloc_info
* __init
pcpu_build_alloc_info(
1326 size_t reserved_size
, ssize_t dyn_size
,
1328 pcpu_fc_cpu_distance_fn_t cpu_distance_fn
)
1330 static int group_map
[NR_CPUS
] __initdata
;
1331 static int group_cnt
[NR_CPUS
] __initdata
;
1332 const size_t static_size
= __per_cpu_end
- __per_cpu_start
;
1333 int group_cnt_max
= 0, nr_groups
= 1, nr_units
= 0;
1334 size_t size_sum
, min_unit_size
, alloc_size
;
1335 int upa
, max_upa
, uninitialized_var(best_upa
); /* units_per_alloc */
1336 int last_allocs
, group
, unit
;
1337 unsigned int cpu
, tcpu
;
1338 struct pcpu_alloc_info
*ai
;
1339 unsigned int *cpu_map
;
1342 * Determine min_unit_size, alloc_size and max_upa such that
1343 * alloc_size is multiple of atom_size and is the smallest
 * which can accommodate 4k aligned segments which are equal to
1345 * or larger than min_unit_size.
1347 size_sum
= pcpu_calc_fc_sizes(static_size
, reserved_size
, &dyn_size
);
1348 min_unit_size
= max_t(size_t, size_sum
, PCPU_MIN_UNIT_SIZE
);
1350 alloc_size
= roundup(min_unit_size
, atom_size
);
1351 upa
= alloc_size
/ min_unit_size
;
1352 while (alloc_size
% upa
|| ((alloc_size
/ upa
) & ~PAGE_MASK
))
1356 /* group cpus according to their proximity */
1357 for_each_possible_cpu(cpu
) {
1360 for_each_possible_cpu(tcpu
) {
1363 if (group_map
[tcpu
] == group
&& cpu_distance_fn
&&
1364 (cpu_distance_fn(cpu
, tcpu
) > LOCAL_DISTANCE
||
1365 cpu_distance_fn(tcpu
, cpu
) > LOCAL_DISTANCE
)) {
1367 nr_groups
= max(nr_groups
, group
+ 1);
1371 group_map
[cpu
] = group
;
1373 group_cnt_max
= max(group_cnt_max
, group_cnt
[group
]);
1377 * Expand unit size until address space usage goes over 75%
1378 * and then as much as possible without using more address
1381 last_allocs
= INT_MAX
;
1382 for (upa
= max_upa
; upa
; upa
--) {
1383 int allocs
= 0, wasted
= 0;
1385 if (alloc_size
% upa
|| ((alloc_size
/ upa
) & ~PAGE_MASK
))
1388 for (group
= 0; group
< nr_groups
; group
++) {
1389 int this_allocs
= DIV_ROUND_UP(group_cnt
[group
], upa
);
1390 allocs
+= this_allocs
;
1391 wasted
+= this_allocs
* upa
- group_cnt
[group
];
1395 * Don't accept if wastage is over 25%. The
1396 * greater-than comparison ensures upa==1 always
1397 * passes the following check.
1399 if (wasted
> num_possible_cpus() / 3)
1402 /* and then don't consume more memory */
1403 if (allocs
> last_allocs
)
1405 last_allocs
= allocs
;
1410 /* allocate and fill alloc_info */
1411 for (group
= 0; group
< nr_groups
; group
++)
1412 nr_units
+= roundup(group_cnt
[group
], upa
);
1414 ai
= pcpu_alloc_alloc_info(nr_groups
, nr_units
);
1416 return ERR_PTR(-ENOMEM
);
1417 cpu_map
= ai
->groups
[0].cpu_map
;
1419 for (group
= 0; group
< nr_groups
; group
++) {
1420 ai
->groups
[group
].cpu_map
= cpu_map
;
1421 cpu_map
+= roundup(group_cnt
[group
], upa
);
1424 ai
->static_size
= static_size
;
1425 ai
->reserved_size
= reserved_size
;
1426 ai
->dyn_size
= dyn_size
;
1427 ai
->unit_size
= alloc_size
/ upa
;
1428 ai
->atom_size
= atom_size
;
1429 ai
->alloc_size
= alloc_size
;
1431 for (group
= 0, unit
= 0; group_cnt
[group
]; group
++) {
1432 struct pcpu_group_info
*gi
= &ai
->groups
[group
];
1435 * Initialize base_offset as if all groups are located
1436 * back-to-back. The caller should update this to
1437 * reflect actual allocation.
1439 gi
->base_offset
= unit
* ai
->unit_size
;
1441 for_each_possible_cpu(cpu
)
1442 if (group_map
[cpu
] == group
)
1443 gi
->cpu_map
[gi
->nr_units
++] = cpu
;
1444 gi
->nr_units
= roundup(gi
->nr_units
, upa
);
1445 unit
+= gi
->nr_units
;
1447 BUG_ON(unit
!= nr_units
);
1453 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1455 * @ai: allocation info to dump
1457 * Print out information about @ai using loglevel @lvl.
1459 static void pcpu_dump_alloc_info(const char *lvl
,
1460 const struct pcpu_alloc_info
*ai
)
1462 int group_width
= 1, cpu_width
= 1, width
;
1463 char empty_str
[] = "--------";
1464 int alloc
= 0, alloc_end
= 0;
1466 int upa
, apl
; /* units per alloc, allocs per line */
1472 v
= num_possible_cpus();
1475 empty_str
[min_t(int, cpu_width
, sizeof(empty_str
) - 1)] = '\0';
1477 upa
= ai
->alloc_size
/ ai
->unit_size
;
1478 width
= upa
* (cpu_width
+ 1) + group_width
+ 3;
1479 apl
= rounddown_pow_of_two(max(60 / width
, 1));
1481 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1482 lvl
, ai
->static_size
, ai
->reserved_size
, ai
->dyn_size
,
1483 ai
->unit_size
, ai
->alloc_size
/ ai
->atom_size
, ai
->atom_size
);
1485 for (group
= 0; group
< ai
->nr_groups
; group
++) {
1486 const struct pcpu_group_info
*gi
= &ai
->groups
[group
];
1487 int unit
= 0, unit_end
= 0;
1489 BUG_ON(gi
->nr_units
% upa
);
1490 for (alloc_end
+= gi
->nr_units
/ upa
;
1491 alloc
< alloc_end
; alloc
++) {
1492 if (!(alloc
% apl
)) {
1494 printk("%spcpu-alloc: ", lvl
);
1496 printk("[%0*d] ", group_width
, group
);
1498 for (unit_end
+= upa
; unit
< unit_end
; unit
++)
1499 if (gi
->cpu_map
[unit
] != NR_CPUS
)
1500 printk("%0*d ", cpu_width
,
1503 printk("%s ", empty_str
);
1510 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1512 * @base_addr: mapped address
1514 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
1518 * @ai contains all information necessary to initialize the first
1519 * chunk and prime the dynamic percpu allocator.
1521 * @ai->static_size is the size of static percpu area.
1523 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1524 * reserve after the static area in the first chunk. This reserves
1525 * the first chunk such that it's available only through reserved
1526 * percpu allocation. This is primarily used to serve module percpu
1527 * static areas on architectures where the addressing model has
1528 * limited offset range for symbol relocations to guarantee module
1529 * percpu symbols fall inside the relocatable range.
1531 * @ai->dyn_size determines the number of bytes available for dynamic
1532 * allocation in the first chunk. The area between @ai->static_size +
1533 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1535 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1536 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1539 * @ai->atom_size is the allocation atom size and used as alignment
1542 * @ai->alloc_size is the allocation size and always multiple of
1543 * @ai->atom_size. This is larger than @ai->atom_size if
1544 * @ai->unit_size is larger than @ai->atom_size.
1546 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1547 * percpu areas. Units which should be colocated are put into the
1548 * same group. Dynamic VM areas will be allocated according to these
1549 * groupings. If @ai->nr_groups is zero, a single group containing
1550 * all units is assumed.
1552 * The caller should have mapped the first chunk at @base_addr and
1553 * copied static data to each unit.
1555 * If the first chunk ends up with both reserved and dynamic areas, it
1556 * is served by two chunks - one to serve the core static and reserved
1557 * areas and the other for the dynamic area. They share the same vm
1558 * and page map but uses different area allocation map to stay away
1559 * from each other. The latter chunk is circulated in the chunk slots
1560 * and available for dynamic allocation like any other chunks.
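 *
 * For illustration (sizes made up): with a 44k static area, 8k
 * reserved area, 12k dynamic area and 64k unit size, the static
 * chunk's area map would be { -44k, 8k } and serve only reserved
 * allocations, while the dynamic chunk's map would be { -52k, 12k }
 * and be circulated in the chunk slots like any other chunk.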
1563 * 0 on success, -errno on failure.
1565 int __init
pcpu_setup_first_chunk(const struct pcpu_alloc_info
*ai
,
1568 static struct vm_struct first_vm
;
1569 static int smap
[2], dmap
[2];
1570 size_t dyn_size
= ai
->dyn_size
;
1571 size_t size_sum
= ai
->static_size
+ ai
->reserved_size
+ dyn_size
;
1572 struct pcpu_chunk
*schunk
, *dchunk
= NULL
;
1573 unsigned long *unit_off
;
1579 BUILD_BUG_ON(ARRAY_SIZE(smap
) >= PCPU_DFL_MAP_ALLOC
||
1580 ARRAY_SIZE(dmap
) >= PCPU_DFL_MAP_ALLOC
);
1581 BUG_ON(ai
->nr_groups
<= 0);
1582 BUG_ON(!ai
->static_size
);
1584 BUG_ON(ai
->unit_size
< size_sum
);
1585 BUG_ON(ai
->unit_size
& ~PAGE_MASK
);
1586 BUG_ON(ai
->unit_size
< PCPU_MIN_UNIT_SIZE
);
1588 pcpu_dump_alloc_info(KERN_DEBUG
, ai
);
1590 /* determine number of units and initialize unit_map and base */
1591 unit_map
= alloc_bootmem(nr_cpu_ids
* sizeof(unit_map
[0]));
1592 unit_off
= alloc_bootmem(nr_cpu_ids
* sizeof(unit_off
[0]));
1594 for (cpu
= 0; cpu
< nr_cpu_ids
; cpu
++)
1595 unit_map
[cpu
] = NR_CPUS
;
1596 pcpu_first_unit_cpu
= NR_CPUS
;
1598 for (group
= 0, unit
= 0; group
< ai
->nr_groups
; group
++, unit
+= i
) {
1599 const struct pcpu_group_info
*gi
= &ai
->groups
[group
];
1601 for (i
= 0; i
< gi
->nr_units
; i
++) {
1602 cpu
= gi
->cpu_map
[i
];
1606 BUG_ON(cpu
> nr_cpu_ids
|| !cpu_possible(cpu
));
1607 BUG_ON(unit_map
[cpu
] != NR_CPUS
);
1609 unit_map
[cpu
] = unit
+ i
;
1610 unit_off
[cpu
] = gi
->base_offset
+ i
* ai
->unit_size
;
1612 if (pcpu_first_unit_cpu
== NR_CPUS
)
1613 pcpu_first_unit_cpu
= cpu
;
1616 pcpu_last_unit_cpu
= cpu
;
1617 pcpu_nr_units
= unit
;
1619 for_each_possible_cpu(cpu
)
1620 BUG_ON(unit_map
[cpu
] == NR_CPUS
);
1622 pcpu_unit_map
= unit_map
;
1623 pcpu_unit_offsets
= unit_off
;
1625 /* determine basic parameters */
1626 pcpu_unit_pages
= ai
->unit_size
>> PAGE_SHIFT
;
1627 pcpu_unit_size
= pcpu_unit_pages
<< PAGE_SHIFT
;
1628 pcpu_chunk_size
= pcpu_nr_units
* pcpu_unit_size
;
1629 pcpu_chunk_struct_size
= sizeof(struct pcpu_chunk
) +
1630 BITS_TO_LONGS(pcpu_unit_pages
) * sizeof(unsigned long);
1632 first_vm
.flags
= VM_ALLOC
;
1633 first_vm
.size
= pcpu_chunk_size
;
1634 first_vm
.addr
= base_addr
;
1637 * Allocate chunk slots. The additional last slot is for
1640 pcpu_nr_slots
= __pcpu_size_to_slot(pcpu_unit_size
) + 2;
1641 pcpu_slot
= alloc_bootmem(pcpu_nr_slots
* sizeof(pcpu_slot
[0]));
1642 for (i
= 0; i
< pcpu_nr_slots
; i
++)
1643 INIT_LIST_HEAD(&pcpu_slot
[i
]);
1646 * Initialize static chunk. If reserved_size is zero, the
1647 * static chunk covers static area + dynamic allocation area
1648 * in the first chunk. If reserved_size is not zero, it
1649 * covers static area + reserved area (mostly used for module
1650 * static percpu allocation).
1652 schunk
= alloc_bootmem(pcpu_chunk_struct_size
);
1653 INIT_LIST_HEAD(&schunk
->list
);
1654 schunk
->vm
= &first_vm
;
1656 schunk
->map_alloc
= ARRAY_SIZE(smap
);
1657 schunk
->immutable
= true;
1658 bitmap_fill(schunk
->populated
, pcpu_unit_pages
);
1660 if (ai
->reserved_size
) {
1661 schunk
->free_size
= ai
->reserved_size
;
1662 pcpu_reserved_chunk
= schunk
;
1663 pcpu_reserved_chunk_limit
= ai
->static_size
+ ai
->reserved_size
;
1665 schunk
->free_size
= dyn_size
;
1666 dyn_size
= 0; /* dynamic area covered */
1668 schunk
->contig_hint
= schunk
->free_size
;
1670 schunk
->map
[schunk
->map_used
++] = -ai
->static_size
;
1671 if (schunk
->free_size
)
1672 schunk
->map
[schunk
->map_used
++] = schunk
->free_size
;
1674 /* init dynamic chunk if necessary */
1676 dchunk
= alloc_bootmem(pcpu_chunk_struct_size
);
1677 INIT_LIST_HEAD(&dchunk
->list
);
1678 dchunk
->vm
= &first_vm
;
1680 dchunk
->map_alloc
= ARRAY_SIZE(dmap
);
1681 dchunk
->immutable
= true;
1682 bitmap_fill(dchunk
->populated
, pcpu_unit_pages
);
1684 dchunk
->contig_hint
= dchunk
->free_size
= dyn_size
;
1685 dchunk
->map
[dchunk
->map_used
++] = -pcpu_reserved_chunk_limit
;
1686 dchunk
->map
[dchunk
->map_used
++] = dchunk
->free_size
;
1689 /* link the first chunk in */
1690 pcpu_first_chunk
= dchunk
?: schunk
;
1691 pcpu_chunk_relocate(pcpu_first_chunk
, -1);
1694 pcpu_base_addr
= schunk
->vm
->addr
;
1698 const char *pcpu_fc_names
[PCPU_FC_NR
] __initdata
= {
1699 [PCPU_FC_AUTO
] = "auto",
1700 [PCPU_FC_EMBED
] = "embed",
1701 [PCPU_FC_PAGE
] = "page",
1702 [PCPU_FC_LPAGE
] = "lpage",
1705 enum pcpu_fc pcpu_chosen_fc __initdata
= PCPU_FC_AUTO
;
1707 static int __init
percpu_alloc_setup(char *str
)
1711 #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1712 else if (!strcmp(str
, "embed"))
1713 pcpu_chosen_fc
= PCPU_FC_EMBED
;
1715 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1716 else if (!strcmp(str
, "page"))
1717 pcpu_chosen_fc
= PCPU_FC_PAGE
;
1719 #ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
1720 else if (!strcmp(str
, "lpage"))
1721 pcpu_chosen_fc
= PCPU_FC_LPAGE
;
1724 pr_warning("PERCPU: unknown allocator %s specified\n", str
);
1728 early_param("percpu_alloc", percpu_alloc_setup
);
1730 #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1731 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1733 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1734 * @reserved_size: the size of reserved percpu area in bytes
1735 * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
1737 * This is a helper to ease setting up embedded first percpu chunk and
1738 * can be called where pcpu_setup_first_chunk() is expected.
1740 * If this function is used to setup the first chunk, it is allocated
1741 * as a contiguous area using bootmem allocator and used as-is without
1742 * being mapped into vmalloc area. This enables the first chunk to
1743 * piggy back on the linear physical mapping which often uses larger
1746 * When @dyn_size is positive, dynamic area might be larger than
1747 * specified to fill page alignment. When @dyn_size is auto,
1748 * @dyn_size is just big enough to fill page alignment after static
1749 * and reserved areas.
1751 * If the needed size is smaller than the minimum or specified unit
1752 * size, the leftover is returned to the bootmem allocator.
1755 * 0 on success, -errno on failure.
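 *
 * For illustration (sizes made up): if static + reserved + dynamic
 * rounds up to a 28k size_sum but PCPU_MIN_UNIT_SIZE forces a larger
 * unit_size, a single unit_size * num_possible_cpus() bootmem block
 * backs all units, and the [size_sum, unit_size) tail of every unit
 * is handed straight back via free_bootmem().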
1757 int __init
pcpu_embed_first_chunk(size_t reserved_size
, ssize_t dyn_size
)
1759 struct pcpu_alloc_info
*ai
;
1760 size_t size_sum
, chunk_size
;
1765 ai
= pcpu_build_alloc_info(reserved_size
, dyn_size
, PAGE_SIZE
, NULL
);
1768 BUG_ON(ai
->nr_groups
!= 1);
1769 BUG_ON(ai
->groups
[0].nr_units
!= num_possible_cpus());
1771 size_sum
= ai
->static_size
+ ai
->reserved_size
+ ai
->dyn_size
;
1772 chunk_size
= ai
->unit_size
* num_possible_cpus();
1774 base
= __alloc_bootmem_nopanic(chunk_size
, PAGE_SIZE
,
1775 __pa(MAX_DMA_ADDRESS
));
1777 pr_warning("PERCPU: failed to allocate %zu bytes for "
1778 "embedding\n", chunk_size
);
1783 /* return the leftover and copy */
1784 for (unit
= 0; unit
< num_possible_cpus(); unit
++) {
1785 void *ptr
= base
+ unit
* ai
->unit_size
;
1787 free_bootmem(__pa(ptr
+ size_sum
), ai
->unit_size
- size_sum
);
1788 memcpy(ptr
, __per_cpu_load
, ai
->static_size
);
1791 /* we're ready, commit */
1792 pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1793 PFN_DOWN(size_sum
), base
, ai
->static_size
, ai
->reserved_size
,
1794 ai
->dyn_size
, ai
->unit_size
);
1796 rc
= pcpu_setup_first_chunk(ai
, base
);
1798 pcpu_free_alloc_info(ai
);
1801 #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK ||
1802 !CONFIG_HAVE_SETUP_PER_CPU_AREA */
1804 #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1806 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1807 * @reserved_size: the size of reserved percpu area in bytes
1808 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1810 * @populate_pte_fn: function to populate pte
1812 * This is a helper to ease setting up page-remapped first percpu
1813 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1815 * This is the basic allocator. Static percpu area is allocated
1816 * page-by-page into vmalloc area.
1819 * 0 on success, -errno on failure.
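 *
 * For illustration (sizes made up): with a 64k unit size this helper
 * allocates 16 pages per possible cpu one at a time through
 * @alloc_fn, registers a num_possible_cpus() * 64k vm area, maps each
 * cpu's pages contiguously into its unit and then copies the static
 * data into place.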
1821 int __init
pcpu_page_first_chunk(size_t reserved_size
,
1822 pcpu_fc_alloc_fn_t alloc_fn
,
1823 pcpu_fc_free_fn_t free_fn
,
1824 pcpu_fc_populate_pte_fn_t populate_pte_fn
)
1826 static struct vm_struct vm
;
1827 struct pcpu_alloc_info
*ai
;
1831 struct page
**pages
;
1834 snprintf(psize_str
, sizeof(psize_str
), "%luK", PAGE_SIZE
>> 10);
1836 ai
= pcpu_build_alloc_info(reserved_size
, -1, PAGE_SIZE
, NULL
);
1839 BUG_ON(ai
->nr_groups
!= 1);
1840 BUG_ON(ai
->groups
[0].nr_units
!= num_possible_cpus());
1842 unit_pages
= ai
->unit_size
>> PAGE_SHIFT
;
1844 /* unaligned allocations can't be freed, round up to page size */
1845 pages_size
= PFN_ALIGN(unit_pages
* num_possible_cpus() *
1847 pages
= alloc_bootmem(pages_size
);
1849 /* allocate pages */
1851 for (unit
= 0; unit
< num_possible_cpus(); unit
++)
1852 for (i
= 0; i
< unit_pages
; i
++) {
1853 unsigned int cpu
= ai
->groups
[0].cpu_map
[unit
];
1856 ptr
= alloc_fn(cpu
, PAGE_SIZE
, PAGE_SIZE
);
1858 pr_warning("PERCPU: failed to allocate %s page "
1859 "for cpu%u\n", psize_str
, cpu
);
1862 pages
[j
++] = virt_to_page(ptr
);
1865 /* allocate vm area, map the pages and copy static data */
1866 vm
.flags
= VM_ALLOC
;
1867 vm
.size
= num_possible_cpus() * ai
->unit_size
;
1868 vm_area_register_early(&vm
, PAGE_SIZE
);
1870 for (unit
= 0; unit
< num_possible_cpus(); unit
++) {
1871 unsigned long unit_addr
=
1872 (unsigned long)vm
.addr
+ unit
* ai
->unit_size
;
1874 for (i
= 0; i
< unit_pages
; i
++)
1875 populate_pte_fn(unit_addr
+ (i
<< PAGE_SHIFT
));
1877 /* pte already populated, the following shouldn't fail */
1878 rc
= __pcpu_map_pages(unit_addr
, &pages
[unit
* unit_pages
],
1881 panic("failed to map percpu area, err=%d\n", rc
);
1884 * FIXME: Archs with virtual cache should flush local
1885 * cache for the linear mapping here - something
1886 * equivalent to flush_cache_vmap() on the local cpu.
1887 * flush_cache_vmap() can't be used as most supporting
1888 * data structures are not set up yet.
1891 /* copy static data */
1892 memcpy((void *)unit_addr
, __per_cpu_load
, ai
->static_size
);
1895 /* we're ready, commit */
1896 pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1897 unit_pages
, psize_str
, vm
.addr
, ai
->static_size
,
1898 ai
->reserved_size
, ai
->dyn_size
);
1900 rc
= pcpu_setup_first_chunk(ai
, vm
.addr
);
1905 free_fn(page_address(pages
[j
]), PAGE_SIZE
);
1908 free_bootmem(__pa(pages
), pages_size
);
1909 pcpu_free_alloc_info(ai
);
1912 #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */
1914 #ifdef CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK
1920 static size_t pcpul_size
;
1921 static size_t pcpul_lpage_size
;
1922 static int pcpul_nr_lpages
;
1923 static struct pcpul_ent
*pcpul_map
;
1925 static bool __init
pcpul_unit_to_cpu(int unit
, const struct pcpu_alloc_info
*ai
,
1930 for (group
= 0, cunit
= 0; group
< ai
->nr_groups
; group
++) {
1931 const struct pcpu_group_info
*gi
= &ai
->groups
[group
];
1933 if (unit
< cunit
+ gi
->nr_units
) {
1935 *cpup
= gi
->cpu_map
[unit
- cunit
];
1938 cunit
+= gi
->nr_units
;
1944 static int __init
pcpul_cpu_to_unit(int cpu
, const struct pcpu_alloc_info
*ai
)
1948 for (group
= 0, unit
= 0; group
< ai
->nr_groups
; group
++, unit
+= i
) {
1949 const struct pcpu_group_info
*gi
= &ai
->groups
[group
];
1951 for (i
= 0; i
< gi
->nr_units
; i
++)
1952 if (gi
->cpu_map
[i
] == cpu
)
1959 * pcpu_lpage_first_chunk - remap the first percpu chunk using large page
1960 * @ai: pcpu_alloc_info
1961 * @alloc_fn: function to allocate percpu lpage, always called with lpage_size
1962 * @free_fn: function to free percpu memory, @size <= lpage_size
1963 * @map_fn: function to map percpu lpage, always called with lpage_size
1965 * This allocator uses large page to build and map the first chunk.
1966 * Unlike other helpers, the caller should provide fully initialized
 * @ai.  This can be done using pcpu_build_alloc_info().  This
 * two-stage initialization is to allow arch code to evaluate the
1969 * parameters before committing to it.
1971 * Large pages are allocated as directed by @unit_map and other
1972 * parameters and mapped to vmalloc space. Unused holes are returned
1973 * to the page allocator. Note that these holes end up being actively
1974 * mapped twice - once to the physical mapping and to the vmalloc area
1975 * for the first percpu chunk. Depending on architecture, this might
1976 * cause problem when changing page attributes of the returned area.
1977 * These double mapped areas can be detected using
1978 * pcpu_lpage_remapped().
1981 * 0 on success, -errno on failure.
1983 int __init
pcpu_lpage_first_chunk(const struct pcpu_alloc_info
*ai
,
1984 pcpu_fc_alloc_fn_t alloc_fn
,
1985 pcpu_fc_free_fn_t free_fn
,
1986 pcpu_fc_map_fn_t map_fn
)
1988 static struct vm_struct vm
;
1989 const size_t lpage_size
= ai
->atom_size
;
1990 size_t chunk_size
, map_size
;
1992 int i
, j
, unit
, nr_units
, rc
;
1995 for (i
= 0; i
< ai
->nr_groups
; i
++)
1996 nr_units
+= ai
->groups
[i
].nr_units
;
1998 chunk_size
= ai
->unit_size
* nr_units
;
1999 BUG_ON(chunk_size
% lpage_size
);
2001 pcpul_size
= ai
->static_size
+ ai
->reserved_size
+ ai
->dyn_size
;
2002 pcpul_lpage_size
= lpage_size
;
2003 pcpul_nr_lpages
= chunk_size
/ lpage_size
;
2005 /* allocate pointer array and alloc large pages */
2006 map_size
= pcpul_nr_lpages
* sizeof(pcpul_map
[0]);
2007 pcpul_map
= alloc_bootmem(map_size
);
2009 /* allocate all pages */
2010 for (i
= 0; i
< pcpul_nr_lpages
; i
++) {
2011 size_t offset
= i
* lpage_size
;
2012 int first_unit
= offset
/ ai
->unit_size
;
2013 int last_unit
= (offset
+ lpage_size
- 1) / ai
->unit_size
;
2016 /* find out which cpu is mapped to this unit */
2017 for (unit
= first_unit
; unit
<= last_unit
; unit
++)
2018 if (pcpul_unit_to_cpu(unit
, ai
, &cpu
))
2022 ptr
= alloc_fn(cpu
, lpage_size
, lpage_size
);
2024 pr_warning("PERCPU: failed to allocate large page "
2025 "for cpu%u\n", cpu
);
2029 pcpul_map
[i
].ptr
= ptr
;
2032 /* return unused holes */
2033 for (unit
= 0; unit
< nr_units
; unit
++) {
2034 size_t start
= unit
* ai
->unit_size
;
2035 size_t end
= start
+ ai
->unit_size
;
2038 /* don't free used part of occupied unit */
2039 if (pcpul_unit_to_cpu(unit
, ai
, NULL
))
2040 start
+= pcpul_size
;
2042 /* unit can span more than one page, punch the holes */
2043 for (off
= start
; off
< end
; off
= next
) {
2044 void *ptr
= pcpul_map
[off
/ lpage_size
].ptr
;
2045 next
= min(roundup(off
+ 1, lpage_size
), end
);
2047 free_fn(ptr
+ off
% lpage_size
, next
- off
);
2051 /* allocate address, map and copy */
2052 vm
.flags
= VM_ALLOC
;
2053 vm
.size
= chunk_size
;
2054 vm_area_register_early(&vm
, ai
->unit_size
);
2056 for (i
= 0; i
< pcpul_nr_lpages
; i
++) {
2057 if (!pcpul_map
[i
].ptr
)
2059 pcpul_map
[i
].map_addr
= vm
.addr
+ i
* lpage_size
;
2060 map_fn(pcpul_map
[i
].ptr
, lpage_size
, pcpul_map
[i
].map_addr
);
2063 for_each_possible_cpu(cpu
)
2064 memcpy(vm
.addr
+ pcpul_cpu_to_unit(cpu
, ai
) * ai
->unit_size
,
2065 __per_cpu_load
, ai
->static_size
);
2067 /* we're ready, commit */
2068 pr_info("PERCPU: large pages @%p s%zu r%zu d%zu u%zu\n",
2069 vm
.addr
, ai
->static_size
, ai
->reserved_size
, ai
->dyn_size
,
2072 rc
= pcpu_setup_first_chunk(ai
, vm
.addr
);
2075 * Sort pcpul_map array for pcpu_lpage_remapped(). Unmapped
2076 * lpages are pushed to the end and trimmed.
2078 for (i
= 0; i
< pcpul_nr_lpages
- 1; i
++)
2079 for (j
= i
+ 1; j
< pcpul_nr_lpages
; j
++) {
2080 struct pcpul_ent tmp
;
2082 if (!pcpul_map
[j
].ptr
)
2084 if (pcpul_map
[i
].ptr
&&
2085 pcpul_map
[i
].ptr
< pcpul_map
[j
].ptr
)
2089 pcpul_map
[i
] = pcpul_map
[j
];
2093 while (pcpul_nr_lpages
&& !pcpul_map
[pcpul_nr_lpages
- 1].ptr
)
2099 for (i
= 0; i
< pcpul_nr_lpages
; i
++)
2100 if (pcpul_map
[i
].ptr
)
2101 free_fn(pcpul_map
[i
].ptr
, lpage_size
);
2102 free_bootmem(__pa(pcpul_map
), map_size
);
2107 * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
2108 * @kaddr: the kernel address in question
2110 * Determine whether @kaddr falls in the pcpul recycled area. This is
2111 * used by pageattr to detect VM aliases and break up the pcpu large
2112 * page mapping such that the same physical page is not mapped under
2113 * different attributes.
2115 * The recycled area is always at the tail of a partially used large
2119 * Address of corresponding remapped pcpu address if match is found;
2122 void *pcpu_lpage_remapped(void *kaddr
)
2124 unsigned long lpage_mask
= pcpul_lpage_size
- 1;
2125 void *lpage_addr
= (void *)((unsigned long)kaddr
& ~lpage_mask
);
2126 unsigned long offset
= (unsigned long)kaddr
& lpage_mask
;
2127 int left
= 0, right
= pcpul_nr_lpages
- 1;
2130 /* pcpul in use at all? */
2134 /* okay, perform binary search */
2135 while (left
<= right
) {
2136 pos
= (left
+ right
) / 2;
2138 if (pcpul_map
[pos
].ptr
< lpage_addr
)
2140 else if (pcpul_map
[pos
].ptr
> lpage_addr
)
2143 return pcpul_map
[pos
].map_addr
+ offset
;
2148 #endif /* CONFIG_NEED_PER_CPU_LPAGE_FIRST_CHUNK */
2151 * Generic percpu area setup.
2153 * The embedding helper is used because its behavior closely resembles
2154 * the original non-dynamic generic percpu area setup. This is
2155 * important because many archs have addressing restrictions and might
2156 * fail if the percpu area is located far away from the previous
2157 * location. As an added bonus, in non-NUMA cases, embedding is
2158 * generally a good idea TLB-wise because percpu area can piggy back
2159 * on the physical linear memory mapping which uses large page
2160 * mappings on applicable archs.
2162 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2163 unsigned long __per_cpu_offset
[NR_CPUS
] __read_mostly
;
2164 EXPORT_SYMBOL(__per_cpu_offset
);
2166 void __init
setup_per_cpu_areas(void)
2168 unsigned long delta
;
2173 * Always reserve area for module percpu variables. That's
2174 * what the legacy allocator did.
2176 rc
= pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE
,
2177 PERCPU_DYNAMIC_RESERVE
);
		panic("Failed to initialize percpu areas.");
2181 delta
= (unsigned long)pcpu_base_addr
- (unsigned long)__per_cpu_start
;
2182 for_each_possible_cpu(cpu
)
2183 __per_cpu_offset
[cpu
] = delta
+ pcpu_unit_offsets
[cpu
];
2185 #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */