/*
 * mm/percpu-vm.c - vmalloc area based chunk allocation
 *
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * Chunks are mapped into vmalloc areas and populated page by page.
 * This is the default chunk allocator.
 */
static struct page *pcpu_chunk_page(struct pcpu_chunk *chunk,
                                    unsigned int cpu, int page_idx)
{
        /* must not be used on pre-mapped chunk */
        WARN_ON(chunk->immutable);

        return vmalloc_to_page((void *)pcpu_chunk_addr(chunk, cpu, page_idx));
}
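
/*
 * For reference, pcpu_chunk_addr() is defined in mm/percpu.c, not here.
 * A rough sketch (illustrative only; field and array names are as in
 * mm/percpu.c of this era and are assumptions, not guaranteed by this
 * file):
 *
 *	static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 *					     unsigned int cpu, int page_idx)
 *	{
 *		return (unsigned long)chunk->base_addr +
 *		       pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
 *	}
 */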
/**
 * pcpu_get_pages_and_bitmap - get temp pages array and bitmap
 * @chunk: chunk of interest
 * @bitmapp: output parameter for bitmap
 * @may_alloc: may allocate the array
 *
 * Returns pointer to array of pointers to struct page and bitmap,
 * both of which can be indexed with pcpu_page_idx().  The returned
 * array is cleared to zero and *@bitmapp is copied from
 * @chunk->populated.  Note that there is only one array and bitmap
 * and access exclusion is the caller's responsibility.
 *
 * CONTEXT:
 * pcpu_alloc_mutex and does GFP_KERNEL allocation if @may_alloc.
 * Otherwise, don't care.
 *
 * RETURNS:
 * Pointer to temp pages array on success, NULL on failure.
 */
static struct page **pcpu_get_pages_and_bitmap(struct pcpu_chunk *chunk,
                                               unsigned long **bitmapp,
                                               bool may_alloc)
{
        static struct page **pages;
        static unsigned long *bitmap;
        size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
        size_t bitmap_size = BITS_TO_LONGS(pcpu_unit_pages) *
                             sizeof(unsigned long);

        if (!pages || !bitmap) {
                if (may_alloc && !pages)
                        pages = pcpu_mem_alloc(pages_size);
                if (may_alloc && !bitmap)
                        bitmap = pcpu_mem_alloc(bitmap_size);
                if (!pages || !bitmap)
                        return NULL;
        }

        memset(pages, 0, pages_size);
        bitmap_copy(bitmap, chunk->populated, pcpu_unit_pages);

        *bitmapp = bitmap;
        return pages;
}
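
/*
 * The temp array above is a flat [pcpu_nr_units * pcpu_unit_pages]
 * matrix.  pcpu_page_idx(), defined in mm/percpu.c, flattens a
 * (cpu, page) pair into it; roughly (illustrative sketch, where
 * pcpu_unit_map[] is the cpu -> unit mapping kept by mm/percpu.c):
 *
 *	static int pcpu_page_idx(unsigned int cpu, int page_idx)
 *	{
 *		return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 *	}
 */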
/**
 * pcpu_free_pages - free pages which were allocated for @chunk
 * @chunk: chunk pages were allocated for
 * @pages: array of pages to be freed, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be freed
 * @page_end: page index of the last page to be freed + 1
 *
 * Free pages [@page_start, @page_end) in @pages for all units.
 * The pages were allocated for @chunk.
 */
static void pcpu_free_pages(struct pcpu_chunk *chunk,
                            struct page **pages, unsigned long *populated,
                            int page_start, int page_end)
{
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page *page = pages[pcpu_page_idx(cpu, i)];

                        /* slots never filled in stay NULL and are skipped */
                        if (page)
                                __free_page(page);
                }
        }
}
/**
 * pcpu_alloc_pages - allocate pages for @chunk
 * @chunk: target chunk
 * @pages: array to put the allocated pages into, indexed by pcpu_page_idx()
 * @populated: populated bitmap
 * @page_start: page index of the first page to be allocated
 * @page_end: page index of the last page to be allocated + 1
 *
 * Allocate pages [@page_start, @page_end) into @pages for all units.
 * The allocation is for @chunk.  Percpu core doesn't care about the
 * content of @pages and will pass it verbatim to pcpu_map_pages().
 */
static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
                            struct page **pages, unsigned long *populated,
                            int page_start, int page_end)
{
        const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page **pagep = &pages[pcpu_page_idx(cpu, i)];

                        *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
                        if (!*pagep) {
                                /* roll back everything allocated so far */
                                pcpu_free_pages(chunk, pages, populated,
                                                page_start, page_end);
                                return -ENOMEM;
                        }
                }
        }
        return 0;
}
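
/*
 * The failure path can hand the whole [@page_start, @page_end) range to
 * pcpu_free_pages() only because the caller passed in a zero-cleared
 * array: NULL slots are skipped.  A minimal user-space sketch of the
 * same allocate-all-or-roll-back idiom (hypothetical helper, for
 * illustration only):
 *
 *	int alloc_all(void **slots, int n, size_t size)
 *	{
 *		for (int i = 0; i < n; i++) {
 *			slots[i] = malloc(size);
 *			if (!slots[i]) {
 *				while (i--)
 *					free(slots[i]);
 *				return -1;
 *			}
 *		}
 *		return 0;
 *	}
 */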
/**
 * pcpu_pre_unmap_flush - flush cache prior to unmapping
 * @chunk: chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages in [@page_start, @page_end) of @chunk are about to be
 * unmapped.  Flush cache.  As each flushing trial can be very
 * expensive, issue flush on the whole region at once rather than
 * doing it for each cpu.  This could be an overkill but is more
 * scalable.
 */
static void pcpu_pre_unmap_flush(struct pcpu_chunk *chunk,
                                 int page_start, int page_end)
{
        flush_cache_vunmap(
                pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}
static void __pcpu_unmap_pages(unsigned long addr, int nr_pages)
{
        unmap_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT);
}
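
/*
 * Note that unmap_kernel_range_noflush() deliberately skips both the
 * vcache and TLB flushes, which is why callers in this file bracket it
 * with pcpu_pre_unmap_flush() before and pcpu_post_unmap_tlb_flush()
 * after, or rely on vmalloc's lazy TLB flushing when depopulating.
 */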
/**
 * pcpu_unmap_pages - unmap pages out of a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array which can be used to pass information to free
 * @populated: populated bitmap
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
 *
 * For each cpu, unmap pages [@page_start, @page_end) out of @chunk.
 * Corresponding elements in @pages were cleared by the caller and can
 * be used to carry information to pcpu_free_pages() which will be
 * called after all unmaps are finished.  The caller should call
 * proper pre/post flush functions.
 */
static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
                             struct page **pages, unsigned long *populated,
                             int page_start, int page_end)
{
        unsigned int cpu;
        int i;

        for_each_possible_cpu(cpu) {
                for (i = page_start; i < page_end; i++) {
                        struct page *page;

                        page = pcpu_chunk_page(chunk, cpu, i);
                        WARN_ON(!page);
                        /* stash the page so pcpu_free_pages() can free it */
                        pages[pcpu_page_idx(cpu, i)] = page;
                }
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                   page_end - page_start);
        }

        for (i = page_start; i < page_end; i++)
                __clear_bit(i, populated);
}
/**
 * pcpu_post_unmap_tlb_flush - flush TLB after unmapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start, @page_end) of @chunk have been unmapped.  Flush
 * TLB for the regions.  This can be skipped if the area is to be
 * returned to vmalloc as vmalloc will handle TLB flushing lazily.
 *
 * As with pcpu_pre_unmap_flush(), TLB flushing also is done at once
 * for the whole region.
 */
static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
                                      int page_start, int page_end)
{
        flush_tlb_kernel_range(
                pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}
static int __pcpu_map_pages(unsigned long addr, struct page **pages,
                            int nr_pages)
{
        return map_kernel_range_noflush(addr, nr_pages << PAGE_SHIFT,
                                        PAGE_KERNEL, pages);
}
/**
 * pcpu_map_pages - map pages into a pcpu_chunk
 * @chunk: chunk of interest
 * @pages: pages array containing pages to be mapped
 * @populated: populated bitmap
 * @page_start: page index of the first page to map
 * @page_end: page index of the last page to map + 1
 *
 * For each cpu, map pages [@page_start, @page_end) into @chunk.  The
 * caller is responsible for calling pcpu_post_map_flush() after all
 * mappings are complete.
 *
 * This function is responsible for setting corresponding bits in
 * @chunk->populated bitmap and whatever is necessary for reverse
 * lookup (addr -> chunk).
 */
static int pcpu_map_pages(struct pcpu_chunk *chunk,
                          struct page **pages, unsigned long *populated,
                          int page_start, int page_end)
{
        unsigned int cpu, tcpu;
        int i, err;

        for_each_possible_cpu(cpu) {
                err = __pcpu_map_pages(pcpu_chunk_addr(chunk, cpu, page_start),
                                       &pages[pcpu_page_idx(cpu, page_start)],
                                       page_end - page_start);
                if (err < 0)
                        goto err;
        }

        /* mapping successful, link chunk and mark populated */
        for (i = page_start; i < page_end; i++) {
                for_each_possible_cpu(cpu)
                        pcpu_set_page_chunk(pages[pcpu_page_idx(cpu, i)],
                                            chunk);
                __set_bit(i, populated);
        }

        return 0;

err:
        /* undo the mappings done for cpus before the failing one */
        for_each_possible_cpu(tcpu) {
                if (tcpu == cpu)
                        break;
                __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
                                   page_end - page_start);
        }
        return err;
}
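
/*
 * The addr -> chunk reverse lookup mentioned above is stored in the
 * page struct itself.  The helpers live in mm/percpu.c; roughly
 * (illustrative sketch; mm/percpu.c of this era stashes the chunk
 * pointer in page->index, which is otherwise unused for these pages):
 *
 *	static void pcpu_set_page_chunk(struct page *page,
 *					struct pcpu_chunk *pcpu)
 *	{
 *		page->index = (unsigned long)pcpu;
 *	}
 *
 *	static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 *	{
 *		return (struct pcpu_chunk *)page->index;
 *	}
 */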
/**
 * pcpu_post_map_flush - flush cache after mapping
 * @chunk: pcpu_chunk the regions to be flushed belong to
 * @page_start: page index of the first page to be flushed
 * @page_end: page index of the last page to be flushed + 1
 *
 * Pages [@page_start, @page_end) of @chunk have been mapped.  Flush
 * cache.
 *
 * As with pcpu_pre_unmap_flush(), the flush is done at once for the
 * whole region.
 */
static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
                                int page_start, int page_end)
{
        flush_cache_vmap(
                pcpu_chunk_addr(chunk, pcpu_first_unit_cpu, page_start),
                pcpu_chunk_addr(chunk, pcpu_last_unit_cpu, page_end));
}
/**
 * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
 * @chunk: chunk of interest
 * @off: offset to the area to populate
 * @size: size of the area to populate in bytes
 *
 * For each cpu, populate and map pages [@page_start, @page_end) into
 * @chunk.  The area is cleared on return.
 *
 * CONTEXT:
 * pcpu_alloc_mutex, does GFP_KERNEL allocation.
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
        int page_start = PFN_DOWN(off);
        int page_end = PFN_UP(off + size);
        int free_end = page_start, unmap_end = page_start;
        struct page **pages;
        unsigned long *populated;
        unsigned int cpu;
        int rs, re, rc;

        /* quick path, check whether all pages are already there */
        rs = page_start;
        pcpu_next_pop(chunk, &rs, &re, page_end);
        if (rs == page_start && re == page_end)
                goto clear;

        /* need to allocate and map pages, this chunk can't be immutable */
        WARN_ON(chunk->immutable);

        pages = pcpu_get_pages_and_bitmap(chunk, &populated, true);
        if (!pages)
                return -ENOMEM;

        /* alloc and map */
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                rc = pcpu_alloc_pages(chunk, pages, populated, rs, re);
                if (rc)
                        goto err_free;
                free_end = re;
        }

        pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
                rc = pcpu_map_pages(chunk, pages, populated, rs, re);
                if (rc)
                        goto err_unmap;
                unmap_end = re;
        }
        pcpu_post_map_flush(chunk, page_start, page_end);

        /* commit new bitmap */
        bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
clear:
        for_each_possible_cpu(cpu)
                memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
        return 0;

err_unmap:
        pcpu_pre_unmap_flush(chunk, page_start, unmap_end);
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, unmap_end)
                pcpu_unmap_pages(chunk, pages, populated, rs, re);
        pcpu_post_unmap_tlb_flush(chunk, page_start, unmap_end);
err_free:
        pcpu_for_each_unpop_region(chunk, rs, re, page_start, free_end)
                pcpu_free_pages(chunk, pages, populated, rs, re);
        return rc;
}
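
/*
 * pcpu_for_each_unpop_region() and friends are bitmap-region iterators
 * from mm/percpu.c which walk maximal runs of clear (or set) bits in
 * @chunk->populated.  Roughly (illustrative sketch of the unpopulated
 * variant; the populated one swaps the two find calls):
 *
 *	static void pcpu_next_unpop(struct pcpu_chunk *chunk,
 *				    int *rs, int *re, int end)
 *	{
 *		*rs = find_next_zero_bit(chunk->populated, end, *rs);
 *		*re = find_next_bit(chunk->populated, end, *rs + 1);
 *	}
 *
 *	#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)	   \
 *		for ((rs) = (start),					   \
 *		     pcpu_next_unpop((chunk), &(rs), &(re), (end));	   \
 *		     (rs) < (re);					   \
 *		     (rs) = (re) + 1,					   \
 *		     pcpu_next_unpop((chunk), &(rs), &(re), (end)))
 */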
/**
 * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
 * @chunk: chunk to depopulate
 * @off: offset to the area to depopulate
 * @size: size of the area to depopulate in bytes
 *
 * For each cpu, depopulate and unmap pages [@page_start, @page_end)
 * from @chunk.  The vcache is flushed before unmapping; the TLB flush
 * is left to vmalloc's lazy flushing.
 */
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
{
        int page_start = PFN_DOWN(off);
        int page_end = PFN_UP(off + size);
        struct page **pages;
        unsigned long *populated;
        int rs, re;

        /* quick path, check whether it's empty already */
        rs = page_start;
        pcpu_next_unpop(chunk, &rs, &re, page_end);
        if (rs == page_start && re == page_end)
                return;

        /* immutable chunks can't be depopulated */
        WARN_ON(chunk->immutable);

        /*
         * If control reaches here, there must have been at least one
         * successful population attempt so the temp pages array must
         * be available now.
         */
        pages = pcpu_get_pages_and_bitmap(chunk, &populated, false);
        BUG_ON(!pages);

        /* unmap and free */
        pcpu_pre_unmap_flush(chunk, page_start, page_end);

        pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
                pcpu_unmap_pages(chunk, pages, populated, rs, re);

        /* no need to flush tlb, vmalloc will handle it lazily */

        pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
                pcpu_free_pages(chunk, pages, populated, rs, re);

        /* commit new bitmap */
        bitmap_copy(chunk->populated, populated, pcpu_unit_pages);
}
static struct pcpu_chunk *pcpu_create_chunk(void)
{
        struct pcpu_chunk *chunk;
        struct vm_struct **vms;

        chunk = pcpu_alloc_chunk();
        if (!chunk)
                return NULL;

        vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
                                pcpu_nr_groups, pcpu_atom_size);
        if (!vms) {
                pcpu_free_chunk(chunk);
                return NULL;
        }

        chunk->data = vms;
        chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
        return chunk;
}
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk)
{
        if (chunk && chunk->data)
                pcpu_free_vm_areas(chunk->data, pcpu_nr_groups);
        pcpu_free_chunk(chunk);
}
static struct page *pcpu_addr_to_page(void *addr)
{
        return vmalloc_to_page(addr);
}
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai)
{
        /* no extra restriction */
        return 0;
}