// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
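/*
 * Illustrative note (not from the original file): with a virtually
 * contiguous vmemmap, the pfn <-> page translation reduces to pointer
 * arithmetic. A minimal sketch of the generic CONFIG_SPARSEMEM_VMEMMAP
 * helpers (see asm-generic/memory_model.h; the vmemmap base itself,
 * e.g. VMEMMAP_START, is defined per architecture):
 *
 *	#define vmemmap			((struct page *)VMEMMAP_START)
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	((unsigned long)((page) - vmemmap))
 */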
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}
void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}
/* need to make sure size is all the same during early stage */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
	void *ptr = sparse_buffer_alloc(size);

	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}
static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}
/**
 * altmap_alloc_block_buf - allocate pages from the device page map
 * @altmap:	device page map
 * @size:	size (in bytes) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}
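/*
 * Worked example (illustrative, not from the original file): a 2 MiB
 * request with 4 KiB pages gives nr_pfns = 512. find_first_bit() returns
 * the lowest set bit of 512 (bit 9), so the natural alignment is
 * 1UL << 9 = 512 pfns. If the next free pfn were 0x1234, then
 * ALIGN(0x1234, 512) = 0x1400 and nr_align = 0x1cc pfns would be
 * consumed as padding before the 512-pfn allocation itself.
 */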
void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}
pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}
static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}
pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}
pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}
p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}
pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}
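/*
 * Added note (not from the original file): each vmemmap_*_populate()
 * helper above fills exactly one page-table level, allocating a zeroed
 * table page on demand and returning NULL only if that allocation fails.
 * vmemmap_populate_basepages() below simply chains them, PAGE_SIZE at a
 * time, to map the [start, end) vmemmap range with base pages.
 */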
int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
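/*
 * Usage sketch (illustrative, not from this file): an architecture with
 * no special large-page vmemmap support can satisfy the
 * vmemmap_populate() requirement from the header comment by delegating
 * to the base-page walker above, along the lines of:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 *
 * Architectures that can use PMD-sized vmemmap mappings provide their
 * own implementation instead.
 */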
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
		struct vmem_altmap *altmap)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid, altmap))
		return NULL;

	return map;
}
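/*
 * Added note (not from the original file): sparse_mem_map_populate() is
 * the CONFIG_SPARSEMEM_VMEMMAP way of building the struct page array for
 * one memory section, used when sections get their memmap both during
 * early sparse initialization and for sections added later (memory
 * hotplug, ZONE_DEVICE). On architectures that support it, a non-NULL
 * altmap lets the struct pages themselves be carved out of the device
 * reservation via altmap_alloc_block_buf().
 */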