/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif
static void *try_ram_remap(resource_size_t offset, size_t size)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)))
		return __va(offset);
	return NULL; /* fallback to ioremap_cache */
}
/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		flags &= ~MEMREMAP_WB;
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = ioremap_cache(offset, size);
	}

	/*
	 * If we don't have a mapping yet and more request flags are
	 * pending then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT)) {
		flags &= ~MEMREMAP_WT;
		addr = ioremap_wt(offset, size);
	}

	return addr;
}
EXPORT_SYMBOL(memremap);
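
/*
 * A minimal usage sketch (hypothetical caller; 'res' is an assumed
 * struct resource): a driver that knows a range has no read side
 * effects can request write-back and name write-through as a fallback.
 * Per the flag handling above, MEMREMAP_WB is tried first (possibly
 * satisfied straight from the direct map) and MEMREMAP_WT is only
 * attempted if no write-back mapping could be established:
 *
 *	void *base = memremap(res->start, resource_size(res),
 *			MEMREMAP_WB | MEMREMAP_WT);
 *	if (!base)
 *		return -ENXIO;
 *	...
 *	memunmap(base);
 */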
void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);
static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}
void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);
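
/*
 * A hedged sketch of the devm_ pattern (hypothetical probe path; 'pdev'
 * and 'res' are assumptions): the mapping is released automatically on
 * driver detach.  Note that devm_memremap() reports failure via
 * ERR_PTR() values where plain memremap() returns NULL:
 *
 *	void *base = devm_memremap(&pdev->dev, res->start,
 *			resource_size(res), MEMREMAP_WB);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */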
void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
{
	return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}
EXPORT_SYMBOL(phys_to_pfn_t);
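
/*
 * For illustration (values assumed): with 4K pages (PAGE_SHIFT == 12),
 * phys_to_pfn_t(0x200000, PFN_DEV) encodes pfn 0x200 with the PFN_DEV
 * flag set in the high bits of the returned pfn_t.
 */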
#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
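
/*
 * A worked example of the section arithmetic used below (values
 * assumed: PA_SECTION_SHIFT == 27 as on x86_64, so sections are 128M):
 * a 64M resource starting at 0x10a000000 rounds out to
 *
 *	align_start = 0x10a000000 & ~(SECTION_SIZE - 1) = 0x108000000
 *	align_size  = 128M
 *
 * and the radix tree is keyed by section number, i.e. physical
 * address >> PA_SECTION_SHIFT.
 */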
struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};
void get_zone_device_page(struct page *page)
{
	percpu_ref_get(page->pgmap->ref);
}
EXPORT_SYMBOL(get_zone_device_page);

void put_zone_device_page(struct page *page)
{
	put_dev_pagemap(page->pgmap);
}
EXPORT_SYMBOL(put_zone_device_page);
static void pgmap_radix_release(struct resource *res)
{
	resource_size_t key, align_start, align_size, align_end;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	align_end = align_start + align_size - 1;

	mutex_lock(&pgmap_lock);
	for (key = res->start; key <= res->end; key += SECTION_SIZE)
		radix_tree_delete(&pgmap_radix, key >> PA_SECTION_SHIFT);
	mutex_unlock(&pgmap_lock);
}
static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}
static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)
static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);
	arch_remove_memory(align_start, align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}
/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, phys >> PA_SECTION_SHIFT);
	return page_map ? &page_map->pgmap : NULL;
}
/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event).
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t key, align_start, align_size, align_end;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;
	unsigned long pfn;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
			IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (altmap && !IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP)) {
		dev_err(dev, "%s: altmap requires CONFIG_SPARSEMEM_VMEMMAP=y\n",
				__func__);
		return ERR_PTR(-ENXIO);
	}

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;
	for (key = align_start; key <= align_end; key += SECTION_SIZE) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(key);
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = radix_tree_insert(&pgmap_radix, key >> PA_SECTION_SHIFT,
				page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = arch_add_memory(nid, align_start, align_size, true);
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list.  Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
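
/*
 * A hedged usage sketch (hypothetical pmem-style driver; 'pmem' and its
 * members are assumptions): the caller supplies a live percpu_ref and
 * gets back a linear-mapped range whose pfns carry struct page backing:
 *
 *	addr = devm_memremap_pages(dev, &pmem->res, &pmem->ref, NULL);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *
 * The driver must kill the ref (and wait for it to drain) before the
 * devres release runs, otherwise the "page mapping is still live"
 * warning in devm_memremap_pages_release() fires.
 */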
unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
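
/*
 * A worked example of the altmap accounting (values assumed): with
 * altmap->reserve == 2 and altmap->free == 1024, vmem_altmap_offset()
 * returns 1026, i.e. pfn_first() skips the pfns that back the memmap
 * itself (plus the reserved area) and only the remainder of the device
 * range is handed out as ZONE_DEVICE memory.  'alloc' counts how many
 * of the 'free' pfns the arch vmemmap code has actually consumed, and
 * vmem_altmap_free() above gives them back.
 */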
#ifdef CONFIG_SPARSEMEM_VMEMMAP
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array.  In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address, this is only for use in the
	 * arch_{add|remove}_memory() for setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#endif /* CONFIG_ZONE_DEVICE */