/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

#ifndef arch_memremap_wb
static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
{
	return (__force void *)ioremap_cache(offset, size);
}
#endif

#ifndef arch_memremap_can_ram_remap
static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
					unsigned long flags)
{
	return true;
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size,
			   unsigned long flags)
{
	unsigned long pfn = PHYS_PFN(offset);

	/* In the simple case just return the existing linear address */
	if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
	    arch_memremap_can_ram_remap(offset, size, flags))
		return __va(offset);

	return NULL; /* fallback to arch_memremap_wb */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
 *		  MEMREMAP_ENC, MEMREMAP_DEC
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable. In the case of multiple flags, the different
 * mapping types will be attempted in the order listed below until one of
 * them succeeds.
 *
 * MEMREMAP_WB - matches the default mapping for System RAM on
 * the architecture.  This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM,
 * memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility.  Attempts to
 * map System RAM with this mapping type will fail.
 *
 * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
 * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
 * uncached. Attempts to map System RAM with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size,
				       IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
	void *addr = NULL;

	if (!flags)
		return NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map.  Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in System RAM.
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size, flags);
		if (!addr)
			addr = arch_memremap_wb(offset, size);
	}

	/*
	 * If we don't have a mapping yet and other request flags are
	 * present then we will be attempting to establish a new virtual
	 * address mapping.  Enforce that this mapping is not aliasing
	 * System RAM.
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT))
		addr = ioremap_wt(offset, size);

	if (!addr && (flags & MEMREMAP_WC))
		addr = ioremap_wc(offset, size);

	return addr;
}
EXPORT_SYMBOL(memremap);
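
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * that knows a firmware table lives in a reserved, non-I/O memory range
 * can ask for a cacheable mapping and fall back to write-through.
 * 'fw_start' and 'fw_len' are hypothetical placeholders.
 *
 *	void *tbl = memremap(fw_start, fw_len, MEMREMAP_WB | MEMREMAP_WT);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	... access via plain loads/stores; no readl()/writel() needed ...
 *	memunmap(tbl);
 */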

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);
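
/*
 * Example (illustrative sketch, not part of the original file): probe()
 * code can lean on devres so that no explicit memunmap() is needed when
 * the driver detaches.  'pdev' is a hypothetical platform device whose
 * first memory resource is known not to have I/O side effects.
 *
 *	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	void *base;
 *
 *	if (!res)
 *		return -ENODEV;
 *	base = devm_memremap(&pdev->dev, res->start, resource_size(res),
 *			MEMREMAP_WC);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 */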

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);

#ifdef CONFIG_ZONE_DEVICE
static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

struct page_map {
	struct resource res;
	struct percpu_ref *ref;
	struct dev_pagemap pgmap;
	struct vmem_altmap altmap;
};

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))
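
/*
 * Worked example (illustrative, not from the original file, assuming
 * PAGE_SHIFT == 12): for a resource starting at 0x201000 (page frame
 * 0x201) spanning 8 pages, foreach_order_pgoff() visits:
 *
 *	pgoff 0: mask = 0x201 | rounddown_pow_of_two(8) = 0x209, order 0
 *	pgoff 1: mask = 0x202 | rounddown_pow_of_two(7) = 0x206, order 1
 *	pgoff 3: mask = 0x204 | rounddown_pow_of_two(5) = 0x204, order 2
 *	pgoff 7: mask = 0x208 | rounddown_pow_of_two(1) = 0x209, order 0
 *
 * i.e. each chunk's order is capped both by the alignment of the current
 * page frame and by the number of pages remaining in the resource.
 */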

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate the page back to system
	 * memory so that the CPU can access it. This might fail for various
	 * reasons (device issue, device was unsafely unplugged, ...). When
	 * such error conditions happen, the callback must return
	 * VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of a regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do in include/linux/memremap.h.
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
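
/*
 * Illustrative sketch (not part of the original file) of the contract
 * described above: a driver's page_fault() callback migrates the page
 * back to system RAM and only reports VM_FAULT_SIGBUS when that
 * migration cannot succeed.  'my_dev_migrate_to_ram()' is a hypothetical
 * driver helper; the exact callback prototype and return convention
 * should be checked against include/linux/memremap.h.
 *
 *	static int my_dev_page_fault(struct vm_area_struct *vma,
 *			unsigned long addr, const struct page *page,
 *			unsigned int flags, pmd_t *pmdp)
 *	{
 *		if (my_dev_migrate_to_ram(vma, addr, page, pmdp))
 *			return VM_FAULT_SIGBUS;
 *		return 0;
 *	}
 */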

static void pgmap_radix_release(struct resource *res)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff)
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct page_map *page_map)
{
	struct dev_pagemap *pgmap = &page_map->pgmap;
	const struct resource *res = &page_map->res;
	struct vmem_altmap *altmap = pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (altmap)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct page_map *page_map)
{
	const struct resource *res = &page_map->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)

static void devm_memremap_pages_release(struct device *dev, void *data)
{
	struct page_map *page_map = data;
	struct resource *res = &page_map->res;
	resource_size_t align_start, align_size;
	struct dev_pagemap *pgmap = &page_map->pgmap;
	unsigned long pfn;

	for_each_device_pfn(pfn, page_map)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(resource_size(res), SECTION_SIZE);

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res);
	dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
			"%s: failed to free all reserved pages\n", __func__);
}

/* assumes rcu_read_lock() held at entry */
struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	struct page_map *page_map;

	WARN_ON_ONCE(!rcu_read_lock_held());

	page_map = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	return page_map ? &page_map->pgmap : NULL;
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @res: "host memory" address range
 * @ref: a live per-cpu reference count
 * @altmap: optional descriptor for allocating the memmap from @res
 *
 * Notes:
 * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
 *    (or devm release event). The expected order of events is that @ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 2/ @res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap)
{
	resource_size_t align_start, align_size, align_end;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	struct dev_pagemap *pgmap;
	struct page_map *page_map;
	int error, nid, is_ram;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!ref)
		return ERR_PTR(-EINVAL);

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);
	pgmap = &page_map->pgmap;

	memcpy(&page_map->res, res, sizeof(*res));

	pgmap->dev = dev;
	if (altmap) {
		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
		pgmap->altmap = &page_map->altmap;
	}
	pgmap->ref = ref;
	pgmap->res = &page_map->res;
	pgmap->type = MEMORY_DEVICE_HOST;
	pgmap->page_fault = NULL;
	pgmap->page_free = NULL;
	pgmap->data = NULL;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;

	foreach_order_pgoff(res, order, pgoff) {
		struct dev_pagemap *dup;

		rcu_read_lock();
		dup = find_dev_pagemap(res->start + PFN_PHYS(pgoff));
		rcu_read_unlock();
		if (dup) {
			dev_err(dev, "%s: %pr collides with mapping for %s\n",
					__func__, res, dev_name(dup->dev));
			error = -EBUSY;
			break;
		}
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, page_map);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, page_map) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer.  It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list.  Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(ref);
	}
	devres_add(dev, page_map);
	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res);
	devres_free(page_map);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
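
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * typically initializes a live percpu_ref, hands it to
 * devm_memremap_pages(), and arranges for percpu_ref_kill(), the wait for
 * all references to drop, and percpu_ref_exit() around teardown, matching
 * note 1/ above.  'ndev', 'ndev_ref_release' and the embedded 'res' field
 * are hypothetical names.
 *
 *	void *addr;
 *
 *	if (percpu_ref_init(&ndev->ref, ndev_ref_release, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	addr = devm_memremap_pages(dev, &ndev->res, &ndev->ref, NULL);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 */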

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}

struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	/*
	 * 'memmap_start' is the virtual address for the first "struct
	 * page" in this range of the vmemmap array.  In the case of
	 * CONFIG_SPARSEMEM_VMEMMAP a page_to_pfn conversion is simple
	 * pointer arithmetic, so we can perform this to_vmem_altmap()
	 * conversion without concern for the initialization state of
	 * the struct page fields.
	 */
	struct page *page = (struct page *) memmap_start;
	struct dev_pagemap *pgmap;

	/*
	 * Unconditionally retrieve a dev_pagemap associated with the
	 * given physical address; this is only for use by
	 * arch_{add|remove}_memory() when setting up and tearing down
	 * the memmap.
	 */
	rcu_read_lock();
	pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));
	rcu_read_unlock();

	return pgmap ? pgmap->altmap : NULL;
}
#endif /* CONFIG_ZONE_DEVICE */

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE) || IS_ENABLED(CONFIG_DEVICE_PUBLIC)
void put_zone_device_private_or_public_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If the refcount is 1 then the page is being freed and the refcount
	 * is stable as nobody else holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		page->mapping = NULL;
		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL(put_zone_device_private_or_public_page);
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */