/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
#include <linux/radix-tree.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/pfn_t.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/wait_bit.h>

static DEFINE_MUTEX(pgmap_lock);
static RADIX_TREE(pgmap_radix, GFP_KERNEL);

#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)

static unsigned long order_at(struct resource *res, unsigned long pgoff)
{
	unsigned long phys_pgoff = PHYS_PFN(res->start) + pgoff;
	unsigned long nr_pages, mask;

	nr_pages = PHYS_PFN(resource_size(res));
	if (nr_pages == pgoff)
		return ULONG_MAX;

	/*
	 * What is the largest aligned power-of-2 range available from
	 * this resource pgoff to the end of the resource range,
	 * considering the alignment of the current pgoff?
	 *
	 * (A worked example follows this function.)
	 */
	mask = phys_pgoff | rounddown_pow_of_two(nr_pages - pgoff);
	if (!mask)
		return ULONG_MAX;

	return find_first_bit(&mask, BITS_PER_LONG);
}
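
/*
 * Worked example (illustrative values, not from the original source): for a
 * range starting at physical pfn 0x80200 with 0x600 pfns remaining past
 * pgoff, mask = 0x80200 | rounddown_pow_of_two(0x600) = 0x80200 | 0x400 =
 * 0x80600. The lowest set bit is 9, so the next chunk covers 2^9 = 512 pfns,
 * the largest power-of-2 run that is naturally aligned at this offset.
 */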

#define foreach_order_pgoff(res, order, pgoff) \
	for (pgoff = 0, order = order_at((res), pgoff); order < ULONG_MAX; \
			pgoff += 1UL << order, order = order_at((res), pgoff))
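
/*
 * foreach_order_pgoff() walks @res as a series of maximally sized,
 * naturally aligned power-of-2 chunks; order_at() returns ULONG_MAX once
 * pgoff reaches the end of the resource, which terminates the loop. The
 * callers below use it to insert (and later delete) one multi-order radix
 * tree entry per chunk, keyed by the starting pfn of that chunk.
 */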

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
int device_private_entry_fault(struct vm_area_struct *vma,
		       unsigned long addr,
		       swp_entry_t entry,
		       unsigned int flags,
		       pmd_t *pmdp)
{
	struct page *page = device_private_entry_to_page(entry);

	/*
	 * The page_fault() callback must migrate page back to system memory
	 * so that CPU can access it. This might fail for various reasons
	 * (device issue, device was unsafely unplugged, ...). When such
	 * error conditions happen, the callback must return VM_FAULT_SIGBUS.
	 *
	 * Note that because memory cgroup charges are accounted to the device
	 * memory, this should never fail because of memory restrictions (but
	 * allocation of regular system page might still fail because we are
	 * out of memory).
	 *
	 * There is a more in-depth description of what that callback can and
	 * cannot do, in include/linux/memremap.h
	 */
	return page->pgmap->page_fault(vma, addr, page, flags, pmdp);
}
EXPORT_SYMBOL(device_private_entry_fault);
#endif /* CONFIG_DEVICE_PRIVATE */
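
/*
 * Illustrative note (not from the original file): a driver's ->page_fault()
 * callback for MEMORY_DEVICE_PRIVATE memory typically uses the migrate_vma()
 * helpers to allocate a system RAM page, copy the device page's contents
 * back, and replace the device-private swap entry with a normal CPU mapping,
 * returning 0 on success or VM_FAULT_SIGBUS if the migration cannot be done.
 */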

static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
{
	unsigned long pgoff, order;

	mutex_lock(&pgmap_lock);
	foreach_order_pgoff(res, order, pgoff) {
		if (pgoff >= end_pgoff)
			break;
		radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
	}
	mutex_unlock(&pgmap_lock);

	synchronize_rcu();
}

static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;
	struct vmem_altmap *altmap = &pgmap->altmap;
	unsigned long pfn;

	pfn = res->start >> PAGE_SHIFT;
	if (pgmap->altmap_valid)
		pfn += vmem_altmap_offset(altmap);
	return pfn;
}

static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
	const struct resource *res = &pgmap->res;

	return (res->start + resource_size(res)) >> PAGE_SHIFT;
}

static unsigned long pfn_next(unsigned long pfn)
{
	if (pfn % 1024 == 0)
		cond_resched();
	return pfn + 1;
}

#define for_each_device_pfn(pfn, map) \
	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))

static void devm_memremap_pages_release(void *data)
{
	struct dev_pagemap *pgmap = data;
	struct device *dev = pgmap->dev;
	struct resource *res = &pgmap->res;
	resource_size_t align_start, align_size;
	unsigned long pfn;

	for_each_device_pfn(pfn, pgmap)
		put_page(pfn_to_page(pfn));

	if (percpu_ref_tryget_live(pgmap->ref)) {
		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
		percpu_ref_put(pgmap->ref);
	}

	/* pages are dead and unused, undo the arch mapping */
	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;

	mem_hotplug_begin();
	arch_remove_memory(align_start, align_size, pgmap->altmap_valid ?
			&pgmap->altmap : NULL);
	mem_hotplug_done();

	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
	pgmap_radix_release(res, -1);
	dev_WARN_ONCE(dev, pgmap->altmap.alloc,
		      "%s: failed to free all reserved pages\n", __func__);
}

/**
 * devm_memremap_pages - remap and provide memmap backing for the given resource
 * @dev: hosting device for @res
 * @pgmap: pointer to a struct dev_pagemap
 *
 * Notes:
 * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
 *    by the caller before passing it to this function
 *
 * 2/ The altmap field may optionally be initialized, in which case altmap_valid
 *    must be set to true
 *
 * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
 *    time (or devm release event). The expected order of events is that ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
 * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 *
 * (An illustrative caller sketch follows the function body below.)
 */
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	resource_size_t align_start, align_size, align_end;
	struct vmem_altmap *altmap = pgmap->altmap_valid ?
			&pgmap->altmap : NULL;
	struct resource *res = &pgmap->res;
	unsigned long pfn, pgoff, order;
	pgprot_t pgprot = PAGE_KERNEL;
	int error, nid, is_ram;

	align_start = res->start & ~(SECTION_SIZE - 1);
	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
		- align_start;
	is_ram = region_intersects(align_start, align_size,
		IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "%s attempted on mixed region %pr\n",
				__func__, res);
		return ERR_PTR(-ENXIO);
	}

	if (is_ram == REGION_INTERSECTS)
		return __va(res->start);

	if (!pgmap->ref)
		return ERR_PTR(-EINVAL);

	pgmap->dev = dev;

	mutex_lock(&pgmap_lock);
	error = 0;
	align_end = align_start + align_size - 1;

	foreach_order_pgoff(res, order, pgoff) {
		error = __radix_tree_insert(&pgmap_radix,
				PHYS_PFN(res->start) + pgoff, order, pgmap);
		if (error) {
			dev_err(dev, "%s: failed: %d\n", __func__, error);
			break;
		}
	}
	mutex_unlock(&pgmap_lock);
	if (error)
		goto err_radix;

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(align_start), 0,
			align_size);
	if (error)
		goto err_pfn_remap;

	mem_hotplug_begin();
	error = arch_add_memory(nid, align_start, align_size, altmap, false);
	if (!error)
		move_pfn_range_to_zone(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
					align_start >> PAGE_SHIFT,
					align_size >> PAGE_SHIFT, altmap);
	mem_hotplug_done();
	if (error)
		goto err_add_memory;

	for_each_device_pfn(pfn, pgmap) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back
		 * pointer. It is a bug if a ZONE_DEVICE page is ever
		 * freed or placed on a driver-private list. Seed the
		 * storage with LIST_POISON* values.
		 */
		list_del(&page->lru);
		page->pgmap = pgmap;
		percpu_ref_get(pgmap->ref);
	}

	devm_add_action(dev, devm_memremap_pages_release, pgmap);

	return __va(res->start);

 err_add_memory:
	untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
 err_pfn_remap:
 err_radix:
	pgmap_radix_release(res, pgoff);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(devm_memremap_pages);
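
/*
 * Illustrative caller sketch (not from the original file; names are
 * hypothetical). A driver typically embeds a struct dev_pagemap, points
 * pgmap->res at the physical range it wants struct pages for, supplies a
 * live percpu_ref, sets a type, and then calls devm_memremap_pages():
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct example_dev *ed = example_dev_alloc(&pdev->dev);
 *		void *addr;
 *
 *		ed->pgmap.res = ed->phys_range;	// physical range to map
 *		ed->pgmap.ref = &ed->ref;	// already percpu_ref_init()'d
 *		ed->pgmap.type = MEMORY_DEVICE_FS_DAX;	// or another type
 *		addr = devm_memremap_pages(&pdev->dev, &ed->pgmap);
 *		if (IS_ERR(addr))
 *			return PTR_ERR(addr);
 *		ed->virt = addr;	// kernel mapping of res->start
 *		return 0;
 *	}
 *
 * On driver detach the devm action runs devm_memremap_pages_release(),
 * which expects the ref to have been killed and drained first (see note 3
 * in the kerneldoc above).
 */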

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	/* number of pfns from base where pfn_to_page() is valid */
	return altmap->reserve + altmap->free;
}

void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
{
	altmap->alloc -= nr_pfns;
}
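
/*
 * Note (added for clarity, not in the original file): an altmap carves the
 * memmap storage out of the device range itself. vmem_altmap_offset()
 * reports how many pfns at the base of the range are consumed by the
 * driver reservation (->reserve) plus the memmap pool (->free), i.e. where
 * the usable ZONE_DEVICE pages begin, and vmem_altmap_free() returns pfns
 * to the pool by decrementing the ->alloc count of pages handed out so far.
 */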

/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to lookup page_map
 * @pgmap: optional known pgmap that already has a reference
 *
 * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
 * is non-NULL but does not cover @pfn the reference to it will be released.
 */
struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference.
	 */
	if (pgmap) {
		if (phys >= pgmap->res.start && phys <= pgmap->res.end)
			return pgmap;
		put_dev_pagemap(pgmap);
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = radix_tree_lookup(&pgmap_radix, PHYS_PFN(phys));
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}
EXPORT_SYMBOL_GPL(get_dev_pagemap);
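
/*
 * Typical usage pattern (illustrative, not from the original file): callers
 * that scan a range of pfns keep the last pgmap cached so repeated lookups
 * of pfns in the same range avoid the radix tree walk:
 *
 *	struct dev_pagemap *pgmap = NULL;
 *
 *	for (pfn = start; pfn < end; pfn++) {
 *		pgmap = get_dev_pagemap(pfn, pgmap);
 *		if (!pgmap)
 *			break;		// not a ZONE_DEVICE pfn
 *		// ... use pfn_to_page(pfn) ...
 *	}
 *	if (pgmap)
 *		put_dev_pagemap(pgmap);
 */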

#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
EXPORT_SYMBOL_GPL(devmap_managed_key);
static atomic_t devmap_enable;

/*
 * Toggle the static key for ->page_free() callbacks when dev_pagemap
 * pages go idle.
 */
void dev_pagemap_get_ops(void)
{
	if (atomic_inc_return(&devmap_enable) == 1)
		static_branch_enable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_get_ops);

void dev_pagemap_put_ops(void)
{
	if (atomic_dec_and_test(&devmap_enable))
		static_branch_disable(&devmap_managed_key);
}
EXPORT_SYMBOL_GPL(dev_pagemap_put_ops);
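
/*
 * Background (added for clarity, not in the original file): ZONE_DEVICE
 * pages are created with an elevated reference count, so a devmap-managed
 * page is considered idle when its refcount drops to 1 rather than 0.
 * put_page() diverts such pages here (when devmap_managed_key is enabled)
 * so that the owning driver's ->page_free() callback can reclaim the page
 * instead of the page allocator.
 */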

void __put_devmap_managed_page(struct page *page)
{
	int count = page_ref_dec_return(page);

	/*
	 * If refcount is 1 then page is freed and refcount is stable as nobody
	 * holds a reference on the page.
	 */
	if (count == 1) {
		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		page->mapping = NULL;
		mem_cgroup_uncharge(page);

		page->pgmap->page_free(page, page->pgmap->data);
	} else if (!count)
		__put_page(page);
}
EXPORT_SYMBOL_GPL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */