// SPDX-License-Identifier: GPL-2.0
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
struct dma_coherent_mem {
	void		*virt_base;
	dma_addr_t	device_base;
	unsigned long	pfn_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
	spinlock_t	spinlock;
	bool		use_dev_dma_pfn_offset;
};

static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init;
static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	return NULL;
}
static inline dma_addr_t dma_get_device_base(struct device *dev,
					     struct dma_coherent_mem *mem)
{
	if (mem->use_dev_dma_pfn_offset)
		return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
	else
		return mem->device_base;
}
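/*
 * Worked example of the pfn-offset translation above (the numbers are
 * illustrative, not from this file): with RAM at CPU physical 0x80000000
 * visible to the device at bus address 0x0, dev->dma_pfn_offset is 0x80000.
 * A pool at physical 0x90000000 then has pfn_base 0x90000, so the
 * device-visible base is (0x90000 - 0x80000) << PAGE_SHIFT = 0x10000000
 * (assuming 4 KiB pages).
 */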
static int dma_init_coherent_memory(
	phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
	struct dma_coherent_mem **mem)
{
	struct dma_coherent_mem *dma_mem = NULL;
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	int ret;

	if (!size) {
		ret = -EINVAL;
		goto out;
	}

	mem_base = memremap(phys_addr, size, MEMREMAP_WC);
	if (!mem_base) {
		ret = -EINVAL;
		goto out;
	}
	dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dma_mem) {
		ret = -ENOMEM;
		goto out;
	}
	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dma_mem->bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	dma_mem->virt_base = mem_base;
	dma_mem->device_base = device_addr;
	dma_mem->pfn_base = PFN_DOWN(phys_addr);
	dma_mem->size = pages;
	dma_mem->flags = flags;
	spin_lock_init(&dma_mem->spinlock);

	*mem = dma_mem;
	return 0;

out:
	kfree(dma_mem);
	if (mem_base)
		memunmap(mem_base);
	return ret;
}
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
{
	if (!mem)
		return;

	memunmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
static int dma_assign_coherent_memory(struct device *dev,
				      struct dma_coherent_mem *mem)
{
	if (!dev)
		return -ENODEV;

	if (dev->dma_mem)
		return -EBUSY;

	dev->dma_mem = mem;
	return 0;
}
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	struct dma_coherent_mem *mem;
	int ret;

	ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
	if (ret)
		return ret;

	ret = dma_assign_coherent_memory(dev, mem);
	if (ret)
		dma_release_coherent_memory(mem);
	return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
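/*
 * Usage sketch (hypothetical; the "foo" driver and its second MEM resource
 * are invented for illustration): a platform driver with dedicated on-chip
 * SRAM can route its coherent allocations through that SRAM by declaring it
 * here, typically from probe(). Bus and CPU addresses coincide in this
 * sketch.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct resource *res;
 *		int ret;
 *
 *		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 *		if (!res)
 *			return -ENODEV;
 *
 *		ret = dma_declare_coherent_memory(&pdev->dev, res->start,
 *						  res->start,
 *						  resource_size(res),
 *						  DMA_MEMORY_EXCLUSIVE);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */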
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dma_release_coherent_memory(mem);
	dev->dma_mem = NULL;
}
EXPORT_SYMBOL(dma_release_declared_memory);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	unsigned long flags;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&mem->spinlock, flags);
	pos = PFN_DOWN(device_addr - dma_get_device_base(dev, mem));
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	spin_unlock_irqrestore(&mem->spinlock, flags);

	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
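/*
 * Usage sketch (hedged; the device address and size are invented): a driver
 * that must keep a firmware-owned window out of the allocator can pin it
 * right after declaring the pool. The return value is the kernel virtual
 * address of the reserved region, or an ERR_PTR() on failure.
 *
 *	void *fw_buf;
 *
 *	fw_buf = dma_mark_declared_memory_occupied(&pdev->dev,
 *						   0x10000000, SZ_64K);
 *	if (IS_ERR(fw_buf))
 *		return PTR_ERR(fw_buf);
 */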
static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
				       ssize_t size, dma_addr_t *dma_handle)
{
	int order = get_order(size);
	unsigned long flags;
	int pageno;
	void *ret;

	spin_lock_irqsave(&mem->spinlock, flags);

	if (unlikely(size > (mem->size << PAGE_SHIFT)))
		goto err;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (unlikely(pageno < 0))
		goto err;

	/*
	 * Memory was found in the coherent area.
	 */
	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
	ret = mem->virt_base + (pageno << PAGE_SHIFT);
	spin_unlock_irqrestore(&mem->spinlock, flags);
	memset(ret, 0, size);
	return ret;

err:
	spin_unlock_irqrestore(&mem->spinlock, flags);
	return NULL;
}
/**
 * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	if (!mem)
		return 0;

	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
	if (*ret)
		return 1;

	/*
	 * In the case where the allocation cannot be satisfied from the
	 * per-device area, try to fall back to generic memory if the
	 * constraints allow it.
	 */
	return mem->flags & DMA_MEMORY_EXCLUSIVE;
}
EXPORT_SYMBOL(dma_alloc_from_dev_coherent);
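/*
 * Call pattern (a minimal sketch of the per-arch hook described above;
 * arch_alloc_pages() stands in for whatever generic allocator the
 * architecture uses and is not a real function):
 *
 *	void *dma_alloc_coherent(struct device *dev, size_t size,
 *				 dma_addr_t *handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		// Pool hit returns ret; an exclusive-pool miss returns
 *		// nonzero with ret == NULL, suppressing the fallback.
 *		if (dma_alloc_from_dev_coherent(dev, size, handle, &ret))
 *			return ret;
 *
 *		return arch_alloc_pages(dev, size, handle, gfp);
 *	}
 */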
void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
{
	if (!dma_coherent_default_memory)
		return NULL;

	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
			dma_handle);
}
static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
				       int order, void *vaddr)
{
	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		unsigned long flags;

		spin_lock_irqsave(&mem->spinlock, flags);
		bitmap_release_region(mem->bitmap, page, order);
		spin_unlock_irqrestore(&mem->spinlock, flags);
		return 1;
	}
	return 0;
}
/**
 * dma_release_from_dev_coherent() - free memory to device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if the caller should
 * proceed with releasing memory from generic pools.
 */
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_release_from_coherent(mem, order, vaddr);
}
EXPORT_SYMBOL(dma_release_from_dev_coherent);
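/*
 * Release-side counterpart to the allocation sketch above (hedged;
 * arch_free_pages() is again a stand-in, not a real function):
 *
 *	void dma_free_coherent(struct device *dev, size_t size,
 *			       void *vaddr, dma_addr_t handle)
 *	{
 *		int order = get_order(size);
 *
 *		if (dma_release_from_dev_coherent(dev, order, vaddr))
 *			return;
 *
 *		arch_free_pages(dev, size, vaddr, handle);
 *	}
 */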
int dma_release_from_global_coherent(int order, void *vaddr)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_release_from_coherent(dma_coherent_default_memory, order,
			vaddr);
}
static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
		struct vm_area_struct *vma, void *vaddr, size_t size, int *ret)
{
	if (mem && vaddr >= mem->virt_base && vaddr + size <=
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		unsigned long off = vma->vm_pgoff;
		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		int user_count = vma_pages(vma);
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		*ret = -ENXIO;
		if (off < count && user_count <= count - off) {
			unsigned long pfn = mem->pfn_base + start + off;
			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
					       user_count << PAGE_SHIFT,
					       vma->vm_page_prot);
		}
		return 1;
	}
	return 0;
}
/**
 * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool
 * @dev:	device from which the memory was allocated
 * @vma:	vm_area for the userspace memory
 * @vaddr:	cpu address returned by dma_alloc_from_dev_coherent
 * @size:	size of the memory buffer allocated
 * @ret:	result from remap_pfn_range()
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, maps that memory to the provided vma.
 *
 * Returns 1 if @vaddr belongs to the device coherent pool and the caller
 * should return @ret, or 0 if they should proceed with mapping memory from
 * generic areas.
 */
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *vaddr, size_t size, int *ret)
{
	struct dma_coherent_mem *mem = dev_get_coherent_memory(dev);

	return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret);
}
EXPORT_SYMBOL(dma_mmap_from_dev_coherent);
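/*
 * Usage sketch (hedged; the handler name is invented and the surrounding
 * ops structure is omitted): a driver's mmap path tries the per-device
 * pool first and only falls through to the generic helper on a miss.
 *
 *	static int foo_mmap(struct device *dev, struct vm_area_struct *vma,
 *			    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 *	{
 *		int ret;
 *
 *		if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 *			return ret;
 *
 *		return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
 *	}
 */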
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr,
				   size_t size, int *ret)
{
	if (!dma_coherent_default_memory)
		return 0;

	return __dma_mmap_from_coherent(dma_coherent_default_memory, vma,
					vaddr, size, ret);
}
/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

static struct reserved_mem *dma_reserved_default_memory __initdata;
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	struct dma_coherent_mem *mem = rmem->priv;
	int ret;

	if (!mem) {
		ret = dma_init_coherent_memory(rmem->base, rmem->base,
					       rmem->size,
					       DMA_MEMORY_EXCLUSIVE, &mem);
		if (ret) {
			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
				&rmem->base, (unsigned long)rmem->size / SZ_1M);
			return ret;
		}
	}
	mem->use_dev_dma_pfn_offset = true;
	rmem->priv = mem;
	dma_assign_coherent_memory(dev, mem);
	return 0;
}
static void rmem_dma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	if (dev)
		dev->dma_mem = NULL;
}

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init    = rmem_dma_device_init,
	.device_release = rmem_dma_device_release,
};
static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL))
		return -EINVAL;

#ifdef CONFIG_ARM
	if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
		pr_err("Reserved memory: regions without no-map are not yet supported\n");
		return -EINVAL;
	}

	if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) {
		WARN(dma_reserved_default_memory,
		     "Reserved memory: region for default DMA coherent area is redefined\n");
		dma_reserved_default_memory = rmem;
	}
#endif

	rmem->ops = &rmem_dma_ops;
	pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}
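/*
 * Device tree usage (illustrative sketch; node names, addresses and sizes
 * are invented): a "shared-dma-pool" region is picked up by rmem_dma_setup()
 * above and assigned to a device through its "memory-region" property.
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		dma_pool: dma-pool@78000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x78000000 0x800000>;
 *			no-map;
 *		};
 *	};
 *
 *	somedevice@80000000 {
 *		...
 *		memory-region = <&dma_pool>;
 *	};
 */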
static int __init dma_init_reserved_memory(void)
{
	const struct reserved_mem_ops *ops;
	int ret;

	if (!dma_reserved_default_memory)
		return -ENOMEM;

	ops = dma_reserved_default_memory->ops;

	/*
	 * We rely on rmem_dma_device_init() not propagating the error from
	 * dma_assign_coherent_memory() for a NULL device.
	 */
	ret = ops->device_init(dma_reserved_default_memory, NULL);

	if (!ret) {
		dma_coherent_default_memory = dma_reserved_default_memory->priv;
		pr_info("DMA: default coherent area is set\n");
	}

	return ret;
}

core_initcall(dma_init_reserved_memory);
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
#endif