/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
	struct device		*dev;
};

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	dma_addr_t			dma_addr;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	atomic_t			refcount;
	struct sg_table			*sgt_base;

	/* USERPTR related */
	struct vm_area_struct		*vma;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
	void (*cb)(struct page *pg))
{
	struct scatterlist *s;
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		struct page *page = sg_page(s);
		unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
			>> PAGE_SHIFT;
		unsigned int j;

		/* invoke the callback on every page backing this sg entry */
		for (j = 0; j < n_pages; ++j, ++page)
			cb(page);
	}
}

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	/* sum up chunk sizes for as long as each chunk starts exactly
	 * where the previous one ended */
	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

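/*
 * Illustrative example (hypothetical addresses): for a mapped scatterlist
 * of three 4 KiB chunks at DMA addresses 0x10000000, 0x10001000 and
 * 0x20000000, the second chunk begins exactly where the first one ends,
 * so the two coalesce; the third does not, and the function reports a
 * contiguous size of 8 KiB.
 */
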
/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

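/*
 * Note on the two sync helpers above: vb2 calls vb2_dc_prepare() before a
 * buffer is queued to the device (sync towards the device) and
 * vb2_dc_finish() before it is handed back to userspace (sync towards the
 * CPU). MMAP buffers come from dma_alloc_coherent() and have no dma_sgt,
 * and pinned DMABUF buffers are flushed by their exporter, so both cases
 * return early.
 */
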
/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!atomic_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct device *dev = conf->dev;
	struct vb2_dc_buf *buf;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* align image size to PAGE_SIZE */
	size = PAGE_ALIGN(size);

	buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	if (!buf->vaddr) {
		dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
		buf->dma_addr, buf->size);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dir == dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dir);
		attach->dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dir = dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.kmap = vb2_dc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

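/*
 * Summary of the exporter flow (as implemented above): an importing device
 * triggers .attach, which duplicates the base scatterlist into a private
 * vb2_dc_attachment; .map_dma_buf then maps that copy for the importer and
 * caches the mapping, reusing it while the requested direction stays the
 * same; .detach unmaps and frees the copy.
 */
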
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
		buf->size);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
	int n_pages, struct vm_area_struct *vma, int write)
{
	if (vma_is_io(vma)) {
		unsigned int i;

		/* no struct page behind VM_IO/VM_PFNMAP mappings, so
		 * translate each pfn by hand */
		for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);

			if (ret) {
				pr_err("no page for address %lu\n", start);
				return ret;
			}
			pages[i] = pfn_to_page(pfn);
		}
	} else {
		int n;

		n = get_user_pages(current, current->mm, start & PAGE_MASK,
			n_pages, write, 1, pages, NULL);
		/* negative error means that no page was pinned */
		n = max(n, 0);
		if (n != n_pages) {
			pr_err("got only %d of %d user pages\n", n, n_pages);
			while (n)
				put_page(pages[--n]);
			return -EFAULT;
		}
	}

	return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
	set_page_dirty_lock(page);
	put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

	sg_free_table(sgt);
	kfree(sgt);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	unsigned long start;
	unsigned long end;
	unsigned long offset;
	struct page **pages;
	int n_pages;
	int ret = 0;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	start = vaddr & PAGE_MASK;
	offset = vaddr & ~PAGE_MASK;
	end = PAGE_ALIGN(vaddr + size);
	n_pages = (end - start) >> PAGE_SHIFT;

	pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		pr_err("failed to allocate pages table\n");
		goto fail_buf;
	}

	/* current->mm->mmap_sem is taken by videobuf2 core */
	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		pr_err("no vma for address %lu\n", vaddr);
		ret = -EFAULT;
		goto fail_pages;
	}

	if (vma->vm_end < vaddr + size) {
		pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
		ret = -EFAULT;
		goto fail_pages;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		pr_err("failed to copy vma\n");
		ret = -ENOMEM;
		goto fail_pages;
	}

	/* extract page list from userspace mapping */
	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
	if (ret) {
		pr_err("failed to get user pages\n");
		goto fail_vma;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_get_user_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/* pages are no longer needed */
	kfree(pages);
	pages = NULL;

	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
		buf->dma_dir);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->size = size;
	buf->dma_sgt = sgt;

	return buf;

fail_map_sg:
	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
	if (!vma_is_io(buf->vma))
		vb2_dc_sgt_foreach_page(sgt, put_page);
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_get_user_pages:
	if (pages && !vma_is_io(buf->vma))
		while (n_pages)
			put_page(pages[--n_pages]);

fail_vma:
	vb2_put_vma(buf->vma);

fail_pages:
	kfree(pages); /* kfree is NULL-proof */

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR_OR_NULL(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, int write)
{
	struct vb2_dc_conf *conf = alloc_ctx;
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
	struct vb2_dc_conf *conf;

	conf = kzalloc(sizeof *conf, GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
	kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

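/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * typically wires this allocator into its vb2 queue roughly as follows;
 * the "priv" structure and field names are hypothetical.
 *
 *	priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(priv->alloc_ctx))
 *		return PTR_ERR(priv->alloc_ctx);
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 *
 * The context is handed back to vb2 from the driver's queue_setup callback
 * (via the alloc_ctxs[] array) and released on teardown with
 * vb2_dma_contig_cleanup_ctx(priv->alloc_ctx).
 */
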
MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");