/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>

static inline int is_dma_buf_file(struct file *);

static int dma_buf_release(struct inode *inode, struct file *file)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        dmabuf->ops->release(dmabuf);
        kfree(dmabuf);
        return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
        struct dma_buf *dmabuf;

        if (!is_dma_buf_file(file))
                return -EINVAL;

        dmabuf = file->private_data;

        /* check for overflowing the buffer's size */
        if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        return dmabuf->ops->mmap(dmabuf, vma);
}

static const struct file_operations dma_buf_fops = {
        .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
};

/**
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
        return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * @priv: [in] Attach private data of allocator to this buffer
 * @ops: [in] Attach allocator-defined dma buf ops to the new buffer.
 * @size: [in] Size of the buffer
 * @flags: [in] mode flags for the file.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops or an error in allocating struct dma_buf, returns a negative error
 * wrapped in ERR_PTR().
 */
struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
                                size_t size, int flags)
{
        struct dma_buf *dmabuf;
        struct file *file;

        /* all non-optional ops must be provided by the exporter */
        if (WARN_ON(!priv || !ops
                          || !ops->map_dma_buf
                          || !ops->unmap_dma_buf
                          || !ops->release
                          || !ops->kmap_atomic
                          || !ops->kmap
                          || !ops->mmap)) {
                return ERR_PTR(-EINVAL);
        }

        dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
        if (dmabuf == NULL)
                return ERR_PTR(-ENOMEM);

        dmabuf->priv = priv;
        dmabuf->ops = ops;
        dmabuf->size = size;

        file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
        if (IS_ERR(file)) {
                /* don't leak the dma_buf if the anon file can't be created */
                kfree(dmabuf);
                return ERR_CAST(file);
        }

        dmabuf->file = file;

        mutex_init(&dmabuf->lock);
        INIT_LIST_HEAD(&dmabuf->attachments);

        return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export);
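
/*
 * A minimal usage sketch for the exporter side. "example_export_buffer" and
 * "my_ops" are hypothetical names for illustration only; a real exporter
 * supplies its own fully populated dma_buf_ops table.
 */
static inline struct dma_buf *example_export_buffer(void *buffer_priv,
                                size_t size, const struct dma_buf_ops *my_ops)
{
        /* the mode flags end up as f_flags of the backing anon file */
        return dma_buf_export(buffer_priv, my_ops, size, O_RDWR);
}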

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf: [in] pointer to dma_buf for which fd is required.
 * @flags: [in] flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
        int error, fd;

        if (!dmabuf || !dmabuf->file)
                return -EINVAL;

        error = get_unused_fd_flags(flags);
        if (error < 0)
                return error;
        fd = error;

        fd_install(fd, dmabuf->file);

        return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
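
/*
 * A minimal usage sketch: handing a buffer out to userspace, for example
 * from an exporter's ioctl. "example_buffer_to_fd" is a hypothetical name.
 */
static inline int example_buffer_to_fd(struct dma_buf *dmabuf)
{
        /*
         * install the dma_buf's backing file in the current task's fd
         * table; O_CLOEXEC keeps the fd from leaking across exec()
         */
        return dma_buf_fd(dmabuf, O_CLOEXEC);
}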

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd: [in] fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
        struct file *file;

        file = fget(fd);

        if (!file)
                return ERR_PTR(-EBADF);

        if (!is_dma_buf_file(file)) {
                fput(file);
                return ERR_PTR(-EINVAL);
        }

        return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf: [in] buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput()
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
        if (WARN_ON(!dmabuf || !dmabuf->file))
                return;

        fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf: [in] buffer to attach device to.
 * @dev: [in] device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; may return negative
 * error codes wrapped in ERR_PTR().
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
                                          struct device *dev)
{
        struct dma_buf_attachment *attach;
        int ret;

        if (WARN_ON(!dmabuf || !dev))
                return ERR_PTR(-EINVAL);

        attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
        if (attach == NULL)
                return ERR_PTR(-ENOMEM);

        attach->dev = dev;
        attach->dmabuf = dmabuf;

        mutex_lock(&dmabuf->lock);

        if (dmabuf->ops->attach) {
                ret = dmabuf->ops->attach(dmabuf, dev, attach);
                if (ret)
                        goto err_attach;
        }
        list_add(&attach->node, &dmabuf->attachments);

        mutex_unlock(&dmabuf->lock);
        return attach;

err_attach:
        kfree(attach);
        mutex_unlock(&dmabuf->lock);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf: [in] buffer to detach from.
 * @attach: [in] attachment to be detached; is free'd after this call.
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
        if (WARN_ON(!dmabuf || !attach))
                return;

        mutex_lock(&dmabuf->lock);
        list_del(&attach->node);
        if (dmabuf->ops->detach)
                dmabuf->ops->detach(dmabuf, attach);

        mutex_unlock(&dmabuf->lock);
        kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach: [in] attachment whose scatterlist is to be returned
 * @direction: [in] direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; may return NULL
 * or an ERR_PTR on failure.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
                                        enum dma_data_direction direction)
{
        struct sg_table *sg_table = ERR_PTR(-EINVAL);

        might_sleep();

        if (WARN_ON(!attach || !attach->dmabuf))
                return ERR_PTR(-EINVAL);

        sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);

        return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach: [in] attachment to unmap buffer from
 * @sg_table: [in] scatterlist info of the buffer to unmap
 * @direction: [in] direction of DMA transfer
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
                                struct sg_table *sg_table,
                                enum dma_data_direction direction)
{
        if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
                return;

        attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
                                                direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
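
/*
 * A minimal usage sketch of the whole importer sequence around the four
 * calls above: attach the importing device, map the buffer for DMA, run the
 * transfer, then unwind in reverse order. "example_device_dma" is a
 * hypothetical name.
 */
static inline int example_device_dma(struct dma_buf *dmabuf,
                                     struct device *dev)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(dmabuf, dev);
        if (IS_ERR(attach))
                return PTR_ERR(attach);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {      /* NULL is also a documented failure */
                dma_buf_detach(dmabuf, attach);
                return sgt ? PTR_ERR(sgt) : -ENOMEM;
        }

        /* ... program the device with sgt's entries and run the DMA ... */

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        dma_buf_detach(dmabuf, attach);
        return 0;
}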

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf: [in] buffer to prepare cpu access for.
 * @start: [in] start of range for cpu access.
 * @len: [in] length of range for cpu access.
 * @direction: [in] direction of access for cpu.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
                             enum dma_data_direction direction)
{
        int ret = 0;

        if (WARN_ON(!dmabuf))
                return -EINVAL;

        if (dmabuf->ops->begin_cpu_access)
                ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf: [in] buffer to complete cpu access for.
 * @start: [in] start of range for cpu access.
 * @len: [in] length of range for cpu access.
 * @direction: [in] direction of access for cpu.
 *
 * This call must always succeed.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
                            enum dma_data_direction direction)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->end_cpu_access)
                dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf: [in] buffer to map page from.
 * @page_num: [in] page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf: [in] buffer to unmap page from.
 * @page_num: [in] page in PAGE_SIZE units to unmap.
 * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
                           void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->kunmap_atomic)
                dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
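
/*
 * A minimal usage sketch: an atomic peek at the first word of the buffer.
 * Nothing between the map and unmap may sleep, so only a trivial access is
 * done here; anything that can fail or sleep belongs in begin_cpu_access.
 * "example_peek_first_word" is a hypothetical name.
 */
static inline u32 example_peek_first_word(struct dma_buf *dmabuf)
{
        void *vaddr = dma_buf_kmap_atomic(dmabuf, 0);   /* page 0 */
        u32 val = *(u32 *)vaddr;

        dma_buf_kunmap_atomic(dmabuf, 0, vaddr);
        return val;
}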

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf: [in] buffer to map page from.
 * @page_num: [in] page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
        WARN_ON(!dmabuf);

        return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf: [in] buffer to unmap page from.
 * @page_num: [in] page in PAGE_SIZE units to unmap.
 * @vaddr: [in] kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
                    void *vaddr)
{
        WARN_ON(!dmabuf);

        if (dmabuf->ops->kunmap)
                dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
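
/*
 * A minimal usage sketch tying the CPU-access interfaces together: bracket a
 * kernel read of the first page with begin/end_cpu_access and use the
 * page-wise kmap interface in between. "example_cpu_read" is a hypothetical
 * name; memcpy() assumes <linux/string.h> is reachable here.
 */
static inline int example_cpu_read(struct dma_buf *dmabuf, void *dst,
                                   size_t len)
{
        void *vaddr;
        int ret;

        if (len > PAGE_SIZE)
                return -EINVAL;

        ret = dma_buf_begin_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
        if (ret)
                return ret;

        vaddr = dma_buf_kmap(dmabuf, 0);        /* page 0 */
        memcpy(dst, vaddr, len);
        dma_buf_kunmap(dmabuf, 0, vaddr);

        dma_buf_end_cpu_access(dmabuf, 0, len, DMA_FROM_DEVICE);
        return 0;
}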

/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf: [in] buffer that should back the vma
 * @vma: [in] vma for the mmap
 * @pgoff: [in] offset in pages where this mmap should start within the
 *		dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
                 unsigned long pgoff)
{
        if (WARN_ON(!dmabuf || !vma))
                return -EINVAL;

        /* check for offset overflow */
        if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
                return -EOVERFLOW;

        /* check for overflowing the buffer's size */
        if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
            dmabuf->size >> PAGE_SHIFT)
                return -EINVAL;

        /* readjust the vma */
        if (vma->vm_file)
                fput(vma->vm_file);

        vma->vm_file = get_file(dmabuf->file);

        vma->vm_pgoff = pgoff;

        return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
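
/*
 * A minimal usage sketch: a driver forwarding a userspace mmap request for an
 * exported object to the object's dma_buf, mapping from the start of the
 * buffer (pgoff 0). "example_forward_mmap" is a hypothetical name.
 */
static inline int example_forward_mmap(struct dma_buf *dmabuf,
                                       struct vm_area_struct *vma)
{
        /* vma->vm_file is swapped for the dma_buf's own file by this call */
        return dma_buf_mmap(dmabuf, vma, 0);
}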

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf: [in] buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linear in kernel space for high use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
        if (WARN_ON(!dmabuf))
                return NULL;

        if (dmabuf->ops->vmap)
                return dmabuf->ops->vmap(dmabuf);
        return NULL;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf: [in] buffer to vunmap
 * @vaddr: [in] vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        if (WARN_ON(!dmabuf))
                return;

        if (dmabuf->ops->vunmap)
                dmabuf->ops->vunmap(dmabuf, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
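
/*
 * A minimal usage sketch: create a linear kernel mapping of the whole buffer
 * for longer-lived access, then tear it down again. "example_linear_access"
 * is a hypothetical name.
 */
static inline int example_linear_access(struct dma_buf *dmabuf)
{
        void *vaddr = dma_buf_vmap(dmabuf);

        if (!vaddr)
                return -ENOMEM; /* exporter lacks vmap support, or vmap failed */

        /* ... dmabuf->size bytes are addressable linearly at vaddr ... */

        dma_buf_vunmap(dmabuf, vaddr);
        return 0;
}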