/*
 * helper functions for physically contiguous capture buffers
 *
 * The functions support hardware lacking scatter gather support
 * (i.e. the buffers must be linear in physical memory)
 *
 * Copyright (c) 2008 Magnus Damm
 *
 * Based on videobuf-vmalloc.c,
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <media/videobuf-dma-contig.h>

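/*
 * Per-buffer private data: the kernel virtual address and bus (DMA)
 * address of the physically contiguous memory backing one buffer, plus
 * a flag telling whether that memory was supplied from user space.
 */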
struct videobuf_dma_contig_memory {
        u32 magic;
        void *vaddr;
        dma_addr_t dma_handle;
        unsigned long size;
        int is_userptr;
};

#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)                                             \
        if (unlikely((is) != (should))) {                                   \
                pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
                BUG();                                                      \
        }

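/* mmap() open handler: take another reference on the buffer mapping */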
static void videobuf_vm_open(struct vm_area_struct *vma)
{
        struct videobuf_mapping *map = vma->vm_private_data;

        dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
                map, map->count, vma->vm_start, vma->vm_end);

        map->count++;
}

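/*
 * mmap() close handler: drop one reference on the mapping.  When the last
 * user unmaps, cancel streaming if necessary, release the DMA-coherent
 * memory of every buffer tied to this mapping and free the mapping itself.
 */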
static void videobuf_vm_close(struct vm_area_struct *vma)
{
        struct videobuf_mapping *map = vma->vm_private_data;
        struct videobuf_queue *q = map->q;
        int i;

        dev_dbg(map->q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
                map, map->count, vma->vm_start, vma->vm_end);

        map->count--;
        if (0 == map->count) {
                struct videobuf_dma_contig_memory *mem;

                dev_dbg(map->q->dev, "munmap %p q=%p\n", map, q);
                mutex_lock(&q->vb_lock);

                /* Cancel any active streams before unmapping. */
                if (q->streaming)
                        videobuf_queue_cancel(q);

                for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                        if (NULL == q->bufs[i])
                                continue;

                        if (q->bufs[i]->map != map)
                                continue;

                        mem = q->bufs[i]->priv;
                        if (mem) {
                                /* This callback is called only if the kernel
                                   has allocated memory and that memory has
                                   been mmapped.  In this case the memory must
                                   be freed here to tear the mapping down.
                                 */
                                MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

                                /* dma_free_coherent() is not atomic and must
                                   not be called with IRQs disabled, so the
                                   memory is released here, in process
                                   context.
                                 */
                                dev_dbg(map->q->dev, "buf[%d] freeing %p\n",
                                        i, mem->vaddr);

                                dma_free_coherent(q->dev, mem->size,
                                                  mem->vaddr, mem->dma_handle);
                                mem->vaddr = NULL;
                        }

                        q->bufs[i]->map = NULL;
                        q->bufs[i]->baddr = 0;
                }

                kfree(map);

                mutex_unlock(&q->vb_lock);
        }
}

static const struct vm_operations_struct videobuf_vm_ops = {
        .open   = videobuf_vm_open,
        .close  = videobuf_vm_close,
};

/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
        mem->is_userptr = 0;
        mem->dma_handle = 0;
        mem->size = 0;
}

/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
                                        struct videobuf_buffer *vb)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long prev_pfn, this_pfn;
        unsigned long pages_done, user_address;
        int ret;

        mem->size = PAGE_ALIGN(vb->size);
        mem->is_userptr = 0;
        ret = -EINVAL;

        down_read(&mm->mmap_sem);

        vma = find_vma(mm, vb->baddr);
        if (!vma)
                goto out_up;

        if ((vb->baddr + mem->size) > vma->vm_end)
                goto out_up;

        pages_done = 0;
        prev_pfn = 0; /* kill warning */
        user_address = vb->baddr;

        while (pages_done < (mem->size >> PAGE_SHIFT)) {
                ret = follow_pfn(vma, user_address, &this_pfn);
                if (ret)
                        break;

                if (pages_done == 0)
                        mem->dma_handle = this_pfn << PAGE_SHIFT;
                else if (this_pfn != (prev_pfn + 1))
                        ret = -EFAULT;

                if (ret)
                        break;

                prev_pfn = this_pfn;
                user_address += PAGE_SIZE;
                pages_done++;
        }

        if (!ret)
                mem->is_userptr = 1;

out_up:
        up_read(&current->mm->mmap_sem);

        return ret;
}

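/*
 * Allocate a videobuf_buffer and its videobuf_dma_contig_memory private
 * data in one kzalloc(); vb->priv points just past the buffer structure.
 */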
static void *__videobuf_alloc(size_t size)
{
        struct videobuf_dma_contig_memory *mem;
        struct videobuf_buffer *vb;

        vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
        if (vb) {
                mem = vb->priv = ((char *)vb) + size;
                mem->magic = MAGIC_DC_MEM;
        }

        return vb;
}

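/* Return the kernel virtual address of the buffer (NULL if not allocated) */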
static void *__videobuf_to_vmalloc(struct videobuf_buffer *buf)
{
        struct videobuf_dma_contig_memory *mem = buf->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        return mem->vaddr;
}

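/*
 * Prepare a buffer for I/O according to its memory type: MMAP buffers must
 * already have been allocated by __videobuf_mmap_mapper(); USERPTR buffers
 * are either validated as physically contiguous user memory or, for the
 * read() method (baddr == 0), backed by a fresh DMA-coherent allocation.
 */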
static int __videobuf_iolock(struct videobuf_queue *q,
                             struct videobuf_buffer *vb,
                             struct v4l2_framebuffer *fbuf)
{
        struct videobuf_dma_contig_memory *mem = vb->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        switch (vb->memory) {
        case V4L2_MEMORY_MMAP:
                dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

                /* All handling should be done by __videobuf_mmap_mapper() */
                if (!mem->vaddr) {
                        dev_err(q->dev, "memory is not allocated/mmapped.\n");
                        return -EINVAL;
                }
                break;
        case V4L2_MEMORY_USERPTR:
                dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

                /* handle pointer from user space */
                if (vb->baddr)
                        return videobuf_dma_contig_user_get(mem, vb);

                /* allocate memory for the read() method */
                mem->size = PAGE_ALIGN(vb->size);
                mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
                                                &mem->dma_handle, GFP_KERNEL);
                if (!mem->vaddr) {
                        dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
                                mem->size);
                        return -ENOMEM;
                }

                dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
                        mem->vaddr, mem->size);
                break;
        case V4L2_MEMORY_OVERLAY:
        default:
                dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
                        __func__);
                return -EINVAL;
        }

        return 0;
}

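/* Buffers may only be freed once no buffer in the queue is still mmapped */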
static int __videobuf_mmap_free(struct videobuf_queue *q)
{
        unsigned int i;

        dev_dbg(q->dev, "%s\n", __func__);
        for (i = 0; i < VIDEO_MAX_FRAME; i++) {
                if (q->bufs[i] && q->bufs[i]->map)
                        return -EBUSY;
        }

        return 0;
}

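/*
 * mmap() one MMAP buffer: find the buffer whose offset matches the
 * requested vm_pgoff, allocate DMA-coherent memory for it, and remap that
 * memory uncached into the caller's address space.
 */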
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
                                  struct vm_area_struct *vma)
{
        struct videobuf_dma_contig_memory *mem;
        struct videobuf_mapping *map;
        unsigned int first;
        int retval;
        unsigned long size, offset = vma->vm_pgoff << PAGE_SHIFT;

        dev_dbg(q->dev, "%s\n", __func__);
        if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        /* look for first buffer to map */
        for (first = 0; first < VIDEO_MAX_FRAME; first++) {
                if (!q->bufs[first])
                        continue;

                if (V4L2_MEMORY_MMAP != q->bufs[first]->memory)
                        continue;
                if (q->bufs[first]->boff == offset)
                        break;
        }
        if (VIDEO_MAX_FRAME == first) {
                dev_dbg(q->dev, "invalid user space offset [offset=0x%lx]\n",
                        offset);
                return -EINVAL;
        }

        /* create mapping + update buffer list */
        map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
        if (!map)
                return -ENOMEM;

        q->bufs[first]->map = map;
        map->start = vma->vm_start;
        map->end = vma->vm_end;
        map->q = q;

        q->bufs[first]->baddr = vma->vm_start;

        mem = q->bufs[first]->priv;
        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        mem->size = PAGE_ALIGN(q->bufs[first]->bsize);
        mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
                                        &mem->dma_handle, GFP_KERNEL);
        if (!mem->vaddr) {
                dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
                        mem->size);
                goto error;
        }
        dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
                mem->vaddr, mem->size);

        /* Try to remap memory */
        size = vma->vm_end - vma->vm_start;
        size = (size < mem->size) ? size : mem->size;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        retval = remap_pfn_range(vma, vma->vm_start,
                                 mem->dma_handle >> PAGE_SHIFT,
                                 size, vma->vm_page_prot);
        if (retval) {
                dev_err(q->dev, "mmap: remap failed with error %d\n", retval);
                dma_free_coherent(q->dev, mem->size,
                                  mem->vaddr, mem->dma_handle);
                goto error;
        }

        vma->vm_ops          = &videobuf_vm_ops;
        vma->vm_flags       |= VM_DONTEXPAND;
        vma->vm_private_data = map;

        dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
                map, q, vma->vm_start, vma->vm_end,
                (long int)q->bufs[first]->bsize,
                vma->vm_pgoff, first);

        videobuf_vm_open(vma);

        return 0;

error:
        /* don't leave the buffer pointing at the freed mapping */
        q->bufs[first]->map = NULL;
        q->bufs[first]->baddr = 0;
        kfree(map);
        return -ENOMEM;
}

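/* Copy data from the current read buffer to user space */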
static int __videobuf_copy_to_user(struct videobuf_queue *q,
                                   char __user *data, size_t count,
                                   int nonblocking)
{
        struct videobuf_dma_contig_memory *mem = q->read_buf->priv;
        void *vaddr;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
        BUG_ON(!mem->vaddr);

        /* copy to userspace */
        if (count > q->read_buf->size - q->read_off)
                count = q->read_buf->size - q->read_off;

        vaddr = mem->vaddr;

        if (copy_to_user(data, vaddr + q->read_off, count))
                return -EFAULT;

        return count;
}

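/* Stream-mode read() helper; optionally patches a VBI frame counter into
   the last word of the buffer before copying it to user space */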
static int __videobuf_copy_stream(struct videobuf_queue *q,
                                  char __user *data, size_t count, size_t pos,
                                  int vbihack, int nonblocking)
{
        unsigned int *fc;
        struct videobuf_dma_contig_memory *mem = q->read_buf->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        if (vbihack) {
                /* dirty, undocumented hack -- pass the frame counter
                 * within the last four bytes of each vbi data block.
                 * We need that one to maintain backward compatibility
                 * to all vbi decoding software out there ... */
                fc = (unsigned int *)mem->vaddr;
                fc += (q->read_buf->size >> 2) - 1;
                *fc = q->read_buf->field_count >> 1;
                dev_dbg(q->dev, "vbihack: %d\n", *fc);
        }

        /* copy stuff using the common method */
        count = __videobuf_copy_to_user(q, data, count, nonblocking);

        if ((count == -EFAULT) && (pos == 0))
                return -EFAULT;

        return count;
}

static struct videobuf_qtype_ops qops = {
        .magic          = MAGIC_QTYPE_OPS,

        .alloc          = __videobuf_alloc,
        .iolock         = __videobuf_iolock,
        .mmap_free      = __videobuf_mmap_free,
        .mmap_mapper    = __videobuf_mmap_mapper,
        .video_copy_to_user = __videobuf_copy_to_user,
        .copy_stream    = __videobuf_copy_stream,
        .vmalloc        = __videobuf_to_vmalloc,
};

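/*
 * videobuf_queue_dma_contig_init - set up a videobuf queue backed by the
 * physically contiguous, DMA-coherent allocator implemented above.
 *
 * A minimal usage sketch (the driver-side names "mydev", "my_vb_ops" and
 * "struct my_buffer" are illustrative assumptions, not part of this file):
 *
 *	videobuf_queue_dma_contig_init(&mydev->vb_queue, &my_vb_ops,
 *				       mydev->dev, &mydev->irqlock,
 *				       V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *				       V4L2_FIELD_NONE,
 *				       sizeof(struct my_buffer), mydev);
 *
 * After a buffer has been prepared, the driver obtains the bus address to
 * program into its DMA engine with videobuf_to_dma_contig().
 */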
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
                                    struct videobuf_queue_ops *ops,
                                    struct device *dev,
                                    spinlock_t *irqlock,
                                    enum v4l2_buf_type type,
                                    enum v4l2_field field,
                                    unsigned int msize,
                                    void *priv)
{
        videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
                                 priv, &qops);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);

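/* Return the bus address a driver should hand to its DMA engine */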
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
        struct videobuf_dma_contig_memory *mem = buf->priv;

        BUG_ON(!mem);
        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);

void videobuf_dma_contig_free(struct videobuf_queue *q,
                              struct videobuf_buffer *buf)
{
        struct videobuf_dma_contig_memory *mem = buf->priv;

        /* mmapped memory cannot be freed here: the mapping may still be in
           use, so its release has to happen in videobuf_vm_close().
           Only memory set up for USERPTR buffers (including buffers
           allocated for the read() method) is handled here.
         */
        if (buf->memory != V4L2_MEMORY_USERPTR)
                return;

        if (!mem)
                return;

        MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

        /* handle user space pointer case */
        if (buf->baddr) {
                videobuf_dma_contig_user_put(mem);
                return;
        }

        /* read() method */
        dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
        mem->vaddr = NULL;
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);

MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");