[media] omap3isp: queue: Use sg_alloc_table_from_pages()
drivers/media/platform/omap3isp/ispqueue.c
/*
 * ispqueue.c
 *
 * TI OMAP3 ISP - Video buffers queue handling
 *
 * Copyright (C) 2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *           Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/omap-iommu.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "isp.h"
#include "ispqueue.h"
#include "ispvideo.h"
/* -----------------------------------------------------------------------------
 * Video buffers management
 */

/*
 * isp_video_buffer_cache_sync - Keep the buffers coherent between CPU and ISP
 *
 * The typical operation required here is cache invalidation across the
 * (user space) buffer address range, and it _must_ be done at QBUF time
 * (and *only* at QBUF).
 *
 * We try to use the optimal cache invalidation function:
 *
 * - dmac_map_area:
 *    - used when the number of pages is _low_.
 *    - it becomes quite slow as the number of pages increases.
 *       - for a 648x492 viewfinder (150 pages) it takes 1.3 ms.
 *       - for a 5 Mpix buffer (2491 pages) it takes between 25-50 ms.
 *
 * - flush_cache_all:
 *    - used when the number of pages is _high_.
 *    - time taken is in the range of 500-900 us.
 *    - has a higher penalty, as the whole dcache + icache is invalidated.
 */
/* FIXME: dmac_inv_range crashes randomly on the user space buffer
 *        address. Fall back to flush_cache_all for now.
 */
#define ISP_CACHE_FLUSH_PAGES_MAX       0

static void isp_video_buffer_cache_sync(struct isp_video_buffer *buf)
{
        if (buf->skip_cache)
                return;

        if (buf->vbuf.m.userptr == 0 || buf->npages == 0 ||
            buf->npages > ISP_CACHE_FLUSH_PAGES_MAX)
                flush_cache_all();
        else {
                dmac_map_area((void *)buf->vbuf.m.userptr, buf->vbuf.length,
                              DMA_FROM_DEVICE);
                outer_inv_range(buf->vbuf.m.userptr,
                                buf->vbuf.m.userptr + buf->vbuf.length);
        }
}
/*
 * isp_video_buffer_lock_vma - Prevent VMAs from being unmapped
 *
 * Lock the VMAs underlying the given buffer into memory. This prevents the
 * userspace buffer mapping from being swapped out, making VIPT cache handling
 * easier.
 *
 * Note that the pages will not be freed, as the buffer has been locked to
 * memory by a call to get_user_pages(), but the userspace mapping could
 * still disappear if the VMAs are not locked. This is caused by the memory
 * management code trying to be as lock-less as possible, which results in the
 * userspace mapping manager not finding out that the pages are locked under
 * some conditions.
 */
static int isp_video_buffer_lock_vma(struct isp_video_buffer *buf, int lock)
{
        struct vm_area_struct *vma;
        unsigned long start;
        unsigned long end;
        int ret = 0;

        if (buf->vbuf.memory == V4L2_MEMORY_MMAP)
                return 0;

        /* We can be called from workqueue context if the current task dies to
         * unlock the VMAs. In that case there's no current memory management
         * context so unlocking can't be performed, but the VMAs have been or
         * are getting destroyed anyway so it doesn't really matter.
         */
        if (!current || !current->mm)
                return lock ? -EINVAL : 0;

        start = buf->vbuf.m.userptr;
        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

        down_write(&current->mm->mmap_sem);
        spin_lock(&current->mm->page_table_lock);

        do {
                vma = find_vma(current->mm, start);
                if (vma == NULL) {
                        ret = -EFAULT;
                        goto out;
                }

                if (lock)
                        vma->vm_flags |= VM_LOCKED;
                else
                        vma->vm_flags &= ~VM_LOCKED;

                start = vma->vm_end + 1;
        } while (vma->vm_end < end);

        if (lock)
                buf->vm_flags |= VM_LOCKED;
        else
                buf->vm_flags &= ~VM_LOCKED;

out:
        spin_unlock(&current->mm->page_table_lock);
        up_write(&current->mm->mmap_sem);
        return ret;
}
/*
 * isp_video_buffer_prepare_kernel - Build scatter list for a kernel-allocated
 * buffer
 *
 * Retrieve the sgtable using the DMA API.
 */
static int isp_video_buffer_prepare_kernel(struct isp_video_buffer *buf)
{
        struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
        struct isp_video *video = vfh->video;

        return dma_get_sgtable(video->isp->dev, &buf->sgt, buf->vaddr,
                               buf->paddr, PAGE_ALIGN(buf->vbuf.length));
}
/*
 * isp_video_buffer_cleanup - Release pages for a userspace VMA.
 *
 * Release pages locked by a call to isp_video_buffer_prepare_user() and free
 * the pages table.
 */
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
        struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
        struct isp_video *video = vfh->video;
        enum dma_data_direction direction;
        DEFINE_DMA_ATTRS(attrs);
        unsigned int i;

        if (buf->dma) {
                omap_iommu_vunmap(video->isp->domain, video->isp->dev,
                                  buf->dma);
                buf->dma = 0;
        }

        if (buf->vbuf.memory == V4L2_MEMORY_USERPTR) {
                if (buf->skip_cache)
                        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

                direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                          ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
                dma_unmap_sg_attrs(buf->queue->dev, buf->sgt.sgl,
                                   buf->sgt.orig_nents, direction, &attrs);
        }

        sg_free_table(&buf->sgt);

        if (buf->pages != NULL) {
                isp_video_buffer_lock_vma(buf, 0);

                for (i = 0; i < buf->npages; ++i)
                        page_cache_release(buf->pages[i]);

                vfree(buf->pages);
                buf->pages = NULL;
        }

        buf->npages = 0;
        buf->skip_cache = false;
}
/*
 * isp_video_buffer_prepare_user - Prepare a userspace buffer
 *
 * This function creates a scatter list with a 1:1 mapping for a userspace VMA.
 * The number of pages is first computed based on the buffer size, and pages
 * are then retrieved by a call to get_user_pages().
 *
 * Pages are pinned to memory by get_user_pages(), making them available for
 * DMA transfers. However, due to memory management optimizations,
 * get_user_pages() doesn't seem to guarantee that the pinned pages will not
 * be written to swap and removed from the userspace mapping(s). When this
 * happens, a page fault can be generated when accessing those unmapped pages.
 *
 * If the fault is triggered by a page table walk caused by VIPT cache
 * management operations, the page fault handler might oops if the MM semaphore
 * is held, as it can't handle kernel page faults in that case. To fix that, a
 * fixup entry needs to be added to the cache management code, or the userspace
 * VMA must be locked to avoid removing pages from the userspace mapping in the
 * first place.
 *
 * If the number of pages retrieved is smaller than the number required by the
 * buffer size, the function returns -EFAULT.
 */
static int isp_video_buffer_prepare_user(struct isp_video_buffer *buf)
{
        unsigned int offset;
        unsigned long data;
        unsigned int first;
        unsigned int last;
        int ret;

        data = buf->vbuf.m.userptr;
        first = (data & PAGE_MASK) >> PAGE_SHIFT;
        last = ((data + buf->vbuf.length - 1) & PAGE_MASK) >> PAGE_SHIFT;
        offset = data & ~PAGE_MASK;

        buf->npages = last - first + 1;
        buf->pages = vmalloc(buf->npages * sizeof(buf->pages[0]));
        if (buf->pages == NULL)
                return -ENOMEM;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, data & PAGE_MASK,
                             buf->npages,
                             buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE, 0,
                             buf->pages, NULL);
        up_read(&current->mm->mmap_sem);

        if (ret != buf->npages) {
                buf->npages = ret < 0 ? 0 : ret;
                return -EFAULT;
        }

        ret = isp_video_buffer_lock_vma(buf, 1);
        if (ret < 0)
                return ret;

        ret = sg_alloc_table_from_pages(&buf->sgt, buf->pages, buf->npages,
                                        offset, buf->vbuf.length, GFP_KERNEL);
        if (ret < 0)
                return ret;

        return 0;
}
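
/*
 * Worked example (editorial illustration, not part of the original file):
 * with PAGE_SIZE = 4096, a hypothetical USERPTR buffer with
 * userptr = 0x40001100 and length = 8192 gives
 *
 *      first  = 0x40001000 >> PAGE_SHIFT = 0x40001
 *      last   = 0x40003000 >> PAGE_SHIFT = 0x40003
 *      npages = last - first + 1 = 3
 *      offset = 0x100
 *
 * so three pages are pinned and sg_alloc_table_from_pages() starts the first
 * scatterlist entry 0x100 bytes into the first page, the 8192 bytes spanning
 * 0xf00 + 0x1000 + 0x100 bytes of the three pages.
 */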
/*
 * isp_video_buffer_prepare_pfnmap - Prepare a VM_PFNMAP userspace buffer
 *
 * Userspace VM_PFNMAP buffers are supported only if they are contiguous in
 * memory and if they span a single VMA. Start by validating the user pointer
 * to make sure it fulfils that condition, and then build a scatter list of
 * physically contiguous pages starting at the buffer memory physical address.
 *
 * Return 0 on success, -EFAULT if the buffer isn't valid or -ENOMEM if memory
 * can't be allocated.
 */
static int isp_video_buffer_prepare_pfnmap(struct isp_video_buffer *buf)
{
        struct vm_area_struct *vma;
        struct scatterlist *sg;
        unsigned long prev_pfn;
        unsigned long this_pfn;
        unsigned long start;
        unsigned int offset;
        unsigned long end;
        unsigned long pfn;
        unsigned int i;
        int ret = 0;

        start = buf->vbuf.m.userptr;
        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;
        offset = start & ~PAGE_MASK;

        buf->npages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
        buf->pages = NULL;

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, start);
        if (vma == NULL || vma->vm_end < end) {
                ret = -EFAULT;
                goto unlock;
        }

        for (prev_pfn = 0; start <= end; start += PAGE_SIZE) {
                ret = follow_pfn(vma, start, &this_pfn);
                if (ret < 0)
                        goto unlock;

                if (prev_pfn == 0)
                        pfn = this_pfn;
                else if (this_pfn != prev_pfn + 1) {
                        ret = -EFAULT;
                        goto unlock;
                }

                prev_pfn = this_pfn;
        }

unlock:
        up_read(&current->mm->mmap_sem);
        if (ret < 0)
                return ret;

        ret = sg_alloc_table(&buf->sgt, buf->npages, GFP_KERNEL);
        if (ret < 0)
                return ret;

        for (sg = buf->sgt.sgl, i = 0; i < buf->npages; ++i, ++pfn) {
                sg_set_page(sg, pfn_to_page(pfn), PAGE_SIZE - offset, offset);
                sg = sg_next(sg);
                offset = 0;
        }

        return 0;
}
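
/*
 * Worked example (editorial illustration, not part of the original file):
 * a hypothetical 3-page VM_PFNMAP buffer whose pages resolve to pfns 0x81000,
 * 0x81001 and 0x81002 passes the contiguity check and is described by a
 * 3-entry scatterlist starting at pfn 0x81000. If the walk returned 0x81000,
 * 0x81001, 0x81005 instead, the function would fail with -EFAULT because the
 * underlying memory isn't physically contiguous.
 */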
/*
 * isp_video_buffer_prepare_vm_flags - Get VMA flags for a userspace address
 *
 * This function locates the VMAs for the buffer's userspace address and checks
 * that their flags match. The only flag that we need to care about at the
 * moment is VM_PFNMAP.
 *
 * The buffer vm_flags field is set to the first VMA flags.
 *
 * Return -EFAULT if no VMA can be found for part of the buffer, or if the VMAs
 * have incompatible flags.
 */
static int isp_video_buffer_prepare_vm_flags(struct isp_video_buffer *buf)
{
        struct vm_area_struct *vma;
        pgprot_t uninitialized_var(vm_page_prot);
        unsigned long start;
        unsigned long end;
        int ret = -EFAULT;

        start = buf->vbuf.m.userptr;
        end = buf->vbuf.m.userptr + buf->vbuf.length - 1;

        down_read(&current->mm->mmap_sem);

        do {
                vma = find_vma(current->mm, start);
                if (vma == NULL)
                        goto done;

                if (start == buf->vbuf.m.userptr) {
                        buf->vm_flags = vma->vm_flags;
                        vm_page_prot = vma->vm_page_prot;
                }

                if ((buf->vm_flags ^ vma->vm_flags) & VM_PFNMAP)
                        goto done;

                if (vm_page_prot != vma->vm_page_prot)
                        goto done;

                start = vma->vm_end + 1;
        } while (vma->vm_end < end);

        /* Skip cache management to enhance performance for non-cached or
         * write-combining buffers.
         */
        if (vm_page_prot == pgprot_noncached(vm_page_prot) ||
            vm_page_prot == pgprot_writecombine(vm_page_prot))
                buf->skip_cache = true;

        ret = 0;

done:
        up_read(&current->mm->mmap_sem);
        return ret;
}
/*
 * isp_video_buffer_prepare - Make a buffer ready for operation
 *
 * Preparing a buffer involves:
 *
 * - validating VMAs (userspace buffers only)
 * - locking pages and VMAs into memory (userspace buffers only)
 * - building page and scatter-gather lists
 * - mapping buffers for DMA operation
 * - performing driver-specific preparation
 *
 * The function must be called in userspace context with a valid mm context
 * (this excludes cleanup paths such as sys_close when the userspace process
 * segfaults).
 */
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
        struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
        struct isp_video *video = vfh->video;
        enum dma_data_direction direction;
        DEFINE_DMA_ATTRS(attrs);
        unsigned long addr;
        int ret;

        switch (buf->vbuf.memory) {
        case V4L2_MEMORY_MMAP:
                ret = isp_video_buffer_prepare_kernel(buf);
                if (ret < 0)
                        goto done;
                break;

        case V4L2_MEMORY_USERPTR:
                ret = isp_video_buffer_prepare_vm_flags(buf);
                if (ret < 0)
                        return ret;

                if (buf->vm_flags & VM_PFNMAP)
                        ret = isp_video_buffer_prepare_pfnmap(buf);
                else
                        ret = isp_video_buffer_prepare_user(buf);

                if (ret < 0)
                        goto done;

                if (buf->skip_cache)
                        dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

                direction = buf->vbuf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE
                          ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
                ret = dma_map_sg_attrs(buf->queue->dev, buf->sgt.sgl,
                                       buf->sgt.orig_nents, direction, &attrs);
                if (ret <= 0) {
                        ret = -EFAULT;
                        goto done;
                }

                break;

        default:
                return -EINVAL;
        }

        addr = omap_iommu_vmap(video->isp->domain, video->isp->dev, 0,
                               &buf->sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8);
        if (IS_ERR_VALUE(addr)) {
                ret = -EIO;
                goto done;
        }

        buf->dma = addr;

        if (!IS_ALIGNED(addr, 32)) {
                dev_dbg(video->isp->dev,
                        "Buffer address must be aligned to 32 bytes boundary.\n");
                ret = -EINVAL;
                goto done;
        }

        if (buf->queue->ops->buffer_prepare)
                ret = buf->queue->ops->buffer_prepare(buf);

done:
        if (ret < 0) {
                isp_video_buffer_cleanup(buf);
                return ret;
        }

        return ret;
}
/*
 * isp_video_buffer_query - Query the status of a given buffer
 *
 * Locking: must be called with the queue lock held.
 */
static void isp_video_buffer_query(struct isp_video_buffer *buf,
                                   struct v4l2_buffer *vbuf)
{
        memcpy(vbuf, &buf->vbuf, sizeof(*vbuf));

        if (buf->vma_use_count)
                vbuf->flags |= V4L2_BUF_FLAG_MAPPED;

        switch (buf->state) {
        case ISP_BUF_STATE_ERROR:
                vbuf->flags |= V4L2_BUF_FLAG_ERROR;
                /* Fallthrough */
        case ISP_BUF_STATE_DONE:
                vbuf->flags |= V4L2_BUF_FLAG_DONE;
                break;
        case ISP_BUF_STATE_QUEUED:
        case ISP_BUF_STATE_ACTIVE:
                vbuf->flags |= V4L2_BUF_FLAG_QUEUED;
                break;
        case ISP_BUF_STATE_IDLE:
        default:
                break;
        }
}
/*
 * isp_video_buffer_wait - Wait for a buffer to be ready
 *
 * In non-blocking mode, return immediately with 0 if the buffer is ready or
 * -EAGAIN if the buffer is in the QUEUED or ACTIVE state.
 *
 * In blocking mode, wait (interruptibly but with no timeout) on the buffer
 * wait queue using the same condition.
 */
static int isp_video_buffer_wait(struct isp_video_buffer *buf, int nonblocking)
{
        if (nonblocking) {
                return (buf->state != ISP_BUF_STATE_QUEUED &&
                        buf->state != ISP_BUF_STATE_ACTIVE)
                        ? 0 : -EAGAIN;
        }

        return wait_event_interruptible(buf->wait,
                buf->state != ISP_BUF_STATE_QUEUED &&
                buf->state != ISP_BUF_STATE_ACTIVE);
}
/* -----------------------------------------------------------------------------
 * Queue management
 */

/*
 * isp_video_queue_free - Free video buffers memory
 *
 * Buffers can only be freed if the queue isn't streaming and if no buffer is
 * mapped to userspace. Return -EBUSY if those conditions aren't satisfied.
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_free(struct isp_video_queue *queue)
{
        unsigned int i;

        if (queue->streaming)
                return -EBUSY;

        for (i = 0; i < queue->count; ++i) {
                if (queue->buffers[i]->vma_use_count != 0)
                        return -EBUSY;
        }

        for (i = 0; i < queue->count; ++i) {
                struct isp_video_buffer *buf = queue->buffers[i];

                isp_video_buffer_cleanup(buf);

                if (buf->vaddr) {
                        dma_free_coherent(queue->dev,
                                          PAGE_ALIGN(buf->vbuf.length),
                                          buf->vaddr, buf->paddr);
                        buf->vaddr = NULL;
                }

                kfree(buf);
                queue->buffers[i] = NULL;
        }

        INIT_LIST_HEAD(&queue->queue);
        queue->count = 0;
        return 0;
}
/*
 * isp_video_queue_alloc - Allocate video buffers memory
 *
 * This function must be called with the queue lock held.
 */
static int isp_video_queue_alloc(struct isp_video_queue *queue,
                                 unsigned int nbuffers,
                                 unsigned int size, enum v4l2_memory memory)
{
        struct isp_video_buffer *buf;
        dma_addr_t dma;
        unsigned int i;
        void *mem;
        int ret;

        /* Start by freeing the buffers. */
        ret = isp_video_queue_free(queue);
        if (ret < 0)
                return ret;

        /* Bail out if no buffers should be allocated. */
        if (nbuffers == 0)
                return 0;

        /* Initialize the allocated buffers. */
        for (i = 0; i < nbuffers; ++i) {
                buf = kzalloc(queue->bufsize, GFP_KERNEL);
                if (buf == NULL)
                        break;

                if (memory == V4L2_MEMORY_MMAP) {
                        /* Allocate video buffers memory for mmap mode. Align
                         * the size to the page size.
                         */
                        mem = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
                                                 &dma, GFP_KERNEL);
                        if (mem == NULL) {
                                kfree(buf);
                                break;
                        }

                        buf->vbuf.m.offset = i * PAGE_ALIGN(size);
                        buf->vaddr = mem;
                        buf->paddr = dma;
                }

                buf->vbuf.index = i;
                buf->vbuf.length = size;
                buf->vbuf.type = queue->type;
                buf->vbuf.flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
                buf->vbuf.field = V4L2_FIELD_NONE;
                buf->vbuf.memory = memory;

                buf->queue = queue;
                init_waitqueue_head(&buf->wait);

                queue->buffers[i] = buf;
        }

        if (i == 0)
                return -ENOMEM;

        queue->count = i;
        return nbuffers;
}
/**
 * omap3isp_video_queue_cleanup - Clean up the video buffers queue
 * @queue: Video buffers queue
 *
 * Free all allocated resources and clean up the video buffers queue. The queue
 * must not be busy (no ongoing video stream) and buffers must have been
 * unmapped.
 *
 * Return 0 on success or -EBUSY if the queue is busy or buffers haven't been
 * unmapped.
 */
int omap3isp_video_queue_cleanup(struct isp_video_queue *queue)
{
        return isp_video_queue_free(queue);
}

/**
 * omap3isp_video_queue_init - Initialize the video buffers queue
 * @queue: Video buffers queue
 * @type: V4L2 buffer type (capture or output)
 * @ops: Driver-specific queue operations
 * @dev: Device used for DMA operations
 * @bufsize: Size of the driver-specific buffer structure
 *
 * Initialize the video buffers queue with the supplied parameters.
 *
 * The queue type must be one of V4L2_BUF_TYPE_VIDEO_CAPTURE or
 * V4L2_BUF_TYPE_VIDEO_OUTPUT. Other buffer types are not supported yet.
 *
 * Buffer objects will be allocated using the given buffer size to allow room
 * for driver-specific fields. Driver-specific buffer structures must start
 * with a struct isp_video_buffer field. Drivers with no driver-specific buffer
 * structure must pass the size of the isp_video_buffer structure in the
 * bufsize parameter.
 *
 * Return 0 on success.
 */
int omap3isp_video_queue_init(struct isp_video_queue *queue,
                              enum v4l2_buf_type type,
                              const struct isp_video_queue_operations *ops,
                              struct device *dev, unsigned int bufsize)
{
        INIT_LIST_HEAD(&queue->queue);
        mutex_init(&queue->lock);
        spin_lock_init(&queue->irqlock);

        queue->type = type;
        queue->ops = ops;
        queue->dev = dev;
        queue->bufsize = bufsize;

        return 0;
}
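
/*
 * Usage sketch (editorial illustration, not part of the original file; the
 * my_* names are hypothetical). A driver would declare its queue operations
 * and a buffer structure starting with struct isp_video_buffer, and pass the
 * structure size as bufsize:
 *
 *      struct my_buffer {
 *              struct isp_video_buffer buffer;
 *              u32 my_private_state;
 *      };
 *
 *      static const struct isp_video_queue_operations my_queue_ops = {
 *              .queue_prepare  = my_queue_prepare,
 *              .buffer_prepare = my_buffer_prepare,
 *              .buffer_queue   = my_buffer_queue,
 *      };
 *
 *      ret = omap3isp_video_queue_init(&my_queue, V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *                                      &my_queue_ops, dev,
 *                                      sizeof(struct my_buffer));
 */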
/* -----------------------------------------------------------------------------
 * V4L2 operations
 */

/**
 * omap3isp_video_queue_reqbufs - Allocate video buffers memory
 *
 * This function is intended to be used as a VIDIOC_REQBUFS ioctl handler. It
 * allocates video buffer objects and, for MMAP buffers, buffer memory.
 *
 * If the number of buffers is 0, all buffers are freed and the function
 * returns without performing any allocation.
 *
 * If the number of buffers is not 0, currently allocated buffers (if any) are
 * freed and the requested number of buffers are allocated. Depending on
 * driver-specific requirements and on memory availability, a number of buffers
 * smaller or bigger than requested can be allocated. This isn't considered an
 * error.
 *
 * Return 0 on success or one of the following error codes:
 *
 * -EINVAL if the buffer type or index are invalid
 * -EBUSY if the queue is busy (streaming or buffers mapped)
 * -ENOMEM if the buffers can't be allocated due to an out-of-memory condition
 */
int omap3isp_video_queue_reqbufs(struct isp_video_queue *queue,
                                 struct v4l2_requestbuffers *rb)
{
        unsigned int nbuffers = rb->count;
        unsigned int size;
        int ret;

        if (rb->type != queue->type)
                return -EINVAL;

        queue->ops->queue_prepare(queue, &nbuffers, &size);
        if (size == 0)
                return -EINVAL;

        nbuffers = min_t(unsigned int, nbuffers, ISP_VIDEO_MAX_BUFFERS);

        mutex_lock(&queue->lock);

        ret = isp_video_queue_alloc(queue, nbuffers, size, rb->memory);
        if (ret < 0)
                goto done;

        rb->count = ret;
        ret = 0;

done:
        mutex_unlock(&queue->lock);
        return ret;
}
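
/*
 * Wiring sketch (editorial illustration, not part of the original file; the
 * my_vidioc_reqbufs() and to_isp_video_fh() names are assumptions). The queue
 * helpers in this section are meant to back the matching v4l2_ioctl_ops
 * entries, e.g.:
 *
 *      static int my_vidioc_reqbufs(struct file *file, void *fh,
 *                                   struct v4l2_requestbuffers *rb)
 *      {
 *              struct isp_video_fh *vfh = to_isp_video_fh(fh);
 *
 *              return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
 *      }
 */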
/**
 * omap3isp_video_queue_querybuf - Query the status of a buffer in a queue
 *
 * This function is intended to be used as a VIDIOC_QUERYBUF ioctl handler. It
 * returns the status of a given video buffer.
 *
 * Return 0 on success or -EINVAL if the buffer type or index are invalid.
 */
int omap3isp_video_queue_querybuf(struct isp_video_queue *queue,
                                  struct v4l2_buffer *vbuf)
{
        struct isp_video_buffer *buf;
        int ret = 0;

        if (vbuf->type != queue->type)
                return -EINVAL;

        mutex_lock(&queue->lock);

        if (vbuf->index >= queue->count) {
                ret = -EINVAL;
                goto done;
        }

        buf = queue->buffers[vbuf->index];
        isp_video_buffer_query(buf, vbuf);

done:
        mutex_unlock(&queue->lock);
        return ret;
}
/**
 * omap3isp_video_queue_qbuf - Queue a buffer
 *
 * This function is intended to be used as a VIDIOC_QBUF ioctl handler.
 *
 * The v4l2_buffer structure passed from userspace is first sanity tested. If
 * sane, the buffer is then processed and added to the main queue and, if the
 * queue is streaming, to the IRQ queue.
 *
 * Before being enqueued, USERPTR buffers are checked for address changes. If
 * the buffer has a different userspace address, the old memory area is
 * unlocked and the new memory area is locked.
 */
int omap3isp_video_queue_qbuf(struct isp_video_queue *queue,
                              struct v4l2_buffer *vbuf)
{
        struct isp_video_buffer *buf;
        unsigned long flags;
        int ret = -EINVAL;

        if (vbuf->type != queue->type)
                return -EINVAL;

        mutex_lock(&queue->lock);

        if (vbuf->index >= queue->count)
                goto done;

        buf = queue->buffers[vbuf->index];

        if (vbuf->memory != buf->vbuf.memory)
                goto done;

        if (buf->state != ISP_BUF_STATE_IDLE)
                goto done;

        if (vbuf->memory == V4L2_MEMORY_USERPTR &&
            vbuf->length < buf->vbuf.length)
                goto done;

        if (vbuf->memory == V4L2_MEMORY_USERPTR &&
            vbuf->m.userptr != buf->vbuf.m.userptr) {
                isp_video_buffer_cleanup(buf);
                buf->vbuf.m.userptr = vbuf->m.userptr;
                buf->prepared = 0;
        }

        if (!buf->prepared) {
                ret = isp_video_buffer_prepare(buf);
                if (ret < 0)
                        goto done;
                buf->prepared = 1;
        }

        isp_video_buffer_cache_sync(buf);

        buf->state = ISP_BUF_STATE_QUEUED;
        list_add_tail(&buf->stream, &queue->queue);

        if (queue->streaming) {
                spin_lock_irqsave(&queue->irqlock, flags);
                queue->ops->buffer_queue(buf);
                spin_unlock_irqrestore(&queue->irqlock, flags);
        }

        ret = 0;

done:
        mutex_unlock(&queue->lock);
        return ret;
}
/**
 * omap3isp_video_queue_dqbuf - Dequeue a buffer
 *
 * This function is intended to be used as a VIDIOC_DQBUF ioctl handler.
 *
 * Wait until a buffer is ready to be dequeued, remove it from the queue and
 * copy its information to the v4l2_buffer structure.
 *
 * If the nonblocking argument is not zero and no buffer is ready, return
 * -EAGAIN immediately instead of waiting.
 *
 * If no buffer has been enqueued, or if the requested buffer type doesn't
 * match the queue type, return -EINVAL.
 */
int omap3isp_video_queue_dqbuf(struct isp_video_queue *queue,
                               struct v4l2_buffer *vbuf, int nonblocking)
{
        struct isp_video_buffer *buf;
        int ret;

        if (vbuf->type != queue->type)
                return -EINVAL;

        mutex_lock(&queue->lock);

        if (list_empty(&queue->queue)) {
                ret = -EINVAL;
                goto done;
        }

        buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);
        ret = isp_video_buffer_wait(buf, nonblocking);
        if (ret < 0)
                goto done;

        list_del(&buf->stream);

        isp_video_buffer_query(buf, vbuf);
        buf->state = ISP_BUF_STATE_IDLE;
        vbuf->flags &= ~V4L2_BUF_FLAG_QUEUED;

done:
        mutex_unlock(&queue->lock);
        return ret;
}
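
/*
 * Userspace view (editorial illustration, not part of the original file): a
 * minimal capture sequence against these handlers queues a buffer, starts
 * streaming and then dequeues it:
 *
 *      struct v4l2_buffer b = {
 *              .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *              .memory = V4L2_MEMORY_MMAP,
 *              .index  = 0,
 *      };
 *
 *      ioctl(fd, VIDIOC_QBUF, &b);
 *      ioctl(fd, VIDIOC_STREAMON, &b.type);
 *      ioctl(fd, VIDIOC_DQBUF, &b);
 *
 * The DQBUF call blocks until the buffer reaches the DONE or ERROR state, or
 * fails with EAGAIN if the device was opened with O_NONBLOCK.
 */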
/**
 * omap3isp_video_queue_streamon - Start streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMON ioctl handler. It
 * starts streaming on the queue and calls the buffer_queue operation for all
 * queued buffers.
 *
 * Return 0 on success.
 */
int omap3isp_video_queue_streamon(struct isp_video_queue *queue)
{
        struct isp_video_buffer *buf;
        unsigned long flags;

        mutex_lock(&queue->lock);

        if (queue->streaming)
                goto done;

        queue->streaming = 1;

        spin_lock_irqsave(&queue->irqlock, flags);
        list_for_each_entry(buf, &queue->queue, stream)
                queue->ops->buffer_queue(buf);
        spin_unlock_irqrestore(&queue->irqlock, flags);

done:
        mutex_unlock(&queue->lock);
        return 0;
}
/**
 * omap3isp_video_queue_streamoff - Stop streaming
 *
 * This function is intended to be used as a VIDIOC_STREAMOFF ioctl handler. It
 * stops streaming on the queue and wakes up all the buffers.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers
 * and/or delayed works before calling this function to make sure no buffer
 * will be touched by the driver and/or hardware.
 */
void omap3isp_video_queue_streamoff(struct isp_video_queue *queue)
{
        struct isp_video_buffer *buf;
        unsigned long flags;
        unsigned int i;

        mutex_lock(&queue->lock);

        if (!queue->streaming)
                goto done;

        queue->streaming = 0;

        spin_lock_irqsave(&queue->irqlock, flags);
        for (i = 0; i < queue->count; ++i) {
                buf = queue->buffers[i];

                if (buf->state == ISP_BUF_STATE_ACTIVE)
                        wake_up(&buf->wait);

                buf->state = ISP_BUF_STATE_IDLE;
        }
        spin_unlock_irqrestore(&queue->irqlock, flags);

        INIT_LIST_HEAD(&queue->queue);

done:
        mutex_unlock(&queue->lock);
}
/**
 * omap3isp_video_queue_discard_done - Discard all buffers marked as DONE
 *
 * This function is intended to be used with suspend/resume operations. It
 * discards all 'done' buffers as they would be too old to be requested after
 * resume.
 *
 * Drivers must stop the hardware and synchronize with interrupt handlers
 * and/or delayed works before calling this function to make sure no buffer
 * will be touched by the driver and/or hardware.
 */
void omap3isp_video_queue_discard_done(struct isp_video_queue *queue)
{
        struct isp_video_buffer *buf;
        unsigned int i;

        mutex_lock(&queue->lock);

        if (!queue->streaming)
                goto done;

        for (i = 0; i < queue->count; ++i) {
                buf = queue->buffers[i];

                if (buf->state == ISP_BUF_STATE_DONE)
                        buf->state = ISP_BUF_STATE_ERROR;
        }

done:
        mutex_unlock(&queue->lock);
}
static void isp_video_queue_vm_open(struct vm_area_struct *vma)
{
        struct isp_video_buffer *buf = vma->vm_private_data;

        buf->vma_use_count++;
}

static void isp_video_queue_vm_close(struct vm_area_struct *vma)
{
        struct isp_video_buffer *buf = vma->vm_private_data;

        buf->vma_use_count--;
}

static const struct vm_operations_struct isp_video_queue_vm_ops = {
        .open = isp_video_queue_vm_open,
        .close = isp_video_queue_vm_close,
};
/**
 * omap3isp_video_queue_mmap - Map buffers to userspace
 *
 * This function is intended to be used as an mmap() file operation handler. It
 * maps a buffer to userspace based on the VMA offset.
 *
 * Only buffers of memory type MMAP are supported.
 */
int omap3isp_video_queue_mmap(struct isp_video_queue *queue,
                              struct vm_area_struct *vma)
{
        struct isp_video_buffer *uninitialized_var(buf);
        unsigned long size;
        unsigned int i;
        int ret = 0;

        mutex_lock(&queue->lock);

        for (i = 0; i < queue->count; ++i) {
                buf = queue->buffers[i];
                if ((buf->vbuf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff)
                        break;
        }

        if (i == queue->count) {
                ret = -EINVAL;
                goto done;
        }

        size = vma->vm_end - vma->vm_start;

        if (buf->vbuf.memory != V4L2_MEMORY_MMAP ||
            size != PAGE_ALIGN(buf->vbuf.length)) {
                ret = -EINVAL;
                goto done;
        }

        /* dma_mmap_coherent() uses vm_pgoff as an offset inside the buffer
         * while we used it to identify the buffer and want to map the whole
         * buffer.
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(queue->dev, vma, buf->vaddr, buf->paddr, size);
        if (ret < 0)
                goto done;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &isp_video_queue_vm_ops;
        vma->vm_private_data = buf;
        isp_video_queue_vm_open(vma);

done:
        mutex_unlock(&queue->lock);
        return ret;
}
/**
 * omap3isp_video_queue_poll - Poll video queue state
 *
 * This function is intended to be used as a poll() file operation handler. It
 * polls the state of the video buffer at the front of the queue and returns an
 * events mask.
 *
 * If no buffer is present at the front of the queue, POLLERR is returned.
 */
unsigned int omap3isp_video_queue_poll(struct isp_video_queue *queue,
                                       struct file *file, poll_table *wait)
{
        struct isp_video_buffer *buf;
        unsigned int mask = 0;

        mutex_lock(&queue->lock);
        if (list_empty(&queue->queue)) {
                mask |= POLLERR;
                goto done;
        }
        buf = list_first_entry(&queue->queue, struct isp_video_buffer, stream);

        poll_wait(file, &buf->wait, wait);
        if (buf->state == ISP_BUF_STATE_DONE ||
            buf->state == ISP_BUF_STATE_ERROR) {
                if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                        mask |= POLLIN | POLLRDNORM;
                else
                        mask |= POLLOUT | POLLWRNORM;
        }

done:
        mutex_unlock(&queue->lock);
        return mask;
}
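
/*
 * Wiring sketch (editorial illustration, not part of the original file; the
 * my_* names and the use of file->private_data as the v4l2_fh pointer are
 * assumptions). The mmap and poll helpers above are meant to be called from
 * the video device file operations, e.g.:
 *
 *      static int my_video_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
 *
 *              return omap3isp_video_queue_mmap(&vfh->queue, vma);
 *      }
 *
 *      static unsigned int my_video_poll(struct file *file, poll_table *wait)
 *      {
 *              struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
 *
 *              return omap3isp_video_queue_poll(&vfh->queue, file, wait);
 *      }
 */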