/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>
26 #define MAGIC_BUFFER 0x20070728
27 #define MAGIC_CHECK(is, should) \
29 if (unlikely((is) != (should))) { \
31 "magic mismatch: %x (expected %x)\n", \
38 module_param(debug
, int, 0644);
40 MODULE_DESCRIPTION("helper module to manage video4linux buffers");
41 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
42 MODULE_LICENSE("GPL");
44 #define dprintk(level, fmt, arg...) \
47 printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
50 /* --------------------------------------------------------------------- */
52 #define CALL(q, f, arg...) \
53 ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
55 struct videobuf_buffer
*videobuf_alloc_vb(struct videobuf_queue
*q
)
57 struct videobuf_buffer
*vb
;
59 BUG_ON(q
->msize
< sizeof(*vb
));
61 if (!q
->int_ops
|| !q
->int_ops
->alloc_vb
) {
62 printk(KERN_ERR
"No specific ops defined!\n");
66 vb
= q
->int_ops
->alloc_vb(q
->msize
);
68 init_waitqueue_head(&vb
->done
);
69 vb
->magic
= MAGIC_BUFFER
;
74 EXPORT_SYMBOL_GPL(videobuf_alloc_vb
);
76 #define WAITON_CONDITION (vb->state != VIDEOBUF_ACTIVE &&\
77 vb->state != VIDEOBUF_QUEUED)
78 int videobuf_waiton(struct videobuf_buffer
*vb
, int non_blocking
, int intr
)
80 MAGIC_CHECK(vb
->magic
, MAGIC_BUFFER
);
90 return wait_event_interruptible(vb
->done
, WAITON_CONDITION
);
92 wait_event(vb
->done
, WAITON_CONDITION
);
96 EXPORT_SYMBOL_GPL(videobuf_waiton
);
98 int videobuf_iolock(struct videobuf_queue
*q
, struct videobuf_buffer
*vb
,
99 struct v4l2_framebuffer
*fbuf
)
101 MAGIC_CHECK(vb
->magic
, MAGIC_BUFFER
);
102 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
104 return CALL(q
, iolock
, q
, vb
, fbuf
);
106 EXPORT_SYMBOL_GPL(videobuf_iolock
);
108 void *videobuf_queue_to_vaddr(struct videobuf_queue
*q
,
109 struct videobuf_buffer
*buf
)
111 if (q
->int_ops
->vaddr
)
112 return q
->int_ops
->vaddr(buf
);
115 EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr
);
117 /* --------------------------------------------------------------------- */
120 void videobuf_queue_core_init(struct videobuf_queue
*q
,
121 const struct videobuf_queue_ops
*ops
,
124 enum v4l2_buf_type type
,
125 enum v4l2_field field
,
128 struct videobuf_qtype_ops
*int_ops
)
131 memset(q
, 0, sizeof(*q
));
132 q
->irqlock
= irqlock
;
139 q
->int_ops
= int_ops
;
141 /* All buffer operations are mandatory */
142 BUG_ON(!q
->ops
->buf_setup
);
143 BUG_ON(!q
->ops
->buf_prepare
);
144 BUG_ON(!q
->ops
->buf_queue
);
145 BUG_ON(!q
->ops
->buf_release
);
147 /* Lock is mandatory for queue_cancel to work */
150 /* Having implementations for abstract methods are mandatory */
153 mutex_init(&q
->vb_lock
);
154 init_waitqueue_head(&q
->wait
);
155 INIT_LIST_HEAD(&q
->stream
);
157 EXPORT_SYMBOL_GPL(videobuf_queue_core_init
);
159 /* Locking: Only usage in bttv unsafe find way to remove */
160 int videobuf_queue_is_busy(struct videobuf_queue
*q
)
164 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
167 dprintk(1, "busy: streaming active\n");
171 dprintk(1, "busy: pending read #1\n");
175 dprintk(1, "busy: pending read #2\n");
178 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
179 if (NULL
== q
->bufs
[i
])
181 if (q
->bufs
[i
]->map
) {
182 dprintk(1, "busy: buffer #%d mapped\n", i
);
185 if (q
->bufs
[i
]->state
== VIDEOBUF_QUEUED
) {
186 dprintk(1, "busy: buffer #%d queued\n", i
);
189 if (q
->bufs
[i
]->state
== VIDEOBUF_ACTIVE
) {
190 dprintk(1, "busy: buffer #%d avtive\n", i
);
196 EXPORT_SYMBOL_GPL(videobuf_queue_is_busy
);
199 * __videobuf_free() - free all the buffers and their control structures
201 * This function can only be called if streaming/reading is off, i.e. no buffers
202 * are under control of the driver.
204 /* Locking: Caller holds q->vb_lock */
205 static int __videobuf_free(struct videobuf_queue
*q
)
209 dprintk(1, "%s\n", __func__
);
213 if (q
->streaming
|| q
->reading
) {
214 dprintk(1, "Cannot free buffers when streaming or reading\n");
218 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
220 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++)
221 if (q
->bufs
[i
] && q
->bufs
[i
]->map
) {
222 dprintk(1, "Cannot free mmapped buffers\n");
226 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
227 if (NULL
== q
->bufs
[i
])
229 q
->ops
->buf_release(q
, q
->bufs
[i
]);
237 /* Locking: Caller holds q->vb_lock */
238 void videobuf_queue_cancel(struct videobuf_queue
*q
)
240 unsigned long flags
= 0;
245 wake_up_interruptible_sync(&q
->wait
);
247 /* remove queued buffers from list */
248 spin_lock_irqsave(q
->irqlock
, flags
);
249 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
250 if (NULL
== q
->bufs
[i
])
252 if (q
->bufs
[i
]->state
== VIDEOBUF_QUEUED
) {
253 list_del(&q
->bufs
[i
]->queue
);
254 q
->bufs
[i
]->state
= VIDEOBUF_ERROR
;
255 wake_up_all(&q
->bufs
[i
]->done
);
258 spin_unlock_irqrestore(q
->irqlock
, flags
);
260 /* free all buffers + clear queue */
261 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
262 if (NULL
== q
->bufs
[i
])
264 q
->ops
->buf_release(q
, q
->bufs
[i
]);
266 INIT_LIST_HEAD(&q
->stream
);
268 EXPORT_SYMBOL_GPL(videobuf_queue_cancel
);
270 /* --------------------------------------------------------------------- */
272 /* Locking: Caller holds q->vb_lock */
273 enum v4l2_field
videobuf_next_field(struct videobuf_queue
*q
)
275 enum v4l2_field field
= q
->field
;
277 BUG_ON(V4L2_FIELD_ANY
== field
);
279 if (V4L2_FIELD_ALTERNATE
== field
) {
280 if (V4L2_FIELD_TOP
== q
->last
) {
281 field
= V4L2_FIELD_BOTTOM
;
282 q
->last
= V4L2_FIELD_BOTTOM
;
284 field
= V4L2_FIELD_TOP
;
285 q
->last
= V4L2_FIELD_TOP
;
290 EXPORT_SYMBOL_GPL(videobuf_next_field
);
292 /* Locking: Caller holds q->vb_lock */
293 static void videobuf_status(struct videobuf_queue
*q
, struct v4l2_buffer
*b
,
294 struct videobuf_buffer
*vb
, enum v4l2_buf_type type
)
296 MAGIC_CHECK(vb
->magic
, MAGIC_BUFFER
);
297 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
302 b
->memory
= vb
->memory
;
304 case V4L2_MEMORY_MMAP
:
305 b
->m
.offset
= vb
->boff
;
306 b
->length
= vb
->bsize
;
308 case V4L2_MEMORY_USERPTR
:
309 b
->m
.userptr
= vb
->baddr
;
310 b
->length
= vb
->bsize
;
312 case V4L2_MEMORY_OVERLAY
:
313 b
->m
.offset
= vb
->boff
;
319 b
->flags
|= V4L2_BUF_FLAG_MAPPED
;
322 case VIDEOBUF_PREPARED
:
323 case VIDEOBUF_QUEUED
:
324 case VIDEOBUF_ACTIVE
:
325 b
->flags
|= V4L2_BUF_FLAG_QUEUED
;
328 b
->flags
|= V4L2_BUF_FLAG_ERROR
;
331 b
->flags
|= V4L2_BUF_FLAG_DONE
;
333 case VIDEOBUF_NEEDS_INIT
:
339 if (vb
->input
!= UNSET
) {
340 b
->flags
|= V4L2_BUF_FLAG_INPUT
;
341 b
->input
= vb
->input
;
344 b
->field
= vb
->field
;
345 b
->timestamp
= vb
->ts
;
346 b
->bytesused
= vb
->size
;
347 b
->sequence
= vb
->field_count
>> 1;
350 int videobuf_mmap_free(struct videobuf_queue
*q
)
353 mutex_lock(&q
->vb_lock
);
354 ret
= __videobuf_free(q
);
355 mutex_unlock(&q
->vb_lock
);
358 EXPORT_SYMBOL_GPL(videobuf_mmap_free
);
360 /* Locking: Caller holds q->vb_lock */
361 int __videobuf_mmap_setup(struct videobuf_queue
*q
,
362 unsigned int bcount
, unsigned int bsize
,
363 enum v4l2_memory memory
)
368 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
370 err
= __videobuf_free(q
);
374 /* Allocate and initialize buffers */
375 for (i
= 0; i
< bcount
; i
++) {
376 q
->bufs
[i
] = videobuf_alloc_vb(q
);
378 if (NULL
== q
->bufs
[i
])
382 q
->bufs
[i
]->input
= UNSET
;
383 q
->bufs
[i
]->memory
= memory
;
384 q
->bufs
[i
]->bsize
= bsize
;
386 case V4L2_MEMORY_MMAP
:
387 q
->bufs
[i
]->boff
= PAGE_ALIGN(bsize
) * i
;
389 case V4L2_MEMORY_USERPTR
:
390 case V4L2_MEMORY_OVERLAY
:
399 dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i
, bsize
);
403 EXPORT_SYMBOL_GPL(__videobuf_mmap_setup
);
405 int videobuf_mmap_setup(struct videobuf_queue
*q
,
406 unsigned int bcount
, unsigned int bsize
,
407 enum v4l2_memory memory
)
410 mutex_lock(&q
->vb_lock
);
411 ret
= __videobuf_mmap_setup(q
, bcount
, bsize
, memory
);
412 mutex_unlock(&q
->vb_lock
);
415 EXPORT_SYMBOL_GPL(videobuf_mmap_setup
);
417 int videobuf_reqbufs(struct videobuf_queue
*q
,
418 struct v4l2_requestbuffers
*req
)
420 unsigned int size
, count
;
423 if (req
->count
< 1) {
424 dprintk(1, "reqbufs: count invalid (%d)\n", req
->count
);
428 if (req
->memory
!= V4L2_MEMORY_MMAP
&&
429 req
->memory
!= V4L2_MEMORY_USERPTR
&&
430 req
->memory
!= V4L2_MEMORY_OVERLAY
) {
431 dprintk(1, "reqbufs: memory type invalid\n");
435 mutex_lock(&q
->vb_lock
);
436 if (req
->type
!= q
->type
) {
437 dprintk(1, "reqbufs: queue type invalid\n");
443 dprintk(1, "reqbufs: streaming already exists\n");
447 if (!list_empty(&q
->stream
)) {
448 dprintk(1, "reqbufs: stream running\n");
454 if (count
> VIDEO_MAX_FRAME
)
455 count
= VIDEO_MAX_FRAME
;
457 q
->ops
->buf_setup(q
, &count
, &size
);
458 dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
460 (unsigned int)((count
* PAGE_ALIGN(size
)) >> PAGE_SHIFT
));
462 retval
= __videobuf_mmap_setup(q
, count
, size
, req
->memory
);
464 dprintk(1, "reqbufs: mmap setup returned %d\n", retval
);
472 mutex_unlock(&q
->vb_lock
);
475 EXPORT_SYMBOL_GPL(videobuf_reqbufs
);
477 int videobuf_querybuf(struct videobuf_queue
*q
, struct v4l2_buffer
*b
)
481 mutex_lock(&q
->vb_lock
);
482 if (unlikely(b
->type
!= q
->type
)) {
483 dprintk(1, "querybuf: Wrong type.\n");
486 if (unlikely(b
->index
>= VIDEO_MAX_FRAME
)) {
487 dprintk(1, "querybuf: index out of range.\n");
490 if (unlikely(NULL
== q
->bufs
[b
->index
])) {
491 dprintk(1, "querybuf: buffer is null.\n");
495 videobuf_status(q
, b
, q
->bufs
[b
->index
], q
->type
);
499 mutex_unlock(&q
->vb_lock
);
502 EXPORT_SYMBOL_GPL(videobuf_querybuf
);
504 int videobuf_qbuf(struct videobuf_queue
*q
, struct v4l2_buffer
*b
)
506 struct videobuf_buffer
*buf
;
507 enum v4l2_field field
;
508 unsigned long flags
= 0;
511 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
513 if (b
->memory
== V4L2_MEMORY_MMAP
)
514 down_read(¤t
->mm
->mmap_sem
);
516 mutex_lock(&q
->vb_lock
);
519 dprintk(1, "qbuf: Reading running...\n");
523 if (b
->type
!= q
->type
) {
524 dprintk(1, "qbuf: Wrong type.\n");
527 if (b
->index
>= VIDEO_MAX_FRAME
) {
528 dprintk(1, "qbuf: index out of range.\n");
531 buf
= q
->bufs
[b
->index
];
533 dprintk(1, "qbuf: buffer is null.\n");
536 MAGIC_CHECK(buf
->magic
, MAGIC_BUFFER
);
537 if (buf
->memory
!= b
->memory
) {
538 dprintk(1, "qbuf: memory type is wrong.\n");
541 if (buf
->state
!= VIDEOBUF_NEEDS_INIT
&& buf
->state
!= VIDEOBUF_IDLE
) {
542 dprintk(1, "qbuf: buffer is already queued or active.\n");
546 if (b
->flags
& V4L2_BUF_FLAG_INPUT
) {
547 if (b
->input
>= q
->inputs
) {
548 dprintk(1, "qbuf: wrong input.\n");
551 buf
->input
= b
->input
;
557 case V4L2_MEMORY_MMAP
:
558 if (0 == buf
->baddr
) {
559 dprintk(1, "qbuf: mmap requested "
560 "but buffer addr is zero!\n");
563 if (q
->type
== V4L2_BUF_TYPE_VIDEO_OUTPUT
564 || q
->type
== V4L2_BUF_TYPE_VBI_OUTPUT
565 || q
->type
== V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
) {
566 buf
->size
= b
->bytesused
;
567 buf
->field
= b
->field
;
568 buf
->ts
= b
->timestamp
;
571 case V4L2_MEMORY_USERPTR
:
572 if (b
->length
< buf
->bsize
) {
573 dprintk(1, "qbuf: buffer length is not enough\n");
576 if (VIDEOBUF_NEEDS_INIT
!= buf
->state
&&
577 buf
->baddr
!= b
->m
.userptr
)
578 q
->ops
->buf_release(q
, buf
);
579 buf
->baddr
= b
->m
.userptr
;
581 case V4L2_MEMORY_OVERLAY
:
582 buf
->boff
= b
->m
.offset
;
585 dprintk(1, "qbuf: wrong memory type\n");
589 dprintk(1, "qbuf: requesting next field\n");
590 field
= videobuf_next_field(q
);
591 retval
= q
->ops
->buf_prepare(q
, buf
, field
);
593 dprintk(1, "qbuf: buffer_prepare returned %d\n", retval
);
597 list_add_tail(&buf
->stream
, &q
->stream
);
599 spin_lock_irqsave(q
->irqlock
, flags
);
600 q
->ops
->buf_queue(q
, buf
);
601 spin_unlock_irqrestore(q
->irqlock
, flags
);
603 dprintk(1, "qbuf: succeeded\n");
605 wake_up_interruptible_sync(&q
->wait
);
608 mutex_unlock(&q
->vb_lock
);
610 if (b
->memory
== V4L2_MEMORY_MMAP
)
611 up_read(¤t
->mm
->mmap_sem
);
615 EXPORT_SYMBOL_GPL(videobuf_qbuf
);
617 /* Locking: Caller holds q->vb_lock */
618 static int stream_next_buffer_check_queue(struct videobuf_queue
*q
, int noblock
)
624 dprintk(1, "next_buffer: Not streaming\n");
629 if (list_empty(&q
->stream
)) {
632 dprintk(2, "next_buffer: no buffers to dequeue\n");
635 dprintk(2, "next_buffer: waiting on buffer\n");
637 /* Drop lock to avoid deadlock with qbuf */
638 mutex_unlock(&q
->vb_lock
);
640 /* Checking list_empty and streaming is safe without
641 * locks because we goto checks to validate while
642 * holding locks before proceeding */
643 retval
= wait_event_interruptible(q
->wait
,
644 !list_empty(&q
->stream
) || !q
->streaming
);
645 mutex_lock(&q
->vb_lock
);
660 /* Locking: Caller holds q->vb_lock */
661 static int stream_next_buffer(struct videobuf_queue
*q
,
662 struct videobuf_buffer
**vb
, int nonblocking
)
665 struct videobuf_buffer
*buf
= NULL
;
667 retval
= stream_next_buffer_check_queue(q
, nonblocking
);
671 buf
= list_entry(q
->stream
.next
, struct videobuf_buffer
, stream
);
672 retval
= videobuf_waiton(buf
, nonblocking
, 1);
681 int videobuf_dqbuf(struct videobuf_queue
*q
,
682 struct v4l2_buffer
*b
, int nonblocking
)
684 struct videobuf_buffer
*buf
= NULL
;
687 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
689 memset(b
, 0, sizeof(*b
));
690 mutex_lock(&q
->vb_lock
);
692 retval
= stream_next_buffer(q
, &buf
, nonblocking
);
694 dprintk(1, "dqbuf: next_buffer error: %i\n", retval
);
698 switch (buf
->state
) {
700 dprintk(1, "dqbuf: state is error\n");
703 dprintk(1, "dqbuf: state is done\n");
706 dprintk(1, "dqbuf: state invalid\n");
710 CALL(q
, sync
, q
, buf
);
711 videobuf_status(q
, b
, buf
, q
->type
);
712 list_del(&buf
->stream
);
713 buf
->state
= VIDEOBUF_IDLE
;
714 b
->flags
&= ~V4L2_BUF_FLAG_DONE
;
716 mutex_unlock(&q
->vb_lock
);
719 EXPORT_SYMBOL_GPL(videobuf_dqbuf
);
721 int videobuf_streamon(struct videobuf_queue
*q
)
723 struct videobuf_buffer
*buf
;
724 unsigned long flags
= 0;
727 mutex_lock(&q
->vb_lock
);
735 spin_lock_irqsave(q
->irqlock
, flags
);
736 list_for_each_entry(buf
, &q
->stream
, stream
)
737 if (buf
->state
== VIDEOBUF_PREPARED
)
738 q
->ops
->buf_queue(q
, buf
);
739 spin_unlock_irqrestore(q
->irqlock
, flags
);
741 wake_up_interruptible_sync(&q
->wait
);
743 mutex_unlock(&q
->vb_lock
);
746 EXPORT_SYMBOL_GPL(videobuf_streamon
);
748 /* Locking: Caller holds q->vb_lock */
749 static int __videobuf_streamoff(struct videobuf_queue
*q
)
754 videobuf_queue_cancel(q
);
759 int videobuf_streamoff(struct videobuf_queue
*q
)
763 mutex_lock(&q
->vb_lock
);
764 retval
= __videobuf_streamoff(q
);
765 mutex_unlock(&q
->vb_lock
);
769 EXPORT_SYMBOL_GPL(videobuf_streamoff
);
771 /* Locking: Caller holds q->vb_lock */
772 static ssize_t
videobuf_read_zerocopy(struct videobuf_queue
*q
,
774 size_t count
, loff_t
*ppos
)
776 enum v4l2_field field
;
777 unsigned long flags
= 0;
780 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
783 q
->read_buf
= videobuf_alloc_vb(q
);
784 if (NULL
== q
->read_buf
)
787 q
->read_buf
->memory
= V4L2_MEMORY_USERPTR
;
788 q
->read_buf
->baddr
= (unsigned long)data
;
789 q
->read_buf
->bsize
= count
;
791 field
= videobuf_next_field(q
);
792 retval
= q
->ops
->buf_prepare(q
, q
->read_buf
, field
);
796 /* start capture & wait */
797 spin_lock_irqsave(q
->irqlock
, flags
);
798 q
->ops
->buf_queue(q
, q
->read_buf
);
799 spin_unlock_irqrestore(q
->irqlock
, flags
);
800 retval
= videobuf_waiton(q
->read_buf
, 0, 0);
802 CALL(q
, sync
, q
, q
->read_buf
);
803 if (VIDEOBUF_ERROR
== q
->read_buf
->state
)
806 retval
= q
->read_buf
->size
;
811 q
->ops
->buf_release(q
, q
->read_buf
);
817 static int __videobuf_copy_to_user(struct videobuf_queue
*q
,
818 struct videobuf_buffer
*buf
,
819 char __user
*data
, size_t count
,
822 void *vaddr
= CALL(q
, vaddr
, buf
);
824 /* copy to userspace */
825 if (count
> buf
->size
- q
->read_off
)
826 count
= buf
->size
- q
->read_off
;
828 if (copy_to_user(data
, vaddr
+ q
->read_off
, count
))
834 static int __videobuf_copy_stream(struct videobuf_queue
*q
,
835 struct videobuf_buffer
*buf
,
836 char __user
*data
, size_t count
, size_t pos
,
837 int vbihack
, int nonblocking
)
839 unsigned int *fc
= CALL(q
, vaddr
, buf
);
842 /* dirty, undocumented hack -- pass the frame counter
843 * within the last four bytes of each vbi data block.
844 * We need that one to maintain backward compatibility
845 * to all vbi decoding software out there ... */
846 fc
+= (buf
->size
>> 2) - 1;
847 *fc
= buf
->field_count
>> 1;
848 dprintk(1, "vbihack: %d\n", *fc
);
851 /* copy stuff using the common method */
852 count
= __videobuf_copy_to_user(q
, buf
, data
, count
, nonblocking
);
854 if ((count
== -EFAULT
) && (pos
== 0))
860 ssize_t
videobuf_read_one(struct videobuf_queue
*q
,
861 char __user
*data
, size_t count
, loff_t
*ppos
,
864 enum v4l2_field field
;
865 unsigned long flags
= 0;
866 unsigned size
= 0, nbufs
= 1;
869 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
871 mutex_lock(&q
->vb_lock
);
873 q
->ops
->buf_setup(q
, &nbufs
, &size
);
875 if (NULL
== q
->read_buf
&&
878 retval
= videobuf_read_zerocopy(q
, data
, count
, ppos
);
879 if (retval
>= 0 || retval
== -EIO
)
882 /* fallback to kernel bounce buffer on failures */
885 if (NULL
== q
->read_buf
) {
886 /* need to capture a new frame */
888 q
->read_buf
= videobuf_alloc_vb(q
);
890 dprintk(1, "video alloc=0x%p\n", q
->read_buf
);
891 if (NULL
== q
->read_buf
)
893 q
->read_buf
->memory
= V4L2_MEMORY_USERPTR
;
894 q
->read_buf
->bsize
= count
; /* preferred size */
895 field
= videobuf_next_field(q
);
896 retval
= q
->ops
->buf_prepare(q
, q
->read_buf
, field
);
904 spin_lock_irqsave(q
->irqlock
, flags
);
905 q
->ops
->buf_queue(q
, q
->read_buf
);
906 spin_unlock_irqrestore(q
->irqlock
, flags
);
911 /* wait until capture is done */
912 retval
= videobuf_waiton(q
->read_buf
, nonblocking
, 1);
916 CALL(q
, sync
, q
, q
->read_buf
);
918 if (VIDEOBUF_ERROR
== q
->read_buf
->state
) {
919 /* catch I/O errors */
920 q
->ops
->buf_release(q
, q
->read_buf
);
927 /* Copy to userspace */
928 retval
= __videobuf_copy_to_user(q
, q
->read_buf
, data
, count
, nonblocking
);
932 q
->read_off
+= retval
;
933 if (q
->read_off
== q
->read_buf
->size
) {
934 /* all data copied, cleanup */
935 q
->ops
->buf_release(q
, q
->read_buf
);
941 mutex_unlock(&q
->vb_lock
);
944 EXPORT_SYMBOL_GPL(videobuf_read_one
);
946 /* Locking: Caller holds q->vb_lock */
947 static int __videobuf_read_start(struct videobuf_queue
*q
)
949 enum v4l2_field field
;
950 unsigned long flags
= 0;
951 unsigned int count
= 0, size
= 0;
954 q
->ops
->buf_setup(q
, &count
, &size
);
957 if (count
> VIDEO_MAX_FRAME
)
958 count
= VIDEO_MAX_FRAME
;
959 size
= PAGE_ALIGN(size
);
961 err
= __videobuf_mmap_setup(q
, count
, size
, V4L2_MEMORY_USERPTR
);
967 for (i
= 0; i
< count
; i
++) {
968 field
= videobuf_next_field(q
);
969 err
= q
->ops
->buf_prepare(q
, q
->bufs
[i
], field
);
972 list_add_tail(&q
->bufs
[i
]->stream
, &q
->stream
);
974 spin_lock_irqsave(q
->irqlock
, flags
);
975 for (i
= 0; i
< count
; i
++)
976 q
->ops
->buf_queue(q
, q
->bufs
[i
]);
977 spin_unlock_irqrestore(q
->irqlock
, flags
);
982 static void __videobuf_read_stop(struct videobuf_queue
*q
)
986 videobuf_queue_cancel(q
);
988 INIT_LIST_HEAD(&q
->stream
);
989 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
990 if (NULL
== q
->bufs
[i
])
998 int videobuf_read_start(struct videobuf_queue
*q
)
1002 mutex_lock(&q
->vb_lock
);
1003 rc
= __videobuf_read_start(q
);
1004 mutex_unlock(&q
->vb_lock
);
1008 EXPORT_SYMBOL_GPL(videobuf_read_start
);
1010 void videobuf_read_stop(struct videobuf_queue
*q
)
1012 mutex_lock(&q
->vb_lock
);
1013 __videobuf_read_stop(q
);
1014 mutex_unlock(&q
->vb_lock
);
1016 EXPORT_SYMBOL_GPL(videobuf_read_stop
);
1018 void videobuf_stop(struct videobuf_queue
*q
)
1020 mutex_lock(&q
->vb_lock
);
1023 __videobuf_streamoff(q
);
1026 __videobuf_read_stop(q
);
1028 mutex_unlock(&q
->vb_lock
);
1030 EXPORT_SYMBOL_GPL(videobuf_stop
);
1032 ssize_t
videobuf_read_stream(struct videobuf_queue
*q
,
1033 char __user
*data
, size_t count
, loff_t
*ppos
,
1034 int vbihack
, int nonblocking
)
1037 unsigned long flags
= 0;
1039 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
1041 dprintk(2, "%s\n", __func__
);
1042 mutex_lock(&q
->vb_lock
);
1047 retval
= __videobuf_read_start(q
);
1054 /* get / wait for data */
1055 if (NULL
== q
->read_buf
) {
1056 q
->read_buf
= list_entry(q
->stream
.next
,
1057 struct videobuf_buffer
,
1059 list_del(&q
->read_buf
->stream
);
1062 rc
= videobuf_waiton(q
->read_buf
, nonblocking
, 1);
1069 if (q
->read_buf
->state
== VIDEOBUF_DONE
) {
1070 rc
= __videobuf_copy_stream(q
, q
->read_buf
, data
+ retval
, count
,
1071 retval
, vbihack
, nonblocking
);
1081 q
->read_off
= q
->read_buf
->size
;
1086 /* requeue buffer when done with copying */
1087 if (q
->read_off
== q
->read_buf
->size
) {
1088 list_add_tail(&q
->read_buf
->stream
,
1090 spin_lock_irqsave(q
->irqlock
, flags
);
1091 q
->ops
->buf_queue(q
, q
->read_buf
);
1092 spin_unlock_irqrestore(q
->irqlock
, flags
);
1100 mutex_unlock(&q
->vb_lock
);
1103 EXPORT_SYMBOL_GPL(videobuf_read_stream
);
1105 unsigned int videobuf_poll_stream(struct file
*file
,
1106 struct videobuf_queue
*q
,
1109 struct videobuf_buffer
*buf
= NULL
;
1110 unsigned int rc
= 0;
1112 mutex_lock(&q
->vb_lock
);
1114 if (!list_empty(&q
->stream
))
1115 buf
= list_entry(q
->stream
.next
,
1116 struct videobuf_buffer
, stream
);
1119 __videobuf_read_start(q
);
1122 } else if (NULL
== q
->read_buf
) {
1123 q
->read_buf
= list_entry(q
->stream
.next
,
1124 struct videobuf_buffer
,
1126 list_del(&q
->read_buf
->stream
);
1135 poll_wait(file
, &buf
->done
, wait
);
1136 if (buf
->state
== VIDEOBUF_DONE
||
1137 buf
->state
== VIDEOBUF_ERROR
) {
1139 case V4L2_BUF_TYPE_VIDEO_OUTPUT
:
1140 case V4L2_BUF_TYPE_VBI_OUTPUT
:
1141 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
:
1142 rc
= POLLOUT
| POLLWRNORM
;
1145 rc
= POLLIN
| POLLRDNORM
;
1150 mutex_unlock(&q
->vb_lock
);
1153 EXPORT_SYMBOL_GPL(videobuf_poll_stream
);
1155 int videobuf_mmap_mapper(struct videobuf_queue
*q
, struct vm_area_struct
*vma
)
1160 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
1162 if (!(vma
->vm_flags
& VM_WRITE
) || !(vma
->vm_flags
& VM_SHARED
)) {
1163 dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
1167 mutex_lock(&q
->vb_lock
);
1168 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
1169 struct videobuf_buffer
*buf
= q
->bufs
[i
];
1171 if (buf
&& buf
->memory
== V4L2_MEMORY_MMAP
&&
1172 buf
->boff
== (vma
->vm_pgoff
<< PAGE_SHIFT
)) {
1173 rc
= CALL(q
, mmap_mapper
, q
, buf
, vma
);
1177 mutex_unlock(&q
->vb_lock
);
1181 EXPORT_SYMBOL_GPL(videobuf_mmap_mapper
);
#ifdef CONFIG_VIDEO_V4L1_COMPAT
/*
 * V4L1 CGMBUF compatibility: request @count MMAP buffers and report
 * their offsets and total mmap size in the legacy video_mbuf layout.
 */
int videobuf_cgmbuf(struct videobuf_queue *q,
		    struct video_mbuf *mbuf, int count)
{
	struct v4l2_requestbuffers req;
	int rc, i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(&req, 0, sizeof(req));
	req.type   = q->type;
	req.count  = count;
	req.memory = V4L2_MEMORY_MMAP;
	rc = videobuf_reqbufs(q, &req);
	if (rc < 0)
		return rc;

	mbuf->frames = req.count;
	mbuf->size   = 0;
	for (i = 0; i < mbuf->frames; i++) {
		mbuf->offsets[i]  = q->bufs[i]->boff;
		mbuf->size       += PAGE_ALIGN(q->bufs[i]->bsize);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_cgmbuf);
#endif