/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>
26 #define MAGIC_BUFFER 0x20070728
27 #define MAGIC_CHECK(is, should) \
29 if (unlikely((is) != (should))) { \
31 "magic mismatch: %x (expected %x)\n", \
38 module_param(debug
, int, 0644);
40 MODULE_DESCRIPTION("helper module to manage video4linux buffers");
41 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
42 MODULE_LICENSE("GPL");
/* Debug print, gated by the "debug" module parameter level. */
#define dprintk(level, fmt, arg...) \
	do { \
		if (debug >= level) \
			printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
	} while (0)
50 /* --------------------------------------------------------------------- */
/* Invoke an optional int_ops callback; evaluates to 0 when the op is absent. */
#define CALL(q, f, arg...) \
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)
55 struct videobuf_buffer
*videobuf_alloc(struct videobuf_queue
*q
)
57 struct videobuf_buffer
*vb
;
59 BUG_ON(q
->msize
< sizeof(*vb
));
61 if (!q
->int_ops
|| !q
->int_ops
->alloc
) {
62 printk(KERN_ERR
"No specific ops defined!\n");
66 vb
= q
->int_ops
->alloc(q
->msize
);
68 init_waitqueue_head(&vb
->done
);
69 vb
->magic
= MAGIC_BUFFER
;
74 EXPORT_SYMBOL_GPL(videobuf_alloc
);
76 #define WAITON_CONDITION (vb->state != VIDEOBUF_ACTIVE &&\
77 vb->state != VIDEOBUF_QUEUED)
78 int videobuf_waiton(struct videobuf_buffer
*vb
, int non_blocking
, int intr
)
80 MAGIC_CHECK(vb
->magic
, MAGIC_BUFFER
);
90 return wait_event_interruptible(vb
->done
, WAITON_CONDITION
);
92 wait_event(vb
->done
, WAITON_CONDITION
);
96 EXPORT_SYMBOL_GPL(videobuf_waiton
);
98 int videobuf_iolock(struct videobuf_queue
*q
, struct videobuf_buffer
*vb
,
99 struct v4l2_framebuffer
*fbuf
)
101 MAGIC_CHECK(vb
->magic
, MAGIC_BUFFER
);
102 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
104 return CALL(q
, iolock
, q
, vb
, fbuf
);
106 EXPORT_SYMBOL_GPL(videobuf_iolock
);
108 void *videobuf_queue_to_vaddr(struct videobuf_queue
*q
,
109 struct videobuf_buffer
*buf
)
111 if (q
->int_ops
->vaddr
)
112 return q
->int_ops
->vaddr(buf
);
115 EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr
);
117 /* --------------------------------------------------------------------- */
120 void videobuf_queue_core_init(struct videobuf_queue
*q
,
121 const struct videobuf_queue_ops
*ops
,
124 enum v4l2_buf_type type
,
125 enum v4l2_field field
,
128 struct videobuf_qtype_ops
*int_ops
)
131 memset(q
, 0, sizeof(*q
));
132 q
->irqlock
= irqlock
;
139 q
->int_ops
= int_ops
;
141 /* All buffer operations are mandatory */
142 BUG_ON(!q
->ops
->buf_setup
);
143 BUG_ON(!q
->ops
->buf_prepare
);
144 BUG_ON(!q
->ops
->buf_queue
);
145 BUG_ON(!q
->ops
->buf_release
);
147 /* Lock is mandatory for queue_cancel to work */
150 /* Having implementations for abstract methods are mandatory */
153 mutex_init(&q
->vb_lock
);
154 init_waitqueue_head(&q
->wait
);
155 INIT_LIST_HEAD(&q
->stream
);
157 EXPORT_SYMBOL_GPL(videobuf_queue_core_init
);
159 /* Locking: Only usage in bttv unsafe find way to remove */
160 int videobuf_queue_is_busy(struct videobuf_queue
*q
)
164 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
167 dprintk(1, "busy: streaming active\n");
171 dprintk(1, "busy: pending read #1\n");
175 dprintk(1, "busy: pending read #2\n");
178 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
179 if (NULL
== q
->bufs
[i
])
181 if (q
->bufs
[i
]->map
) {
182 dprintk(1, "busy: buffer #%d mapped\n", i
);
185 if (q
->bufs
[i
]->state
== VIDEOBUF_QUEUED
) {
186 dprintk(1, "busy: buffer #%d queued\n", i
);
189 if (q
->bufs
[i
]->state
== VIDEOBUF_ACTIVE
) {
190 dprintk(1, "busy: buffer #%d avtive\n", i
);
196 EXPORT_SYMBOL_GPL(videobuf_queue_is_busy
);
198 /* Locking: Caller holds q->vb_lock */
199 void videobuf_queue_cancel(struct videobuf_queue
*q
)
201 unsigned long flags
= 0;
206 wake_up_interruptible_sync(&q
->wait
);
208 /* remove queued buffers from list */
209 spin_lock_irqsave(q
->irqlock
, flags
);
210 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
211 if (NULL
== q
->bufs
[i
])
213 if (q
->bufs
[i
]->state
== VIDEOBUF_QUEUED
) {
214 list_del(&q
->bufs
[i
]->queue
);
215 q
->bufs
[i
]->state
= VIDEOBUF_ERROR
;
216 wake_up_all(&q
->bufs
[i
]->done
);
219 spin_unlock_irqrestore(q
->irqlock
, flags
);
221 /* free all buffers + clear queue */
222 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
223 if (NULL
== q
->bufs
[i
])
225 q
->ops
->buf_release(q
, q
->bufs
[i
]);
227 INIT_LIST_HEAD(&q
->stream
);
229 EXPORT_SYMBOL_GPL(videobuf_queue_cancel
);
231 /* --------------------------------------------------------------------- */
233 /* Locking: Caller holds q->vb_lock */
234 enum v4l2_field
videobuf_next_field(struct videobuf_queue
*q
)
236 enum v4l2_field field
= q
->field
;
238 BUG_ON(V4L2_FIELD_ANY
== field
);
240 if (V4L2_FIELD_ALTERNATE
== field
) {
241 if (V4L2_FIELD_TOP
== q
->last
) {
242 field
= V4L2_FIELD_BOTTOM
;
243 q
->last
= V4L2_FIELD_BOTTOM
;
245 field
= V4L2_FIELD_TOP
;
246 q
->last
= V4L2_FIELD_TOP
;
251 EXPORT_SYMBOL_GPL(videobuf_next_field
);
253 /* Locking: Caller holds q->vb_lock */
254 static void videobuf_status(struct videobuf_queue
*q
, struct v4l2_buffer
*b
,
255 struct videobuf_buffer
*vb
, enum v4l2_buf_type type
)
257 MAGIC_CHECK(vb
->magic
, MAGIC_BUFFER
);
258 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
263 b
->memory
= vb
->memory
;
265 case V4L2_MEMORY_MMAP
:
266 b
->m
.offset
= vb
->boff
;
267 b
->length
= vb
->bsize
;
269 case V4L2_MEMORY_USERPTR
:
270 b
->m
.userptr
= vb
->baddr
;
271 b
->length
= vb
->bsize
;
273 case V4L2_MEMORY_OVERLAY
:
274 b
->m
.offset
= vb
->boff
;
280 b
->flags
|= V4L2_BUF_FLAG_MAPPED
;
283 case VIDEOBUF_PREPARED
:
284 case VIDEOBUF_QUEUED
:
285 case VIDEOBUF_ACTIVE
:
286 b
->flags
|= V4L2_BUF_FLAG_QUEUED
;
289 b
->flags
|= V4L2_BUF_FLAG_ERROR
;
292 b
->flags
|= V4L2_BUF_FLAG_DONE
;
294 case VIDEOBUF_NEEDS_INIT
:
300 if (vb
->input
!= UNSET
) {
301 b
->flags
|= V4L2_BUF_FLAG_INPUT
;
302 b
->input
= vb
->input
;
305 b
->field
= vb
->field
;
306 b
->timestamp
= vb
->ts
;
307 b
->bytesused
= vb
->size
;
308 b
->sequence
= vb
->field_count
>> 1;
311 /* Locking: Caller holds q->vb_lock */
312 static int __videobuf_mmap_free(struct videobuf_queue
*q
)
319 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
321 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++)
322 if (q
->bufs
[i
] && q
->bufs
[i
]->map
)
325 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
326 if (NULL
== q
->bufs
[i
])
328 q
->ops
->buf_release(q
, q
->bufs
[i
]);
336 int videobuf_mmap_free(struct videobuf_queue
*q
)
339 mutex_lock(&q
->vb_lock
);
340 ret
= __videobuf_mmap_free(q
);
341 mutex_unlock(&q
->vb_lock
);
344 EXPORT_SYMBOL_GPL(videobuf_mmap_free
);
346 /* Locking: Caller holds q->vb_lock */
347 int __videobuf_mmap_setup(struct videobuf_queue
*q
,
348 unsigned int bcount
, unsigned int bsize
,
349 enum v4l2_memory memory
)
354 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
356 err
= __videobuf_mmap_free(q
);
360 /* Allocate and initialize buffers */
361 for (i
= 0; i
< bcount
; i
++) {
362 q
->bufs
[i
] = videobuf_alloc(q
);
364 if (NULL
== q
->bufs
[i
])
368 q
->bufs
[i
]->input
= UNSET
;
369 q
->bufs
[i
]->memory
= memory
;
370 q
->bufs
[i
]->bsize
= bsize
;
372 case V4L2_MEMORY_MMAP
:
373 q
->bufs
[i
]->boff
= PAGE_ALIGN(bsize
) * i
;
375 case V4L2_MEMORY_USERPTR
:
376 case V4L2_MEMORY_OVERLAY
:
385 dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i
, bsize
);
389 EXPORT_SYMBOL_GPL(__videobuf_mmap_setup
);
391 int videobuf_mmap_setup(struct videobuf_queue
*q
,
392 unsigned int bcount
, unsigned int bsize
,
393 enum v4l2_memory memory
)
396 mutex_lock(&q
->vb_lock
);
397 ret
= __videobuf_mmap_setup(q
, bcount
, bsize
, memory
);
398 mutex_unlock(&q
->vb_lock
);
401 EXPORT_SYMBOL_GPL(videobuf_mmap_setup
);
403 int videobuf_reqbufs(struct videobuf_queue
*q
,
404 struct v4l2_requestbuffers
*req
)
406 unsigned int size
, count
;
409 if (req
->count
< 1) {
410 dprintk(1, "reqbufs: count invalid (%d)\n", req
->count
);
414 if (req
->memory
!= V4L2_MEMORY_MMAP
&&
415 req
->memory
!= V4L2_MEMORY_USERPTR
&&
416 req
->memory
!= V4L2_MEMORY_OVERLAY
) {
417 dprintk(1, "reqbufs: memory type invalid\n");
421 mutex_lock(&q
->vb_lock
);
422 if (req
->type
!= q
->type
) {
423 dprintk(1, "reqbufs: queue type invalid\n");
429 dprintk(1, "reqbufs: streaming already exists\n");
433 if (!list_empty(&q
->stream
)) {
434 dprintk(1, "reqbufs: stream running\n");
440 if (count
> VIDEO_MAX_FRAME
)
441 count
= VIDEO_MAX_FRAME
;
443 q
->ops
->buf_setup(q
, &count
, &size
);
444 dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
446 (unsigned int)((count
* PAGE_ALIGN(size
)) >> PAGE_SHIFT
));
448 retval
= __videobuf_mmap_setup(q
, count
, size
, req
->memory
);
450 dprintk(1, "reqbufs: mmap setup returned %d\n", retval
);
458 mutex_unlock(&q
->vb_lock
);
461 EXPORT_SYMBOL_GPL(videobuf_reqbufs
);
463 int videobuf_querybuf(struct videobuf_queue
*q
, struct v4l2_buffer
*b
)
467 mutex_lock(&q
->vb_lock
);
468 if (unlikely(b
->type
!= q
->type
)) {
469 dprintk(1, "querybuf: Wrong type.\n");
472 if (unlikely(b
->index
>= VIDEO_MAX_FRAME
)) {
473 dprintk(1, "querybuf: index out of range.\n");
476 if (unlikely(NULL
== q
->bufs
[b
->index
])) {
477 dprintk(1, "querybuf: buffer is null.\n");
481 videobuf_status(q
, b
, q
->bufs
[b
->index
], q
->type
);
485 mutex_unlock(&q
->vb_lock
);
488 EXPORT_SYMBOL_GPL(videobuf_querybuf
);
490 int videobuf_qbuf(struct videobuf_queue
*q
, struct v4l2_buffer
*b
)
492 struct videobuf_buffer
*buf
;
493 enum v4l2_field field
;
494 unsigned long flags
= 0;
497 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
499 if (b
->memory
== V4L2_MEMORY_MMAP
)
500 down_read(¤t
->mm
->mmap_sem
);
502 mutex_lock(&q
->vb_lock
);
505 dprintk(1, "qbuf: Reading running...\n");
509 if (b
->type
!= q
->type
) {
510 dprintk(1, "qbuf: Wrong type.\n");
513 if (b
->index
>= VIDEO_MAX_FRAME
) {
514 dprintk(1, "qbuf: index out of range.\n");
517 buf
= q
->bufs
[b
->index
];
519 dprintk(1, "qbuf: buffer is null.\n");
522 MAGIC_CHECK(buf
->magic
, MAGIC_BUFFER
);
523 if (buf
->memory
!= b
->memory
) {
524 dprintk(1, "qbuf: memory type is wrong.\n");
527 if (buf
->state
!= VIDEOBUF_NEEDS_INIT
&& buf
->state
!= VIDEOBUF_IDLE
) {
528 dprintk(1, "qbuf: buffer is already queued or active.\n");
532 if (b
->flags
& V4L2_BUF_FLAG_INPUT
) {
533 if (b
->input
>= q
->inputs
) {
534 dprintk(1, "qbuf: wrong input.\n");
537 buf
->input
= b
->input
;
543 case V4L2_MEMORY_MMAP
:
544 if (0 == buf
->baddr
) {
545 dprintk(1, "qbuf: mmap requested "
546 "but buffer addr is zero!\n");
549 if (q
->type
== V4L2_BUF_TYPE_VIDEO_OUTPUT
550 || q
->type
== V4L2_BUF_TYPE_VBI_OUTPUT
551 || q
->type
== V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
) {
552 buf
->size
= b
->bytesused
;
553 buf
->field
= b
->field
;
554 buf
->ts
= b
->timestamp
;
557 case V4L2_MEMORY_USERPTR
:
558 if (b
->length
< buf
->bsize
) {
559 dprintk(1, "qbuf: buffer length is not enough\n");
562 if (VIDEOBUF_NEEDS_INIT
!= buf
->state
&&
563 buf
->baddr
!= b
->m
.userptr
)
564 q
->ops
->buf_release(q
, buf
);
565 buf
->baddr
= b
->m
.userptr
;
567 case V4L2_MEMORY_OVERLAY
:
568 buf
->boff
= b
->m
.offset
;
571 dprintk(1, "qbuf: wrong memory type\n");
575 dprintk(1, "qbuf: requesting next field\n");
576 field
= videobuf_next_field(q
);
577 retval
= q
->ops
->buf_prepare(q
, buf
, field
);
579 dprintk(1, "qbuf: buffer_prepare returned %d\n", retval
);
583 list_add_tail(&buf
->stream
, &q
->stream
);
585 spin_lock_irqsave(q
->irqlock
, flags
);
586 q
->ops
->buf_queue(q
, buf
);
587 spin_unlock_irqrestore(q
->irqlock
, flags
);
589 dprintk(1, "qbuf: succeeded\n");
591 wake_up_interruptible_sync(&q
->wait
);
594 mutex_unlock(&q
->vb_lock
);
596 if (b
->memory
== V4L2_MEMORY_MMAP
)
597 up_read(¤t
->mm
->mmap_sem
);
601 EXPORT_SYMBOL_GPL(videobuf_qbuf
);
603 /* Locking: Caller holds q->vb_lock */
604 static int stream_next_buffer_check_queue(struct videobuf_queue
*q
, int noblock
)
610 dprintk(1, "next_buffer: Not streaming\n");
615 if (list_empty(&q
->stream
)) {
618 dprintk(2, "next_buffer: no buffers to dequeue\n");
621 dprintk(2, "next_buffer: waiting on buffer\n");
623 /* Drop lock to avoid deadlock with qbuf */
624 mutex_unlock(&q
->vb_lock
);
626 /* Checking list_empty and streaming is safe without
627 * locks because we goto checks to validate while
628 * holding locks before proceeding */
629 retval
= wait_event_interruptible(q
->wait
,
630 !list_empty(&q
->stream
) || !q
->streaming
);
631 mutex_lock(&q
->vb_lock
);
646 /* Locking: Caller holds q->vb_lock */
647 static int stream_next_buffer(struct videobuf_queue
*q
,
648 struct videobuf_buffer
**vb
, int nonblocking
)
651 struct videobuf_buffer
*buf
= NULL
;
653 retval
= stream_next_buffer_check_queue(q
, nonblocking
);
657 buf
= list_entry(q
->stream
.next
, struct videobuf_buffer
, stream
);
658 retval
= videobuf_waiton(buf
, nonblocking
, 1);
667 int videobuf_dqbuf(struct videobuf_queue
*q
,
668 struct v4l2_buffer
*b
, int nonblocking
)
670 struct videobuf_buffer
*buf
= NULL
;
673 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
675 memset(b
, 0, sizeof(*b
));
676 mutex_lock(&q
->vb_lock
);
678 retval
= stream_next_buffer(q
, &buf
, nonblocking
);
680 dprintk(1, "dqbuf: next_buffer error: %i\n", retval
);
684 switch (buf
->state
) {
686 dprintk(1, "dqbuf: state is error\n");
689 dprintk(1, "dqbuf: state is done\n");
692 dprintk(1, "dqbuf: state invalid\n");
696 CALL(q
, sync
, q
, buf
);
697 videobuf_status(q
, b
, buf
, q
->type
);
698 list_del(&buf
->stream
);
699 buf
->state
= VIDEOBUF_IDLE
;
700 b
->flags
&= ~V4L2_BUF_FLAG_DONE
;
702 mutex_unlock(&q
->vb_lock
);
705 EXPORT_SYMBOL_GPL(videobuf_dqbuf
);
707 int videobuf_streamon(struct videobuf_queue
*q
)
709 struct videobuf_buffer
*buf
;
710 unsigned long flags
= 0;
713 mutex_lock(&q
->vb_lock
);
721 spin_lock_irqsave(q
->irqlock
, flags
);
722 list_for_each_entry(buf
, &q
->stream
, stream
)
723 if (buf
->state
== VIDEOBUF_PREPARED
)
724 q
->ops
->buf_queue(q
, buf
);
725 spin_unlock_irqrestore(q
->irqlock
, flags
);
727 wake_up_interruptible_sync(&q
->wait
);
729 mutex_unlock(&q
->vb_lock
);
732 EXPORT_SYMBOL_GPL(videobuf_streamon
);
734 /* Locking: Caller holds q->vb_lock */
735 static int __videobuf_streamoff(struct videobuf_queue
*q
)
740 videobuf_queue_cancel(q
);
745 int videobuf_streamoff(struct videobuf_queue
*q
)
749 mutex_lock(&q
->vb_lock
);
750 retval
= __videobuf_streamoff(q
);
751 mutex_unlock(&q
->vb_lock
);
755 EXPORT_SYMBOL_GPL(videobuf_streamoff
);
757 /* Locking: Caller holds q->vb_lock */
758 static ssize_t
videobuf_read_zerocopy(struct videobuf_queue
*q
,
760 size_t count
, loff_t
*ppos
)
762 enum v4l2_field field
;
763 unsigned long flags
= 0;
766 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
769 q
->read_buf
= videobuf_alloc(q
);
770 if (NULL
== q
->read_buf
)
773 q
->read_buf
->memory
= V4L2_MEMORY_USERPTR
;
774 q
->read_buf
->baddr
= (unsigned long)data
;
775 q
->read_buf
->bsize
= count
;
777 field
= videobuf_next_field(q
);
778 retval
= q
->ops
->buf_prepare(q
, q
->read_buf
, field
);
782 /* start capture & wait */
783 spin_lock_irqsave(q
->irqlock
, flags
);
784 q
->ops
->buf_queue(q
, q
->read_buf
);
785 spin_unlock_irqrestore(q
->irqlock
, flags
);
786 retval
= videobuf_waiton(q
->read_buf
, 0, 0);
788 CALL(q
, sync
, q
, q
->read_buf
);
789 if (VIDEOBUF_ERROR
== q
->read_buf
->state
)
792 retval
= q
->read_buf
->size
;
797 q
->ops
->buf_release(q
, q
->read_buf
);
803 static int __videobuf_copy_to_user(struct videobuf_queue
*q
,
804 struct videobuf_buffer
*buf
,
805 char __user
*data
, size_t count
,
808 void *vaddr
= CALL(q
, vaddr
, buf
);
810 /* copy to userspace */
811 if (count
> buf
->size
- q
->read_off
)
812 count
= buf
->size
- q
->read_off
;
814 if (copy_to_user(data
, vaddr
+ q
->read_off
, count
))
820 static int __videobuf_copy_stream(struct videobuf_queue
*q
,
821 struct videobuf_buffer
*buf
,
822 char __user
*data
, size_t count
, size_t pos
,
823 int vbihack
, int nonblocking
)
825 unsigned int *fc
= CALL(q
, vaddr
, buf
);
828 /* dirty, undocumented hack -- pass the frame counter
829 * within the last four bytes of each vbi data block.
830 * We need that one to maintain backward compatibility
831 * to all vbi decoding software out there ... */
832 fc
+= (buf
->size
>> 2) - 1;
833 *fc
= buf
->field_count
>> 1;
834 dprintk(1, "vbihack: %d\n", *fc
);
837 /* copy stuff using the common method */
838 count
= __videobuf_copy_to_user(q
, buf
, data
, count
, nonblocking
);
840 if ((count
== -EFAULT
) && (pos
== 0))
846 ssize_t
videobuf_read_one(struct videobuf_queue
*q
,
847 char __user
*data
, size_t count
, loff_t
*ppos
,
850 enum v4l2_field field
;
851 unsigned long flags
= 0;
852 unsigned size
= 0, nbufs
= 1;
855 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
857 mutex_lock(&q
->vb_lock
);
859 q
->ops
->buf_setup(q
, &nbufs
, &size
);
861 if (NULL
== q
->read_buf
&&
864 retval
= videobuf_read_zerocopy(q
, data
, count
, ppos
);
865 if (retval
>= 0 || retval
== -EIO
)
868 /* fallback to kernel bounce buffer on failures */
871 if (NULL
== q
->read_buf
) {
872 /* need to capture a new frame */
874 q
->read_buf
= videobuf_alloc(q
);
876 dprintk(1, "video alloc=0x%p\n", q
->read_buf
);
877 if (NULL
== q
->read_buf
)
879 q
->read_buf
->memory
= V4L2_MEMORY_USERPTR
;
880 q
->read_buf
->bsize
= count
; /* preferred size */
881 field
= videobuf_next_field(q
);
882 retval
= q
->ops
->buf_prepare(q
, q
->read_buf
, field
);
890 spin_lock_irqsave(q
->irqlock
, flags
);
891 q
->ops
->buf_queue(q
, q
->read_buf
);
892 spin_unlock_irqrestore(q
->irqlock
, flags
);
897 /* wait until capture is done */
898 retval
= videobuf_waiton(q
->read_buf
, nonblocking
, 1);
902 CALL(q
, sync
, q
, q
->read_buf
);
904 if (VIDEOBUF_ERROR
== q
->read_buf
->state
) {
905 /* catch I/O errors */
906 q
->ops
->buf_release(q
, q
->read_buf
);
913 /* Copy to userspace */
914 retval
= __videobuf_copy_to_user(q
, q
->read_buf
, data
, count
, nonblocking
);
918 q
->read_off
+= retval
;
919 if (q
->read_off
== q
->read_buf
->size
) {
920 /* all data copied, cleanup */
921 q
->ops
->buf_release(q
, q
->read_buf
);
927 mutex_unlock(&q
->vb_lock
);
930 EXPORT_SYMBOL_GPL(videobuf_read_one
);
932 /* Locking: Caller holds q->vb_lock */
933 static int __videobuf_read_start(struct videobuf_queue
*q
)
935 enum v4l2_field field
;
936 unsigned long flags
= 0;
937 unsigned int count
= 0, size
= 0;
940 q
->ops
->buf_setup(q
, &count
, &size
);
943 if (count
> VIDEO_MAX_FRAME
)
944 count
= VIDEO_MAX_FRAME
;
945 size
= PAGE_ALIGN(size
);
947 err
= __videobuf_mmap_setup(q
, count
, size
, V4L2_MEMORY_USERPTR
);
953 for (i
= 0; i
< count
; i
++) {
954 field
= videobuf_next_field(q
);
955 err
= q
->ops
->buf_prepare(q
, q
->bufs
[i
], field
);
958 list_add_tail(&q
->bufs
[i
]->stream
, &q
->stream
);
960 spin_lock_irqsave(q
->irqlock
, flags
);
961 for (i
= 0; i
< count
; i
++)
962 q
->ops
->buf_queue(q
, q
->bufs
[i
]);
963 spin_unlock_irqrestore(q
->irqlock
, flags
);
968 static void __videobuf_read_stop(struct videobuf_queue
*q
)
972 videobuf_queue_cancel(q
);
973 __videobuf_mmap_free(q
);
974 INIT_LIST_HEAD(&q
->stream
);
975 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
976 if (NULL
== q
->bufs
[i
])
984 int videobuf_read_start(struct videobuf_queue
*q
)
988 mutex_lock(&q
->vb_lock
);
989 rc
= __videobuf_read_start(q
);
990 mutex_unlock(&q
->vb_lock
);
994 EXPORT_SYMBOL_GPL(videobuf_read_start
);
996 void videobuf_read_stop(struct videobuf_queue
*q
)
998 mutex_lock(&q
->vb_lock
);
999 __videobuf_read_stop(q
);
1000 mutex_unlock(&q
->vb_lock
);
1002 EXPORT_SYMBOL_GPL(videobuf_read_stop
);
1004 void videobuf_stop(struct videobuf_queue
*q
)
1006 mutex_lock(&q
->vb_lock
);
1009 __videobuf_streamoff(q
);
1012 __videobuf_read_stop(q
);
1014 mutex_unlock(&q
->vb_lock
);
1016 EXPORT_SYMBOL_GPL(videobuf_stop
);
1018 ssize_t
videobuf_read_stream(struct videobuf_queue
*q
,
1019 char __user
*data
, size_t count
, loff_t
*ppos
,
1020 int vbihack
, int nonblocking
)
1023 unsigned long flags
= 0;
1025 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
1027 dprintk(2, "%s\n", __func__
);
1028 mutex_lock(&q
->vb_lock
);
1033 retval
= __videobuf_read_start(q
);
1040 /* get / wait for data */
1041 if (NULL
== q
->read_buf
) {
1042 q
->read_buf
= list_entry(q
->stream
.next
,
1043 struct videobuf_buffer
,
1045 list_del(&q
->read_buf
->stream
);
1048 rc
= videobuf_waiton(q
->read_buf
, nonblocking
, 1);
1055 if (q
->read_buf
->state
== VIDEOBUF_DONE
) {
1056 rc
= __videobuf_copy_stream(q
, q
->read_buf
, data
+ retval
, count
,
1057 retval
, vbihack
, nonblocking
);
1067 q
->read_off
= q
->read_buf
->size
;
1072 /* requeue buffer when done with copying */
1073 if (q
->read_off
== q
->read_buf
->size
) {
1074 list_add_tail(&q
->read_buf
->stream
,
1076 spin_lock_irqsave(q
->irqlock
, flags
);
1077 q
->ops
->buf_queue(q
, q
->read_buf
);
1078 spin_unlock_irqrestore(q
->irqlock
, flags
);
1086 mutex_unlock(&q
->vb_lock
);
1089 EXPORT_SYMBOL_GPL(videobuf_read_stream
);
1091 unsigned int videobuf_poll_stream(struct file
*file
,
1092 struct videobuf_queue
*q
,
1095 struct videobuf_buffer
*buf
= NULL
;
1096 unsigned int rc
= 0;
1098 mutex_lock(&q
->vb_lock
);
1100 if (!list_empty(&q
->stream
))
1101 buf
= list_entry(q
->stream
.next
,
1102 struct videobuf_buffer
, stream
);
1105 __videobuf_read_start(q
);
1108 } else if (NULL
== q
->read_buf
) {
1109 q
->read_buf
= list_entry(q
->stream
.next
,
1110 struct videobuf_buffer
,
1112 list_del(&q
->read_buf
->stream
);
1121 poll_wait(file
, &buf
->done
, wait
);
1122 if (buf
->state
== VIDEOBUF_DONE
||
1123 buf
->state
== VIDEOBUF_ERROR
) {
1125 case V4L2_BUF_TYPE_VIDEO_OUTPUT
:
1126 case V4L2_BUF_TYPE_VBI_OUTPUT
:
1127 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
:
1128 rc
= POLLOUT
| POLLWRNORM
;
1131 rc
= POLLIN
| POLLRDNORM
;
1136 mutex_unlock(&q
->vb_lock
);
1139 EXPORT_SYMBOL_GPL(videobuf_poll_stream
);
1141 int videobuf_mmap_mapper(struct videobuf_queue
*q
, struct vm_area_struct
*vma
)
1146 MAGIC_CHECK(q
->int_ops
->magic
, MAGIC_QTYPE_OPS
);
1148 if (!(vma
->vm_flags
& VM_WRITE
) || !(vma
->vm_flags
& VM_SHARED
)) {
1149 dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
1153 mutex_lock(&q
->vb_lock
);
1154 for (i
= 0; i
< VIDEO_MAX_FRAME
; i
++) {
1155 struct videobuf_buffer
*buf
= q
->bufs
[i
];
1157 if (buf
&& buf
->memory
== V4L2_MEMORY_MMAP
&&
1158 buf
->boff
== (vma
->vm_pgoff
<< PAGE_SHIFT
)) {
1159 rc
= CALL(q
, mmap_mapper
, q
, buf
, vma
);
1163 mutex_unlock(&q
->vb_lock
);
1167 EXPORT_SYMBOL_GPL(videobuf_mmap_mapper
);
#ifdef CONFIG_VIDEO_V4L1_COMPAT
/*
 * V4L1 VIDIOCGMBUF compatibility: request @count MMAP buffers and fill
 * the video_mbuf structure with their offsets and total size.
 */
int videobuf_cgmbuf(struct videobuf_queue *q,
		    struct video_mbuf *mbuf, int count)
{
	struct v4l2_requestbuffers req;
	int rc, i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(&req, 0, sizeof(req));
	req.type   = q->type;
	req.count  = count;
	req.memory = V4L2_MEMORY_MMAP;
	rc = videobuf_reqbufs(q, &req);
	if (rc < 0)
		return rc;

	mbuf->frames = req.count;
	mbuf->size   = 0;
	for (i = 0; i < mbuf->frames; i++) {
		mbuf->offsets[i]  = q->bufs[i]->boff;
		mbuf->size       += PAGE_ALIGN(q->bufs[i]->bsize);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_cgmbuf);
#endif