/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org>
 * (c) 2006 Ted Walther and John Sokol
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>

#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			printk(KERN_ERR					\
				"magic mismatch: %x (expected %x)\n",	\
				is, should);				\
			BUG();						\
		}							\
	} while (0)

static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vbuf: " fmt, ## arg);	\
	} while (0)

/* --------------------------------------------------------------------- */

#define CALL(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)
struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
	struct videobuf_buffer *vb;

	BUG_ON(q->msize < sizeof(*vb));

	if (!q->int_ops || !q->int_ops->alloc_vb) {
		printk(KERN_ERR "No specific ops defined!\n");
		BUG();
	}

	vb = q->int_ops->alloc_vb(q->msize);
	if (NULL != vb) {
		init_waitqueue_head(&vb->done);
		vb->magic = MAGIC_BUFFER;
	}

	return vb;
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
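
/*
 * Illustrative usage sketch (hypothetical driver code, not taken from any
 * real driver): drivers normally embed struct videobuf_buffer as the first
 * member of a larger, driver-private buffer structure and pass sizeof() of
 * that structure as q->msize, so the object returned by videobuf_alloc_vb()
 * also carries the private state.  All mydrv_* names are placeholders.
 */
struct mydrv_buffer {
	struct videobuf_buffer	vb;	/* must stay first */
	unsigned int		fourcc;	/* example driver-private field */
};

static inline struct mydrv_buffer *mydrv_alloc_buffer(struct videobuf_queue *q)
{
	/* assumes q->msize was set to sizeof(struct mydrv_buffer) */
	struct videobuf_buffer *vb = videobuf_alloc_vb(q);

	return vb ? container_of(vb, struct mydrv_buffer, vb) : NULL;
}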
static int is_state_active_or_queued(struct videobuf_queue *q,
				     struct videobuf_buffer *vb)
{
	unsigned long flags;
	bool rc;

	spin_lock_irqsave(q->irqlock, flags);
	rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
	spin_unlock_irqrestore(q->irqlock, flags);
	return rc;
}
int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    int non_blocking, int intr)
{
	bool is_ext_locked;
	int ret = 0;

	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

	if (non_blocking) {
		if (is_state_active_or_queued(q, vb))
			return 0;
		return -EAGAIN;
	}

	is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);

	/* Release vdev lock to prevent this wait from blocking outside access to
	   the device. */
	if (is_ext_locked)
		mutex_unlock(q->ext_lock);

	if (intr)
		ret = wait_event_interruptible(vb->done,
					       is_state_active_or_queued(q, vb));
	else
		wait_event(vb->done, is_state_active_or_queued(q, vb));

	if (is_ext_locked)
		mutex_lock(q->ext_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);
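
/*
 * Illustrative sketch (hypothetical driver code): a driver's buffer
 * teardown path usually blocks until the hardware is done with a buffer
 * before releasing its resources, using a blocking (non_blocking = 0),
 * non-interruptible (intr = 0) wait.  Reuses the hypothetical
 * struct mydrv_buffer from the sketch above.
 */
static inline void mydrv_free_buffer(struct videobuf_queue *q,
				     struct mydrv_buffer *buf)
{
	videobuf_waiton(q, &buf->vb, 0, 0);
	/* ... release DMA mappings / pages of the buffer here ... */
}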
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    struct v4l2_framebuffer *fbuf)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);

void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	if (q->int_ops->vaddr)
		return q->int_ops->vaddr(buf);
	return NULL;
}
EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);

/* --------------------------------------------------------------------- */
void videobuf_queue_core_init(struct videobuf_queue *q,
			      const struct videobuf_queue_ops *ops,
			      struct device *dev,
			      spinlock_t *irqlock,
			      enum v4l2_buf_type type,
			      enum v4l2_field field,
			      unsigned int msize,
			      void *priv,
			      struct videobuf_qtype_ops *int_ops,
			      struct mutex *ext_lock)
{
	BUG_ON(!q);
	memset(q, 0, sizeof(*q));
	q->irqlock   = irqlock;
	q->ext_lock  = ext_lock;
	q->dev       = dev;
	q->type      = type;
	q->field     = field;
	q->msize     = msize;
	q->ops       = ops;
	q->priv_data = priv;
	q->int_ops   = int_ops;

	/* All buffer operations are mandatory */
	BUG_ON(!q->ops->buf_setup);
	BUG_ON(!q->ops->buf_prepare);
	BUG_ON(!q->ops->buf_queue);
	BUG_ON(!q->ops->buf_release);

	/* Lock is mandatory for queue_cancel to work */
	BUG_ON(!irqlock);

	/* Having implementations for the abstract methods is mandatory */
	BUG_ON(!q->int_ops);

	mutex_init(&q->vb_lock);
	init_waitqueue_head(&q->wait);
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
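
/*
 * Illustrative sketch (hypothetical driver code): the four callbacks
 * checked above are supplied by the driver in a videobuf_queue_ops table;
 * buf_setup() negotiates buffer count and size with the core.  In practice
 * the queue is usually initialized through one of the memory-type wrappers
 * (videobuf-dma-sg, videobuf-vmalloc, ...) which supplies int_ops.  All
 * mydrv_* identifiers and the frame size are placeholders.
 */
static int mydrv_buf_setup(struct videobuf_queue *q,
			   unsigned int *count, unsigned int *size)
{
	*size = 720 * 576 * 2;		/* assumed packed YUYV PAL frame */
	if (*count < 3)
		*count = 3;		/* keep a minimum of three buffers */
	return 0;
}
/* buf_prepare(), buf_queue() and buf_release() would be implemented
 * similarly and wired into a struct videobuf_queue_ops. */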
/* Locking: Only used in bttv; unsafe, find a way to remove */
int videobuf_queue_is_busy(struct videobuf_queue *q)
{
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (q->streaming) {
		dprintk(1, "busy: streaming active\n");
		return 1;
	}
	if (q->reading) {
		dprintk(1, "busy: pending read #1\n");
		return 1;
	}
	if (q->read_buf) {
		dprintk(1, "busy: pending read #2\n");
		return 1;
	}
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->map) {
			dprintk(1, "busy: buffer #%d mapped\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			dprintk(1, "busy: buffer #%d queued\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
			dprintk(1, "busy: buffer #%d active\n", i);
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
/**
 * __videobuf_free() - free all the buffers and their control structures
 *
 * This function can only be called if streaming/reading is off, i.e. no
 * buffers are under control of the driver.
 */
/* Locking: Caller holds q->vb_lock */
static int __videobuf_free(struct videobuf_queue *q)
{
	int i;

	dprintk(1, "%s\n", __func__);
	if (!q)
		return 0;

	if (q->streaming || q->reading) {
		dprintk(1, "Cannot free buffers when streaming or reading\n");
		return -EBUSY;
	}

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map) {
			dprintk(1, "Cannot free mmapped buffers\n");
			return -EBUSY;
		}

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}
/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading   = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);

/* --------------------------------------------------------------------- */
/* Locking: Caller holds q->vb_lock */
enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
{
	enum v4l2_field field = q->field;

	BUG_ON(V4L2_FIELD_ANY == field);

	if (V4L2_FIELD_ALTERNATE == field) {
		if (V4L2_FIELD_TOP == q->last) {
			field   = V4L2_FIELD_BOTTOM;
			q->last = V4L2_FIELD_BOTTOM;
		} else {
			field   = V4L2_FIELD_TOP;
			q->last = V4L2_FIELD_TOP;
		}
	}
	return field;
}
EXPORT_SYMBOL_GPL(videobuf_next_field);
/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
			    struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	b->index  = vb->i;
	b->type   = type;

	b->memory = vb->memory;
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		b->m.offset  = vb->boff;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_USERPTR:
		b->m.userptr = vb->baddr;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_OVERLAY:
		b->m.offset  = vb->boff;
		break;
	}

	b->flags = 0;
	if (vb->map)
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (vb->state) {
	case VIDEOBUF_PREPARED:
	case VIDEOBUF_QUEUED:
	case VIDEOBUF_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VIDEOBUF_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VIDEOBUF_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VIDEOBUF_NEEDS_INIT:
	case VIDEOBUF_IDLE:
		/* nothing */
		break;
	}

	if (vb->input != UNSET) {
		b->flags |= V4L2_BUF_FLAG_INPUT;
		b->input  = vb->input;
	}

	b->field     = vb->field;
	b->timestamp = vb->ts;
	b->bytesused = vb->size;
	b->sequence  = vb->field_count >> 1;
}
int videobuf_mmap_free(struct videobuf_queue *q)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_free(q);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);
/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
			  unsigned int bcount, unsigned int bsize,
			  enum v4l2_memory memory)
{
	unsigned int i;
	int err;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	err = __videobuf_free(q);
	if (0 != err)
		return err;

	/* Allocate and initialize buffers */
	for (i = 0; i < bcount; i++) {
		q->bufs[i] = videobuf_alloc_vb(q);

		if (NULL == q->bufs[i])
			break;

		q->bufs[i]->i      = i;
		q->bufs[i]->input  = UNSET;
		q->bufs[i]->memory = memory;
		q->bufs[i]->bsize  = bsize;
		switch (memory) {
		case V4L2_MEMORY_MMAP:
			q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
			break;
		case V4L2_MEMORY_USERPTR:
		case V4L2_MEMORY_OVERLAY:
			/* nothing */
			break;
		}
	}

	if (!i)
		return -ENOMEM;

	dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

	return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
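
/*
 * Illustrative note (hypothetical helper, not used by this file): for MMAP
 * buffers the boff value set above is the per-buffer offset reported back
 * through VIDIOC_QUERYBUF, which videobuf_mmap_mapper() later matches
 * against vma->vm_pgoff << PAGE_SHIFT.  This helper just mirrors that
 * layout for clarity.
 */
static inline unsigned long mydrv_buffer_offset(unsigned int bsize,
						unsigned int index)
{
	return PAGE_ALIGN(bsize) * index;	/* same formula as above */
}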
int videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
int videobuf_reqbufs(struct videobuf_queue *q,
		     struct v4l2_requestbuffers *req)
{
	unsigned int size, count;
	int retval;

	if (req->count < 1) {
		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
		return -EINVAL;
	}

	if (req->memory != V4L2_MEMORY_MMAP     &&
	    req->memory != V4L2_MEMORY_USERPTR  &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1, "reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	if (req->type != q->type) {
		dprintk(1, "reqbufs: queue type invalid\n");
		retval = -EINVAL;
		goto done;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming already exists\n");
		retval = -EBUSY;
		goto done;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1, "reqbufs: stream running\n");
		retval = -EBUSY;
		goto done;
	}

	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	q->ops->buf_setup(q, &count, &size);
	dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
		count, size,
		(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

	retval = __videobuf_mmap_setup(q, count, size, req->memory);
	if (retval < 0) {
		dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
		goto done;
	}

	req->count = retval;
	retval = 0;

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);
int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	int ret = -EINVAL;

	videobuf_queue_lock(q);
	if (unlikely(b->type != q->type)) {
		dprintk(1, "querybuf: Wrong type.\n");
		goto done;
	}
	if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
		dprintk(1, "querybuf: index out of range.\n");
		goto done;
	}
	if (unlikely(NULL == q->bufs[b->index])) {
		dprintk(1, "querybuf: buffer is null.\n");
		goto done;
	}

	videobuf_status(q, b, q->bufs[b->index], q->type);

	ret = 0;
done:
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		down_read(&current->mm->mmap_sem);

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	if (b->flags & V4L2_BUF_FLAG_INPUT) {
		if (b->input >= q->inputs) {
			dprintk(1, "qbuf: wrong input.\n");
			goto done;
		}
		buf->input = b->input;
	} else {
		buf->input = UNSET;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested "
				   "but buffer addr is zero!\n");
			goto done;
		}
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT) {
			buf->size  = b->bytesused;
			buf->field = b->field;
			buf->ts    = b->timestamp;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	videobuf_queue_unlock(q);

	if (b->memory == V4L2_MEMORY_MMAP)
		up_read(&current->mm->mmap_sem);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);
/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
	int retval;

checks:
	if (!q->streaming) {
		dprintk(1, "next_buffer: Not streaming\n");
		retval = -EINVAL;
		goto done;
	}

	if (list_empty(&q->stream)) {
		if (noblock) {
			retval = -EAGAIN;
			dprintk(2, "next_buffer: no buffers to dequeue\n");
			goto done;
		} else {
			dprintk(2, "next_buffer: waiting on buffer\n");

			/* Drop lock to avoid deadlock with qbuf */
			videobuf_queue_unlock(q);

			/* Checking list_empty and streaming is safe without
			 * locks because we goto checks to validate while
			 * holding locks before proceeding */
			retval = wait_event_interruptible(q->wait,
				!list_empty(&q->stream) || !q->streaming);
			videobuf_queue_lock(q);

			if (retval)
				goto done;

			goto checks;
		}
	}

	retval = 0;

done:
	return retval;
}

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
			      struct videobuf_buffer **vb, int nonblocking)
{
	int retval;
	struct videobuf_buffer *buf = NULL;

	retval = stream_next_buffer_check_queue(q, nonblocking);
	if (retval)
		goto done;

	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
	retval = videobuf_waiton(q, buf, nonblocking, 1);
	if (retval < 0)
		goto done;

	*vb = buf;
done:
	return retval;
}
int videobuf_dqbuf(struct videobuf_queue *q,
		   struct v4l2_buffer *b, int nonblocking)
{
	struct videobuf_buffer *buf = NULL;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(b, 0, sizeof(*b));
	videobuf_queue_lock(q);

	retval = stream_next_buffer(q, &buf, nonblocking);
	if (retval < 0) {
		dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
		goto done;
	}

	switch (buf->state) {
	case VIDEOBUF_ERROR:
		dprintk(1, "dqbuf: state is error\n");
		break;
	case VIDEOBUF_DONE:
		dprintk(1, "dqbuf: state is done\n");
		break;
	default:
		dprintk(1, "dqbuf: state invalid\n");
		retval = -EINVAL;
		goto done;
	}
	CALL(q, sync, q, buf);
	videobuf_status(q, b, buf, q->type);
	list_del(&buf->stream);
	buf->state = VIDEOBUF_IDLE;
	b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);
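
/*
 * Illustrative sketch (hypothetical driver code): the helpers above are
 * meant to be called straight from a driver's streaming ioctl handlers.
 * struct mydrv_fh (an assumed per-open structure holding the queue) and
 * the mydrv_vidioc_* names are placeholders; O_NONBLOCK handling follows
 * the usual V4L2 convention of honouring the file's nonblocking flag.
 */
struct mydrv_fh {
	struct videobuf_queue vb_q;
};

static int mydrv_vidioc_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *p)
{
	struct mydrv_fh *fh = priv;

	return videobuf_reqbufs(&fh->vb_q, p);
}

static int mydrv_vidioc_dqbuf(struct file *file, void *priv,
			      struct v4l2_buffer *b)
{
	struct mydrv_fh *fh = priv;

	return videobuf_dqbuf(&fh->vb_q, b, file->f_flags & O_NONBLOCK);
}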
int videobuf_streamon(struct videobuf_queue *q)
{
	struct videobuf_buffer *buf;
	unsigned long flags = 0;
	int retval;

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading)
		goto done;
	retval = 0;
	if (q->streaming)
		goto done;
	q->streaming = 1;
	spin_lock_irqsave(q->irqlock, flags);
	list_for_each_entry(buf, &q->stream, stream)
		if (buf->state == VIDEOBUF_PREPARED)
			q->ops->buf_queue(q, buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	wake_up_interruptible_sync(&q->wait);
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);
/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
	if (!q->streaming)
		return -EINVAL;

	videobuf_queue_cancel(q);

	return 0;
}

int videobuf_streamoff(struct videobuf_queue *q)
{
	int retval;

	videobuf_queue_lock(q);
	retval = __videobuf_streamoff(q);
	videobuf_queue_unlock(q);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);
/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
				      char __user *data,
				      size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* setup stuff */
	q->read_buf = videobuf_alloc_vb(q);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr  = (unsigned long)data;
	q->read_buf->bsize  = count;

	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, q->read_buf, field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	spin_lock_irqsave(q->irqlock, flags);
	q->ops->buf_queue(q, q->read_buf);
	spin_unlock_irqrestore(q->irqlock, flags);
	retval = videobuf_waiton(q, q->read_buf, 0, 0);
	if (0 == retval) {
		CALL(q, sync, q, q->read_buf);
		if (VIDEOBUF_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

done:
	/* cleanup */
	q->ops->buf_release(q, q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}
static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   struct videobuf_buffer *buf,
				   char __user *data, size_t count,
				   int nonblocking)
{
	void *vaddr = CALL(q, vaddr, buf);

	/* copy to userspace */
	if (count > buf->size - q->read_off)
		count = buf->size - q->read_off;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}
static int __videobuf_copy_stream(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc = CALL(q, vaddr, buf);

	if (vbihack) {
		/* dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need that one to maintain backward compatibility
		 * to all vbi decoding software out there ... */
		fc += (buf->size >> 2) - 1;
		*fc = buf->field_count >> 1;
		dprintk(1, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}
ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	videobuf_queue_lock(q);

	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf  &&
	    count >= size        &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0 || retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc_vb(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);
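
/*
 * Illustrative sketch (hypothetical driver code): a driver's read() file
 * operation is usually a thin wrapper around videobuf_read_one() (one
 * frame per call) or videobuf_read_stream().  It reuses the hypothetical
 * struct mydrv_fh from the earlier sketch.
 */
static ssize_t mydrv_read(struct file *file, char __user *data,
			  size_t count, loff_t *ppos)
{
	struct mydrv_fh *fh = file->private_data;

	return videobuf_read_one(&fh->vb_q, data, count, ppos,
				 file->f_flags & O_NONBLOCK);
}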
/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q, &count, &size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err < 0)
		return err;

	count = err;

	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q, q->bufs[i], field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q, q->bufs[i]);
	spin_unlock_irqrestore(q->irqlock, flags);
	q->reading = 1;
	return 0;
}
static void __videobuf_read_stop(struct videobuf_queue *q)
{
	int i;

	videobuf_queue_cancel(q);
	__videobuf_free(q);
	INIT_LIST_HEAD(&q->stream);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}
	q->read_buf = NULL;
}
int videobuf_read_start(struct videobuf_queue *q)
{
	int rc;

	videobuf_queue_lock(q);
	rc = __videobuf_read_start(q);
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);

void videobuf_read_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);
	__videobuf_read_stop(q);
	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);

void videobuf_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);

	if (q->streaming)
		__videobuf_streamoff(q);

	if (q->reading)
		__videobuf_read_stop(q);

	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);
ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
		if (rc < 0) {
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval      += rc;
			count       -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);
unsigned int videobuf_poll_stream(struct file *file,
				  struct videobuf_queue *q,
				  poll_table *wait)
{
	struct videobuf_buffer *buf = NULL;
	unsigned int rc = 0;

	videobuf_queue_lock(q);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else {
		if (!q->reading)
			__videobuf_read_start(q);
		if (!q->reading) {
			rc = POLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	if (!buf)
		rc = POLLERR;

	if (0 == rc) {
		poll_wait(file, &buf->done, wait);
		if (buf->state == VIDEOBUF_DONE ||
		    buf->state == VIDEOBUF_ERROR) {
			switch (q->type) {
			case V4L2_BUF_TYPE_VIDEO_OUTPUT:
			case V4L2_BUF_TYPE_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
				rc = POLLOUT | POLLWRNORM;
				break;
			default:
				rc = POLLIN | POLLRDNORM;
				break;
			}
		}
	}
	videobuf_queue_unlock(q);
	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);
int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
	int rc = -EINVAL;
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_buffer *buf = q->bufs[i];

		if (buf && buf->memory == V4L2_MEMORY_MMAP &&
		    buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
			rc = CALL(q, mmap_mapper, q, buf, vma);
			break;
		}
	}
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
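
/*
 * Illustrative sketch (hypothetical driver code): the poll() and mmap()
 * file operations likewise delegate to the helpers above, again using the
 * hypothetical struct mydrv_fh from the earlier sketches.
 */
static unsigned int mydrv_poll(struct file *file, poll_table *wait)
{
	struct mydrv_fh *fh = file->private_data;

	return videobuf_poll_stream(file, &fh->vb_q, wait);
}

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv_fh *fh = file->private_data;

	return videobuf_mmap_mapper(&fh->vb_q, vma);
}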