tomato.git: release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/media/video/ivtv/ivtv-queue.c
/*
    buffer queues.
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
int ivtv_buf_copy_from_user(struct ivtv_stream *s, struct ivtv_buffer *buf, const char __user *src, int copybytes)
{
        if (s->buf_size - buf->bytesused < copybytes)
                copybytes = s->buf_size - buf->bytesused;
        if (copy_from_user(buf->buf + buf->bytesused, src, copybytes)) {
                return -EFAULT;
        }
        buf->bytesused += copybytes;
        return copybytes;
}
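/*
 * Note on the return value above: the copy is clamped to the space still
 * free in the buffer. For example, with s->buf_size == 32768 and
 * buf->bytesused == 32000, a request to copy 4096 bytes copies only 768
 * and returns 768; the caller is expected to loop with the remainder (or
 * grab a fresh buffer) rather than assume the full count was consumed.
 */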
void ivtv_buf_swap(struct ivtv_buffer *buf)
{
        int i;

        for (i = 0; i < buf->bytesused; i += 4)
                swab32s((u32 *)(buf->buf + i));
}
void ivtv_queue_init(struct ivtv_queue *q)
{
        INIT_LIST_HEAD(&q->list);
        q->buffers = 0;
        q->length = 0;
        q->bytesused = 0;
}
void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q)
{
        unsigned long flags;

        /* clear the buffer if it is going to be enqueued to the free queue */
        if (q == &s->q_free) {
                buf->bytesused = 0;
                buf->readpos = 0;
                buf->b_flags = 0;
                buf->dma_xfer_cnt = 0;
        }
        spin_lock_irqsave(&s->qlock, flags);
        list_add_tail(&buf->list, &q->list);
        q->buffers++;
        q->length += s->buf_size;
        q->bytesused += buf->bytesused - buf->readpos;
        spin_unlock_irqrestore(&s->qlock, flags);
}
struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q)
{
        struct ivtv_buffer *buf = NULL;
        unsigned long flags;

        spin_lock_irqsave(&s->qlock, flags);
        if (!list_empty(&q->list)) {
                buf = list_entry(q->list.next, struct ivtv_buffer, list);
                list_del_init(q->list.next);
                q->buffers--;
                q->length -= s->buf_size;
                q->bytesused -= buf->bytesused - buf->readpos;
        }
        spin_unlock_irqrestore(&s->qlock, flags);
        return buf;
}
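/*
 * Illustrative sketch, not part of the original driver: a write()-style
 * producer could shuttle one buffer from the free queue to the full queue
 * roughly as below, given a hypothetical user pointer 'ubuf' with 'count'
 * bytes pending (these names are assumptions made for this comment only):
 *
 *      struct ivtv_buffer *buf = ivtv_dequeue(s, &s->q_free);
 *
 *      if (buf != NULL) {
 *              int copied = ivtv_buf_copy_from_user(s, buf, ubuf, count);
 *
 *              if (copied < 0)
 *                      return copied;                  // -EFAULT from copy_from_user()
 *              ivtv_enqueue(s, buf, &s->q_full);       // hand the data to the consumer side
 *      }
 */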
static void ivtv_queue_move_buf(struct ivtv_stream *s, struct ivtv_queue *from,
                struct ivtv_queue *to, int clear)
{
        struct ivtv_buffer *buf = list_entry(from->list.next, struct ivtv_buffer, list);

        list_move_tail(from->list.next, &to->list);
        from->buffers--;
        from->length -= s->buf_size;
        from->bytesused -= buf->bytesused - buf->readpos;
        /* special handling for q_free */
        if (clear)
                buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
        to->buffers++;
        to->length += s->buf_size;
        to->bytesused += buf->bytesused - buf->readpos;
}
/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
   If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
   If 'steal' != NULL, then buffers may also be taken from that queue if
   needed, but only if 'from' is the free queue.

   The buffer is automatically cleared if it goes to the free queue. It is
   also cleared if buffers need to be taken from the 'steal' queue and
   the 'from' queue is the free queue.

   When 'from' is q_free, then needed_bytes is compared to the total
   available buffer length, otherwise needed_bytes is compared to the
   bytesused value. For the 'steal' queue the total available buffer
   length is always used.

   -ENOMEM is returned if the buffers could not be obtained, 0 if all
   buffers were obtained from the 'from' list, and a positive return
   value is the number of buffers that had to be stolen. */
int ivtv_queue_move(struct ivtv_stream *s, struct ivtv_queue *from, struct ivtv_queue *steal,
                    struct ivtv_queue *to, int needed_bytes)
{
        unsigned long flags;
        int rc = 0;
        int from_free = from == &s->q_free;
        int to_free = to == &s->q_free;
        int bytes_available, bytes_steal;

        spin_lock_irqsave(&s->qlock, flags);
        if (needed_bytes == 0) {
                from_free = 1;
                needed_bytes = from->length;
        }

        bytes_available = from_free ? from->length : from->bytesused;
        bytes_steal = (from_free && steal) ? steal->length : 0;

        if (bytes_available + bytes_steal < needed_bytes) {
                spin_unlock_irqrestore(&s->qlock, flags);
                return -ENOMEM;
        }
        while (bytes_available < needed_bytes) {
                struct ivtv_buffer *buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
                u16 dma_xfer_cnt = buf->dma_xfer_cnt;

                /* move buffers from the tail of the 'steal' queue to the tail of the
                   'from' queue. Always copy all the buffers with the same dma_xfer_cnt
                   value, this ensures that you do not end up with partial frame data
                   if one frame is stored in multiple buffers. */
                while (dma_xfer_cnt == buf->dma_xfer_cnt) {
                        list_move_tail(steal->list.prev, &from->list);
                        rc++;
                        steal->buffers--;
                        steal->length -= s->buf_size;
                        steal->bytesused -= buf->bytesused - buf->readpos;
                        buf->bytesused = buf->readpos = buf->b_flags = buf->dma_xfer_cnt = 0;
                        from->buffers++;
                        from->length += s->buf_size;
                        bytes_available += s->buf_size;
                        if (list_empty(&steal->list))
                                break;
                        buf = list_entry(steal->list.prev, struct ivtv_buffer, list);
                }
        }
        if (from_free) {
                u32 old_length = to->length;

                while (to->length - old_length < needed_bytes) {
                        ivtv_queue_move_buf(s, from, to, 1);
                }
        }
        else {
                u32 old_bytesused = to->bytesused;

                while (to->bytesused - old_bytesused < needed_bytes) {
                        ivtv_queue_move_buf(s, from, to, to_free);
                }
        }
        spin_unlock_irqrestore(&s->qlock, flags);
        return rc;
}
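/*
 * Illustrative sketch, an assumption made for this comment rather than a
 * quote from a real caller: a DMA-preparation step that needs
 * 'bytes_needed' bytes of empty buffer space could reserve it as below,
 * allowing full buffers to be recycled when the free queue alone cannot
 * cover the request:
 *
 *      int stolen = ivtv_queue_move(s, &s->q_free, &s->q_full,
 *                                   &s->q_predma, bytes_needed);
 *
 *      if (stolen < 0)
 *              return stolen;  // -ENOMEM: not enough buffers anywhere
 *      if (stolen > 0)
 *              ...             // 'stolen' buffers of undelivered data were dropped
 */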
void ivtv_flush_queues(struct ivtv_stream *s)
{
        ivtv_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
        ivtv_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
        ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
}
int ivtv_stream_alloc(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        int SGsize = sizeof(struct ivtv_sg_host_element) * s->buffers;
        int i;

        if (s->buffers == 0)
                return 0;

        IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
                s->dma != PCI_DMA_NONE ? "DMA " : "",
                s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);

        s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
        if (s->sg_pending == NULL) {
                IVTV_ERR("Could not allocate sg_pending for %s stream\n", s->name);
                return -ENOMEM;
        }
        s->sg_pending_size = 0;

        s->sg_processing = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
        if (s->sg_processing == NULL) {
                IVTV_ERR("Could not allocate sg_processing for %s stream\n", s->name);
                kfree(s->sg_pending);
                s->sg_pending = NULL;
                return -ENOMEM;
        }
        s->sg_processing_size = 0;

        s->sg_dma = kzalloc(sizeof(struct ivtv_sg_element),
                                        GFP_KERNEL|__GFP_NOWARN);
        if (s->sg_dma == NULL) {
                IVTV_ERR("Could not allocate sg_dma for %s stream\n", s->name);
                kfree(s->sg_pending);
                s->sg_pending = NULL;
                kfree(s->sg_processing);
                s->sg_processing = NULL;
                return -ENOMEM;
        }
        if (ivtv_might_use_dma(s)) {
                s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
                                sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
                ivtv_stream_sync_for_cpu(s);
        }

        /* allocate stream buffers. Initially all buffers are in q_free. */
        for (i = 0; i < s->buffers; i++) {
                struct ivtv_buffer *buf = kzalloc(sizeof(struct ivtv_buffer),
                                                GFP_KERNEL|__GFP_NOWARN);

                if (buf == NULL)
                        break;
                buf->buf = kmalloc(s->buf_size + 256, GFP_KERNEL|__GFP_NOWARN);
                if (buf->buf == NULL) {
                        kfree(buf);
                        break;
                }
                INIT_LIST_HEAD(&buf->list);
                if (ivtv_might_use_dma(s)) {
                        buf->dma_handle = pci_map_single(s->itv->pdev,
                                buf->buf, s->buf_size + 256, s->dma);
                        ivtv_buf_sync_for_cpu(s, buf);
                }
                ivtv_enqueue(s, buf, &s->q_free);
        }
        if (i == s->buffers)
                return 0;
        IVTV_ERR("Couldn't allocate buffers for %s stream\n", s->name);
        ivtv_stream_free(s);
        return -ENOMEM;
}
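/*
 * Illustrative lifecycle sketch (an assumption, not a quote from the stream
 * setup code): s->buffers and s->buf_size are expected to be filled in
 * before the two calls below are paired:
 *
 *      if (ivtv_stream_alloc(s))       // on success every buffer sits on s->q_free
 *              return -ENOMEM;         // a partial allocation has already been freed
 *      ...
 *      ivtv_stream_free(s);            // unmaps and releases buffers and SG arrays
 */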
void ivtv_stream_free(struct ivtv_stream *s)
{
        struct ivtv_buffer *buf;

        /* move all buffers to q_free */
        ivtv_flush_queues(s);

        /* empty q_free */
        while ((buf = ivtv_dequeue(s, &s->q_free))) {
                if (ivtv_might_use_dma(s))
                        pci_unmap_single(s->itv->pdev, buf->dma_handle,
                                s->buf_size + 256, s->dma);
                kfree(buf->buf);
                kfree(buf);
        }

        /* Free SG Array/Lists */
        if (s->sg_dma != NULL) {
                if (s->sg_handle != IVTV_DMA_UNMAPPED) {
                        pci_unmap_single(s->itv->pdev, s->sg_handle,
                                 sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
                        s->sg_handle = IVTV_DMA_UNMAPPED;
                }
                kfree(s->sg_pending);
                kfree(s->sg_processing);
                kfree(s->sg_dma);
                s->sg_pending = NULL;
                s->sg_processing = NULL;
                s->sg_dma = NULL;
                s->sg_pending_size = 0;
                s->sg_processing_size = 0;
        }
}