[tomato.git] release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/media/video/cx23885/cx23885-core.c
1 /*
2 * Driver for the Conexant CX23885 PCIe bridge
4 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 #include <linux/init.h>
23 #include <linux/list.h>
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/kmod.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
29 #include <linux/interrupt.h>
30 #include <linux/delay.h>
31 #include <asm/div64.h>
33 #include "cx23885.h"
34 #include "cimax2.h"
35 #include "cx23888-ir.h"
36 #include "cx23885-ir.h"
37 #include "cx23885-av.h"
38 #include "cx23885-input.h"
40 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
41 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
42 MODULE_LICENSE("GPL");
44 static unsigned int debug;
45 module_param(debug, int, 0644);
46 MODULE_PARM_DESC(debug, "enable debug messages");
48 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
49 module_param_array(card, int, NULL, 0444);
50 MODULE_PARM_DESC(card, "card type");
52 #define dprintk(level, fmt, arg...)\
53 do { if (debug >= level)\
54 printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
55 } while (0)
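/*
 * Usage note: "debug" and "card" are module parameters.  dprintk(level, ...)
 * only prints when the loaded "debug" value is >= level; levels in this file
 * range from 1 for setup paths up to 7 for per-interrupt traces.  For
 * example, "modprobe cx23885 debug=2 card=3,1" would enable the lower-level
 * messages and force the board type of the first two devices (the card
 * numbers here are purely illustrative).
 */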
57 static unsigned int cx23885_devcount;
59 #define NO_SYNC_LINE (-1U)
62 static struct sram_channel cx23885_sram_channels[] = {
63 [SRAM_CH01] = {
64 .name = "VID A",
65 .cmds_start = 0x10000,
66 .ctrl_start = 0x10380,
67 .cdt = 0x104c0,
68 .fifo_start = 0x40,
69 .fifo_size = 0x2800,
70 .ptr1_reg = DMA1_PTR1,
71 .ptr2_reg = DMA1_PTR2,
72 .cnt1_reg = DMA1_CNT1,
73 .cnt2_reg = DMA1_CNT2,
75 [SRAM_CH02] = {
76 .name = "ch2",
77 .cmds_start = 0x0,
78 .ctrl_start = 0x0,
79 .cdt = 0x0,
80 .fifo_start = 0x0,
81 .fifo_size = 0x0,
82 .ptr1_reg = DMA2_PTR1,
83 .ptr2_reg = DMA2_PTR2,
84 .cnt1_reg = DMA2_CNT1,
85 .cnt2_reg = DMA2_CNT2,
87 [SRAM_CH03] = {
88 .name = "TS1 B",
89 .cmds_start = 0x100A0,
90 .ctrl_start = 0x10400,
91 .cdt = 0x10580,
92 .fifo_start = 0x5000,
93 .fifo_size = 0x1000,
94 .ptr1_reg = DMA3_PTR1,
95 .ptr2_reg = DMA3_PTR2,
96 .cnt1_reg = DMA3_CNT1,
97 .cnt2_reg = DMA3_CNT2,
99 [SRAM_CH04] = {
100 .name = "ch4",
101 .cmds_start = 0x0,
102 .ctrl_start = 0x0,
103 .cdt = 0x0,
104 .fifo_start = 0x0,
105 .fifo_size = 0x0,
106 .ptr1_reg = DMA4_PTR1,
107 .ptr2_reg = DMA4_PTR2,
108 .cnt1_reg = DMA4_CNT1,
109 .cnt2_reg = DMA4_CNT2,
111 [SRAM_CH05] = {
112 .name = "ch5",
113 .cmds_start = 0x0,
114 .ctrl_start = 0x0,
115 .cdt = 0x0,
116 .fifo_start = 0x0,
117 .fifo_size = 0x0,
118 .ptr1_reg = DMA5_PTR1,
119 .ptr2_reg = DMA5_PTR2,
120 .cnt1_reg = DMA5_CNT1,
121 .cnt2_reg = DMA5_CNT2,
123 [SRAM_CH06] = {
124 .name = "TS2 C",
125 .cmds_start = 0x10140,
126 .ctrl_start = 0x10440,
127 .cdt = 0x105e0,
128 .fifo_start = 0x6000,
129 .fifo_size = 0x1000,
130 .ptr1_reg = DMA5_PTR1,
131 .ptr2_reg = DMA5_PTR2,
132 .cnt1_reg = DMA5_CNT1,
133 .cnt2_reg = DMA5_CNT2,
135 [SRAM_CH07] = {
136 .name = "ch7",
137 .cmds_start = 0x0,
138 .ctrl_start = 0x0,
139 .cdt = 0x0,
140 .fifo_start = 0x0,
141 .fifo_size = 0x0,
142 .ptr1_reg = DMA6_PTR1,
143 .ptr2_reg = DMA6_PTR2,
144 .cnt1_reg = DMA6_CNT1,
145 .cnt2_reg = DMA6_CNT2,
147 [SRAM_CH08] = {
148 .name = "ch8",
149 .cmds_start = 0x0,
150 .ctrl_start = 0x0,
151 .cdt = 0x0,
152 .fifo_start = 0x0,
153 .fifo_size = 0x0,
154 .ptr1_reg = DMA7_PTR1,
155 .ptr2_reg = DMA7_PTR2,
156 .cnt1_reg = DMA7_CNT1,
157 .cnt2_reg = DMA7_CNT2,
159 [SRAM_CH09] = {
160 .name = "ch9",
161 .cmds_start = 0x0,
162 .ctrl_start = 0x0,
163 .cdt = 0x0,
164 .fifo_start = 0x0,
165 .fifo_size = 0x0,
166 .ptr1_reg = DMA8_PTR1,
167 .ptr2_reg = DMA8_PTR2,
168 .cnt1_reg = DMA8_CNT1,
169 .cnt2_reg = DMA8_CNT2,
173 static struct sram_channel cx23887_sram_channels[] = {
174 [SRAM_CH01] = {
175 .name = "VID A",
176 .cmds_start = 0x10000,
177 .ctrl_start = 0x105b0,
178 .cdt = 0x107b0,
179 .fifo_start = 0x40,
180 .fifo_size = 0x2800,
181 .ptr1_reg = DMA1_PTR1,
182 .ptr2_reg = DMA1_PTR2,
183 .cnt1_reg = DMA1_CNT1,
184 .cnt2_reg = DMA1_CNT2,
186 [SRAM_CH02] = {
187 .name = "ch2",
188 .cmds_start = 0x0,
189 .ctrl_start = 0x0,
190 .cdt = 0x0,
191 .fifo_start = 0x0,
192 .fifo_size = 0x0,
193 .ptr1_reg = DMA2_PTR1,
194 .ptr2_reg = DMA2_PTR2,
195 .cnt1_reg = DMA2_CNT1,
196 .cnt2_reg = DMA2_CNT2,
198 [SRAM_CH03] = {
199 .name = "TS1 B",
200 .cmds_start = 0x100A0,
201 .ctrl_start = 0x10630,
202 .cdt = 0x10870,
203 .fifo_start = 0x5000,
204 .fifo_size = 0x1000,
205 .ptr1_reg = DMA3_PTR1,
206 .ptr2_reg = DMA3_PTR2,
207 .cnt1_reg = DMA3_CNT1,
208 .cnt2_reg = DMA3_CNT2,
210 [SRAM_CH04] = {
211 .name = "ch4",
212 .cmds_start = 0x0,
213 .ctrl_start = 0x0,
214 .cdt = 0x0,
215 .fifo_start = 0x0,
216 .fifo_size = 0x0,
217 .ptr1_reg = DMA4_PTR1,
218 .ptr2_reg = DMA4_PTR2,
219 .cnt1_reg = DMA4_CNT1,
220 .cnt2_reg = DMA4_CNT2,
222 [SRAM_CH05] = {
223 .name = "ch5",
224 .cmds_start = 0x0,
225 .ctrl_start = 0x0,
226 .cdt = 0x0,
227 .fifo_start = 0x0,
228 .fifo_size = 0x0,
229 .ptr1_reg = DMA5_PTR1,
230 .ptr2_reg = DMA5_PTR2,
231 .cnt1_reg = DMA5_CNT1,
232 .cnt2_reg = DMA5_CNT2,
234 [SRAM_CH06] = {
235 .name = "TS2 C",
236 .cmds_start = 0x10140,
237 .ctrl_start = 0x10670,
238 .cdt = 0x108d0,
239 .fifo_start = 0x6000,
240 .fifo_size = 0x1000,
241 .ptr1_reg = DMA5_PTR1,
242 .ptr2_reg = DMA5_PTR2,
243 .cnt1_reg = DMA5_CNT1,
244 .cnt2_reg = DMA5_CNT2,
246 [SRAM_CH07] = {
247 .name = "ch7",
248 .cmds_start = 0x0,
249 .ctrl_start = 0x0,
250 .cdt = 0x0,
251 .fifo_start = 0x0,
252 .fifo_size = 0x0,
253 .ptr1_reg = DMA6_PTR1,
254 .ptr2_reg = DMA6_PTR2,
255 .cnt1_reg = DMA6_CNT1,
256 .cnt2_reg = DMA6_CNT2,
258 [SRAM_CH08] = {
259 .name = "ch8",
260 .cmds_start = 0x0,
261 .ctrl_start = 0x0,
262 .cdt = 0x0,
263 .fifo_start = 0x0,
264 .fifo_size = 0x0,
265 .ptr1_reg = DMA7_PTR1,
266 .ptr2_reg = DMA7_PTR2,
267 .cnt1_reg = DMA7_CNT1,
268 .cnt2_reg = DMA7_CNT2,
270 [SRAM_CH09] = {
271 .name = "ch9",
272 .cmds_start = 0x0,
273 .ctrl_start = 0x0,
274 .cdt = 0x0,
275 .fifo_start = 0x0,
276 .fifo_size = 0x0,
277 .ptr1_reg = DMA8_PTR1,
278 .ptr2_reg = DMA8_PTR2,
279 .cnt1_reg = DMA8_CNT1,
280 .cnt2_reg = DMA8_CNT2,
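/*
 * Entries whose cmds_start is 0 ("ch2", "ch4", ...) are unused DMA channels:
 * cx23885_sram_channel_setup() below treats cmds_start == 0 as a request to
 * erase the channel rather than program it.  Only VID A (analog video),
 * TS1 B and TS2 C carry real instruction queues and FIFOs in these tables.
 */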
284 void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
286 unsigned long flags;
287 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
289 dev->pci_irqmask |= mask;
291 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
294 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
296 unsigned long flags;
297 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
299 dev->pci_irqmask |= mask;
300 cx_set(PCI_INT_MSK, mask);
302 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
305 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
307 u32 v;
308 unsigned long flags;
309 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
311 v = mask & dev->pci_irqmask;
312 if (v)
313 cx_set(PCI_INT_MSK, v);
315 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
318 static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
320 cx23885_irq_enable(dev, 0xffffffff);
323 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
325 unsigned long flags;
326 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
328 cx_clear(PCI_INT_MSK, mask);
330 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
333 static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
335 cx23885_irq_disable(dev, 0xffffffff);
338 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
340 unsigned long flags;
341 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
343 dev->pci_irqmask &= ~mask;
344 cx_clear(PCI_INT_MSK, mask);
346 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
349 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
351 u32 v;
352 unsigned long flags;
353 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
355 v = cx_read(PCI_INT_MSK);
357 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
358 return v;
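/*
 * dev->pci_irqmask is a software shadow of the PCI interrupt sources this
 * driver has claimed: cx23885_irq_add() only records a claim,
 * cx23885_irq_add_enable() records it and unmasks it in PCI_INT_MSK, and
 * cx23885_irq_enable() unmasks only bits that were previously claimed, so a
 * caller cannot accidentally enable a source nobody registered for.
 */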
361 static int cx23885_risc_decode(u32 risc)
363 static char *instr[16] = {
364 [RISC_SYNC >> 28] = "sync",
365 [RISC_WRITE >> 28] = "write",
366 [RISC_WRITEC >> 28] = "writec",
367 [RISC_READ >> 28] = "read",
368 [RISC_READC >> 28] = "readc",
369 [RISC_JUMP >> 28] = "jump",
370 [RISC_SKIP >> 28] = "skip",
371 [RISC_WRITERM >> 28] = "writerm",
372 [RISC_WRITECM >> 28] = "writecm",
373 [RISC_WRITECR >> 28] = "writecr",
375 static int incr[16] = {
376 [RISC_WRITE >> 28] = 3,
377 [RISC_JUMP >> 28] = 3,
378 [RISC_SKIP >> 28] = 1,
379 [RISC_SYNC >> 28] = 1,
380 [RISC_WRITERM >> 28] = 3,
381 [RISC_WRITECM >> 28] = 3,
382 [RISC_WRITECR >> 28] = 4,
384 static char *bits[] = {
385 "12", "13", "14", "resync",
386 "cnt0", "cnt1", "18", "19",
387 "20", "21", "22", "23",
388 "irq1", "irq2", "eol", "sol",
390 int i;
392 printk("0x%08x [ %s", risc,
393 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
394 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
395 if (risc & (1 << (i + 12)))
396 printk(" %s", bits[i]);
397 printk(" count=%d ]\n", risc & 0xfff);
398 return incr[risc >> 28] ? incr[risc >> 28] : 1;
401 void cx23885_wakeup(struct cx23885_tsport *port,
402 struct cx23885_dmaqueue *q, u32 count)
404 struct cx23885_dev *dev = port->dev;
405 struct cx23885_buffer *buf;
406 int bc;
408 for (bc = 0;; bc++) {
409 if (list_empty(&q->active))
410 break;
411 buf = list_entry(q->active.next,
412 struct cx23885_buffer, vb.queue);
414 /* count comes from the hw and is 16bit wide --
415 * this trick handles wrap-arounds correctly for
416 * up to 32767 buffers in flight... */
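/* For example: with buf->count == 0xfffe and a wrapped hw count of
 * 0x0002, count - buf->count is 0xffff0004 and the s16 cast yields +4,
 * so the buffer is treated as completed; with buf->count == 5 and
 * count == 2 the cast yields -3 and the loop stops here. */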
417 if ((s16) (count - buf->count) < 0)
418 break;
420 do_gettimeofday(&buf->vb.ts);
421 dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
422 count, buf->count);
423 buf->vb.state = VIDEOBUF_DONE;
424 list_del(&buf->vb.queue);
425 wake_up(&buf->vb.done);
427 if (list_empty(&q->active))
428 del_timer(&q->timeout);
429 else
430 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
431 if (bc != 1)
432 printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
433 __func__, bc);
436 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
437 struct sram_channel *ch,
438 unsigned int bpl, u32 risc)
440 unsigned int i, lines;
441 u32 cdt;
443 if (ch->cmds_start == 0) {
444 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
445 ch->name);
446 cx_write(ch->ptr1_reg, 0);
447 cx_write(ch->ptr2_reg, 0);
448 cx_write(ch->cnt2_reg, 0);
449 cx_write(ch->cnt1_reg, 0);
450 return 0;
451 } else {
452 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
453 ch->name);
456 bpl = (bpl + 7) & ~7; /* alignment */
457 cdt = ch->cdt;
458 lines = ch->fifo_size / bpl;
459 if (lines > 6)
460 lines = 6;
461 BUG_ON(lines < 2);
463 cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
464 cx_write(8 + 4, 8);
465 cx_write(8 + 8, 0);
467 /* write CDT */
468 for (i = 0; i < lines; i++) {
469 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
470 ch->fifo_start + bpl*i);
471 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
472 cx_write(cdt + 16*i + 4, 0);
473 cx_write(cdt + 16*i + 8, 0);
474 cx_write(cdt + 16*i + 12, 0);
477 /* write CMDS */
478 if (ch->jumponly)
479 cx_write(ch->cmds_start + 0, 8);
480 else
481 cx_write(ch->cmds_start + 0, risc);
482 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
483 cx_write(ch->cmds_start + 8, cdt);
484 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
485 cx_write(ch->cmds_start + 16, ch->ctrl_start);
486 if (ch->jumponly)
487 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
488 else
489 cx_write(ch->cmds_start + 20, 64 >> 2);
490 for (i = 24; i < 80; i += 4)
491 cx_write(ch->cmds_start + i, 0);
493 /* fill registers */
494 cx_write(ch->ptr1_reg, ch->fifo_start);
495 cx_write(ch->ptr2_reg, cdt);
496 cx_write(ch->cnt2_reg, (lines*16) >> 3);
497 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
499 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
500 dev->bridge,
501 ch->name,
502 bpl,
503 lines);
505 return 0;
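/*
 * Rough picture of what the writes above set up (inferred from the code, not
 * from chip documentation): the CMDS block holds the initial RISC address, a
 * pointer to the CDT and the CDT/IQ sizes; each 16-byte CDT entry points at
 * one line of the channel's FIFO; ptr1/cnt1 hold the FIFO base and the line
 * length in qwords (less one), while ptr2/cnt2 hold the CDT base and size.
 */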
508 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
509 struct sram_channel *ch)
511 static char *name[] = {
512 "init risc lo",
513 "init risc hi",
514 "cdt base",
515 "cdt size",
516 "iq base",
517 "iq size",
518 "risc pc lo",
519 "risc pc hi",
520 "iq wr ptr",
521 "iq rd ptr",
522 "cdt current",
523 "pci target lo",
524 "pci target hi",
525 "line / byte",
527 u32 risc;
528 unsigned int i, j, n;
530 printk(KERN_WARNING "%s: %s - dma channel status dump\n",
531 dev->name, ch->name);
532 for (i = 0; i < ARRAY_SIZE(name); i++)
533 printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
534 dev->name, name[i],
535 cx_read(ch->cmds_start + 4*i));
537 for (i = 0; i < 4; i++) {
538 risc = cx_read(ch->cmds_start + 4 * (i + 14));
539 printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
540 cx23885_risc_decode(risc);
542 for (i = 0; i < (64 >> 2); i += n) {
543 risc = cx_read(ch->ctrl_start + 4 * i);
544 /* No consideration for bits 63-32 */
546 printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
547 ch->ctrl_start + 4 * i, i);
548 n = cx23885_risc_decode(risc);
549 for (j = 1; j < n; j++) {
550 risc = cx_read(ch->ctrl_start + 4 * (i + j));
551 printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
552 dev->name, i+j, risc, j);
556 printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
557 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
558 printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
559 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
560 printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
561 dev->name, cx_read(ch->ptr1_reg));
562 printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
563 dev->name, cx_read(ch->ptr2_reg));
564 printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
565 dev->name, cx_read(ch->cnt1_reg));
566 printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
567 dev->name, cx_read(ch->cnt2_reg));
570 static void cx23885_risc_disasm(struct cx23885_tsport *port,
571 struct btcx_riscmem *risc)
573 struct cx23885_dev *dev = port->dev;
574 unsigned int i, j, n;
576 printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
577 dev->name, risc->cpu, (unsigned long)risc->dma);
578 for (i = 0; i < (risc->size >> 2); i += n) {
579 printk(KERN_INFO "%s: %04d: ", dev->name, i);
580 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
581 for (j = 1; j < n; j++)
582 printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
583 dev->name, i + j, risc->cpu[i + j], j);
584 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
585 break;
589 static void cx23885_shutdown(struct cx23885_dev *dev)
591 /* disable RISC controller */
592 cx_write(DEV_CNTRL2, 0);
594 /* Disable all IR activity */
595 cx_write(IR_CNTRL_REG, 0);
598 /* Disable Video A/B/C activity */
598 cx_write(VID_A_DMA_CTL, 0);
599 cx_write(VID_B_DMA_CTL, 0);
600 cx_write(VID_C_DMA_CTL, 0);
602 /* Disable Audio activity */
603 cx_write(AUD_INT_DMA_CTL, 0);
604 cx_write(AUD_EXT_DMA_CTL, 0);
606 /* Disable Serial port */
607 cx_write(UART_CTL, 0);
609 /* Disable Interrupts */
610 cx23885_irq_disable_all(dev);
611 cx_write(VID_A_INT_MSK, 0);
612 cx_write(VID_B_INT_MSK, 0);
613 cx_write(VID_C_INT_MSK, 0);
614 cx_write(AUDIO_INT_INT_MSK, 0);
615 cx_write(AUDIO_EXT_INT_MSK, 0);
619 static void cx23885_reset(struct cx23885_dev *dev)
621 dprintk(1, "%s()\n", __func__);
623 cx23885_shutdown(dev);
625 cx_write(PCI_INT_STAT, 0xffffffff);
626 cx_write(VID_A_INT_STAT, 0xffffffff);
627 cx_write(VID_B_INT_STAT, 0xffffffff);
628 cx_write(VID_C_INT_STAT, 0xffffffff);
629 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
630 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
631 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
632 cx_write(PAD_CTRL, 0x00500300);
634 mdelay(100);
636 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
637 720*4, 0);
638 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
639 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
640 188*4, 0);
641 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
642 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
643 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
644 188*4, 0);
645 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
646 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
647 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
649 cx23885_gpio_setup(dev);
653 static int cx23885_pci_quirks(struct cx23885_dev *dev)
655 dprintk(1, "%s()\n", __func__);
657 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
658 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
659 * occur on the cx23887 bridge.
661 if (dev->bridge == CX23885_BRIDGE_885)
662 cx_clear(RDR_TLCTL0, 1 << 4);
664 return 0;
667 static int get_resources(struct cx23885_dev *dev)
669 if (request_mem_region(pci_resource_start(dev->pci, 0),
670 pci_resource_len(dev->pci, 0),
671 dev->name))
672 return 0;
674 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
675 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
677 return -EBUSY;
680 static void cx23885_timeout(unsigned long data);
681 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
682 u32 reg, u32 mask, u32 value);
684 static int cx23885_init_tsport(struct cx23885_dev *dev,
685 struct cx23885_tsport *port, int portno)
687 dprintk(1, "%s(portno=%d)\n", __func__, portno);
689 /* Transport bus init dma queue - Common settings */
690 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
691 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
692 port->vld_misc_val = 0x0;
693 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
695 spin_lock_init(&port->slock);
696 port->dev = dev;
697 port->nr = portno;
699 INIT_LIST_HEAD(&port->mpegq.active);
700 INIT_LIST_HEAD(&port->mpegq.queued);
701 port->mpegq.timeout.function = cx23885_timeout;
702 port->mpegq.timeout.data = (unsigned long)port;
703 init_timer(&port->mpegq.timeout);
705 mutex_init(&port->frontends.lock);
706 INIT_LIST_HEAD(&port->frontends.felist);
707 port->frontends.active_fe_id = 0;
709 /* This should be hardcoded to allow a single frontend
710 * attachment to this tsport, keeping the -dvb.c
711 * code clean and safe.
713 if (!port->num_frontends)
714 port->num_frontends = 1;
716 switch (portno) {
717 case 1:
718 port->reg_gpcnt = VID_B_GPCNT;
719 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
720 port->reg_dma_ctl = VID_B_DMA_CTL;
721 port->reg_lngth = VID_B_LNGTH;
722 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
723 port->reg_gen_ctrl = VID_B_GEN_CTL;
724 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
725 port->reg_sop_status = VID_B_SOP_STATUS;
726 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
727 port->reg_vld_misc = VID_B_VLD_MISC;
728 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
729 port->reg_src_sel = VID_B_SRC_SEL;
730 port->reg_ts_int_msk = VID_B_INT_MSK;
731 port->reg_ts_int_stat = VID_B_INT_STAT;
732 port->sram_chno = SRAM_CH03; /* VID_B */
733 port->pci_irqmask = 0x02; /* VID_B bit1 */
734 break;
735 case 2:
736 port->reg_gpcnt = VID_C_GPCNT;
737 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
738 port->reg_dma_ctl = VID_C_DMA_CTL;
739 port->reg_lngth = VID_C_LNGTH;
740 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
741 port->reg_gen_ctrl = VID_C_GEN_CTL;
742 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
743 port->reg_sop_status = VID_C_SOP_STATUS;
744 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
745 port->reg_vld_misc = VID_C_VLD_MISC;
746 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
747 port->reg_src_sel = 0;
748 port->reg_ts_int_msk = VID_C_INT_MSK;
749 port->reg_ts_int_stat = VID_C_INT_STAT;
750 port->sram_chno = SRAM_CH06; /* VID_C */
751 port->pci_irqmask = 0x04; /* VID_C bit2 */
752 break;
753 default:
754 BUG();
757 cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
758 port->reg_dma_ctl, port->dma_ctl_val, 0x00);
760 return 0;
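/*
 * Only port numbers 1 and 2 are valid here: VID_B and VID_C are the two
 * transport-stream capable paths (SRAM_CH03 and SRAM_CH06 above), while
 * VID_A is reserved for analog video and is handled separately via
 * cx23885_video_register().
 */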
763 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
765 switch (cx_read(RDR_CFG2) & 0xff) {
766 case 0x00:
767 /* cx23885 */
768 dev->hwrevision = 0xa0;
769 break;
770 case 0x01:
771 /* CX23885-12Z */
772 dev->hwrevision = 0xa1;
773 break;
774 case 0x02:
775 /* CX23885-13Z/14Z */
776 dev->hwrevision = 0xb0;
777 break;
778 case 0x03:
779 if (dev->pci->device == 0x8880) {
780 /* CX23888-21Z/22Z */
781 dev->hwrevision = 0xc0;
782 } else {
783 /* CX23885-14Z */
784 dev->hwrevision = 0xa4;
786 break;
787 case 0x04:
788 if (dev->pci->device == 0x8880) {
789 /* CX23888-31Z */
790 dev->hwrevision = 0xd0;
791 } else {
792 /* CX23885-15Z, CX23888-31Z */
793 dev->hwrevision = 0xa5;
795 break;
796 case 0x0e:
797 /* CX23887-15Z */
798 dev->hwrevision = 0xc0;
799 break;
800 case 0x0f:
801 /* CX23887-14Z */
802 dev->hwrevision = 0xb1;
803 break;
804 default:
805 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
806 __func__, dev->hwrevision);
808 if (dev->hwrevision)
809 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
810 __func__, dev->hwrevision);
811 else
812 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
813 __func__, dev->hwrevision);
816 /* Find the first v4l2_subdev member matching the group id given in hw */
817 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
819 struct v4l2_subdev *result = NULL;
820 struct v4l2_subdev *sd;
822 spin_lock(&dev->v4l2_dev.lock);
823 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
824 if (sd->grp_id == hw) {
825 result = sd;
826 break;
829 spin_unlock(&dev->v4l2_dev.lock);
830 return result;
833 static int cx23885_dev_setup(struct cx23885_dev *dev)
835 int i;
837 spin_lock_init(&dev->pci_irqmask_lock);
839 mutex_init(&dev->lock);
840 mutex_init(&dev->gpio_lock);
842 atomic_inc(&dev->refcount);
844 dev->nr = cx23885_devcount++;
845 sprintf(dev->name, "cx23885[%d]", dev->nr);
847 /* Configure the internal memory */
848 if (dev->pci->device == 0x8880) {
849 /* Could be 887 or 888, assume a default */
850 dev->bridge = CX23885_BRIDGE_887;
851 /* Apply a sensible clock frequency for the PCIe bridge */
852 dev->clk_freq = 25000000;
853 dev->sram_channels = cx23887_sram_channels;
854 } else
855 if (dev->pci->device == 0x8852) {
856 dev->bridge = CX23885_BRIDGE_885;
857 /* Apply a sensible clock frequency for the PCIe bridge */
858 dev->clk_freq = 28000000;
859 dev->sram_channels = cx23885_sram_channels;
860 } else
861 BUG();
863 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
864 __func__, dev->bridge);
866 /* board config */
867 dev->board = UNSET;
868 if (card[dev->nr] < cx23885_bcount)
869 dev->board = card[dev->nr];
870 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
871 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
872 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
873 dev->board = cx23885_subids[i].card;
874 if (UNSET == dev->board) {
875 dev->board = CX23885_BOARD_UNKNOWN;
876 cx23885_card_list(dev);
879 /* If the user specified a clk freq override, apply it */
880 if (cx23885_boards[dev->board].clk_freq > 0)
881 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
883 dev->pci_bus = dev->pci->bus->number;
884 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
885 cx23885_irq_add(dev, 0x001f00);
886 if (cx23885_boards[dev->board].cimax > 0)
887 cx23885_irq_add(dev, 0x01800000); /* for CiMaxes */
889 /* External Master 1 Bus */
890 dev->i2c_bus[0].nr = 0;
891 dev->i2c_bus[0].dev = dev;
892 dev->i2c_bus[0].reg_stat = I2C1_STAT;
893 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
894 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
895 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
896 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
897 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
899 /* External Master 2 Bus */
900 dev->i2c_bus[1].nr = 1;
901 dev->i2c_bus[1].dev = dev;
902 dev->i2c_bus[1].reg_stat = I2C2_STAT;
903 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
904 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
905 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
906 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
907 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
909 /* Internal Master 3 Bus */
910 dev->i2c_bus[2].nr = 2;
911 dev->i2c_bus[2].dev = dev;
912 dev->i2c_bus[2].reg_stat = I2C3_STAT;
913 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
914 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
915 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
916 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
917 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
919 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
920 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
921 cx23885_init_tsport(dev, &dev->ts1, 1);
923 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
924 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
925 cx23885_init_tsport(dev, &dev->ts2, 2);
927 if (get_resources(dev) < 0) {
928 printk(KERN_ERR "CORE %s No more PCIe resources for "
929 "subsystem: %04x:%04x\n",
930 dev->name, dev->pci->subsystem_vendor,
931 dev->pci->subsystem_device);
933 cx23885_devcount--;
934 return -ENODEV;
937 /* PCIe stuff */
938 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
939 pci_resource_len(dev->pci, 0));
941 dev->bmmio = (u8 __iomem *)dev->lmmio;
943 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
944 dev->name, dev->pci->subsystem_vendor,
945 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
946 dev->board, card[dev->nr] == dev->board ?
947 "insmod option" : "autodetected");
949 cx23885_pci_quirks(dev);
951 /* Assume some sensible defaults */
952 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
953 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
954 dev->radio_type = cx23885_boards[dev->board].radio_type;
955 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
957 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x\n",
958 __func__, dev->tuner_type, dev->tuner_addr);
959 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
960 __func__, dev->radio_type, dev->radio_addr);
962 /* The cx23417 encoder has GPIOs that need to be initialised
963 * before DVB, so that demodulators and tuners are out of
964 * reset before DVB uses them.
966 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
967 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
968 cx23885_mc417_init(dev);
970 /* init hardware */
971 cx23885_reset(dev);
973 cx23885_i2c_register(&dev->i2c_bus[0]);
974 cx23885_i2c_register(&dev->i2c_bus[1]);
975 cx23885_i2c_register(&dev->i2c_bus[2]);
976 cx23885_card_setup(dev);
977 call_all(dev, core, s_power, 0);
978 cx23885_ir_init(dev);
980 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
981 if (cx23885_video_register(dev) < 0) {
982 printk(KERN_ERR "%s() Failed to register analog "
983 "video adapters on VID_A\n", __func__);
987 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
988 if (cx23885_dvb_register(&dev->ts1) < 0) {
989 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
990 __func__);
992 } else
993 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
994 if (cx23885_417_register(dev) < 0) {
995 printk(KERN_ERR
996 "%s() Failed to register 417 on VID_B\n",
997 __func__);
1001 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1002 if (cx23885_dvb_register(&dev->ts2) < 0) {
1003 printk(KERN_ERR
1004 "%s() Failed to register dvb on VID_C\n",
1005 __func__);
1007 } else
1008 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1009 if (cx23885_417_register(dev) < 0) {
1010 printk(KERN_ERR
1011 "%s() Failed to register 417 on VID_C\n",
1012 __func__);
1016 cx23885_dev_checkrevision(dev);
1018 return 0;
1021 static void cx23885_dev_unregister(struct cx23885_dev *dev)
1023 release_mem_region(pci_resource_start(dev->pci, 0),
1024 pci_resource_len(dev->pci, 0));
1026 if (!atomic_dec_and_test(&dev->refcount))
1027 return;
1029 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1030 cx23885_video_unregister(dev);
1032 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1033 cx23885_dvb_unregister(&dev->ts1);
1035 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1036 cx23885_417_unregister(dev);
1038 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1039 cx23885_dvb_unregister(&dev->ts2);
1041 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1042 cx23885_417_unregister(dev);
1044 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1045 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1046 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1048 iounmap(dev->lmmio);
1051 static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1052 unsigned int offset, u32 sync_line,
1053 unsigned int bpl, unsigned int padding,
1054 unsigned int lines)
1056 struct scatterlist *sg;
1057 unsigned int line, todo;
1059 /* sync instruction */
1060 if (sync_line != NO_SYNC_LINE)
1061 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1063 /* scan lines */
1064 sg = sglist;
1065 for (line = 0; line < lines; line++) {
1066 while (offset && offset >= sg_dma_len(sg)) {
1067 offset -= sg_dma_len(sg);
1068 sg++;
1070 if (bpl <= sg_dma_len(sg)-offset) {
1071 /* fits into current chunk */
1072 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
1073 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1074 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1075 offset += bpl;
1076 } else {
1077 /* scanline needs to be split */
1078 todo = bpl;
1079 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
1080 (sg_dma_len(sg)-offset));
1081 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1082 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1083 todo -= (sg_dma_len(sg)-offset);
1084 offset = 0;
1085 sg++;
1086 while (todo > sg_dma_len(sg)) {
1087 *(rp++) = cpu_to_le32(RISC_WRITE|
1088 sg_dma_len(sg));
1089 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1090 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1091 todo -= sg_dma_len(sg);
1092 sg++;
1094 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1095 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1096 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1097 offset += todo;
1099 offset += padding;
1102 return rp;
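/*
 * Illustrative output of the generator above (a sketch, not taken from
 * hardware documentation): a line of bpl bytes that fits inside the current
 * scatter-gather chunk becomes the three dwords
 *
 *     RISC_WRITE | RISC_SOL | RISC_EOL | bpl
 *     sg_dma_address(sg) + offset
 *     0                                (bits 63-32)
 *
 * while a line crossing a chunk boundary is split into an SOL-only write,
 * zero or more plain writes, and a final EOL-only write.
 */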
1105 int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
1106 struct scatterlist *sglist, unsigned int top_offset,
1107 unsigned int bottom_offset, unsigned int bpl,
1108 unsigned int padding, unsigned int lines)
1110 u32 instructions, fields;
1111 __le32 *rp;
1112 int rc;
1114 fields = 0;
1115 if (UNSET != top_offset)
1116 fields++;
1117 if (UNSET != bottom_offset)
1118 fields++;
1120 /* estimate risc mem: worst case is one write per page border +
1121 one write per scan line + syncs + jump (all 2 dwords). Padding
1122 can cause next bpl to start close to a page border. First DMA
1123 region may be smaller than PAGE_SIZE */
1124 /* write and jump need an extra dword */
1125 instructions = fields * (1 + ((bpl + padding) * lines)
1126 / PAGE_SIZE + lines);
1127 instructions += 2;
1128 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1129 if (rc < 0)
1130 return rc;
1132 /* write risc instructions */
1133 rp = risc->cpu;
1134 if (UNSET != top_offset)
1135 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1136 bpl, padding, lines);
1137 if (UNSET != bottom_offset)
1138 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1139 bpl, padding, lines);
1141 /* save pointer to jmp instruction address */
1142 risc->jmp = rp;
1143 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1144 return 0;
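/*
 * Worked example of the sizing above (numbers are only illustrative): for a
 * single field with bpl = 1440, padding = 0, lines = 576 and PAGE_SIZE =
 * 4096, instructions = 1 * (1 + (1440 * 576) / 4096 + 576) + 2 = 781, so
 * btcx_riscmem_alloc() is asked for 781 * 12 = 9372 bytes.
 */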
1147 static int cx23885_risc_databuffer(struct pci_dev *pci,
1148 struct btcx_riscmem *risc,
1149 struct scatterlist *sglist,
1150 unsigned int bpl,
1151 unsigned int lines)
1153 u32 instructions;
1154 __le32 *rp;
1155 int rc;
1157 /* estimate risc mem: worst case is one write per page border +
1158 one write per scan line + syncs + jump (all 2 dwords). Here
1159 there is no padding and no sync. First DMA region may be smaller
1160 than PAGE_SIZE */
1161 /* Jump and write need an extra dword */
1162 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
1163 instructions += 1;
1165 rc = btcx_riscmem_alloc(pci, risc, instructions*12);
1166 if (rc < 0)
1167 return rc;
1169 /* write risc instructions */
1170 rp = risc->cpu;
1171 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE, bpl, 0, lines);
1173 /* save pointer to jmp instruction address */
1174 risc->jmp = rp;
1175 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1176 return 0;
1179 int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
1180 u32 reg, u32 mask, u32 value)
1182 __le32 *rp;
1183 int rc;
1185 rc = btcx_riscmem_alloc(pci, risc, 4*16);
1186 if (rc < 0)
1187 return rc;
1189 /* write risc instructions */
1190 rp = risc->cpu;
1191 *(rp++) = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
1192 *(rp++) = cpu_to_le32(reg);
1193 *(rp++) = cpu_to_le32(value);
1194 *(rp++) = cpu_to_le32(mask);
1195 *(rp++) = cpu_to_le32(RISC_JUMP);
1196 *(rp++) = cpu_to_le32(risc->dma);
1197 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1198 return 0;
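/*
 * The "stopper" built above is a tiny parking program: a WRITECR that
 * applies `value` under `mask` to `reg` (the callers in this file use it to
 * clear the port's DMA enable bits) and raises IRQ2, which appears to
 * surface as the RISCI2 status the interrupt handlers use to restart the
 * queue, followed by a jump back to its own DMA address.  Queued buffers
 * initially point their trailing jump at this stopper.
 */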
1201 void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
1203 struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb);
1205 BUG_ON(in_interrupt());
1206 videobuf_waiton(&buf->vb, 0, 0);
1207 videobuf_dma_unmap(q->dev, dma);
1208 videobuf_dma_free(dma);
1209 btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
1210 buf->vb.state = VIDEOBUF_NEEDS_INIT;
1213 static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1215 struct cx23885_dev *dev = port->dev;
1217 dprintk(1, "%s() Register Dump\n", __func__);
1218 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
1219 cx_read(DEV_CNTRL2));
1220 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
1221 cx23885_irq_get_mask(dev));
1222 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
1223 cx_read(AUDIO_INT_INT_MSK));
1224 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
1225 cx_read(AUD_INT_DMA_CTL));
1226 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
1227 cx_read(AUDIO_EXT_INT_MSK));
1228 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
1229 cx_read(AUD_EXT_DMA_CTL));
1230 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
1231 cx_read(PAD_CTRL));
1232 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
1233 cx_read(ALT_PIN_OUT_SEL));
1234 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
1235 cx_read(GPIO2));
1236 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
1237 port->reg_gpcnt, cx_read(port->reg_gpcnt));
1238 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
1239 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
1240 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
1241 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
1242 if (port->reg_src_sel)
1243 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1244 port->reg_src_sel, cx_read(port->reg_src_sel));
1245 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
1246 port->reg_lngth, cx_read(port->reg_lngth));
1247 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
1248 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
1249 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
1250 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
1251 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
1252 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
1253 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
1254 port->reg_sop_status, cx_read(port->reg_sop_status));
1255 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
1256 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
1257 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
1258 port->reg_vld_misc, cx_read(port->reg_vld_misc));
1259 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
1260 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
1261 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
1262 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
1265 static int cx23885_start_dma(struct cx23885_tsport *port,
1266 struct cx23885_dmaqueue *q,
1267 struct cx23885_buffer *buf)
1269 struct cx23885_dev *dev = port->dev;
1270 u32 reg;
1272 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1273 buf->vb.width, buf->vb.height, buf->vb.field);
1275 /* Stop the fifo and risc engine for this port */
1276 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1278 /* setup fifo + format */
1279 cx23885_sram_channel_setup(dev,
1280 &dev->sram_channels[port->sram_chno],
1281 port->ts_packet_size, buf->risc.dma);
1282 if (debug > 5) {
1283 cx23885_sram_channel_dump(dev,
1284 &dev->sram_channels[port->sram_chno]);
1285 cx23885_risc_disasm(port, &buf->risc);
1288 /* write TS length to chip */
1289 cx_write(port->reg_lngth, buf->vb.width);
1291 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1292 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1293 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1294 __func__,
1295 cx23885_boards[dev->board].portb,
1296 cx23885_boards[dev->board].portc);
1297 return -EINVAL;
1300 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1301 cx23885_av_clk(dev, 0);
1303 udelay(100);
1305 /* If the port supports SRC SELECT, configure it */
1306 if (port->reg_src_sel)
1307 cx_write(port->reg_src_sel, port->src_sel_val);
1309 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1310 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1311 cx_write(port->reg_vld_misc, port->vld_misc_val);
1312 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1313 udelay(100);
1315 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1316 /* reset counter to zero */
1317 cx_write(port->reg_gpcnt_ctl, 3);
1318 q->count = 1;
1320 /* Set VIDB pins to input */
1321 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1322 reg = cx_read(PAD_CTRL);
1323 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1324 cx_write(PAD_CTRL, reg);
1327 /* Set VIDC pins to input */
1328 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1329 reg = cx_read(PAD_CTRL);
1330 reg &= ~0x4; /* Clear TS2_SOP_OE */
1331 cx_write(PAD_CTRL, reg);
1334 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1336 reg = cx_read(PAD_CTRL);
1337 reg = reg & ~0x1; /* Clear TS1_OE */
1339 /* set TS1_SOP_OE and TS1_OE_HI */
1340 reg = reg | 0xa;
1341 cx_write(PAD_CTRL, reg);
1343 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1344 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1347 switch (dev->bridge) {
1348 case CX23885_BRIDGE_885:
1349 case CX23885_BRIDGE_887:
1350 case CX23885_BRIDGE_888:
1351 /* enable irqs */
1352 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1353 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1354 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1355 cx23885_irq_add(dev, port->pci_irqmask);
1356 cx23885_irq_enable_all(dev);
1357 break;
1358 default:
1359 BUG();
1362 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1364 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1365 cx23885_av_clk(dev, 1);
1367 if (debug > 4)
1368 cx23885_tsport_reg_dump(port);
1370 return 0;
1373 static int cx23885_stop_dma(struct cx23885_tsport *port)
1375 struct cx23885_dev *dev = port->dev;
1376 u32 reg;
1378 dprintk(1, "%s()\n", __func__);
1380 /* Stop interrupts and DMA */
1381 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1382 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1384 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1386 reg = cx_read(PAD_CTRL);
1388 /* Set TS1_OE */
1389 reg = reg | 0x1;
1391 /* clear TS1_SOP_OE and TS1_OE_HI */
1392 reg = reg & ~0xa;
1393 cx_write(PAD_CTRL, reg);
1394 cx_write(port->reg_src_sel, 0);
1395 cx_write(port->reg_gen_ctrl, 8);
1399 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1400 cx23885_av_clk(dev, 0);
1402 return 0;
1405 int cx23885_restart_queue(struct cx23885_tsport *port,
1406 struct cx23885_dmaqueue *q)
1408 struct cx23885_dev *dev = port->dev;
1409 struct cx23885_buffer *buf;
1411 dprintk(5, "%s()\n", __func__);
1412 if (list_empty(&q->active)) {
1413 struct cx23885_buffer *prev;
1414 prev = NULL;
1416 dprintk(5, "%s() queue is empty\n", __func__);
1418 for (;;) {
1419 if (list_empty(&q->queued))
1420 return 0;
1421 buf = list_entry(q->queued.next, struct cx23885_buffer,
1422 vb.queue);
1423 if (NULL == prev) {
1424 list_del(&buf->vb.queue);
1425 list_add_tail(&buf->vb.queue, &q->active);
1426 cx23885_start_dma(port, q, buf);
1427 buf->vb.state = VIDEOBUF_ACTIVE;
1428 buf->count = q->count++;
1429 mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
1430 dprintk(5, "[%p/%d] restart_queue - f/active\n",
1431 buf, buf->vb.i);
1433 } else if (prev->vb.width == buf->vb.width &&
1434 prev->vb.height == buf->vb.height &&
1435 prev->fmt == buf->fmt) {
1436 list_del(&buf->vb.queue);
1437 list_add_tail(&buf->vb.queue, &q->active);
1438 buf->vb.state = VIDEOBUF_ACTIVE;
1439 buf->count = q->count++;
1440 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1441 /* 64 bit bits 63-32 */
1442 prev->risc.jmp[2] = cpu_to_le32(0);
1443 dprintk(5, "[%p/%d] restart_queue - m/active\n",
1444 buf, buf->vb.i);
1445 } else {
1446 return 0;
1448 prev = buf;
1450 return 0;
1453 buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
1454 dprintk(2, "restart_queue [%p/%d]: restart dma\n",
1455 buf, buf->vb.i);
1456 cx23885_start_dma(port, q, buf);
1457 list_for_each_entry(buf, &q->active, vb.queue)
1458 buf->count = q->count++;
1459 mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
1460 return 0;
1463 /* ------------------------------------------------------------------ */
1465 int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
1466 struct cx23885_buffer *buf, enum v4l2_field field)
1468 struct cx23885_dev *dev = port->dev;
1469 int size = port->ts_packet_size * port->ts_packet_count;
1470 int rc;
1472 dprintk(1, "%s: %p\n", __func__, buf);
1473 if (0 != buf->vb.baddr && buf->vb.bsize < size)
1474 return -EINVAL;
1476 if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
1477 buf->vb.width = port->ts_packet_size;
1478 buf->vb.height = port->ts_packet_count;
1479 buf->vb.size = size;
1480 buf->vb.field = field /*V4L2_FIELD_TOP*/;
1482 rc = videobuf_iolock(q, &buf->vb, NULL);
1483 if (0 != rc)
1484 goto fail;
1485 cx23885_risc_databuffer(dev->pci, &buf->risc,
1486 videobuf_to_dma(&buf->vb)->sglist,
1487 buf->vb.width, buf->vb.height);
1489 buf->vb.state = VIDEOBUF_PREPARED;
1490 return 0;
1492 fail:
1493 cx23885_free_buffer(q, buf);
1494 return rc;
1497 void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1499 struct cx23885_buffer *prev;
1500 struct cx23885_dev *dev = port->dev;
1501 struct cx23885_dmaqueue *cx88q = &port->mpegq;
1503 /* add jump to stopper */
1504 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
1505 buf->risc.jmp[1] = cpu_to_le32(cx88q->stopper.dma);
1506 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1508 if (list_empty(&cx88q->active)) {
1509 dprintk(1, "queue is empty - first active\n");
1510 list_add_tail(&buf->vb.queue, &cx88q->active);
1511 cx23885_start_dma(port, cx88q, buf);
1512 buf->vb.state = VIDEOBUF_ACTIVE;
1513 buf->count = cx88q->count++;
1514 mod_timer(&cx88q->timeout, jiffies + BUFFER_TIMEOUT);
1515 dprintk(1, "[%p/%d] %s - first active\n",
1516 buf, buf->vb.i, __func__);
1517 } else {
1518 dprintk(1, "queue is not empty - append to active\n");
1519 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
1520 vb.queue);
1521 list_add_tail(&buf->vb.queue, &cx88q->active);
1522 buf->vb.state = VIDEOBUF_ACTIVE;
1523 buf->count = cx88q->count++;
1524 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1525 prev->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
1526 dprintk(1, "[%p/%d] %s - append to active\n",
1527 buf, buf->vb.i, __func__);
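/* Buffers are chained by patching jumps: a freshly queued buffer ends with a
 * jump to the stopper, and appending the next buffer rewrites
 * prev->risc.jmp[1] to the new buffer's RISC program, so DMA flows from
 * buffer to buffer without CPU intervention until the queue runs dry. */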
1531 /* ----------------------------------------------------------- */
1533 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
1534 int restart)
1536 struct cx23885_dev *dev = port->dev;
1537 struct cx23885_dmaqueue *q = &port->mpegq;
1538 struct cx23885_buffer *buf;
1539 unsigned long flags;
1541 spin_lock_irqsave(&port->slock, flags);
1542 while (!list_empty(&q->active)) {
1543 buf = list_entry(q->active.next, struct cx23885_buffer,
1544 vb.queue);
1545 list_del(&buf->vb.queue);
1546 buf->vb.state = VIDEOBUF_ERROR;
1547 wake_up(&buf->vb.done);
1548 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1549 buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
1551 if (restart) {
1552 dprintk(1, "restarting queue\n");
1553 cx23885_restart_queue(port, q);
1555 spin_unlock_irqrestore(&port->slock, flags);
1558 void cx23885_cancel_buffers(struct cx23885_tsport *port)
1560 struct cx23885_dev *dev = port->dev;
1561 struct cx23885_dmaqueue *q = &port->mpegq;
1563 dprintk(1, "%s()\n", __func__);
1564 del_timer_sync(&q->timeout);
1565 cx23885_stop_dma(port);
1566 do_cancel_buffers(port, "cancel", 0);
1569 static void cx23885_timeout(unsigned long data)
1571 struct cx23885_tsport *port = (struct cx23885_tsport *)data;
1572 struct cx23885_dev *dev = port->dev;
1574 dprintk(1, "%s()\n", __func__);
1576 if (debug > 5)
1577 cx23885_sram_channel_dump(dev,
1578 &dev->sram_channels[port->sram_chno]);
1580 cx23885_stop_dma(port);
1581 do_cancel_buffers(port, "timeout", 1);
1584 int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1586 struct cx23885_tsport *port = &dev->ts1;
1587 int count = 0;
1588 int handled = 0;
1590 if (status == 0)
1591 return handled;
1593 count = cx_read(port->reg_gpcnt);
1594 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1595 status, cx_read(port->reg_ts_int_msk), count);
1597 if ((status & VID_B_MSK_BAD_PKT) ||
1598 (status & VID_B_MSK_OPC_ERR) ||
1599 (status & VID_B_MSK_VBI_OPC_ERR) ||
1600 (status & VID_B_MSK_SYNC) ||
1601 (status & VID_B_MSK_VBI_SYNC) ||
1602 (status & VID_B_MSK_OF) ||
1603 (status & VID_B_MSK_VBI_OF)) {
1604 printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
1605 "= 0x%x\n", dev->name, status);
1606 if (status & VID_B_MSK_BAD_PKT)
1607 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1608 if (status & VID_B_MSK_OPC_ERR)
1609 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1610 if (status & VID_B_MSK_VBI_OPC_ERR)
1611 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1612 if (status & VID_B_MSK_SYNC)
1613 dprintk(1, " VID_B_MSK_SYNC\n");
1614 if (status & VID_B_MSK_VBI_SYNC)
1615 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1616 if (status & VID_B_MSK_OF)
1617 dprintk(1, " VID_B_MSK_OF\n");
1618 if (status & VID_B_MSK_VBI_OF)
1619 dprintk(1, " VID_B_MSK_VBI_OF\n");
1621 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1622 cx23885_sram_channel_dump(dev,
1623 &dev->sram_channels[port->sram_chno]);
1624 cx23885_417_check_encoder(dev);
1625 } else if (status & VID_B_MSK_RISCI1) {
1626 dprintk(7, " VID_B_MSK_RISCI1\n");
1627 spin_lock(&port->slock);
1628 cx23885_wakeup(port, &port->mpegq, count);
1629 spin_unlock(&port->slock);
1630 } else if (status & VID_B_MSK_RISCI2) {
1631 dprintk(7, " VID_B_MSK_RISCI2\n");
1632 spin_lock(&port->slock);
1633 cx23885_restart_queue(port, &port->mpegq);
1634 spin_unlock(&port->slock);
1636 if (status) {
1637 cx_write(port->reg_ts_int_stat, status);
1638 handled = 1;
1641 return handled;
1644 static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1646 struct cx23885_dev *dev = port->dev;
1647 int handled = 0;
1648 u32 count;
1650 if ((status & VID_BC_MSK_OPC_ERR) ||
1651 (status & VID_BC_MSK_BAD_PKT) ||
1652 (status & VID_BC_MSK_SYNC) ||
1653 (status & VID_BC_MSK_OF)) {
1655 if (status & VID_BC_MSK_OPC_ERR)
1656 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1657 VID_BC_MSK_OPC_ERR);
1659 if (status & VID_BC_MSK_BAD_PKT)
1660 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1661 VID_BC_MSK_BAD_PKT);
1663 if (status & VID_BC_MSK_SYNC)
1664 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1665 VID_BC_MSK_SYNC);
1667 if (status & VID_BC_MSK_OF)
1668 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1669 VID_BC_MSK_OF);
1671 printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
1673 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1674 cx23885_sram_channel_dump(dev,
1675 &dev->sram_channels[port->sram_chno]);
1677 } else if (status & VID_BC_MSK_RISCI1) {
1679 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1681 spin_lock(&port->slock);
1682 count = cx_read(port->reg_gpcnt);
1683 cx23885_wakeup(port, &port->mpegq, count);
1684 spin_unlock(&port->slock);
1686 } else if (status & VID_BC_MSK_RISCI2) {
1688 dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);
1690 spin_lock(&port->slock);
1691 cx23885_restart_queue(port, &port->mpegq);
1692 spin_unlock(&port->slock);
1695 if (status) {
1696 cx_write(port->reg_ts_int_stat, status);
1697 handled = 1;
1700 return handled;
1703 static irqreturn_t cx23885_irq(int irq, void *dev_id)
1705 struct cx23885_dev *dev = dev_id;
1706 struct cx23885_tsport *ts1 = &dev->ts1;
1707 struct cx23885_tsport *ts2 = &dev->ts2;
1708 u32 pci_status, pci_mask;
1709 u32 vida_status, vida_mask;
1710 u32 ts1_status, ts1_mask;
1711 u32 ts2_status, ts2_mask;
1712 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1713 bool subdev_handled;
1715 pci_status = cx_read(PCI_INT_STAT);
1716 pci_mask = cx23885_irq_get_mask(dev);
1717 vida_status = cx_read(VID_A_INT_STAT);
1718 vida_mask = cx_read(VID_A_INT_MSK);
1719 ts1_status = cx_read(VID_B_INT_STAT);
1720 ts1_mask = cx_read(VID_B_INT_MSK);
1721 ts2_status = cx_read(VID_C_INT_STAT);
1722 ts2_mask = cx_read(VID_C_INT_MSK);
1724 if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
1725 goto out;
1727 vida_count = cx_read(VID_A_GPCNT);
1728 ts1_count = cx_read(ts1->reg_gpcnt);
1729 ts2_count = cx_read(ts2->reg_gpcnt);
1730 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1731 pci_status, pci_mask);
1732 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1733 vida_status, vida_mask, vida_count);
1734 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1735 ts1_status, ts1_mask, ts1_count);
1736 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1737 ts2_status, ts2_mask, ts2_count);
1739 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1740 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1741 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1742 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1743 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
1744 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
1746 if (pci_status & PCI_MSK_RISC_RD)
1747 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1748 PCI_MSK_RISC_RD);
1750 if (pci_status & PCI_MSK_RISC_WR)
1751 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1752 PCI_MSK_RISC_WR);
1754 if (pci_status & PCI_MSK_AL_RD)
1755 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1756 PCI_MSK_AL_RD);
1758 if (pci_status & PCI_MSK_AL_WR)
1759 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1760 PCI_MSK_AL_WR);
1762 if (pci_status & PCI_MSK_APB_DMA)
1763 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1764 PCI_MSK_APB_DMA);
1766 if (pci_status & PCI_MSK_VID_C)
1767 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1768 PCI_MSK_VID_C);
1770 if (pci_status & PCI_MSK_VID_B)
1771 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1772 PCI_MSK_VID_B);
1774 if (pci_status & PCI_MSK_VID_A)
1775 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1776 PCI_MSK_VID_A);
1778 if (pci_status & PCI_MSK_AUD_INT)
1779 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1780 PCI_MSK_AUD_INT);
1782 if (pci_status & PCI_MSK_AUD_EXT)
1783 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1784 PCI_MSK_AUD_EXT);
1786 if (pci_status & PCI_MSK_GPIO0)
1787 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1788 PCI_MSK_GPIO0);
1790 if (pci_status & PCI_MSK_GPIO1)
1791 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1792 PCI_MSK_GPIO1);
1794 if (pci_status & PCI_MSK_AV_CORE)
1795 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1796 PCI_MSK_AV_CORE);
1798 if (pci_status & PCI_MSK_IR)
1799 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
1800 PCI_MSK_IR);
1803 if (cx23885_boards[dev->board].cimax > 0 &&
1804 ((pci_status & PCI_MSK_GPIO0) ||
1805 (pci_status & PCI_MSK_GPIO1))) {
1807 if (cx23885_boards[dev->board].cimax > 0)
1808 handled += netup_ci_slot_status(dev, pci_status);
1812 if (ts1_status) {
1813 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1814 handled += cx23885_irq_ts(ts1, ts1_status);
1815 else
1816 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1817 handled += cx23885_irq_417(dev, ts1_status);
1820 if (ts2_status) {
1821 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1822 handled += cx23885_irq_ts(ts2, ts2_status);
1823 else
1824 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1825 handled += cx23885_irq_417(dev, ts2_status);
1828 if (vida_status)
1829 handled += cx23885_video_irq(dev, vida_status);
1831 if (pci_status & PCI_MSK_IR) {
1832 subdev_handled = false;
1833 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
1834 pci_status, &subdev_handled);
1835 if (subdev_handled)
1836 handled++;
1839 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1840 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
1841 if (!schedule_work(&dev->cx25840_work))
1842 printk(KERN_ERR "%s: failed to set up deferred work for"
1843 " AV Core/IR interrupt. Interrupt is disabled"
1844 " and won't be re-enabled\n", dev->name);
1845 handled++;
1848 if (handled)
1849 cx_write(PCI_INT_STAT, pci_status);
1850 out:
1851 return IRQ_RETVAL(handled);
1854 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1855 unsigned int notification, void *arg)
1857 struct cx23885_dev *dev;
1859 if (sd == NULL)
1860 return;
1862 dev = to_cx23885(sd->v4l2_dev);
1864 switch (notification) {
1865 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1866 if (sd == dev->sd_ir)
1867 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1868 break;
1869 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1870 if (sd == dev->sd_ir)
1871 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1872 break;
1876 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1878 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1879 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1880 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1881 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1884 static inline int encoder_on_portb(struct cx23885_dev *dev)
1886 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1889 static inline int encoder_on_portc(struct cx23885_dev *dev)
1891 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1894 /* Mask represents 32 different GPIOs; GPIOs are split into multiple
1895 * registers depending on the board configuration (and whether the
1896 * 417 encoder (with its own GPIOs) is present). Each GPIO bit will
1897 * be pushed into the correct hardware register, regardless of the
1898 * physical location. Certain registers are shared so we sanity check
1899 * and report errors if we think we're tampering with a GPIO that might
1900 * be assigned to the encoder (and used for the host bus).
1902 * GPIO 2 thru 0 - On the cx23885 bridge
1903 * GPIO 18 thru 3 - On the cx23417 host bus interface
1904 * GPIO 23 thru 19 - On the cx25840 a/v core
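/* Example of the mapping (illustrative values): requesting bit 5 in `mask`
 * falls in the cx23417 host bus range, so cx23885_gpio_set() writes
 * (mask & 0x0007fff8) >> 3, i.e. bit 2 of MC417_RWD, while bits 2..0 of the
 * mask go straight to GP0_IO on the bridge. */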
1906 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1908 if (mask & 0x7)
1909 cx_set(GP0_IO, mask & 0x7);
1911 if (mask & 0x0007fff8) {
1912 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1913 printk(KERN_ERR
1914 "%s: Setting GPIO on encoder ports\n",
1915 dev->name);
1916 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1919 /* TODO: 23-19 */
1920 if (mask & 0x00f80000)
1921 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1924 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1926 if (mask & 0x00000007)
1927 cx_clear(GP0_IO, mask & 0x7);
1929 if (mask & 0x0007fff8) {
1930 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1931 printk(KERN_ERR
1932 "%s: Clearing GPIO on encoder ports\n",
1933 dev->name);
1934 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
1937 /* TODO: 23-19 */
1938 if (mask & 0x00f80000)
1939 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1942 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1944 if (mask & 0x00000007)
1945 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1947 if (mask & 0x0007fff8) {
1948 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1949 printk(KERN_ERR
1950 "%s: Reading GPIO on encoder ports\n",
1951 dev->name);
1952 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
1955 /* TODO: 23-19 */
1956 if (mask & 0x00f80000)
1957 printk(KERN_INFO "%s: Unsupported\n", dev->name);
1959 return 0;
1962 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1964 if ((mask & 0x00000007) && asoutput)
1965 cx_set(GP0_IO, (mask & 0x7) << 16);
1966 else if ((mask & 0x00000007) && !asoutput)
1967 cx_clear(GP0_IO, (mask & 0x7) << 16);
1969 if (mask & 0x0007fff8) {
1970 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1971 printk(KERN_ERR
1972 "%s: Enabling GPIO on encoder ports\n",
1973 dev->name);
1976 /* MC417_OEN is active low for output, write 1 for an input */
1977 if ((mask & 0x0007fff8) && asoutput)
1978 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1980 else if ((mask & 0x0007fff8) && !asoutput)
1981 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
1983 /* TODO: 23-19 */
1986 static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
1987 const struct pci_device_id *pci_id)
1989 struct cx23885_dev *dev;
1990 int err;
1992 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1993 if (NULL == dev)
1994 return -ENOMEM;
1996 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1997 if (err < 0)
1998 goto fail_free;
2000 /* Prepare to handle notifications from subdevices */
2001 cx23885_v4l2_dev_notify_init(dev);
2003 /* pci init */
2004 dev->pci = pci_dev;
2005 if (pci_enable_device(pci_dev)) {
2006 err = -EIO;
2007 goto fail_unreg;
2010 if (cx23885_dev_setup(dev) < 0) {
2011 err = -EINVAL;
2012 goto fail_unreg;
2015 /* print pci info */
2016 pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
2017 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
2018 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
2019 "latency: %d, mmio: 0x%llx\n", dev->name,
2020 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2021 dev->pci_lat,
2022 (unsigned long long)pci_resource_start(pci_dev, 0));
2024 pci_set_master(pci_dev);
2025 if (!pci_dma_supported(pci_dev, 0xffffffff)) {
2026 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2027 err = -EIO;
2028 goto fail_irq;
2031 if (!pci_enable_msi(pci_dev))
2032 err = request_irq(pci_dev->irq, cx23885_irq,
2033 IRQF_DISABLED, dev->name, dev);
2034 else
2035 err = request_irq(pci_dev->irq, cx23885_irq,
2036 IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
2037 if (err < 0) {
2038 printk(KERN_ERR "%s: can't get IRQ %d\n",
2039 dev->name, pci_dev->irq);
2040 goto fail_irq;
2043 switch (dev->board) {
2044 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2045 cx23885_irq_add_enable(dev, 0x01800000); /* for NetUP */
2046 break;
2050 * The CX2388[58] IR controller can start firing interrupts when
2051 * enabled, so these have to take place after the cx23885_irq() handler
2052 * is hooked up by the call to request_irq() above.
2054 cx23885_ir_pci_int_enable(dev);
2055 cx23885_input_init(dev);
2057 return 0;
2059 fail_irq:
2060 cx23885_dev_unregister(dev);
2061 fail_unreg:
2062 v4l2_device_unregister(&dev->v4l2_dev);
2063 fail_free:
2064 kfree(dev);
2065 return err;
2068 static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
2070 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2071 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
2073 cx23885_input_fini(dev);
2074 cx23885_ir_fini(dev);
2076 cx23885_shutdown(dev);
2078 pci_disable_device(pci_dev);
2080 /* unregister stuff */
2081 free_irq(pci_dev->irq, dev);
2082 pci_disable_msi(pci_dev);
2084 cx23885_dev_unregister(dev);
2085 v4l2_device_unregister(v4l2_dev);
2086 kfree(dev);
2089 static struct pci_device_id cx23885_pci_tbl[] = {
2091 /* CX23885 */
2092 .vendor = 0x14f1,
2093 .device = 0x8852,
2094 .subvendor = PCI_ANY_ID,
2095 .subdevice = PCI_ANY_ID,
2096 }, {
2097 /* CX23887 Rev 2 */
2098 .vendor = 0x14f1,
2099 .device = 0x8880,
2100 .subvendor = PCI_ANY_ID,
2101 .subdevice = PCI_ANY_ID,
2102 }, {
2103 /* --- end of list --- */
2106 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2108 static struct pci_driver cx23885_pci_driver = {
2109 .name = "cx23885",
2110 .id_table = cx23885_pci_tbl,
2111 .probe = cx23885_initdev,
2112 .remove = __devexit_p(cx23885_finidev),
2113 /* TODO */
2114 .suspend = NULL,
2115 .resume = NULL,
2118 static int __init cx23885_init(void)
2120 printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
2121 (CX23885_VERSION_CODE >> 16) & 0xff,
2122 (CX23885_VERSION_CODE >> 8) & 0xff,
2123 CX23885_VERSION_CODE & 0xff);
2124 #ifdef SNAPSHOT
2125 printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
2126 SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
2127 #endif
2128 return pci_register_driver(&cx23885_pci_driver);
2131 static void __exit cx23885_fini(void)
2133 pci_unregister_driver(&cx23885_pci_driver);
2136 module_init(cx23885_init);
2137 module_exit(cx23885_fini);
2139 /* ----------------------------------------------------------- */