drivers/media/dvb/pt1/pt1.c needs vmalloc.h
/*
 * driver for Earthsoft PT1
 *
 * Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
 *
 * based on pt1dvr - http://pt1dvr.sourceforge.jp/
 *	by Tomoaki Ishikawa <tomy@users.sourceforge.jp>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "dvbdev.h"
#include "dvb_demux.h"
#include "dmxdev.h"
#include "dvb_net.h"
#include "dvb_frontend.h"

#include "va1j5jf8007t.h"
#include "va1j5jf8007s.h"
#define DRIVER_NAME "earth-pt1"

#define PT1_PAGE_SHIFT 12
#define PT1_PAGE_SIZE (1 << PT1_PAGE_SHIFT)
#define PT1_NR_UPACKETS 1024
#define PT1_NR_BUFS 511
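
/*
 * DMA layout used below: the hardware walks a circular list of 4 KiB
 * "table" pages.  Each table page holds the PFN of the next table page
 * plus the PFNs of PT1_NR_BUFS buffer pages, and each buffer page is
 * filled by the hardware with PT1_NR_UPACKETS 32-bit micro-packets.
 */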
struct pt1_buffer_page {
	__le32 upackets[PT1_NR_UPACKETS];
};

struct pt1_table_page {
	__le32 next_pfn;
	__le32 buf_pfns[PT1_NR_BUFS];
};

struct pt1_buffer {
	struct pt1_buffer_page *page;
	dma_addr_t addr;
};

struct pt1_table {
	struct pt1_table_page *page;
	dma_addr_t addr;
	struct pt1_buffer bufs[PT1_NR_BUFS];
};
#define PT1_NR_ADAPS 4

struct pt1_adapter;

struct pt1 {
	struct pci_dev *pdev;
	void __iomem *regs;
	struct i2c_adapter i2c_adap;
	int i2c_running;
	struct pt1_adapter *adaps[PT1_NR_ADAPS];
	struct pt1_table *tables;
	struct task_struct *kthread;
};
struct pt1_adapter {
	struct pt1 *pt1;
	int index;

	u8 *buf;
	int upacket_count;
	int packet_count;

	struct dvb_adapter adap;
	struct dvb_demux demux;
	int users;
	struct dmxdev dmxdev;
	struct dvb_net net;
	struct dvb_frontend *fe;
	int (*orig_set_voltage)(struct dvb_frontend *fe,
				fe_sec_voltage_t voltage);
};
#define pt1_printk(level, pt1, format, arg...)	\
	dev_printk(level, &(pt1)->pdev->dev, format, ##arg)
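
/*
 * Board registers are 32 bits wide; "reg" below is a register index,
 * not a byte offset.
 */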
static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
	writel(data, pt1->regs + reg * 4);
}

static u32 pt1_read_reg(struct pt1 *pt1, int reg)
{
	return readl(pt1->regs + reg * 4);
}

static int pt1_nr_tables = 64;
module_param_named(nr_tables, pt1_nr_tables, int, 0);

static void pt1_increment_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000020);
}

static void pt1_init_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000010);
}

static void pt1_register_tables(struct pt1 *pt1, u32 first_pfn)
{
	pt1_write_reg(pt1, 5, first_pfn);
	pt1_write_reg(pt1, 0, 0x0c000040);
}

static void pt1_unregister_tables(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x08080000);
}

static int pt1_sync(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 57; i++) {
		if (pt1_read_reg(pt1, 0) & 0x20000000)
			return 0;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	pt1_printk(KERN_ERR, pt1, "could not sync\n");
	return -EIO;
}
static u64 pt1_identify(struct pt1 *pt1)
{
	int i;
	u64 id;
	id = 0;
	for (i = 0; i < 57; i++) {
		id |= (u64)(pt1_read_reg(pt1, 0) >> 30 & 1) << i;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	return id;
}

static int pt1_unlock(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x00000008);
	for (i = 0; i < 3; i++) {
		if (pt1_read_reg(pt1, 0) & 0x80000000)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not unlock\n");
	return -EIO;
}

static int pt1_reset_pci(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x01010000);
	pt1_write_reg(pt1, 0, 0x01000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000001)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not reset PCI\n");
	return -EIO;
}

static int pt1_reset_ram(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x02020000);
	pt1_write_reg(pt1, 0, 0x02000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000002)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not reset RAM\n");
	return -EIO;
}
static int pt1_do_enable_ram(struct pt1 *pt1)
{
	int i, j;
	u32 status;
	status = pt1_read_reg(pt1, 0) & 0x00000004;
	pt1_write_reg(pt1, 0, 0x00000002);
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 1024; j++) {
			if ((pt1_read_reg(pt1, 0) & 0x00000004) != status)
				return 0;
		}
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not enable RAM\n");
	return -EIO;
}
static int pt1_enable_ram(struct pt1 *pt1)
{
	int i, ret;
	schedule_timeout_uninterruptible((HZ + 999) / 1000);
	for (i = 0; i < 10; i++) {
		ret = pt1_do_enable_ram(pt1);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void pt1_disable_ram(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x0b0b0000);
}
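
/*
 * Register 2 appears to pair a per-stream write-enable bit (index + 8)
 * with the new on/off value (bit index), so one stream can be toggled
 * without disturbing the others.
 */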
static void pt1_set_stream(struct pt1 *pt1, int index, int enabled)
{
	pt1_write_reg(pt1, 2, 1 << (index + 8) | enabled << index);
}

static void pt1_init_streams(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_set_stream(pt1, i, 0);
}
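
/*
 * Each 32-bit micro-packet carries up to three TS bytes: bits 31-29 hold
 * the stream index plus one, bit 25 flags the start of a TS packet and
 * bits 23-0 hold the payload.  63 micro-packets rebuild one 188-byte TS
 * packet (the third byte of the last one is unused), and the per-adapter
 * page buffer is handed to the demux every 21 completed packets.
 */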
static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
{
	u32 upacket;
	int i;
	int index;
	struct pt1_adapter *adap;
	int offset;
	u8 *buf;

	if (!page->upackets[PT1_NR_UPACKETS - 1])
		return 0;

	for (i = 0; i < PT1_NR_UPACKETS; i++) {
		upacket = le32_to_cpu(page->upackets[i]);
		index = (upacket >> 29) - 1;
		if (index < 0 || index >= PT1_NR_ADAPS)
			continue;

		adap = pt1->adaps[index];
		if (upacket >> 25 & 1)
			adap->upacket_count = 0;
		else if (!adap->upacket_count)
			continue;

		buf = adap->buf;
		offset = adap->packet_count * 188 + adap->upacket_count * 3;
		buf[offset] = upacket >> 16;
		buf[offset + 1] = upacket >> 8;
		if (adap->upacket_count != 62)
			buf[offset + 2] = upacket;

		if (++adap->upacket_count >= 63) {
			adap->upacket_count = 0;
			if (++adap->packet_count >= 21) {
				dvb_dmx_swfilter_packets(&adap->demux, buf, 21);
				adap->packet_count = 0;
			}
		}
	}

	page->upackets[PT1_NR_UPACKETS - 1] = 0;
	return 1;
}
static int pt1_thread(void *data)
{
	struct pt1 *pt1;
	int table_index;
	int buf_index;
	struct pt1_buffer_page *page;

	pt1 = data;
	set_freezable();

	table_index = 0;
	buf_index = 0;

	while (!kthread_should_stop()) {
		try_to_freeze();

		page = pt1->tables[table_index].bufs[buf_index].page;
		if (!pt1_filter(pt1, page)) {
			schedule_timeout_interruptible((HZ + 999) / 1000);
			continue;
		}

		if (++buf_index >= PT1_NR_BUFS) {
			pt1_increment_table_count(pt1);
			buf_index = 0;
			if (++table_index >= pt1_nr_tables)
				table_index = 0;
		}
	}

	return 0;
}
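
/*
 * Pages handed to the hardware are 4 KiB, page-aligned, DMA-coherent
 * allocations whose page frame numbers must fit in 32 bits; the BUG_ONs
 * below check both assumptions.
 */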
static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
{
	dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
}

static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
{
	void *page;
	dma_addr_t addr;

	page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
				  GFP_KERNEL);
	if (page == NULL)
		return NULL;

	BUG_ON(addr & (PT1_PAGE_SIZE - 1));
	BUG_ON(addr >> PT1_PAGE_SHIFT >> 31 >> 1);

	*addrp = addr;
	*pfnp = addr >> PT1_PAGE_SHIFT;
	return page;
}
static void pt1_cleanup_buffer(struct pt1 *pt1, struct pt1_buffer *buf)
{
	pt1_free_page(pt1, buf->page, buf->addr);
}

static int
pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
{
	struct pt1_buffer_page *page;
	dma_addr_t addr;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	page->upackets[PT1_NR_UPACKETS - 1] = 0;

	buf->page = page;
	buf->addr = addr;
	return 0;
}
static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
{
	int i;

	for (i = 0; i < PT1_NR_BUFS; i++)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, table->page, table->addr);
}

static int
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
{
	struct pt1_table_page *page;
	dma_addr_t addr;
	int i, ret;
	u32 buf_pfn;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	for (i = 0; i < PT1_NR_BUFS; i++) {
		ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
		if (ret < 0)
			goto err;

		page->buf_pfns[i] = cpu_to_le32(buf_pfn);
	}

	pt1_increment_table_count(pt1);
	table->page = page;
	table->addr = addr;
	return 0;

err:
	while (i--)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, page, addr);
	return ret;
}
static void pt1_cleanup_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i;

	tables = pt1->tables;
	pt1_unregister_tables(pt1);

	for (i = 0; i < pt1_nr_tables; i++)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
}

static int pt1_init_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i, ret;
	u32 first_pfn, pfn;

	tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
	if (tables == NULL)
		return -ENOMEM;

	pt1_init_table_count(pt1);

	i = 0;
	if (pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[0], &first_pfn);
		if (ret)
			goto err;
		i++;
	}

	while (i < pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[i], &pfn);
		if (ret)
			goto err;
		tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
		i++;
	}

	tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);

	pt1_register_tables(pt1, first_pfn);
	pt1->tables = tables;
	return 0;

err:
	while (i--)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
	return ret;
}
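
/*
 * Feeds are reference counted per adapter: the hardware stream is enabled
 * when the first feed starts and disabled again when the last one stops.
 */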
static int pt1_start_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!adap->users++)
		pt1_set_stream(adap->pt1, adap->index, 1);
	return 0;
}

static int pt1_stop_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!--adap->users)
		pt1_set_stream(adap->pt1, adap->index, 0);
	return 0;
}
static void
pt1_set_power(struct pt1 *pt1, int power, int lnb, int reset)
{
	pt1_write_reg(pt1, 1, power | lnb << 1 | !reset << 3);
}

static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
	struct pt1_adapter *adap;
	int lnb;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);

	switch (voltage) {
	case SEC_VOLTAGE_13: /* actually 11V */
		lnb = 2;
		break;
	case SEC_VOLTAGE_18: /* actually 15V */
		lnb = 3;
		break;
	case SEC_VOLTAGE_OFF:
		lnb = 0;
		break;
	default:
		return -EINVAL;
	}

	pt1_set_power(adap->pt1, 1, lnb, 0);

	if (adap->orig_set_voltage)
		return adap->orig_set_voltage(fe, voltage);
	else
		return 0;
}
static void pt1_free_adapter(struct pt1_adapter *adap)
{
	dvb_unregister_frontend(adap->fe);
	dvb_net_release(&adap->net);
	adap->demux.dmx.close(&adap->demux.dmx);
	dvb_dmxdev_release(&adap->dmxdev);
	dvb_dmx_release(&adap->demux);
	dvb_unregister_adapter(&adap->adap);
	free_page((unsigned long)adap->buf);
	kfree(adap);
}

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
static struct pt1_adapter *
pt1_alloc_adapter(struct pt1 *pt1, struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;
	void *buf;
	struct dvb_adapter *dvb_adap;
	struct dvb_demux *demux;
	struct dmxdev *dmxdev;
	int ret;

	adap = kzalloc(sizeof(struct pt1_adapter), GFP_KERNEL);
	if (!adap) {
		ret = -ENOMEM;
		goto err;
	}

	adap->pt1 = pt1;

	adap->orig_set_voltage = fe->ops.set_voltage;
	fe->ops.set_voltage = pt1_set_voltage;

	buf = (u8 *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	adap->buf = buf;
	adap->upacket_count = 0;
	adap->packet_count = 0;

	dvb_adap = &adap->adap;
	dvb_adap->priv = adap;
	ret = dvb_register_adapter(dvb_adap, DRIVER_NAME, THIS_MODULE,
				   &pt1->pdev->dev, adapter_nr);
	if (ret < 0)
		goto err_free_page;

	demux = &adap->demux;
	demux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
	demux->priv = adap;
	demux->feednum = 256;
	demux->filternum = 256;
	demux->start_feed = pt1_start_feed;
	demux->stop_feed = pt1_stop_feed;
	demux->write_to_decoder = NULL;
	ret = dvb_dmx_init(demux);
	if (ret < 0)
		goto err_unregister_adapter;

	dmxdev = &adap->dmxdev;
	dmxdev->filternum = 256;
	dmxdev->demux = &demux->dmx;
	dmxdev->capabilities = 0;
	ret = dvb_dmxdev_init(dmxdev, dvb_adap);
	if (ret < 0)
		goto err_dmx_release;

	dvb_net_init(dvb_adap, &adap->net, &demux->dmx);

	ret = dvb_register_frontend(dvb_adap, fe);
	if (ret < 0)
		goto err_net_release;
	adap->fe = fe;

	return adap;

err_net_release:
	dvb_net_release(&adap->net);
	adap->demux.dmx.close(&adap->demux.dmx);
	dvb_dmxdev_release(&adap->dmxdev);
err_dmx_release:
	dvb_dmx_release(demux);
err_unregister_adapter:
	dvb_unregister_adapter(dvb_adap);
err_free_page:
	free_page((unsigned long)buf);
err_kfree:
	kfree(adap);
err:
	return ERR_PTR(ret);
}
static void pt1_cleanup_adapters(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_free_adapter(pt1->adaps[i]);
}
struct pt1_config {
	struct va1j5jf8007s_config va1j5jf8007s_config;
	struct va1j5jf8007t_config va1j5jf8007t_config;
};

static const struct pt1_config pt1_configs[2] = {
	{
		{ .demod_address = 0x1b },
		{ .demod_address = 0x1a },
	}, {
		{ .demod_address = 0x19 },
		{ .demod_address = 0x18 },
	},
};
static int pt1_init_adapters(struct pt1 *pt1)
{
	int i, j;
	struct i2c_adapter *i2c_adap;
	const struct pt1_config *config;
	struct dvb_frontend *fe[4];
	struct pt1_adapter *adap;
	int ret;

	i = 0;
	j = 0;

	i2c_adap = &pt1->i2c_adap;
	do {
		config = &pt1_configs[i / 2];

		fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV; /* This does not sound nice... */
			goto err;
		}
		i++;

		fe[i] = va1j5jf8007t_attach(&config->va1j5jf8007t_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV;
			goto err;
		}
		i++;

		ret = va1j5jf8007s_prepare(fe[i - 2]);
		if (ret < 0)
			goto err;

		ret = va1j5jf8007t_prepare(fe[i - 1]);
		if (ret < 0)
			goto err;

	} while (i < 4);

	do {
		adap = pt1_alloc_adapter(pt1, fe[j]);
		if (IS_ERR(adap)) {
			/* propagate the error instead of returning 0 */
			ret = PTR_ERR(adap);
			goto err;
		}
		adap->index = j;
		pt1->adaps[j] = adap;
	} while (++j < 4);

	return 0;

err:
	while (i-- > j)
		fe[i]->ops.release(fe[i]);

	while (j--)
		pt1_free_adapter(pt1->adaps[j]);

	return ret;
}
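
/*
 * I2C access: the hardware appears to execute a small program of bus
 * operations held in instruction slots.  Each pt1_i2c_emit() writes one
 * instruction (its own slot number, the SCL/SDA levels to drive and the
 * slot to execute next) to register 4; pt1_i2c_end() then kicks the
 * engine via register 0 and polls for completion.
 */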
static void pt1_i2c_emit(struct pt1 *pt1, int addr, int busy, int read_enable,
			 int clock, int data, int next_addr)
{
	pt1_write_reg(pt1, 4, addr << 18 | busy << 13 | read_enable << 12 |
		      !clock << 11 | !data << 10 | next_addr);
}

static void pt1_i2c_write_bit(struct pt1 *pt1, int addr, int *addrp, int data)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 0, data, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, data, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, data, addr + 3);
	*addrp = addr + 3;
}

static void pt1_i2c_read_bit(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 0, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 1, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 1, 1, 1, addr + 3);
	pt1_i2c_emit(pt1, addr + 3, 1, 0, 0, 1, addr + 4);
	*addrp = addr + 4;
}
static void pt1_i2c_write_byte(struct pt1 *pt1, int addr, int *addrp, int data)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_write_bit(pt1, addr, &addr, data >> (7 - i) & 1);
	pt1_i2c_write_bit(pt1, addr, &addr, 1);
	*addrp = addr;
}

static void pt1_i2c_read_byte(struct pt1 *pt1, int addr, int *addrp, int last)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_read_bit(pt1, addr, &addr);
	pt1_i2c_write_bit(pt1, addr, &addr, last);
	*addrp = addr;
}

static void pt1_i2c_prepare(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, 0, addr + 3);
	*addrp = addr + 3;
}
static void
pt1_i2c_write_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_write_byte(pt1, addr, &addr, msg->buf[i]);
	*addrp = addr;
}

static void
pt1_i2c_read_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1 | 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_read_byte(pt1, addr, &addr, i == msg->len - 1);
	*addrp = addr;
}
static int pt1_i2c_end(struct pt1 *pt1, int addr)
{
	pt1_i2c_emit(pt1, addr, 1, 0, 0, 0, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 1, 1, 0);

	pt1_write_reg(pt1, 0, 0x00000004);
	do {
		if (signal_pending(current))
			return -EINTR;
		schedule_timeout_interruptible((HZ + 999) / 1000);
	} while (pt1_read_reg(pt1, 0) & 0x00000080);
	return 0;
}

static void pt1_i2c_begin(struct pt1 *pt1, int *addrp)
{
	int addr;
	addr = 0;

	pt1_i2c_emit(pt1, addr, 0, 0, 1, 1, addr /* itself */);
	addr = addr + 1;

	if (!pt1->i2c_running) {
		pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
		pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
		addr = addr + 2;
		pt1->i2c_running = 1;
	}
	*addrp = addr;
}
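
/*
 * Only plain writes and write-then-read transactions are supported, and a
 * read is limited to four bytes because the result comes back packed in
 * the single 32-bit register 2.
 */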
static int pt1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct pt1 *pt1;
	int i;
	struct i2c_msg *msg, *next_msg;
	int addr, ret;
	u16 len;
	u32 word;

	pt1 = i2c_get_adapdata(adap);

	for (i = 0; i < num; i++) {
		msg = &msgs[i];
		if (msg->flags & I2C_M_RD)
			return -ENOTSUPP;

		if (i + 1 < num)
			next_msg = &msgs[i + 1];
		else
			next_msg = NULL;

		if (next_msg && next_msg->flags & I2C_M_RD) {
			i++;

			len = next_msg->len;
			if (len > 4)
				return -ENOTSUPP;

			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			pt1_i2c_read_msg(pt1, addr, &addr, next_msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;

			word = pt1_read_reg(pt1, 2);
			while (len--) {
				next_msg->buf[len] = word;
				word >>= 8;
			}
		} else {
			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;
		}
	}

	return num;
}
static u32 pt1_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm pt1_i2c_algo = {
	.master_xfer = pt1_i2c_xfer,
	.functionality = pt1_i2c_func,
};

static void pt1_i2c_wait(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 128; i++)
		pt1_i2c_emit(pt1, 0, 0, 0, 1, 1, 0);
}

static void pt1_i2c_init(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 1024; i++)
		pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}
static void __devexit pt1_remove(struct pci_dev *pdev)
{
	struct pt1 *pt1;
	void __iomem *regs;

	pt1 = pci_get_drvdata(pdev);
	regs = pt1->regs;

	kthread_stop(pt1->kthread);
	pt1_cleanup_tables(pt1);
	pt1_cleanup_adapters(pt1);
	pt1_disable_ram(pt1);
	pt1_set_power(pt1, 0, 0, 1);
	i2c_del_adapter(&pt1->i2c_adap);
	pci_set_drvdata(pdev, NULL);
	kfree(pt1);
	pci_iounmap(pdev, regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
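
/*
 * Probe sequence: map BAR 0, register the bit-banged I2C adapter, bring
 * the board and its RAM out of reset, attach the four frontends, build
 * the DMA table ring and finally start the polling kthread.  Errors
 * unwind in the reverse order.
 */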
static int __devinit
pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;
	void __iomem *regs;
	struct pt1 *pt1;
	struct i2c_adapter *i2c_adap;
	struct task_struct *kthread;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret < 0)
		goto err_pci_disable_device;

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret < 0)
		goto err_pci_disable_device;

	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pt1 = kzalloc(sizeof(struct pt1), GFP_KERNEL);
	if (!pt1) {
		ret = -ENOMEM;
		goto err_pci_iounmap;
	}

	pt1->pdev = pdev;
	pt1->regs = regs;
	pci_set_drvdata(pdev, pt1);

	i2c_adap = &pt1->i2c_adap;
	i2c_adap->class = I2C_CLASS_TV_DIGITAL;
	i2c_adap->algo = &pt1_i2c_algo;
	i2c_adap->algo_data = NULL;
	i2c_adap->dev.parent = &pdev->dev;
	i2c_set_adapdata(i2c_adap, pt1);
	ret = i2c_add_adapter(i2c_adap);
	if (ret < 0)
		goto err_kfree;

	pt1_set_power(pt1, 0, 0, 1);

	pt1_i2c_init(pt1);
	pt1_i2c_wait(pt1);

	ret = pt1_sync(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_identify(pt1);

	ret = pt1_unlock(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_pci(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_enable_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_init_streams(pt1);

	pt1_set_power(pt1, 1, 0, 1);
	schedule_timeout_uninterruptible((HZ + 49) / 50);

	pt1_set_power(pt1, 1, 0, 0);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	ret = pt1_init_adapters(pt1);
	if (ret < 0)
		goto err_pt1_disable_ram;

	ret = pt1_init_tables(pt1);
	if (ret < 0)
		goto err_pt1_cleanup_adapters;

	kthread = kthread_run(pt1_thread, pt1, "pt1");
	if (IS_ERR(kthread)) {
		ret = PTR_ERR(kthread);
		goto err_pt1_cleanup_tables;
	}

	pt1->kthread = kthread;
	return 0;

err_pt1_cleanup_tables:
	pt1_cleanup_tables(pt1);
err_pt1_cleanup_adapters:
	pt1_cleanup_adapters(pt1);
err_pt1_disable_ram:
	pt1_disable_ram(pt1);
	pt1_set_power(pt1, 0, 0, 1);
err_i2c_del_adapter:
	i2c_del_adapter(i2c_adap);
err_kfree:
	pci_set_drvdata(pdev, NULL);
	kfree(pt1);
err_pci_iounmap:
	pci_iounmap(pdev, regs);
err_pci_release_regions:
	pci_release_regions(pdev);
err_pci_disable_device:
	pci_disable_device(pdev);
err:
	return ret;
}
static struct pci_device_id pt1_id_table[] = {
	{ PCI_DEVICE(0x10ee, 0x211a) },
	{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);

static struct pci_driver pt1_driver = {
	.name		= DRIVER_NAME,
	.probe		= pt1_probe,
	.remove		= __devexit_p(pt1_remove),
	.id_table	= pt1_id_table,
};
static int __init pt1_init(void)
{
	return pci_register_driver(&pt1_driver);
}

static void __exit pt1_cleanup(void)
{
	pci_unregister_driver(&pt1_driver);
}

module_init(pt1_init);
module_exit(pt1_cleanup);

MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
MODULE_DESCRIPTION("Earthsoft PT1 Driver");
MODULE_LICENSE("GPL");