/*
 * pcilynx.c - Texas Instruments PCILynx driver
 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
 *                         Stephan Linz <linz@mazet.de>
 *                         Manfred Weihs <weihs@ict.tuwien.ac.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Contributions:
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *	reading bus info block (containing GUID) from serial
 *	eeprom via i2c and storing it in config ROM
 *	Reworked code for initiating bus resets
 *	(long, short, with or without hold-off)
 *	Enhancements in async and iso send code
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kdev_t.h>
#include <linux/dma-mapping.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "pcilynx.h"

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
/* print card specific information */
#define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
#define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
#else
#define PRINT_GD(level, fmt, args...) do {} while (0)
#define PRINTD(level, card, fmt, args...) do {} while (0)
#endif

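/*
 * Illustrative example: on card 0,
 *	PRINT(KERN_INFO, lynx->id, "selfid_size %d", n);
 * expands to printk(KERN_INFO "pcilynx0: selfid_size %d\n", n).
 * PRINT_GD/PRINTD compile away unless CONFIG_IEEE1394_VERBOSEDEBUG is set.
 */
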
/* Module Parameters */
static int skip_eeprom;
module_param(skip_eeprom, int, 0444);
MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");


static struct hpsb_host_driver lynx_driver;
static unsigned int card_id;

/*
 * I2C stuff
 */

/* the i2c stuff was inspired by i2c-philips-par.c */

static void bit_setscl(void *data, int state)
{
	if (state) {
		((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
	} else {
		((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
	}
	reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
}

static void bit_setsda(void *data, int state)
{
	if (state) {
		((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
	} else {
		((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
	}
	reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
}

static int bit_getscl(void *data)
{
	return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
}

static int bit_getsda(void *data)
{
	return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
}

static struct i2c_algo_bit_data bit_data = {
	.setsda			= bit_setsda,
	.setscl			= bit_setscl,
	.getsda			= bit_getsda,
	.getscl			= bit_getscl,
	.udelay			= 5,
	.timeout		= 100,
};

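/*
 * The serial EEPROM's I2C lines are bit-banged through SERIAL_EEPROM_CONTROL:
 * bit 0x40 is SCL and bit 0x10 is SDA.  i2c_driven_state caches the value
 * last written so individual bits can be toggled, and the get callbacks
 * sample the lines by reading the register back.  The i2c-algo-bit core
 * turns these four callbacks into a complete bit-banged I2C master.
 */
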
/*
 * PCL handling functions.
 */

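/*
 * A PCL ("packet control list") is the PCILynx DMA descriptor.  The driver
 * keeps them in the pcl_mem buffer (LOCALRAM_SIZE bytes, allocated with
 * pci_alloc_consistent() in add_card) and accesses them through the
 * get_pcl()/put_pcl()/pcl_bus() helpers from pcilynx.h.  alloc_pcl() hands
 * out PCL slots by finding a clear bit in the pcl_bmap bitmap.
 */
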
static pcl_t alloc_pcl(struct ti_lynx *lynx)
{
	u8 m;
	int i, j;

	spin_lock(&lynx->lock);
	/* FIXME - use ffz() to make this readable */
	for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
		m = lynx->pcl_bmap[i];
		for (j = 0; j < 8; j++) {
			if (m & 1<<j) {
				continue;
			}
			m |= 1<<j;
			lynx->pcl_bmap[i] = m;
			spin_unlock(&lynx->lock);
			return 8 * i + j;
		}
	}
	spin_unlock(&lynx->lock);

	return -1;
}


#if 0
static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
{
	int off, bit;

	off = pclid / 8;
	bit = pclid % 8;

	if (pclid < 0) {
		return;
	}

	spin_lock(&lynx->lock);
	if (lynx->pcl_bmap[off] & 1<<bit) {
		lynx->pcl_bmap[off] &= ~(1<<bit);
	} else {
		PRINT(KERN_ERR, lynx->id,
		      "attempted to free unallocated PCL %d", pclid);
	}
	spin_unlock(&lynx->lock);
}

/* functions useful for debugging */
static void pretty_print_pcl(const struct ti_pcl *pcl)
{
	int i;

	printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
	       pcl->next, pcl->user_data, pcl->pcl_status,
	       pcl->remaining_transfer_count, pcl->next_data_buffer);

	printk("PCL");
	for (i=0; i<13; i++) {
		printk(" c%x:%08x d%x:%08x",
		       i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
		if (!(i & 0x3) && (i != 12)) printk("\nPCL");
	}
	printk("\n");
}

static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
{
	struct ti_pcl pcl;

	get_pcl(lynx, pclid, &pcl);
	pretty_print_pcl(&pcl);
}
#endif


/***********************************
 * IEEE-1394 functionality section *
 ***********************************/


static int get_phy_reg(struct ti_lynx *lynx, int addr)
{
	int retval;
	int i = 0;

	unsigned long flags;

	if (addr > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register address %d out of range",
		      __func__, addr);
		return -1;
	}

	spin_lock_irqsave(&lynx->phy_reg_lock, flags);

	reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
	do {
		retval = reg_read(lynx, LINK_PHY);

		if (i > 10000) {
			PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
			      __func__);
			retval = -1;
			break;
		}
		i++;
	} while ((retval & 0xf00) != LINK_PHY_RADDR(addr));

	reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
	spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);

	if (retval != -1) {
		return retval & 0xff;
	} else {
		return -1;
	}
}

static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
{
	unsigned long flags;

	if (addr > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register address %d out of range", __func__, addr);
		return -1;
	}

	if (val > 0xff) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register value %d out of range", __func__, val);
		return -1;
	}

	spin_lock_irqsave(&lynx->phy_reg_lock, flags);

	reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
		  | LINK_PHY_WDATA(val));

	spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);

	return 0;
}

static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
{
	int reg;

	if (page > 7) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY page %d out of range", __func__, page);
		return -1;
	}

	reg = get_phy_reg(lynx, 7);
	if (reg != -1) {
		reg &= 0x1f;
		reg |= (page << 5);
		set_phy_reg(lynx, 7, reg);
		return 0;
	} else {
		return -1;
	}
}

#if 0 /* not needed at this time */
static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
{
	int reg;

	if (port > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY port %d out of range", __func__, port);
		return -1;
	}

	reg = get_phy_reg(lynx, 7);
	if (reg != -1) {
		reg &= 0xf0;
		reg |= port;
		set_phy_reg(lynx, 7, reg);
		return 0;
	} else {
		return -1;
	}
}
#endif

static u32 get_phy_vendorid(struct ti_lynx *lynx)
{
	u32 pvid = 0;
	sel_phy_reg_page(lynx, 1);
	pvid |= (get_phy_reg(lynx, 10) << 16);
	pvid |= (get_phy_reg(lynx, 11) << 8);
	pvid |= get_phy_reg(lynx, 12);
	PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
	return pvid;
}

static u32 get_phy_productid(struct ti_lynx *lynx)
{
	u32 id = 0;
	sel_phy_reg_page(lynx, 1);
	id |= (get_phy_reg(lynx, 13) << 16);
	id |= (get_phy_reg(lynx, 14) << 8);
	id |= get_phy_reg(lynx, 15);
	PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
	return id;
}

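/*
 * Older (pre-1394a) PHYs do not hand the local node's self-ID packet to the
 * link layer, so generate_own_selfid() below reconstructs an equivalent
 * self-ID quadlet from PHY registers 0-6 (gap count, speed, contender bit,
 * port status) and handle_selfid() feeds it to the core alongside the
 * self-IDs received from the other nodes.
 */
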
static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
				     struct hpsb_host *host)
{
	quadlet_t lsid;
	char phyreg[7];
	int i;

	phyreg[0] = lynx->phy_reg0;
	for (i = 1; i < 7; i++) {
		phyreg[i] = get_phy_reg(lynx, i);
	}

	/* FIXME? We assume a TSB21LV03A phy here.  This code doesn't support
	   more than 3 ports on the PHY anyway. */

	lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
	lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
	lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
	if (!hpsb_disable_irm)
		lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
	/* lsid |= 1 << 11; *//* set contender (hack) */
	lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */

	for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
		if (phyreg[3 + i] & 0x4) {
			lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
				<< (6 - i*2);
		} else {
			lsid |= 1 << (6 - i*2);
		}
	}

	cpu_to_be32s(&lsid);
	PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
	return lsid;
}

static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
{
	quadlet_t *q = lynx->rcv_page;
	int phyid, isroot, size;
	quadlet_t lsid = 0;
	int i;

	if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;

	size = lynx->selfid_size;
	phyid = lynx->phy_reg0;

	i = (size > 16 ? 16 : size) / 4 - 1;
	while (i >= 0) {
		cpu_to_be32s(&q[i]);
		i--;
	}

	if (!lynx->phyic.reg_1394a) {
		lsid = generate_own_selfid(lynx, host);
	}

	isroot = (phyid & 2) != 0;
	phyid >>= 2;
	PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
	      phyid, (isroot ? "root" : "not root"));
	reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);

	if (!lynx->phyic.reg_1394a && !size) {
		hpsb_selfid_received(host, lsid);
	}

	while (size > 0) {
		struct selfid *sid = (struct selfid *)q;

		if (!lynx->phyic.reg_1394a && !sid->extended
		    && (sid->phy_id == (phyid + 1))) {
			hpsb_selfid_received(host, lsid);
		}

		if (q[0] == ~q[1]) {
			PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
			      q[0]);
			hpsb_selfid_received(host, q[0]);
		} else {
			PRINT(KERN_INFO, lynx->id,
			      "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
		}
		q += 2;
		size -= 8;
	}

	if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
		hpsb_selfid_received(host, lsid);
	}

	hpsb_selfid_complete(host, phyid, isroot);

	if (host->in_bus_reset) return; /* in bus reset again */

	if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think we need this here
	reg_set_bits(lynx, LINK_CONTROL,
		     LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
		     | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
}

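/*
 * Transmit path: lynx_transmit() appends packets to a software queue and
 * send_next() moves the head packet onto pcl_queue, maps its header and
 * payload for DMA and programs the send PCL.  Only one packet is in flight
 * per channel; completion is handled in the interrupt handler, which calls
 * send_next() again if more packets are waiting.
 */
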
/* This must be called with the respective queue_lock held. */
static void send_next(struct ti_lynx *lynx, int what)
{
	struct ti_pcl pcl;
	struct lynx_send_data *d;
	struct hpsb_packet *packet;

#if 0 /* has been removed from ieee1394 core */
	d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
#else
	d = &lynx->async;
#endif
	if (!list_empty(&d->pcl_queue)) {
		PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
		BUG();
	}

	packet = driver_packet(d->queue.next);
	list_move_tail(&packet->driver_list, &d->pcl_queue);

	d->header_dma = pci_map_single(lynx->dev, packet->header,
				       packet->header_size, PCI_DMA_TODEVICE);
	if (packet->data_size) {
		d->data_dma = pci_map_single(lynx->dev, packet->data,
					     packet->data_size,
					     PCI_DMA_TODEVICE);
	} else {
		d->data_dma = 0;
	}

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;
	pcl.pcl_status = 0;
	pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[0].pointer = d->header_dma;
	pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
	pcl.buffer[1].pointer = d->data_dma;

	switch (packet->type) {
	case hpsb_async:
		pcl.buffer[0].control |= PCL_CMD_XMT;
		break;
#if 0 /* has been removed from ieee1394 core */
	case hpsb_iso:
		pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
		break;
#endif
	case hpsb_raw:
		pcl.buffer[0].control |= PCL_CMD_UNFXMT;
		break;
	}

	put_pcl(lynx, d->pcl, &pcl);
	run_pcl(lynx, d->pcl_start, d->channel);
}

/* called from subsystem core */
static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_lynx *lynx = host->hostdata;
	struct lynx_send_data *d;
	unsigned long flags;

	if (packet->data_size >= 4096) {
		PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
		      packet->data_size);
		return -EOVERFLOW;
	}

	switch (packet->type) {
	case hpsb_async:
	case hpsb_raw:
		d = &lynx->async;
		break;
#if 0 /* has been removed from ieee1394 core */
	case hpsb_iso:
		d = &lynx->iso_send;
		break;
#endif
	default:
		PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
		      packet->type);
		return -EINVAL;
	}

	if (packet->tcode == TCODE_WRITEQ
	    || packet->tcode == TCODE_READQ_RESPONSE) {
		cpu_to_be32s(&packet->header[3]);
	}

	spin_lock_irqsave(&d->queue_lock, flags);

	list_add_tail(&packet->driver_list, &d->queue);
	if (list_empty(&d->pcl_queue))
		send_next(lynx, packet->type);

	spin_unlock_irqrestore(&d->queue_lock, flags);

	return 0;
}

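/*
 * lynx_devctl() services host-controller commands from the ieee1394 core.
 * The RESET_BUS variants differ in whether the short (1394a only) or long
 * reset is used and whether the root hold-off bit is forced, cleared or
 * left alone; all of them are issued by writing PHY registers 1 and 5.
 */
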
/* called from subsystem core */
static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_lynx *lynx = host->hostdata;
	int retval = 0;
	struct hpsb_packet *packet;
	LIST_HEAD(packet_list);
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
			retval = 0;
			break;
		}

		switch (arg) {
		case SHORT_RESET:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;
				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0x40;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;
			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				if (phy_reg & 0x80) {
					phy_reg &= ~0x80;
					set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;
				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg &= ~0x80;
			phy_reg |= 0x40;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;
			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				if (!(phy_reg & 0x80)) {
					phy_reg |= 0x80;
					set_phy_reg(lynx, 1, phy_reg); /* set RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;
				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0xc0;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;
			set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
			break;
		default:
			PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
			retval = -1;
		}

		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(lynx, CYCLE_TIMER);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(lynx, CYCLE_TIMER, arg);
		break;

	case SET_BUS_ID:
		reg_write(lynx, LINK_ID,
			  (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			reg_set_bits(lynx, LINK_CONTROL,
				     LINK_CONTROL_CYCMASTER);
		} else {
			reg_clear_bits(lynx, LINK_CONTROL,
				       LINK_CONTROL_CYCMASTER);
		}
		break;

	case CANCEL_REQUESTS:
		spin_lock_irqsave(&lynx->async.queue_lock, flags);

		reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
		list_splice_init(&lynx->async.queue, &packet_list);

		if (list_empty(&lynx->async.pcl_queue)) {
			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
			PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
		} else {
			struct ti_pcl pcl;
			u32 ack;

			PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");

			get_pcl(lynx, lynx->async.pcl, &pcl);

			packet = driver_packet(lynx->async.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->async.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->async.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "async packet was not completed");
				ack = ACKX_ABORTED;
			}
			hpsb_packet_sent(host, packet, ack);
		}

		while (!list_empty(&packet_list)) {
			packet = driver_packet(packet_list.next);
			list_del_init(&packet->driver_list);
			hpsb_packet_sent(host, packet, ACKX_ABORTED);
		}

		break;
#if 0 /* has been removed from ieee1394 core */
	case ISO_LISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		if (lynx->iso_rcv.chan_count++ == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  DMA_WORD1_CMP_ENABLE_MASTER);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;

	case ISO_UNLISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		if (--lynx->iso_rcv.chan_count == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  0);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;
#endif
	default:
		PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
		retval = -1;
	}

	return retval;
}

/***************************************
 * IEEE-1394 functionality section END *
 ***************************************/


/********************************************************
 * Global stuff (interrupt handler, init/shutdown code) *
 ********************************************************/

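/*
 * Interrupt handling: both interrupt status registers are read and
 * acknowledged up front, then each cause is dispatched in turn - link
 * events (bus reset, PHY register received, FIFO over-/underflows), the
 * iso receive DMA halt, the async and iso send DMA halts (packet
 * completion) and the async receive DMA halt (packet arrival or self-ID
 * data).
 */
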
static irqreturn_t lynx_irq_handler(int irq, void *dev_id)
{
	struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
	struct hpsb_host *host = lynx->host;
	u32 intmask;
	u32 linkint;

	linkint = reg_read(lynx, LINK_INT_STATUS);
	intmask = reg_read(lynx, PCI_INT_STATUS);

	if (!(intmask & PCI_INT_INT_PEND))
		return IRQ_NONE;

	PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
	       linkint);

	reg_write(lynx, LINK_INT_STATUS, linkint);
	reg_write(lynx, PCI_INT_STATUS, intmask);

	if (intmask & PCI_INT_1394) {
		if (linkint & LINK_INT_PHY_TIMEOUT) {
			PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
		}
		if (linkint & LINK_INT_PHY_BUSRESET) {
			PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;
			if (!host->in_bus_reset)
				hpsb_bus_reset(host);
		}
		if (linkint & LINK_INT_PHY_REG_RCVD) {
			u32 reg;

			spin_lock(&lynx->phy_reg_lock);
			reg = reg_read(lynx, LINK_PHY);
			spin_unlock(&lynx->phy_reg_lock);

			if (!host->in_bus_reset) {
				PRINT(KERN_INFO, lynx->id,
				      "phy reg received without reset");
			} else if (reg & 0xf00) {
				PRINT(KERN_INFO, lynx->id,
				      "unsolicited phy reg %d received",
				      (reg >> 8) & 0xf);
			} else {
				lynx->phy_reg0 = reg & 0xff;
				handle_selfid(lynx, host);
			}
		}
		if (linkint & LINK_INT_ISO_STUCK) {
			PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
		}
		if (linkint & LINK_INT_ASYNC_STUCK) {
			PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
		}
		if (linkint & LINK_INT_SENT_REJECT) {
			PRINT(KERN_INFO, lynx->id, "sent reject");
		}
		if (linkint & LINK_INT_TX_INVALID_TC) {
			PRINT(KERN_INFO, lynx->id, "invalid transaction code");
		}
		if (linkint & LINK_INT_GRF_OVERFLOW) {
			/* flush FIFO if overflow happens during reset */
			if (host->in_bus_reset)
				reg_write(lynx, FIFO_CONTROL,
					  FIFO_CONTROL_GRF_FLUSH);
			PRINT(KERN_INFO, lynx->id, "GRF overflow");
		}
		if (linkint & LINK_INT_ITF_UNDERFLOW) {
			PRINT(KERN_INFO, lynx->id, "ITF underflow");
		}
		if (linkint & LINK_INT_ATF_UNDERFLOW) {
			PRINT(KERN_INFO, lynx->id, "ATF underflow");
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
		PRINTD(KERN_DEBUG, lynx->id, "iso receive");

		spin_lock(&lynx->iso_rcv.lock);

		lynx->iso_rcv.stat[lynx->iso_rcv.next] =
			reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));

		lynx->iso_rcv.used++;
		lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;

		if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
		    || !lynx->iso_rcv.chan_count) {
			PRINTD(KERN_DEBUG, lynx->id, "stopped");
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
		}

		run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
			    CHANNEL_ISO_RCV);

		spin_unlock(&lynx->iso_rcv.lock);

		tasklet_schedule(&lynx->iso_rcv.tq);
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
		PRINTD(KERN_DEBUG, lynx->id, "async sent");
		spin_lock(&lynx->async.queue_lock);

		if (list_empty(&lynx->async.pcl_queue)) {
			spin_unlock(&lynx->async.queue_lock);
			PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			get_pcl(lynx, lynx->async.pcl, &pcl);

			packet = driver_packet(lynx->async.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->async.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->async.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			if (!list_empty(&lynx->async.queue)) {
				send_next(lynx, hpsb_async);
			}

			spin_unlock(&lynx->async.queue_lock);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "async packet was not completed");
				ack = ACKX_SEND_ERROR;
			}
			hpsb_packet_sent(host, packet, ack);
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
		PRINTD(KERN_DEBUG, lynx->id, "iso sent");
		spin_lock(&lynx->iso_send.queue_lock);

		if (list_empty(&lynx->iso_send.pcl_queue)) {
			spin_unlock(&lynx->iso_send.queue_lock);
			PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			get_pcl(lynx, lynx->iso_send.pcl, &pcl);

			packet = driver_packet(lynx->iso_send.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

#if 0 /* has been removed from ieee1394 core */
			if (!list_empty(&lynx->iso_send.queue)) {
				send_next(lynx, hpsb_iso);
			}
#endif
			spin_unlock(&lynx->iso_send.queue_lock);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
				ack = ACKX_SEND_ERROR;
			}

			hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
		/* general receive DMA completed */
		int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));

		PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
		       stat & 0x1fff);

		if (stat & DMA_CHAN_STAT_SELFID) {
			lynx->selfid_size = stat & 0x1fff;
			handle_selfid(lynx, host);
		} else {
			quadlet_t *q_data = lynx->rcv_page;
			if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
			    || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
				cpu_to_be32s(q_data + 3);
			}
			hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
		}

		run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
	}

	return IRQ_HANDLED;
}

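/*
 * iso_rcv_bh() is the tasklet half of iso reception: the interrupt handler
 * only records the DMA status and advances the ring, while this function
 * walks the buffered packets, sanity-checks their lengths and hands them to
 * hpsb_packet_received() outside of hard interrupt context.
 */
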
static void iso_rcv_bh(struct ti_lynx *lynx)
{
	unsigned int idx;
	quadlet_t *data;
	unsigned long flags;

	spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

	while (lynx->iso_rcv.used) {
		idx = lynx->iso_rcv.last;
		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);

		data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
			+ (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;

		if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
			PRINT(KERN_ERR, lynx->id,
			      "iso length mismatch 0x%08x/0x%08x", *data,
			      lynx->iso_rcv.stat[idx]);
		}

		if (lynx->iso_rcv.stat[idx]
		    & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
			PRINT(KERN_INFO, lynx->id,
			      "iso receive error on %d to 0x%p", idx, data);
		} else {
			hpsb_packet_received(lynx->host, data,
					     lynx->iso_rcv.stat[idx] & 0x1fff,
					     0);
		}

		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
		lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
		lynx->iso_rcv.used--;
	}

	if (lynx->iso_rcv.chan_count) {
		reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
			  DMA_WORD1_CMP_ENABLE_MASTER);
	}
	spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
}

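/*
 * remove_card() tears down in the reverse order of add_card() by switching
 * on lynx->state and deliberately falling through the cases, so a partially
 * initialized card (e.g. after a probe failure) is unwound correctly.
 */
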
static void remove_card(struct pci_dev *dev)
{
	struct ti_lynx *lynx;
	struct device *lynx_dev;
	int i;

	lynx = pci_get_drvdata(dev);
	if (!lynx) return;
	pci_set_drvdata(dev, NULL);

	lynx_dev = get_device(&lynx->host->device);

	switch (lynx->state) {
	case is_host:
		reg_write(lynx, PCI_INT_ENABLE, 0);
		hpsb_remove_host(lynx->host);
	case have_intr:
		reg_write(lynx, PCI_INT_ENABLE, 0);
		free_irq(lynx->dev->irq, lynx);

		/* Disable IRM Contender and LCtrl */
		if (lynx->phyic.reg_1394a)
			set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));

		/* Let all other nodes know to ignore us */
		lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

	case have_iomappings:
		reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
		/* Fix buggy cards with autoboot pin not tied low: */
		reg_write(lynx, DMA0_CHAN_CTRL, 0);
		iounmap(lynx->registers);
		iounmap(lynx->local_rom);
		iounmap(lynx->local_ram);
		iounmap(lynx->aux_port);
	case have_1394_buffers:
		for (i = 0; i < ISORCV_PAGES; i++) {
			if (lynx->iso_rcv.page[i]) {
				pci_free_consistent(lynx->dev, PAGE_SIZE,
						    lynx->iso_rcv.page[i],
						    lynx->iso_rcv.page_dma[i]);
			}
		}
		pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
				    lynx->rcv_page_dma);
	case have_aux_buf:
	case have_pcl_mem:
		pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
				    lynx->pcl_mem_dma);
	case clear:
		/* do nothing - already freed */
		break;
	}

	tasklet_kill(&lynx->iso_rcv.tq);

	if (lynx_dev)
		put_device(lynx_dev);
}

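/*
 * add_card() is the PCI probe routine.  It advances lynx->state
 * (clear -> have_pcl_mem -> have_1394_buffers -> have_iomappings ->
 * have_intr -> is_host) as resources are acquired, so that the FAIL()
 * macro can simply call remove_card() to undo whatever has been set up
 * so far.
 */
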
static int __devinit add_card(struct pci_dev *dev,
			      const struct pci_device_id *devid_is_unused)
{
#define FAIL(fmt, args...) do { \
	PRINT_G(KERN_ERR, fmt , ## args); \
	remove_card(dev); \
	return error; \
	} while (0)

	char irq_buf[16];
	struct hpsb_host *host;
	struct ti_lynx *lynx; /* shortcut to currently handled device */
	struct ti_pcl pcl;
	u32 *pcli;
	int i;
	int error;

	error = -ENXIO;

	if (pci_set_dma_mask(dev, DMA_BIT_MASK(32)))
		FAIL("DMA address limits not supported for PCILynx hardware");
	if (pci_enable_device(dev))
		FAIL("failed to enable PCILynx hardware");
	pci_set_master(dev);

	error = -ENOMEM;

	host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
	if (!host) FAIL("failed to allocate control structure memory");

	lynx = host->hostdata;
	lynx->id = card_id++;
	lynx->dev = dev;
	lynx->state = clear;
	lynx->host = host;
	host->pdev = dev;
	pci_set_drvdata(dev, lynx);

	spin_lock_init(&lynx->lock);
	spin_lock_init(&lynx->phy_reg_lock);

	lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
					     &lynx->pcl_mem_dma);

	if (lynx->pcl_mem != NULL) {
		lynx->state = have_pcl_mem;
		PRINT(KERN_INFO, lynx->id,
		      "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
		      lynx->pcl_mem);
	} else {
		FAIL("failed to allocate PCL memory area");
	}

	lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
					      &lynx->rcv_page_dma);
	if (lynx->rcv_page == NULL) {
		FAIL("failed to allocate receive buffer");
	}
	lynx->state = have_1394_buffers;

	for (i = 0; i < ISORCV_PAGES; i++) {
		lynx->iso_rcv.page[i] =
			pci_alloc_consistent(dev, PAGE_SIZE,
					     &lynx->iso_rcv.page_dma[i]);
		if (lynx->iso_rcv.page[i] == NULL) {
			FAIL("failed to allocate iso receive buffers");
		}
	}

	lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
					  PCILYNX_MAX_REGISTER);
	lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
	lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
	lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
				  PCILYNX_MAX_MEMORY);
	lynx->state = have_iomappings;

	if (lynx->registers == NULL) {
		FAIL("failed to remap registers - card not accessible");
	}

	reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
	/* Fix buggy cards with autoboot pin not tied low: */
	reg_write(lynx, DMA0_CHAN_CTRL, 0);

	sprintf (irq_buf, "%d", dev->irq);

	if (!request_irq(dev->irq, lynx_irq_handler, IRQF_SHARED,
			 PCILYNX_DRIVER_NAME, lynx)) {
		PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
		lynx->state = have_intr;
	} else {
		FAIL("failed to allocate shared interrupt %s", irq_buf);
	}

	/* alloc_pcl return values are not checked, it is expected that the
	 * provided PCL space is sufficient for the initial allocations */
	lynx->rcv_pcl = alloc_pcl(lynx);
	lynx->rcv_pcl_start = alloc_pcl(lynx);
	lynx->async.pcl = alloc_pcl(lynx);
	lynx->async.pcl_start = alloc_pcl(lynx);
	lynx->iso_send.pcl = alloc_pcl(lynx);
	lynx->iso_send.pcl_start = alloc_pcl(lynx);

	for (i = 0; i < NUM_ISORCV_PCL; i++) {
		lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
	}
	lynx->iso_rcv.pcl_start = alloc_pcl(lynx);

	/* all allocations successful - simple init stuff follows */

	reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

	tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
		     (unsigned long)lynx);

	spin_lock_init(&lynx->iso_rcv.lock);

	spin_lock_init(&lynx->async.queue_lock);
	lynx->async.channel = CHANNEL_ASYNC_SEND;
	spin_lock_init(&lynx->iso_send.queue_lock);
	lynx->iso_send.channel = CHANNEL_ISO_SEND;

	PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
	      "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
	      lynx->local_ram, lynx->aux_port);

	/* now, looking for PHY register set */
	if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
		lynx->phyic.reg_1394a = 1;
		PRINT(KERN_INFO, lynx->id,
		      "found 1394a conform PHY (using extended register set)");
		lynx->phyic.vendor = get_phy_vendorid(lynx);
		lynx->phyic.product = get_phy_productid(lynx);
	} else {
		lynx->phyic.reg_1394a = 0;
		PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
	}

	lynx->selfid_size = -1;
	lynx->phy_reg0 = -1;

	INIT_LIST_HEAD(&lynx->async.queue);
	INIT_LIST_HEAD(&lynx->async.pcl_queue);
	INIT_LIST_HEAD(&lynx->iso_send.queue);
	INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);

	pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
	put_pcl(lynx, lynx->rcv_pcl_start, &pcl);

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;

	pcl.buffer[0].control = PCL_CMD_RCV | 16;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[1].control = PCL_LAST_BUFF | 4080;

	pcl.buffer[0].pointer = lynx->rcv_page_dma;
	pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
	put_pcl(lynx, lynx->rcv_pcl, &pcl);

	pcl.next = pcl_bus(lynx, lynx->async.pcl);
	pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
	put_pcl(lynx, lynx->async.pcl_start, &pcl);

	pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
	pcl.async_error_next = PCL_NEXT_INVALID;
	put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;
	pcl.buffer[0].control = PCL_CMD_RCV | 4;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[1].control = PCL_LAST_BUFF | 2044;

	for (i = 0; i < NUM_ISORCV_PCL; i++) {
		int page = i / ISORCV_PER_PAGE;
		int sec = i % ISORCV_PER_PAGE;

		pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
			+ sec * MAX_ISORCV_SIZE;
		pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
		put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
	}

	pcli = (u32 *)&pcl;
	for (i = 0; i < NUM_ISORCV_PCL; i++) {
		pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
	}
	put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);

	/* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
	reg_write(lynx, FIFO_SIZES, 0x003030a0);
	/* 20 byte threshold before triggering PCI transfer */
	reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
	/* threshold on both send FIFOs before transmitting:
	   FIFO size - cache line size - 1 */
	i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
	i = 0x30 - i - 1;
	reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);

	reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);

	reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
		  | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
		  | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
		  | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
		  | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
		  | LINK_INT_ATF_UNDERFLOW);

	reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
	reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
	reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
	reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
		  DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
		  | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
		  | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);

	run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);

	reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
	reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
	reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
	reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);

	run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);

	reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
		  | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
		  | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
		  | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);

	if (!lynx->phyic.reg_1394a) {
		if (!hpsb_disable_irm) {
			/* attempt to enable contender bit -FIXME- would this
			 * work elsewhere? */
			reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
			reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
		}
	} else {
		/* set the contender (if appropriate) and LCtrl bit in the
		 * extended PHY register set. (Should check that PHY_02_EXTENDED
		 * is set in register 2?)
		 */
		i = get_phy_reg(lynx, 4);
		i |= PHY_04_LCTRL;
		if (hpsb_disable_irm)
			i &= ~PHY_04_CONTENDER;
		else
			i |= PHY_04_CONTENDER;
		if (i != -1) set_phy_reg(lynx, 4, i);
	}

	if (!skip_eeprom)
	{
		/* needed for i2c communication with serial eeprom */
		struct i2c_adapter *i2c_ad;
		struct i2c_algo_bit_data i2c_adapter_data;

		error = -ENOMEM;
		i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
		if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");

		strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
		i2c_adapter_data = bit_data;
		i2c_ad->algo_data = &i2c_adapter_data;
		i2c_adapter_data.data = lynx;
		i2c_ad->dev.parent = &dev->dev;

		PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
		       reg_read(lynx, SERIAL_EEPROM_CONTROL));

		/* reset hardware to sane state */
		lynx->i2c_driven_state = 0x00000070;
		reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);

		if (i2c_bit_add_bus(i2c_ad) < 0)
		{
			kfree(i2c_ad);
			error = -ENXIO;
			FAIL("unable to register i2c");
		}
		else
		{
			/* do i2c stuff */
			unsigned char i2c_cmd = 0x10;
			struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
						  { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
						};

			/* we use i2c_transfer because we have no i2c_client
			   at hand */
			if (i2c_transfer(i2c_ad, msg, 2) < 0) {
				PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
			} else {
				PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
				/* FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
				 * generation(1394a) and link_spd(1394a) field and recalculate
				 * the CRC */

				for (i = 0; i < 5 ; i++)
					PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
					       i, be32_to_cpu(lynx->bus_info_block[i]));

				/* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
				if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
				    (lynx->bus_info_block[1] == IEEE1394_BUSID_MAGIC))
				{
					PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
				} else {
					kfree(i2c_ad);
					error = -ENXIO;
					FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
				}

			}

			i2c_del_adapter(i2c_ad);
			kfree(i2c_ad);
		}
	}

	host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
	host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
	host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
	host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
	if (!lynx->phyic.reg_1394a)
		host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
	else
		host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;

	if (hpsb_add_host(host)) {
		error = -ENOMEM;
		FAIL("Failed to register host with highlevel");
	}

	lynx->state = is_host;

	return 0;
#undef FAIL
}

static struct pci_device_id pci_table[] = {
	{
		.vendor =    PCI_VENDOR_ID_TI,
		.device =    PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ }			/* Terminating entry */
};

static struct pci_driver lynx_pci_driver = {
	.name =     PCILYNX_DRIVER_NAME,
	.id_table = pci_table,
	.probe =    add_card,
	.remove =   remove_card,
};

static struct hpsb_host_driver lynx_driver = {
	.owner =	   THIS_MODULE,
	.name =		   PCILYNX_DRIVER_NAME,
	.set_hw_config_rom = NULL,
	.transmit_packet = lynx_transmit,
	.devctl =	   lynx_devctl,
	.isoctl =	   NULL,
};

MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("pcilynx");
MODULE_DEVICE_TABLE(pci, pci_table);

static int __init pcilynx_init(void)
{
	int ret;

	ret = pci_register_driver(&lynx_pci_driver);
	if (ret < 0) {
		PRINT_G(KERN_ERR, "PCI module init failed");
		return ret;
	}

	return 0;
}

static void __exit pcilynx_cleanup(void)
{
	pci_unregister_driver(&lynx_pci_driver);
}

module_init(pcilynx_init);
module_exit(pcilynx_cleanup);