release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/ieee1394/pcilynx.c
1 /*
2 * pcilynx.c - Texas Instruments PCILynx driver
3 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
4 * Stephan Linz <linz@mazet.de>
5 * Manfred Weihs <weihs@ict.tuwien.ac.at>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Contributions:
25 * Manfred Weihs <weihs@ict.tuwien.ac.at>
26 * reading bus info block (containing GUID) from serial
27 * eeprom via i2c and storing it in config ROM
28 * Reworked code for initiating bus resets
29 * (long, short, with or without hold-off)
30 * Enhancements in async and iso send code
33 #include <linux/kernel.h>
34 #include <linux/slab.h>
35 #include <linux/interrupt.h>
36 #include <linux/wait.h>
37 #include <linux/errno.h>
38 #include <linux/module.h>
39 #include <linux/moduleparam.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/fs.h>
43 #include <linux/poll.h>
44 #include <linux/kdev_t.h>
45 #include <linux/dma-mapping.h>
46 #include <asm/byteorder.h>
47 #include <asm/atomic.h>
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50 #include <asm/irq.h>
52 #include "csr1212.h"
53 #include "ieee1394.h"
54 #include "ieee1394_types.h"
55 #include "hosts.h"
56 #include "ieee1394_core.h"
57 #include "highlevel.h"
58 #include "pcilynx.h"
60 #include <linux/i2c.h>
61 #include <linux/i2c-algo-bit.h>
63 /* print general (card independent) information */
64 #define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
65 /* print card specific information */
66 #define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
68 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
69 #define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
70 #define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
71 #else
72 #define PRINT_GD(level, fmt, args...) do {} while (0)
73 #define PRINTD(level, card, fmt, args...) do {} while (0)
74 #endif
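/*
 * Editor's note (illustrative, not part of the original driver): the PRINT
 * macros just prepend the driver tag and card index to printk output.
 * For card 0, for example,
 *
 *     PRINT(KERN_INFO, 0, "PHY timeout occurred");
 *
 * logs the line "pcilynx0: PHY timeout occurred".
 */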
77 /* Module Parameters */
78 static int skip_eeprom;
79 module_param(skip_eeprom, int, 0444);
80 MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");
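/*
 * Usage note (illustrative, not part of the original driver): skip_eeprom is
 * a load-time module parameter, e.g.
 *
 *     modprobe pcilynx skip_eeprom=1
 *
 * which makes add_card() below skip the i2c read of the serial EEPROM and use
 * a generic bus info block instead.
 */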
83 static struct hpsb_host_driver lynx_driver;
84 static unsigned int card_id;
89 * I2C stuff
92 /* the i2c stuff was inspired by i2c-philips-par.c */
94 static void bit_setscl(void *data, int state)
96 if (state) {
97 ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
98 } else {
99 ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
101 reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
104 static void bit_setsda(void *data, int state)
106 if (state) {
107 ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
108 } else {
109 ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
111 reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
114 static int bit_getscl(void *data)
116 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
119 static int bit_getsda(void *data)
121 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
124 static struct i2c_algo_bit_data bit_data = {
125 .setsda = bit_setsda,
126 .setscl = bit_setscl,
127 .getsda = bit_getsda,
128 .getscl = bit_getscl,
129 .udelay = 5,
130 .timeout = 100,
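/*
 * Editor's note (illustrative, not part of the original driver): the four
 * callbacks above bit-bang the EEPROM's i2c bus through the
 * SERIAL_EEPROM_CONTROL register, with bit 0x40 driving/sampling SCL and bit
 * 0x10 driving/sampling SDA.  The generic i2c-algo-bit layer uses bit_data to
 * clock the lines; with .udelay = 5 (half clock cycle in microseconds) the
 * bus runs at roughly 100 kHz.  add_card() copies bit_data into a per-card
 * i2c_algo_bit_data and registers it with i2c_bit_add_bus().
 */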
135 * PCL handling functions.
138 static pcl_t alloc_pcl(struct ti_lynx *lynx)
140 u8 m;
141 int i, j;
143 spin_lock(&lynx->lock);
144 for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
145 m = lynx->pcl_bmap[i];
146 for (j = 0; j < 8; j++) {
147 if (m & 1<<j) {
148 continue;
150 m |= 1<<j;
151 lynx->pcl_bmap[i] = m;
152 spin_unlock(&lynx->lock);
153 return 8 * i + j;
156 spin_unlock(&lynx->lock);
158 return -1;
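/*
 * Editor's sketch (hypothetical, not part of the original driver): alloc_pcl()
 * scans the pcl_bmap bitmap for the first clear bit and hands out that PCL
 * index (8 * i + j).  The matching release operation would clear the same bit
 * again, roughly as in free_pcl_sketch() below:
 */
#if 0
static void free_pcl_sketch(struct ti_lynx *lynx, pcl_t pclid)
{
        int off, bit;

        if (pclid < 0)
                return;

        off = pclid / 8;        /* byte in the bitmap   */
        bit = pclid % 8;        /* bit inside that byte */

        spin_lock(&lynx->lock);
        lynx->pcl_bmap[off] &= ~(1 << bit);
        spin_unlock(&lynx->lock);
}
#endif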
165 /***********************************
166 * IEEE-1394 functionality section *
167 ***********************************/
170 static int get_phy_reg(struct ti_lynx *lynx, int addr)
172 int retval;
173 int i = 0;
175 unsigned long flags;
177 if (addr > 15) {
178 PRINT(KERN_ERR, lynx->id,
179 "%s: PHY register address %d out of range",
180 __func__, addr);
181 return -1;
184 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
186 reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
187 do {
188 retval = reg_read(lynx, LINK_PHY);
190 if (i > 10000) {
191 PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
192 __func__);
193 retval = -1;
194 break;
196 i++;
197 } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
199 reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
200 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
202 if (retval != -1) {
203 return retval & 0xff;
204 } else {
205 return -1;
209 static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
211 unsigned long flags;
213 if (addr > 15) {
214 PRINT(KERN_ERR, lynx->id,
215 "%s: PHY register address %d out of range", __func__, addr);
216 return -1;
219 if (val > 0xff) {
220 PRINT(KERN_ERR, lynx->id,
221 "%s: PHY register value %d out of range", __func__, val);
222 return -1;
225 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
227 reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
228 | LINK_PHY_WDATA(val));
230 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
232 return 0;
235 static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
237 int reg;
239 if (page > 7) {
240 PRINT(KERN_ERR, lynx->id,
241 "%s: PHY page %d out of range", __func__, page);
242 return -1;
245 reg = get_phy_reg(lynx, 7);
246 if (reg != -1) {
247 reg &= 0x1f;
248 reg |= (page << 5);
249 set_phy_reg(lynx, 7, reg);
250 return 0;
251 } else {
252 return -1;
257 static u32 get_phy_vendorid(struct ti_lynx *lynx)
259 u32 pvid = 0;
260 sel_phy_reg_page(lynx, 1);
261 pvid |= (get_phy_reg(lynx, 10) << 16);
262 pvid |= (get_phy_reg(lynx, 11) << 8);
263 pvid |= get_phy_reg(lynx, 12);
264 PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
265 return pvid;
268 static u32 get_phy_productid(struct ti_lynx *lynx)
270 u32 id = 0;
271 sel_phy_reg_page(lynx, 1);
272 id |= (get_phy_reg(lynx, 13) << 16);
273 id |= (get_phy_reg(lynx, 14) << 8);
274 id |= get_phy_reg(lynx, 15);
275 PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
276 return id;
279 static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
280 struct hpsb_host *host)
282 quadlet_t lsid;
283 char phyreg[7];
284 int i;
286 phyreg[0] = lynx->phy_reg0;
287 for (i = 1; i < 7; i++) {
288 phyreg[i] = get_phy_reg(lynx, i);
292 lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
293 lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
294 lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
295 if (!hpsb_disable_irm)
296 lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
297 /* lsid |= 1 << 11; *//* set contender (hack) */
298 lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
300 for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
301 if (phyreg[3 + i] & 0x4) {
302 lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
303 << (6 - i*2);
304 } else {
305 lsid |= 1 << (6 - i*2);
309 cpu_to_be32s(&lsid);
310 PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
311 return lsid;
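/*
 * Editor's note (worked example, not part of the original driver): for a PHY
 * reporting phyreg[0] = 0x04 (physical ID 1), phyreg[1] = 0x3f (gap count 63)
 * and phyreg[2] = 0x80, the quadlet assembled from the first three registers
 * is
 *
 *     0x80400000 | 0x01000000 | 0x003f0000 | 0x00008000 = 0x817f8000
 *
 * before the contender, initiated-reset and per-port bits are ORed in and the
 * result is byte-swapped to big endian.
 */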
314 static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
316 quadlet_t *q = lynx->rcv_page;
317 int phyid, isroot, size;
318 quadlet_t lsid = 0;
319 int i;
321 if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;
323 size = lynx->selfid_size;
324 phyid = lynx->phy_reg0;
326 i = (size > 16 ? 16 : size) / 4 - 1;
327 while (i >= 0) {
328 cpu_to_be32s(&q[i]);
329 i--;
332 if (!lynx->phyic.reg_1394a) {
333 lsid = generate_own_selfid(lynx, host);
336 isroot = (phyid & 2) != 0;
337 phyid >>= 2;
338 PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
339 phyid, (isroot ? "root" : "not root"));
340 reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
342 if (!lynx->phyic.reg_1394a && !size) {
343 hpsb_selfid_received(host, lsid);
346 while (size > 0) {
347 struct selfid *sid = (struct selfid *)q;
349 if (!lynx->phyic.reg_1394a && !sid->extended
350 && (sid->phy_id == (phyid + 1))) {
351 hpsb_selfid_received(host, lsid);
354 if (q[0] == ~q[1]) {
355 PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
356 q[0]);
357 hpsb_selfid_received(host, q[0]);
358 } else {
359 PRINT(KERN_INFO, lynx->id,
360 "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
362 q += 2;
363 size -= 8;
366 if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
367 hpsb_selfid_received(host, lsid);
370 hpsb_selfid_complete(host, phyid, isroot);
372 if (host->in_bus_reset) return; /* in bus reset again */
374 if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER);
375 reg_set_bits(lynx, LINK_CONTROL,
376 LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
377 | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
382 /* This must be called with the respective queue_lock held. */
383 static void send_next(struct ti_lynx *lynx, int what)
385 struct ti_pcl pcl;
386 struct lynx_send_data *d;
387 struct hpsb_packet *packet;
389 d = &lynx->async;
390 if (!list_empty(&d->pcl_queue)) {
391 PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
392 BUG();
395 packet = driver_packet(d->queue.next);
396 list_move_tail(&packet->driver_list, &d->pcl_queue);
398 d->header_dma = pci_map_single(lynx->dev, packet->header,
399 packet->header_size, PCI_DMA_TODEVICE);
400 if (packet->data_size) {
401 d->data_dma = pci_map_single(lynx->dev, packet->data,
402 packet->data_size,
403 PCI_DMA_TODEVICE);
404 } else {
405 d->data_dma = 0;
408 pcl.next = PCL_NEXT_INVALID;
409 pcl.async_error_next = PCL_NEXT_INVALID;
410 pcl.pcl_status = 0;
411 pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
412 #ifndef __BIG_ENDIAN
413 pcl.buffer[0].control |= PCL_BIGENDIAN;
414 #endif
415 pcl.buffer[0].pointer = d->header_dma;
416 pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
417 pcl.buffer[1].pointer = d->data_dma;
419 switch (packet->type) {
420 case hpsb_async:
421 pcl.buffer[0].control |= PCL_CMD_XMT;
422 break;
423 case hpsb_raw:
424 pcl.buffer[0].control |= PCL_CMD_UNFXMT;
425 break;
428 put_pcl(lynx, d->pcl, &pcl);
429 run_pcl(lynx, d->pcl_start, d->channel);
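/*
 * Editor's note (worked example, not part of the original driver): the PCL
 * built here describes the packet in two buffers: buffer[0] points at the
 * DMA-mapped header, buffer[1] at the (possibly empty) payload.  The control
 * word packs the speed code into bits 15:14 and the byte count into the low
 * bits, so a 16-byte header sent at speed code 1 (S200) gives
 *
 *     (1 << 14) | 16 = 0x4010
 *
 * plus PCL_BIGENDIAN on little-endian hosts and PCL_CMD_XMT or PCL_CMD_UNFXMT
 * depending on the packet type.
 */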
433 /* called from subsystem core */
434 static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
436 struct ti_lynx *lynx = host->hostdata;
437 struct lynx_send_data *d;
438 unsigned long flags;
440 if (packet->data_size >= 4096) {
441 PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
442 packet->data_size);
443 return -EOVERFLOW;
446 switch (packet->type) {
447 case hpsb_async:
448 case hpsb_raw:
449 d = &lynx->async;
450 break;
451 default:
452 PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
453 packet->type);
454 return -EINVAL;
457 if (packet->tcode == TCODE_WRITEQ
458 || packet->tcode == TCODE_READQ_RESPONSE) {
459 cpu_to_be32s(&packet->header[3]);
462 spin_lock_irqsave(&d->queue_lock, flags);
464 list_add_tail(&packet->driver_list, &d->queue);
465 if (list_empty(&d->pcl_queue))
466 send_next(lynx, packet->type);
468 spin_unlock_irqrestore(&d->queue_lock, flags);
470 return 0;
474 /* called from subsystem core */
475 static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
477 struct ti_lynx *lynx = host->hostdata;
478 int retval = 0;
479 struct hpsb_packet *packet;
480 LIST_HEAD(packet_list);
481 unsigned long flags;
482 int phy_reg;
484 switch (cmd) {
485 case RESET_BUS:
486 if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
487 retval = 0;
488 break;
491 switch (arg) {
492 case SHORT_RESET:
493 if (lynx->phyic.reg_1394a) {
494 phy_reg = get_phy_reg(lynx, 5);
495 if (phy_reg == -1) {
496 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
497 retval = -1;
498 break;
500 phy_reg |= 0x40;
502 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");
504 lynx->selfid_size = -1;
505 lynx->phy_reg0 = -1;
506 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
507 break;
508 } else {
509 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
510 /* fall through to long bus reset */
512 case LONG_RESET:
513 phy_reg = get_phy_reg(lynx, 1);
514 if (phy_reg == -1) {
515 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
516 retval = -1;
517 break;
519 phy_reg |= 0x40;
521 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");
523 lynx->selfid_size = -1;
524 lynx->phy_reg0 = -1;
525 set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
526 break;
527 case SHORT_RESET_NO_FORCE_ROOT:
528 if (lynx->phyic.reg_1394a) {
529 phy_reg = get_phy_reg(lynx, 1);
530 if (phy_reg == -1) {
531 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
532 retval = -1;
533 break;
535 if (phy_reg & 0x80) {
536 phy_reg &= ~0x80;
537 set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
540 phy_reg = get_phy_reg(lynx, 5);
541 if (phy_reg == -1) {
542 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
543 retval = -1;
544 break;
546 phy_reg |= 0x40;
548 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");
550 lynx->selfid_size = -1;
551 lynx->phy_reg0 = -1;
552 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
553 break;
554 } else {
555 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
556 /* fall through to long bus reset */
558 case LONG_RESET_NO_FORCE_ROOT:
559 phy_reg = get_phy_reg(lynx, 1);
560 if (phy_reg == -1) {
561 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
562 retval = -1;
563 break;
565 phy_reg &= ~0x80;
566 phy_reg |= 0x40;
568 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");
570 lynx->selfid_size = -1;
571 lynx->phy_reg0 = -1;
572 set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
573 break;
574 case SHORT_RESET_FORCE_ROOT:
575 if (lynx->phyic.reg_1394a) {
576 phy_reg = get_phy_reg(lynx, 1);
577 if (phy_reg == -1) {
578 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
579 retval = -1;
580 break;
582 if (!(phy_reg & 0x80)) {
583 phy_reg |= 0x80;
584 set_phy_reg(lynx, 1, phy_reg); /* set RHB */
587 phy_reg = get_phy_reg(lynx, 5);
588 if (phy_reg == -1) {
589 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
590 retval = -1;
591 break;
593 phy_reg |= 0x40;
595 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");
597 lynx->selfid_size = -1;
598 lynx->phy_reg0 = -1;
599 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
600 break;
601 } else {
602 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
603 /* fall through to long bus reset */
605 case LONG_RESET_FORCE_ROOT:
606 phy_reg = get_phy_reg(lynx, 1);
607 if (phy_reg == -1) {
608 PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
609 retval = -1;
610 break;
612 phy_reg |= 0xc0;
614 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");
616 lynx->selfid_size = -1;
617 lynx->phy_reg0 = -1;
618 set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
619 break;
620 default:
621 PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
622 retval = -1;
625 break;
627 case GET_CYCLE_COUNTER:
628 retval = reg_read(lynx, CYCLE_TIMER);
629 break;
631 case SET_CYCLE_COUNTER:
632 reg_write(lynx, CYCLE_TIMER, arg);
633 break;
635 case SET_BUS_ID:
636 reg_write(lynx, LINK_ID,
637 (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
638 break;
640 case ACT_CYCLE_MASTER:
641 if (arg) {
642 reg_set_bits(lynx, LINK_CONTROL,
643 LINK_CONTROL_CYCMASTER);
644 } else {
645 reg_clear_bits(lynx, LINK_CONTROL,
646 LINK_CONTROL_CYCMASTER);
648 break;
650 case CANCEL_REQUESTS:
651 spin_lock_irqsave(&lynx->async.queue_lock, flags);
653 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
654 list_splice_init(&lynx->async.queue, &packet_list);
656 if (list_empty(&lynx->async.pcl_queue)) {
657 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
658 PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
659 } else {
660 struct ti_pcl pcl;
661 u32 ack;
663 PRINT(KERN_INFO, lynx->id, "cancelling async packet that was already in PCL");
665 get_pcl(lynx, lynx->async.pcl, &pcl);
667 packet = driver_packet(lynx->async.pcl_queue.next);
668 list_del_init(&packet->driver_list);
670 pci_unmap_single(lynx->dev, lynx->async.header_dma,
671 packet->header_size, PCI_DMA_TODEVICE);
672 if (packet->data_size) {
673 pci_unmap_single(lynx->dev, lynx->async.data_dma,
674 packet->data_size, PCI_DMA_TODEVICE);
677 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
679 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
680 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
681 ack = (pcl.pcl_status >> 15) & 0xf;
682 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
683 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
684 } else {
685 ack = (pcl.pcl_status >> 15) & 0xf;
687 } else {
688 PRINT(KERN_INFO, lynx->id, "async packet was not completed");
689 ack = ACKX_ABORTED;
691 hpsb_packet_sent(host, packet, ack);
694 while (!list_empty(&packet_list)) {
695 packet = driver_packet(packet_list.next);
696 list_del_init(&packet->driver_list);
697 hpsb_packet_sent(host, packet, ACKX_ABORTED);
700 break;
701 default:
702 PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
703 retval = -1;
706 return retval;
710 /***************************************
711 * IEEE-1394 functionality section END *
712 ***************************************/
715 /********************************************************
716 * Global stuff (interrupt handler, init/shutdown code) *
717 ********************************************************/
720 static irqreturn_t lynx_irq_handler(int irq, void *dev_id)
722 struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
723 struct hpsb_host *host = lynx->host;
724 u32 intmask;
725 u32 linkint;
727 linkint = reg_read(lynx, LINK_INT_STATUS);
728 intmask = reg_read(lynx, PCI_INT_STATUS);
730 if (!(intmask & PCI_INT_INT_PEND))
731 return IRQ_NONE;
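/* Editor's note (not part of the original driver): the IRQ line is requested
 * with IRQF_SHARED in add_card(), so returning IRQ_NONE here when
 * PCI_INT_INT_PEND is clear tells the kernel this device did not raise the
 * interrupt and lets other handlers sharing the line run. */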
733 PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
734 linkint);
736 reg_write(lynx, LINK_INT_STATUS, linkint);
737 reg_write(lynx, PCI_INT_STATUS, intmask);
739 if (intmask & PCI_INT_1394) {
740 if (linkint & LINK_INT_PHY_TIMEOUT) {
741 PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
743 if (linkint & LINK_INT_PHY_BUSRESET) {
744 PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
745 lynx->selfid_size = -1;
746 lynx->phy_reg0 = -1;
747 if (!host->in_bus_reset)
748 hpsb_bus_reset(host);
750 if (linkint & LINK_INT_PHY_REG_RCVD) {
751 u32 reg;
753 spin_lock(&lynx->phy_reg_lock);
754 reg = reg_read(lynx, LINK_PHY);
755 spin_unlock(&lynx->phy_reg_lock);
757 if (!host->in_bus_reset) {
758 PRINT(KERN_INFO, lynx->id,
759 "phy reg received without reset");
760 } else if (reg & 0xf00) {
761 PRINT(KERN_INFO, lynx->id,
762 "unsolicited phy reg %d received",
763 (reg >> 8) & 0xf);
764 } else {
765 lynx->phy_reg0 = reg & 0xff;
766 handle_selfid(lynx, host);
769 if (linkint & LINK_INT_ISO_STUCK) {
770 PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
772 if (linkint & LINK_INT_ASYNC_STUCK) {
773 PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
775 if (linkint & LINK_INT_SENT_REJECT) {
776 PRINT(KERN_INFO, lynx->id, "sent reject");
778 if (linkint & LINK_INT_TX_INVALID_TC) {
779 PRINT(KERN_INFO, lynx->id, "invalid transaction code");
781 if (linkint & LINK_INT_GRF_OVERFLOW) {
782 /* flush FIFO if overflow happens during reset */
783 if (host->in_bus_reset)
784 reg_write(lynx, FIFO_CONTROL,
785 FIFO_CONTROL_GRF_FLUSH);
786 PRINT(KERN_INFO, lynx->id, "GRF overflow");
788 if (linkint & LINK_INT_ITF_UNDERFLOW) {
789 PRINT(KERN_INFO, lynx->id, "ITF underflow");
791 if (linkint & LINK_INT_ATF_UNDERFLOW) {
792 PRINT(KERN_INFO, lynx->id, "ATF underflow");
796 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
797 PRINTD(KERN_DEBUG, lynx->id, "iso receive");
799 spin_lock(&lynx->iso_rcv.lock);
801 lynx->iso_rcv.stat[lynx->iso_rcv.next] =
802 reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
804 lynx->iso_rcv.used++;
805 lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
807 if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
808 || !lynx->iso_rcv.chan_count) {
809 PRINTD(KERN_DEBUG, lynx->id, "stopped");
810 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
813 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
814 CHANNEL_ISO_RCV);
816 spin_unlock(&lynx->iso_rcv.lock);
818 tasklet_schedule(&lynx->iso_rcv.tq);
821 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
822 PRINTD(KERN_DEBUG, lynx->id, "async sent");
823 spin_lock(&lynx->async.queue_lock);
825 if (list_empty(&lynx->async.pcl_queue)) {
826 spin_unlock(&lynx->async.queue_lock);
827 PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
828 } else {
829 struct ti_pcl pcl;
830 u32 ack;
831 struct hpsb_packet *packet;
833 get_pcl(lynx, lynx->async.pcl, &pcl);
835 packet = driver_packet(lynx->async.pcl_queue.next);
836 list_del_init(&packet->driver_list);
838 pci_unmap_single(lynx->dev, lynx->async.header_dma,
839 packet->header_size, PCI_DMA_TODEVICE);
840 if (packet->data_size) {
841 pci_unmap_single(lynx->dev, lynx->async.data_dma,
842 packet->data_size, PCI_DMA_TODEVICE);
845 if (!list_empty(&lynx->async.queue)) {
846 send_next(lynx, hpsb_async);
849 spin_unlock(&lynx->async.queue_lock);
851 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
852 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
853 ack = (pcl.pcl_status >> 15) & 0xf;
854 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
855 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
856 } else {
857 ack = (pcl.pcl_status >> 15) & 0xf;
859 } else {
860 PRINT(KERN_INFO, lynx->id, "async packet was not completed");
861 ack = ACKX_SEND_ERROR;
863 hpsb_packet_sent(host, packet, ack);
867 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
868 PRINTD(KERN_DEBUG, lynx->id, "iso sent");
869 spin_lock(&lynx->iso_send.queue_lock);
871 if (list_empty(&lynx->iso_send.pcl_queue)) {
872 spin_unlock(&lynx->iso_send.queue_lock);
873 PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
874 } else {
875 struct ti_pcl pcl;
876 u32 ack;
877 struct hpsb_packet *packet;
879 get_pcl(lynx, lynx->iso_send.pcl, &pcl);
881 packet = driver_packet(lynx->iso_send.pcl_queue.next);
882 list_del_init(&packet->driver_list);
884 pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
885 packet->header_size, PCI_DMA_TODEVICE);
886 if (packet->data_size) {
887 pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
888 packet->data_size, PCI_DMA_TODEVICE);
890 spin_unlock(&lynx->iso_send.queue_lock);
892 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
893 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
894 ack = (pcl.pcl_status >> 15) & 0xf;
895 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
896 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
897 } else {
898 ack = (pcl.pcl_status >> 15) & 0xf;
900 } else {
901 PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
902 ack = ACKX_SEND_ERROR;
905 hpsb_packet_sent(host, packet, ack);
909 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
910 /* general receive DMA completed */
911 int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
913 PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
914 stat & 0x1fff);
916 if (stat & DMA_CHAN_STAT_SELFID) {
917 lynx->selfid_size = stat & 0x1fff;
918 handle_selfid(lynx, host);
919 } else {
920 quadlet_t *q_data = lynx->rcv_page;
921 if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
922 || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
923 cpu_to_be32s(q_data + 3);
925 hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
928 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
931 return IRQ_HANDLED;
935 static void iso_rcv_bh(struct ti_lynx *lynx)
937 unsigned int idx;
938 quadlet_t *data;
939 unsigned long flags;
941 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
943 while (lynx->iso_rcv.used) {
944 idx = lynx->iso_rcv.last;
945 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
947 data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
948 + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
950 if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
951 PRINT(KERN_ERR, lynx->id,
952 "iso length mismatch 0x%08x/0x%08x", *data,
953 lynx->iso_rcv.stat[idx]);
956 if (lynx->iso_rcv.stat[idx]
957 & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
958 PRINT(KERN_INFO, lynx->id,
959 "iso receive error on %d to 0x%p", idx, data);
960 } else {
961 hpsb_packet_received(lynx->host, data,
962 lynx->iso_rcv.stat[idx] & 0x1fff,
966 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
967 lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
968 lynx->iso_rcv.used--;
971 if (lynx->iso_rcv.chan_count) {
972 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
973 DMA_WORD1_CMP_ENABLE_MASTER);
975 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
979 static void remove_card(struct pci_dev *dev)
981 struct ti_lynx *lynx;
982 struct device *lynx_dev;
983 int i;
985 lynx = pci_get_drvdata(dev);
986 if (!lynx) return;
987 pci_set_drvdata(dev, NULL);
989 lynx_dev = get_device(&lynx->host->device);
991 switch (lynx->state) {
992 case is_host:
993 reg_write(lynx, PCI_INT_ENABLE, 0);
994 hpsb_remove_host(lynx->host);
995 case have_intr:
996 reg_write(lynx, PCI_INT_ENABLE, 0);
997 free_irq(lynx->dev->irq, lynx);
999 /* Disable IRM Contender and LCtrl */
1000 if (lynx->phyic.reg_1394a)
1001 set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));
1003 /* Let all other nodes know to ignore us */
1004 lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
1006 case have_iomappings:
1007 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1008 /* Fix buggy cards with autoboot pin not tied low: */
1009 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1010 iounmap(lynx->registers);
1011 iounmap(lynx->local_rom);
1012 iounmap(lynx->local_ram);
1013 iounmap(lynx->aux_port);
1014 case have_1394_buffers:
1015 for (i = 0; i < ISORCV_PAGES; i++) {
1016 if (lynx->iso_rcv.page[i]) {
1017 pci_free_consistent(lynx->dev, PAGE_SIZE,
1018 lynx->iso_rcv.page[i],
1019 lynx->iso_rcv.page_dma[i]);
1022 pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
1023 lynx->rcv_page_dma);
1024 case have_aux_buf:
1025 case have_pcl_mem:
1026 pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
1027 lynx->pcl_mem_dma);
1028 case clear:
1029 /* do nothing - already freed */
1033 tasklet_kill(&lynx->iso_rcv.tq);
1035 if (lynx_dev)
1036 put_device(lynx_dev);
1040 static int __devinit add_card(struct pci_dev *dev,
1041 const struct pci_device_id *devid_is_unused)
1043 #define FAIL(fmt, args...) do { \
1044 PRINT_G(KERN_ERR, fmt , ## args); \
1045 remove_card(dev); \
1046 return error; \
1047 } while (0)
1049 char irq_buf[16];
1050 struct hpsb_host *host;
1051 struct ti_lynx *lynx; /* shortcut to currently handled device */
1052 struct ti_pcl pcl;
1053 u32 *pcli;
1054 int i;
1055 int error;
1057 error = -ENXIO;
1059 if (pci_set_dma_mask(dev, DMA_BIT_MASK(32)))
1060 FAIL("DMA address limits not supported for PCILynx hardware");
1061 if (pci_enable_device(dev))
1062 FAIL("failed to enable PCILynx hardware");
1063 pci_set_master(dev);
1065 error = -ENOMEM;
1067 host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
1068 if (!host) FAIL("failed to allocate control structure memory");
1070 lynx = host->hostdata;
1071 lynx->id = card_id++;
1072 lynx->dev = dev;
1073 lynx->state = clear;
1074 lynx->host = host;
1075 host->pdev = dev;
1076 pci_set_drvdata(dev, lynx);
1078 spin_lock_init(&lynx->lock);
1079 spin_lock_init(&lynx->phy_reg_lock);
1081 lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
1082 &lynx->pcl_mem_dma);
1084 if (lynx->pcl_mem != NULL) {
1085 lynx->state = have_pcl_mem;
1086 PRINT(KERN_INFO, lynx->id,
1087 "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
1088 lynx->pcl_mem);
1089 } else {
1090 FAIL("failed to allocate PCL memory area");
1093 lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
1094 &lynx->rcv_page_dma);
1095 if (lynx->rcv_page == NULL) {
1096 FAIL("failed to allocate receive buffer");
1098 lynx->state = have_1394_buffers;
1100 for (i = 0; i < ISORCV_PAGES; i++) {
1101 lynx->iso_rcv.page[i] =
1102 pci_alloc_consistent(dev, PAGE_SIZE,
1103 &lynx->iso_rcv.page_dma[i]);
1104 if (lynx->iso_rcv.page[i] == NULL) {
1105 FAIL("failed to allocate iso receive buffers");
1109 lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
1110 PCILYNX_MAX_REGISTER);
1111 lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
1112 lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
1113 lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
1114 PCILYNX_MAX_MEMORY);
1115 lynx->state = have_iomappings;
1117 if (lynx->registers == NULL) {
1118 FAIL("failed to remap registers - card not accessible");
1121 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1122 /* Fix buggy cards with autoboot pin not tied low: */
1123 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1125 sprintf (irq_buf, "%d", dev->irq);
1127 if (!request_irq(dev->irq, lynx_irq_handler, IRQF_SHARED,
1128 PCILYNX_DRIVER_NAME, lynx)) {
1129 PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
1130 lynx->state = have_intr;
1131 } else {
1132 FAIL("failed to allocate shared interrupt %s", irq_buf);
1135 /* alloc_pcl return values are not checked, it is expected that the
1136 * provided PCL space is sufficient for the initial allocations */
1137 lynx->rcv_pcl = alloc_pcl(lynx);
1138 lynx->rcv_pcl_start = alloc_pcl(lynx);
1139 lynx->async.pcl = alloc_pcl(lynx);
1140 lynx->async.pcl_start = alloc_pcl(lynx);
1141 lynx->iso_send.pcl = alloc_pcl(lynx);
1142 lynx->iso_send.pcl_start = alloc_pcl(lynx);
1144 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1145 lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
1147 lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
1149 /* all allocations successful - simple init stuff follows */
1151 reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
1153 tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
1154 (unsigned long)lynx);
1156 spin_lock_init(&lynx->iso_rcv.lock);
1158 spin_lock_init(&lynx->async.queue_lock);
1159 lynx->async.channel = CHANNEL_ASYNC_SEND;
1160 spin_lock_init(&lynx->iso_send.queue_lock);
1161 lynx->iso_send.channel = CHANNEL_ISO_SEND;
1163 PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
1164 "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
1165 lynx->local_ram, lynx->aux_port);
1167 /* now, looking for PHY register set */
1168 if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
1169 lynx->phyic.reg_1394a = 1;
1170 PRINT(KERN_INFO, lynx->id,
1171 "found 1394a-compliant PHY (using extended register set)");
1172 lynx->phyic.vendor = get_phy_vendorid(lynx);
1173 lynx->phyic.product = get_phy_productid(lynx);
1174 } else {
1175 lynx->phyic.reg_1394a = 0;
1176 PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
1179 lynx->selfid_size = -1;
1180 lynx->phy_reg0 = -1;
1182 INIT_LIST_HEAD(&lynx->async.queue);
1183 INIT_LIST_HEAD(&lynx->async.pcl_queue);
1184 INIT_LIST_HEAD(&lynx->iso_send.queue);
1185 INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
1187 pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
1188 put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
1190 pcl.next = PCL_NEXT_INVALID;
1191 pcl.async_error_next = PCL_NEXT_INVALID;
1193 pcl.buffer[0].control = PCL_CMD_RCV | 16;
1194 #ifndef __BIG_ENDIAN
1195 pcl.buffer[0].control |= PCL_BIGENDIAN;
1196 #endif
1197 pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
1199 pcl.buffer[0].pointer = lynx->rcv_page_dma;
1200 pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
1201 put_pcl(lynx, lynx->rcv_pcl, &pcl);
1203 pcl.next = pcl_bus(lynx, lynx->async.pcl);
1204 pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
1205 put_pcl(lynx, lynx->async.pcl_start, &pcl);
1207 pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
1208 pcl.async_error_next = PCL_NEXT_INVALID;
1209 put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
1211 pcl.next = PCL_NEXT_INVALID;
1212 pcl.async_error_next = PCL_NEXT_INVALID;
1213 pcl.buffer[0].control = PCL_CMD_RCV | 4;
1214 #ifndef __BIG_ENDIAN
1215 pcl.buffer[0].control |= PCL_BIGENDIAN;
1216 #endif
1217 pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
1219 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1220 int page = i / ISORCV_PER_PAGE;
1221 int sec = i % ISORCV_PER_PAGE;
1223 pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
1224 + sec * MAX_ISORCV_SIZE;
1225 pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
1226 put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
1229 pcli = (u32 *)&pcl;
1230 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1231 pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
1233 put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
1235 /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
1236 reg_write(lynx, FIFO_SIZES, 0x003030a0);
1237 /* 20 byte threshold before triggering PCI transfer */
1238 reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
1239 /* threshold on both send FIFOs before transmitting:
1240 FIFO size - cache line size - 1 */
1241 i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
1242 i = 0x30 - i - 1;
1243 reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
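/*
 * Editor's note (worked example, not part of the original driver): the PCI
 * cache line size register is specified in units of 32-bit words, so with a
 * typical value of 8 (32-byte cache lines) the formula above gives
 *
 *     i = 0x30 - 8 - 1 = 0x27
 *
 * and FIFO_XMIT_THRESHOLD is written as 0x2727, i.e. both the ITF and the ATF
 * thresholds are programmed to 0x27 following the "FIFO size - cache line
 * size - 1" rule from the comment.
 */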
1245 reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
1247 reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
1248 | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
1249 | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
1250 | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
1251 | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
1252 | LINK_INT_ATF_UNDERFLOW);
1254 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1255 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
1256 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1257 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
1258 DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
1259 | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
1260 | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
1262 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1264 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1265 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
1266 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1267 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1269 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
1271 reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
1272 | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
1273 | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
1274 | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);
1276 if (!lynx->phyic.reg_1394a) {
1277 if (!hpsb_disable_irm) {
1278 reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
1279 reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
1281 } else {
1282 /* set the contender (if appropriate) and LCtrl bit in the
1283 * extended PHY register set. (Should check that PHY_02_EXTENDED
1284 * is set in register 2?)
1286 i = get_phy_reg(lynx, 4);
1287 i |= PHY_04_LCTRL;
1288 if (hpsb_disable_irm)
1289 i &= ~PHY_04_CONTENDER;
1290 else
1291 i |= PHY_04_CONTENDER;
1292 if (i != -1) set_phy_reg(lynx, 4, i);
1295 if (!skip_eeprom)
1297 /* needed for i2c communication with serial eeprom */
1298 struct i2c_adapter *i2c_ad;
1299 struct i2c_algo_bit_data i2c_adapter_data;
1301 error = -ENOMEM;
1302 i2c_ad = kzalloc(sizeof(*i2c_ad), GFP_KERNEL);
1303 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1305 strlcpy(i2c_ad->name, "PCILynx I2C", sizeof(i2c_ad->name));
1306 i2c_adapter_data = bit_data;
1307 i2c_ad->algo_data = &i2c_adapter_data;
1308 i2c_adapter_data.data = lynx;
1309 i2c_ad->dev.parent = &dev->dev;
1311 PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
1312 reg_read(lynx, SERIAL_EEPROM_CONTROL));
1314 /* reset hardware to sane state */
1315 lynx->i2c_driven_state = 0x00000070;
1316 reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
1318 if (i2c_bit_add_bus(i2c_ad) < 0)
1320 kfree(i2c_ad);
1321 error = -ENXIO;
1322 FAIL("unable to register i2c");
1324 else
1326 /* do i2c stuff */
1327 unsigned char i2c_cmd = 0x10;
1328 struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
1329 { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
1332 /* we use i2c_transfer because we have no i2c_client
1333 at hand */
1334 if (i2c_transfer(i2c_ad, msg, 2) < 0) {
1335 PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
1336 } else {
1337 PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
1339 for (i = 0; i < 5 ; i++)
1340 PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
1341 i, be32_to_cpu(lynx->bus_info_block[i]));
1343 /* check info_length, crc_length and the 1394 magic number to verify that this really is a bus info block */
1344 if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
1345 (lynx->bus_info_block[1] == IEEE1394_BUSID_MAGIC))
1347 PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from serial eeprom");
1348 } else {
1349 kfree(i2c_ad);
1350 error = -ENXIO;
1351 FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
1356 i2c_del_adapter(i2c_ad);
1357 kfree(i2c_ad);
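/*
 * Editor's note (not part of the original driver): the two-message
 * i2c_transfer above first writes the word address 0x10 to the EEPROM at
 * slave address 0x50 and then reads back 20 bytes, i.e. the five quadlets of
 * the bus info block that are dumped and validated above.  The adapter is
 * only needed for this one transaction and is deleted again right away.
 */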
1361 host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
1362 host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
1363 host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
1364 host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
1365 if (!lynx->phyic.reg_1394a)
1366 host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
1367 else
1368 host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
1370 if (hpsb_add_host(host)) {
1371 error = -ENOMEM;
1372 FAIL("Failed to register host with highlevel");
1375 lynx->state = is_host;
1377 return 0;
1378 #undef FAIL
1382 static struct pci_device_id pci_table[] = {
1384 .vendor = PCI_VENDOR_ID_TI,
1385 .device = PCI_DEVICE_ID_TI_PCILYNX,
1386 .subvendor = PCI_ANY_ID,
1387 .subdevice = PCI_ANY_ID,
1389 { } /* Terminating entry */
1392 static struct pci_driver lynx_pci_driver = {
1393 .name = PCILYNX_DRIVER_NAME,
1394 .id_table = pci_table,
1395 .probe = add_card,
1396 .remove = remove_card,
1399 static struct hpsb_host_driver lynx_driver = {
1400 .owner = THIS_MODULE,
1401 .name = PCILYNX_DRIVER_NAME,
1402 .set_hw_config_rom = NULL,
1403 .transmit_packet = lynx_transmit,
1404 .devctl = lynx_devctl,
1405 .isoctl = NULL,
1408 MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
1409 MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
1410 MODULE_LICENSE("GPL");
1411 MODULE_SUPPORTED_DEVICE("pcilynx");
1412 MODULE_DEVICE_TABLE(pci, pci_table);
1414 static int __init pcilynx_init(void)
1416 int ret;
1418 ret = pci_register_driver(&lynx_pci_driver);
1419 if (ret < 0) {
1420 PRINT_G(KERN_ERR, "PCI module init failed");
1421 return ret;
1424 return 0;
1427 static void __exit pcilynx_cleanup(void)
1429 pci_unregister_driver(&lynx_pci_driver);
1433 module_init(pcilynx_init);
1434 module_exit(pcilynx_cleanup);