drivers/ieee1394/pcilynx.c
1 /*
2 * pcilynx.c - Texas Instruments PCILynx driver
3 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
4 * Stephan Linz <linz@mazet.de>
5 * Manfred Weihs <weihs@ict.tuwien.ac.at>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software Foundation,
19 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Contributions:
25 * Manfred Weihs <weihs@ict.tuwien.ac.at>
26 * reading bus info block (containing GUID) from serial
27 * eeprom via i2c and storing it in config ROM
28 * Reworked code for initiating bus resets
29 * (long, short, with or without hold-off)
30 * Enhancements in async and iso send code
33 #include <linux/kernel.h>
34 #include <linux/slab.h>
35 #include <linux/interrupt.h>
36 #include <linux/wait.h>
37 #include <linux/errno.h>
38 #include <linux/module.h>
39 #include <linux/moduleparam.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/fs.h>
43 #include <linux/poll.h>
44 #include <linux/kdev_t.h>
45 #include <linux/dma-mapping.h>
46 #include <asm/byteorder.h>
47 #include <asm/atomic.h>
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50 #include <asm/irq.h>
52 #include "csr1212.h"
53 #include "ieee1394.h"
54 #include "ieee1394_types.h"
55 #include "hosts.h"
56 #include "ieee1394_core.h"
57 #include "highlevel.h"
58 #include "pcilynx.h"
60 #include <linux/i2c.h>
61 #include <linux/i2c-algo-bit.h>
63 /* print general (card independent) information */
64 #define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
65 /* print card specific information */
66 #define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
68 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
69 #define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
70 #define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
71 #else
72 #define PRINT_GD(level, fmt, args...) do {} while (0)
73 #define PRINTD(level, card, fmt, args...) do {} while (0)
74 #endif
77 /* Module Parameters */
78 static int skip_eeprom;
79 module_param(skip_eeprom, int, 0444);
80 MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");
83 static struct hpsb_host_driver lynx_driver;
84 static unsigned int card_id;
89 * I2C stuff
92 /* the i2c stuff was inspired by i2c-philips-par.c */
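/* The callbacks below bit-bang the EEPROM's I2C bus through the
 * SERIAL_EEPROM_CONTROL register: bit 0x00000040 is SCL, bit 0x00000010
 * is SDA.  i2c_driven_state caches the last value written so single bits
 * can be toggled, and i2c-algo-bit drives the protocol via bit_data. */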
94 static void bit_setscl(void *data, int state)
96 if (state) {
97 ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
98 } else {
99 ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
101 reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
104 static void bit_setsda(void *data, int state)
106 if (state) {
107 ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
108 } else {
109 ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
111 reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
114 static int bit_getscl(void *data)
116 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
119 static int bit_getsda(void *data)
121 return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
124 static int bit_reg(struct i2c_client *client)
126 return 0;
129 static int bit_unreg(struct i2c_client *client)
131 return 0;
134 static struct i2c_algo_bit_data bit_data = {
135 .setsda = bit_setsda,
136 .setscl = bit_setscl,
137 .getsda = bit_getsda,
138 .getscl = bit_getscl,
139 .udelay = 5,
140 .mdelay = 5,
141 .timeout = 100,
144 static struct i2c_adapter bit_ops = {
145 .id = 0xAA, //FIXME: probably we should get an id in i2c-id.h
146 .client_register = bit_reg,
147 .client_unregister = bit_unreg,
148 .name = "PCILynx I2C",
154 * PCL handling functions.
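/* PCLs are tracked in the pcl_bmap bitmap (one bit per PCL in the
 * LOCALRAM_SIZE PCL area); alloc_pcl() returns the lowest free index,
 * or -1 if the map is full. */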
157 static pcl_t alloc_pcl(struct ti_lynx *lynx)
159 u8 m;
160 int i, j;
162 spin_lock(&lynx->lock);
163 /* FIXME - use ffz() to make this readable */
164 for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
165 m = lynx->pcl_bmap[i];
166 for (j = 0; j < 8; j++) {
167 if (m & 1<<j) {
168 continue;
170 m |= 1<<j;
171 lynx->pcl_bmap[i] = m;
172 spin_unlock(&lynx->lock);
173 return 8 * i + j;
176 spin_unlock(&lynx->lock);
178 return -1;
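/* The scan above walks pcl_bmap byte by byte under lynx->lock.  A
 * possible ffz()-based variant, as the FIXME suggests (untested sketch;
 * a fully set byte yields ffz() == 8 once promoted to unsigned long):
 *
 *	for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
 *		j = ffz(lynx->pcl_bmap[i]);
 *		if (j < 8) {
 *			lynx->pcl_bmap[i] |= 1 << j;
 *			spin_unlock(&lynx->lock);
 *			return 8 * i + j;
 *		}
 *	}
 */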
182 #if 0
183 static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
185 int off, bit;
187 off = pclid / 8;
188 bit = pclid % 8;
190 if (pclid < 0) {
191 return;
194 spin_lock(&lynx->lock);
195 if (lynx->pcl_bmap[off] & 1<<bit) {
196 lynx->pcl_bmap[off] &= ~(1<<bit);
197 } else {
198 PRINT(KERN_ERR, lynx->id,
199 "attempted to free unallocated PCL %d", pclid);
201 spin_unlock(&lynx->lock);
204 /* functions useful for debugging */
205 static void pretty_print_pcl(const struct ti_pcl *pcl)
207 int i;
209 printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
210 pcl->next, pcl->user_data, pcl->pcl_status,
211 pcl->remaining_transfer_count, pcl->next_data_buffer);
213 printk("PCL");
214 for (i=0; i<13; i++) {
215 printk(" c%x:%08x d%x:%08x",
216 i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
217 if (!(i & 0x3) && (i != 12)) printk("\nPCL");
219 printk("\n");
222 static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
224 struct ti_pcl pcl;
226 get_pcl(lynx, pclid, &pcl);
227 pretty_print_pcl(&pcl);
229 #endif
233 /***********************************
234 * IEEE-1394 functionality section *
235 ***********************************/
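/* PHY register access: get_phy_reg() issues a LINK_PHY_READ command and
 * polls LINK_PHY until the register address echoed in bits 8-11 matches
 * the request (giving up after 10000 iterations); set_phy_reg() is a
 * single LINK_PHY_WRITE.  Both are serialized by phy_reg_lock. */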
238 static int get_phy_reg(struct ti_lynx *lynx, int addr)
240 int retval;
241 int i = 0;
243 unsigned long flags;
245 if (addr > 15) {
246 PRINT(KERN_ERR, lynx->id,
247 "%s: PHY register address %d out of range",
248 __FUNCTION__, addr);
249 return -1;
252 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
254 reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
255 do {
256 retval = reg_read(lynx, LINK_PHY);
258 if (i > 10000) {
259 PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
260 __FUNCTION__);
261 retval = -1;
262 break;
264 i++;
265 } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
267 reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
268 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
270 if (retval != -1) {
271 return retval & 0xff;
272 } else {
273 return -1;
277 static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
279 unsigned long flags;
281 if (addr > 15) {
282 PRINT(KERN_ERR, lynx->id,
283 "%s: PHY register address %d out of range", __FUNCTION__, addr);
284 return -1;
287 if (val > 0xff) {
288 PRINT(KERN_ERR, lynx->id,
289 "%s: PHY register value %d out of range", __FUNCTION__, val);
290 return -1;
293 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
295 reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
296 | LINK_PHY_WDATA(val));
298 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
300 return 0;
303 static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
305 int reg;
307 if (page > 7) {
308 PRINT(KERN_ERR, lynx->id,
309 "%s: PHY page %d out of range", __FUNCTION__, page);
310 return -1;
313 reg = get_phy_reg(lynx, 7);
314 if (reg != -1) {
315 reg &= 0x1f;
316 reg |= (page << 5);
317 set_phy_reg(lynx, 7, reg);
318 return 0;
319 } else {
320 return -1;
324 #if 0 /* not needed at this time */
325 static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
327 int reg;
329 if (port > 15) {
330 PRINT(KERN_ERR, lynx->id,
331 "%s: PHY port %d out of range", __FUNCTION__, port);
332 return -1;
335 reg = get_phy_reg(lynx, 7);
336 if (reg != -1) {
337 reg &= 0xf0;
338 reg |= port;
339 set_phy_reg(lynx, 7, reg);
340 return 0;
341 } else {
342 return -1;
345 #endif
347 static u32 get_phy_vendorid(struct ti_lynx *lynx)
349 u32 pvid = 0;
350 sel_phy_reg_page(lynx, 1);
351 pvid |= (get_phy_reg(lynx, 10) << 16);
352 pvid |= (get_phy_reg(lynx, 11) << 8);
353 pvid |= get_phy_reg(lynx, 12);
354 PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
355 return pvid;
358 static u32 get_phy_productid(struct ti_lynx *lynx)
360 u32 id = 0;
361 sel_phy_reg_page(lynx, 1);
362 id |= (get_phy_reg(lynx, 13) << 16);
363 id |= (get_phy_reg(lynx, 14) << 8);
364 id |= get_phy_reg(lynx, 15);
365 PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
366 return id;
369 static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
370 struct hpsb_host *host)
372 quadlet_t lsid;
373 char phyreg[7];
374 int i;
376 phyreg[0] = lynx->phy_reg0;
377 for (i = 1; i < 7; i++) {
378 phyreg[i] = get_phy_reg(lynx, i);
381 /* FIXME? We assume a TSB21LV03A phy here. This code doesn't support
382 more than 3 ports on the PHY anyway. */
384 lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
385 lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
386 lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
387 if (!hpsb_disable_irm)
388 lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
389 /* lsid |= 1 << 11; *//* set contender (hack) */
390 lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
392 for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
393 if (phyreg[3 + i] & 0x4) {
394 lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
395 << (6 - i*2);
396 } else {
397 lsid |= 1 << (6 - i*2);
401 cpu_to_be32s(&lsid);
402 PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
403 return lsid;
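/* handle_selfid() post-processes the self-ID phase: it byte-swaps the
 * quadlets DMA'd into rcv_page, checks each against its inverted
 * companion (q[0] == ~q[1]) before passing it to hpsb_selfid_received(),
 * inserts the locally generated self-ID for pre-1394a PHYs, and ends
 * with hpsb_selfid_complete(). */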
406 static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
408 quadlet_t *q = lynx->rcv_page;
409 int phyid, isroot, size;
410 quadlet_t lsid = 0;
411 int i;
413 if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;
415 size = lynx->selfid_size;
416 phyid = lynx->phy_reg0;
418 i = (size > 16 ? 16 : size) / 4 - 1;
419 while (i >= 0) {
420 cpu_to_be32s(&q[i]);
421 i--;
424 if (!lynx->phyic.reg_1394a) {
425 lsid = generate_own_selfid(lynx, host);
428 isroot = (phyid & 2) != 0;
429 phyid >>= 2;
430 PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
431 phyid, (isroot ? "root" : "not root"));
432 reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
434 if (!lynx->phyic.reg_1394a && !size) {
435 hpsb_selfid_received(host, lsid);
438 while (size > 0) {
439 struct selfid *sid = (struct selfid *)q;
441 if (!lynx->phyic.reg_1394a && !sid->extended
442 && (sid->phy_id == (phyid + 1))) {
443 hpsb_selfid_received(host, lsid);
446 if (q[0] == ~q[1]) {
447 PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
448 q[0]);
449 hpsb_selfid_received(host, q[0]);
450 } else {
451 PRINT(KERN_INFO, lynx->id,
452 "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
454 q += 2;
455 size -= 8;
458 if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
459 hpsb_selfid_received(host, lsid);
462 hpsb_selfid_complete(host, phyid, isroot);
464 if (host->in_bus_reset) return; /* in bus reset again */
466 if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think we need this here
467 reg_set_bits(lynx, LINK_CONTROL,
468 LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
469 | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
474 /* This must be called with the respective queue_lock held. */
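/* send_next() moves the head of d->queue onto d->pcl_queue, maps the
 * packet header (and data, if present) for DMA, builds a transmit PCL
 * with the command matching the packet type (async, iso or raw) and
 * starts it with run_pcl(). */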
475 static void send_next(struct ti_lynx *lynx, int what)
477 struct ti_pcl pcl;
478 struct lynx_send_data *d;
479 struct hpsb_packet *packet;
481 d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
482 if (!list_empty(&d->pcl_queue)) {
483 PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
484 BUG();
487 packet = driver_packet(d->queue.next);
488 list_move_tail(&packet->driver_list, &d->pcl_queue);
490 d->header_dma = pci_map_single(lynx->dev, packet->header,
491 packet->header_size, PCI_DMA_TODEVICE);
492 if (packet->data_size) {
493 d->data_dma = pci_map_single(lynx->dev, packet->data,
494 packet->data_size,
495 PCI_DMA_TODEVICE);
496 } else {
497 d->data_dma = 0;
500 pcl.next = PCL_NEXT_INVALID;
501 pcl.async_error_next = PCL_NEXT_INVALID;
502 pcl.pcl_status = 0;
503 pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
504 #ifndef __BIG_ENDIAN
505 pcl.buffer[0].control |= PCL_BIGENDIAN;
506 #endif
507 pcl.buffer[0].pointer = d->header_dma;
508 pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
509 pcl.buffer[1].pointer = d->data_dma;
511 switch (packet->type) {
512 case hpsb_async:
513 pcl.buffer[0].control |= PCL_CMD_XMT;
514 break;
515 case hpsb_iso:
516 pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
517 break;
518 case hpsb_raw:
519 pcl.buffer[0].control |= PCL_CMD_UNFXMT;
520 break;
523 put_pcl(lynx, d->pcl, &pcl);
524 run_pcl(lynx, d->pcl_start, d->channel);
528 /* called from subsystem core */
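/* lynx_transmit() byte-swaps the quadlet payload of write-quadlet and
 * read-quadlet-response packets, appends the packet to the async or iso
 * send queue and, if no packet currently occupies the PCL, kicks off
 * transmission via send_next(). */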
529 static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
531 struct ti_lynx *lynx = host->hostdata;
532 struct lynx_send_data *d;
533 unsigned long flags;
535 if (packet->data_size >= 4096) {
536 PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
537 packet->data_size);
538 return -EOVERFLOW;
541 switch (packet->type) {
542 case hpsb_async:
543 case hpsb_raw:
544 d = &lynx->async;
545 break;
546 case hpsb_iso:
547 d = &lynx->iso_send;
548 break;
549 default:
550 PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
551 packet->type);
552 return -EINVAL;
555 if (packet->tcode == TCODE_WRITEQ
556 || packet->tcode == TCODE_READQ_RESPONSE) {
557 cpu_to_be32s(&packet->header[3]);
560 spin_lock_irqsave(&d->queue_lock, flags);
562 list_add_tail(&packet->driver_list, &d->queue);
563 if (list_empty(&d->pcl_queue))
564 send_next(lynx, packet->type);
566 spin_unlock_irqrestore(&d->queue_lock, flags);
568 return 0;
572 /* called from subsystem core */
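/* lynx_devctl() dispatches host controller commands from the core.  The
 * RESET_BUS variants differ only in which PHY bits they set: ISBR in
 * register 5 for short resets (1394a PHYs only), IBR plus RHB handling
 * in register 1 for long resets; old PHYs fall through from the short
 * to the corresponding long variant. */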
573 static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
575 struct ti_lynx *lynx = host->hostdata;
576 int retval = 0;
577 struct hpsb_packet *packet;
578 LIST_HEAD(packet_list);
579 unsigned long flags;
580 int phy_reg;
582 switch (cmd) {
583 case RESET_BUS:
584 if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
585 retval = 0;
586 break;
589 switch (arg) {
590 case SHORT_RESET:
591 if (lynx->phyic.reg_1394a) {
592 phy_reg = get_phy_reg(lynx, 5);
593 if (phy_reg == -1) {
594 PRINT(KERN_ERR, lynx->id, "cannot reset bus because phy register read failed");
595 retval = -1;
596 break;
598 phy_reg |= 0x40;
600 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");
602 lynx->selfid_size = -1;
603 lynx->phy_reg0 = -1;
604 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
605 break;
606 } else {
607 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset because of old phy");
608 /* fall through to long bus reset */
610 case LONG_RESET:
611 phy_reg = get_phy_reg(lynx, 1);
612 if (phy_reg == -1) {
613 PRINT(KERN_ERR, lynx->id, "cannot reset bus because phy register read failed");
614 retval = -1;
615 break;
617 phy_reg |= 0x40;
619 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");
621 lynx->selfid_size = -1;
622 lynx->phy_reg0 = -1;
623 set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
624 break;
625 case SHORT_RESET_NO_FORCE_ROOT:
626 if (lynx->phyic.reg_1394a) {
627 phy_reg = get_phy_reg(lynx, 1);
628 if (phy_reg == -1) {
629 PRINT(KERN_ERR, lynx->id, "cannot reset bus because phy register read failed");
630 retval = -1;
631 break;
633 if (phy_reg & 0x80) {
634 phy_reg &= ~0x80;
635 set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
638 phy_reg = get_phy_reg(lynx, 5);
639 if (phy_reg == -1) {
640 PRINT(KERN_ERR, lynx->id, "cannot reset bus because phy register read failed");
641 retval = -1;
642 break;
644 phy_reg |= 0x40;
646 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");
648 lynx->selfid_size = -1;
649 lynx->phy_reg0 = -1;
650 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
651 break;
652 } else {
653 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset because of old phy");
654 /* fall through to long bus reset */
656 case LONG_RESET_NO_FORCE_ROOT:
657 phy_reg = get_phy_reg(lynx, 1);
658 if (phy_reg == -1) {
659 PRINT(KERN_ERR, lynx->id, "cannot reset bus because phy register read failed");
660 retval = -1;
661 break;
663 phy_reg &= ~0x80;
664 phy_reg |= 0x40;
666 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");
668 lynx->selfid_size = -1;
669 lynx->phy_reg0 = -1;
670 set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
671 break;
672 case SHORT_RESET_FORCE_ROOT:
673 if (lynx->phyic.reg_1394a) {
674 phy_reg = get_phy_reg(lynx, 1);
675 if (phy_reg == -1) {
676 PRINT(KERN_ERR, lynx->id, "cannot reset bus because phy register read failed");
677 retval = -1;
678 break;
680 if (!(phy_reg & 0x80)) {
681 phy_reg |= 0x80;
682 set_phy_reg(lynx, 1, phy_reg); /* set RHB */
685 phy_reg = get_phy_reg(lynx, 5);
686 if (phy_reg == -1) {
687 PRINT(KERN_ERR, lynx->id, "cannot reset bus because phy register read failed");
688 retval = -1;
689 break;
691 phy_reg |= 0x40;
693 PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");
695 lynx->selfid_size = -1;
696 lynx->phy_reg0 = -1;
697 set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
698 break;
699 } else {
700 PRINT(KERN_INFO, lynx->id, "cannot do short bus reset because of old phy");
701 /* fall through to long bus reset */
703 case LONG_RESET_FORCE_ROOT:
704 phy_reg = get_phy_reg(lynx, 1);
705 if (phy_reg == -1) {
706 PRINT(KERN_ERR, lynx->id, "cannot reset bus because phy register read failed");
707 retval = -1;
708 break;
710 phy_reg |= 0xc0;
712 PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");
714 lynx->selfid_size = -1;
715 lynx->phy_reg0 = -1;
716 set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
717 break;
718 default:
719 PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
720 retval = -1;
723 break;
725 case GET_CYCLE_COUNTER:
726 retval = reg_read(lynx, CYCLE_TIMER);
727 break;
729 case SET_CYCLE_COUNTER:
730 reg_write(lynx, CYCLE_TIMER, arg);
731 break;
733 case SET_BUS_ID:
734 reg_write(lynx, LINK_ID,
735 (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
736 break;
738 case ACT_CYCLE_MASTER:
739 if (arg) {
740 reg_set_bits(lynx, LINK_CONTROL,
741 LINK_CONTROL_CYCMASTER);
742 } else {
743 reg_clear_bits(lynx, LINK_CONTROL,
744 LINK_CONTROL_CYCMASTER);
746 break;
748 case CANCEL_REQUESTS:
749 spin_lock_irqsave(&lynx->async.queue_lock, flags);
751 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
752 list_splice(&lynx->async.queue, &packet_list);
753 INIT_LIST_HEAD(&lynx->async.queue);
755 if (list_empty(&lynx->async.pcl_queue)) {
756 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
757 PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
758 } else {
759 struct ti_pcl pcl;
760 u32 ack;
761 struct hpsb_packet *packet;
763 PRINT(KERN_INFO, lynx->id, "cancelling async packet that was already in PCL");
765 get_pcl(lynx, lynx->async.pcl, &pcl);
767 packet = driver_packet(lynx->async.pcl_queue.next);
768 list_del_init(&packet->driver_list);
770 pci_unmap_single(lynx->dev, lynx->async.header_dma,
771 packet->header_size, PCI_DMA_TODEVICE);
772 if (packet->data_size) {
773 pci_unmap_single(lynx->dev, lynx->async.data_dma,
774 packet->data_size, PCI_DMA_TODEVICE);
777 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
779 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
780 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
781 ack = (pcl.pcl_status >> 15) & 0xf;
782 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
783 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
784 } else {
785 ack = (pcl.pcl_status >> 15) & 0xf;
787 } else {
788 PRINT(KERN_INFO, lynx->id, "async packet was not completed");
789 ack = ACKX_ABORTED;
791 hpsb_packet_sent(host, packet, ack);
794 while (!list_empty(&packet_list)) {
795 packet = driver_packet(packet_list.next);
796 list_del_init(&packet->driver_list);
797 hpsb_packet_sent(host, packet, ACKX_ABORTED);
800 break;
802 case ISO_LISTEN_CHANNEL:
803 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
805 if (lynx->iso_rcv.chan_count++ == 0) {
806 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
807 DMA_WORD1_CMP_ENABLE_MASTER);
810 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
811 break;
813 case ISO_UNLISTEN_CHANNEL:
814 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
816 if (--lynx->iso_rcv.chan_count == 0) {
817 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
821 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
822 break;
824 default:
825 PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
826 retval = -1;
829 return retval;
833 /***************************************
834 * IEEE-1394 functionality section END *
835 ***************************************/
838 /********************************************************
839 * Global stuff (interrupt handler, init/shutdown code) *
840 ********************************************************/
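/* lynx_irq_handler() acknowledges LINK_INT_STATUS and PCI_INT_STATUS and
 * then handles, in turn: link events (bus reset, PHY register received,
 * FIFO conditions), iso receive DMA halts (completion work is deferred
 * to the iso_rcv tasklet), async and iso send completions (reporting the
 * ack code taken from the PCL status) and async receive completion. */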
843 static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
844 struct pt_regs *regs_are_unused)
846 struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
847 struct hpsb_host *host = lynx->host;
848 u32 intmask;
849 u32 linkint;
851 linkint = reg_read(lynx, LINK_INT_STATUS);
852 intmask = reg_read(lynx, PCI_INT_STATUS);
854 if (!(intmask & PCI_INT_INT_PEND))
855 return IRQ_NONE;
857 PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
858 linkint);
860 reg_write(lynx, LINK_INT_STATUS, linkint);
861 reg_write(lynx, PCI_INT_STATUS, intmask);
863 if (intmask & PCI_INT_1394) {
864 if (linkint & LINK_INT_PHY_TIMEOUT) {
865 PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
867 if (linkint & LINK_INT_PHY_BUSRESET) {
868 PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
869 lynx->selfid_size = -1;
870 lynx->phy_reg0 = -1;
871 if (!host->in_bus_reset)
872 hpsb_bus_reset(host);
874 if (linkint & LINK_INT_PHY_REG_RCVD) {
875 u32 reg;
877 spin_lock(&lynx->phy_reg_lock);
878 reg = reg_read(lynx, LINK_PHY);
879 spin_unlock(&lynx->phy_reg_lock);
881 if (!host->in_bus_reset) {
882 PRINT(KERN_INFO, lynx->id,
883 "phy reg received without reset");
884 } else if (reg & 0xf00) {
885 PRINT(KERN_INFO, lynx->id,
886 "unsolicited phy reg %d received",
887 (reg >> 8) & 0xf);
888 } else {
889 lynx->phy_reg0 = reg & 0xff;
890 handle_selfid(lynx, host);
893 if (linkint & LINK_INT_ISO_STUCK) {
894 PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
896 if (linkint & LINK_INT_ASYNC_STUCK) {
897 PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
899 if (linkint & LINK_INT_SENT_REJECT) {
900 PRINT(KERN_INFO, lynx->id, "sent reject");
902 if (linkint & LINK_INT_TX_INVALID_TC) {
903 PRINT(KERN_INFO, lynx->id, "invalid transaction code");
905 if (linkint & LINK_INT_GRF_OVERFLOW) {
906 /* flush FIFO if overflow happens during reset */
907 if (host->in_bus_reset)
908 reg_write(lynx, FIFO_CONTROL,
909 FIFO_CONTROL_GRF_FLUSH);
910 PRINT(KERN_INFO, lynx->id, "GRF overflow");
912 if (linkint & LINK_INT_ITF_UNDERFLOW) {
913 PRINT(KERN_INFO, lynx->id, "ITF underflow");
915 if (linkint & LINK_INT_ATF_UNDERFLOW) {
916 PRINT(KERN_INFO, lynx->id, "ATF underflow");
920 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
921 PRINTD(KERN_DEBUG, lynx->id, "iso receive");
923 spin_lock(&lynx->iso_rcv.lock);
925 lynx->iso_rcv.stat[lynx->iso_rcv.next] =
926 reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
928 lynx->iso_rcv.used++;
929 lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
931 if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
932 || !lynx->iso_rcv.chan_count) {
933 PRINTD(KERN_DEBUG, lynx->id, "stopped");
934 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
937 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
938 CHANNEL_ISO_RCV);
940 spin_unlock(&lynx->iso_rcv.lock);
942 tasklet_schedule(&lynx->iso_rcv.tq);
945 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
946 PRINTD(KERN_DEBUG, lynx->id, "async sent");
947 spin_lock(&lynx->async.queue_lock);
949 if (list_empty(&lynx->async.pcl_queue)) {
950 spin_unlock(&lynx->async.queue_lock);
951 PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
952 } else {
953 struct ti_pcl pcl;
954 u32 ack;
955 struct hpsb_packet *packet;
957 get_pcl(lynx, lynx->async.pcl, &pcl);
959 packet = driver_packet(lynx->async.pcl_queue.next);
960 list_del_init(&packet->driver_list);
962 pci_unmap_single(lynx->dev, lynx->async.header_dma,
963 packet->header_size, PCI_DMA_TODEVICE);
964 if (packet->data_size) {
965 pci_unmap_single(lynx->dev, lynx->async.data_dma,
966 packet->data_size, PCI_DMA_TODEVICE);
969 if (!list_empty(&lynx->async.queue)) {
970 send_next(lynx, hpsb_async);
973 spin_unlock(&lynx->async.queue_lock);
975 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
976 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
977 ack = (pcl.pcl_status >> 15) & 0xf;
978 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
979 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
980 } else {
981 ack = (pcl.pcl_status >> 15) & 0xf;
983 } else {
984 PRINT(KERN_INFO, lynx->id, "async packet was not completed");
985 ack = ACKX_SEND_ERROR;
987 hpsb_packet_sent(host, packet, ack);
991 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
992 PRINTD(KERN_DEBUG, lynx->id, "iso sent");
993 spin_lock(&lynx->iso_send.queue_lock);
995 if (list_empty(&lynx->iso_send.pcl_queue)) {
996 spin_unlock(&lynx->iso_send.queue_lock);
997 PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
998 } else {
999 struct ti_pcl pcl;
1000 u32 ack;
1001 struct hpsb_packet *packet;
1003 get_pcl(lynx, lynx->iso_send.pcl, &pcl);
1005 packet = driver_packet(lynx->iso_send.pcl_queue.next);
1006 list_del_init(&packet->driver_list);
1008 pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
1009 packet->header_size, PCI_DMA_TODEVICE);
1010 if (packet->data_size) {
1011 pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
1012 packet->data_size, PCI_DMA_TODEVICE);
1015 if (!list_empty(&lynx->iso_send.queue)) {
1016 send_next(lynx, hpsb_iso);
1019 spin_unlock(&lynx->iso_send.queue_lock);
1021 if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
1022 if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
1023 ack = (pcl.pcl_status >> 15) & 0xf;
1024 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
1025 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
1026 } else {
1027 ack = (pcl.pcl_status >> 15) & 0xf;
1029 } else {
1030 PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
1031 ack = ACKX_SEND_ERROR;
1034 hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
1038 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
1039 /* general receive DMA completed */
1040 int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
1042 PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
1043 stat & 0x1fff);
1045 if (stat & DMA_CHAN_STAT_SELFID) {
1046 lynx->selfid_size = stat & 0x1fff;
1047 handle_selfid(lynx, host);
1048 } else {
1049 quadlet_t *q_data = lynx->rcv_page;
1050 if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
1051 || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
1052 cpu_to_be32s(q_data + 3);
1054 hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
1057 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1060 return IRQ_HANDLED;
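/* iso_rcv_bh() is the tasklet half of iso reception: it drains the ring
 * of filled receive buffers (iso_rcv.last up to iso_rcv.next), checks the
 * packet length against the DMA status word, hands each packet to
 * hpsb_packet_received() and re-enables the channel if listeners remain. */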
1064 static void iso_rcv_bh(struct ti_lynx *lynx)
1066 unsigned int idx;
1067 quadlet_t *data;
1068 unsigned long flags;
1070 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1072 while (lynx->iso_rcv.used) {
1073 idx = lynx->iso_rcv.last;
1074 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
1076 data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
1077 + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
1079 if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
1080 PRINT(KERN_ERR, lynx->id,
1081 "iso length mismatch 0x%08x/0x%08x", *data,
1082 lynx->iso_rcv.stat[idx]);
1085 if (lynx->iso_rcv.stat[idx]
1086 & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
1087 PRINT(KERN_INFO, lynx->id,
1088 "iso receive error on %d to 0x%p", idx, data);
1089 } else {
1090 hpsb_packet_received(lynx->host, data,
1091 lynx->iso_rcv.stat[idx] & 0x1fff,
1095 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1096 lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
1097 lynx->iso_rcv.used--;
1100 if (lynx->iso_rcv.chan_count) {
1101 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
1102 DMA_WORD1_CMP_ENABLE_MASTER);
1104 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
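/* remove_card() tears down in reverse order of add_card(); lynx->state
 * records how far initialization got, and the switch cases below fall
 * through deliberately so each stage only frees what was actually
 * set up. */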
1108 static void remove_card(struct pci_dev *dev)
1110 struct ti_lynx *lynx;
1111 struct device *lynx_dev;
1112 int i;
1114 lynx = pci_get_drvdata(dev);
1115 if (!lynx) return;
1116 pci_set_drvdata(dev, NULL);
1118 lynx_dev = get_device(&lynx->host->device);
1120 switch (lynx->state) {
1121 case is_host:
1122 reg_write(lynx, PCI_INT_ENABLE, 0);
1123 hpsb_remove_host(lynx->host);
1124 case have_intr:
1125 reg_write(lynx, PCI_INT_ENABLE, 0);
1126 free_irq(lynx->dev->irq, lynx);
1128 /* Disable IRM Contender and LCtrl */
1129 if (lynx->phyic.reg_1394a)
1130 set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));
1132 /* Let all other nodes know to ignore us */
1133 lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
1135 case have_iomappings:
1136 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1137 /* Fix buggy cards with autoboot pin not tied low: */
1138 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1139 iounmap(lynx->registers);
1140 iounmap(lynx->local_rom);
1141 iounmap(lynx->local_ram);
1142 iounmap(lynx->aux_port);
1143 case have_1394_buffers:
1144 for (i = 0; i < ISORCV_PAGES; i++) {
1145 if (lynx->iso_rcv.page[i]) {
1146 pci_free_consistent(lynx->dev, PAGE_SIZE,
1147 lynx->iso_rcv.page[i],
1148 lynx->iso_rcv.page_dma[i]);
1151 pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
1152 lynx->rcv_page_dma);
1153 case have_aux_buf:
1154 case have_pcl_mem:
1155 pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
1156 lynx->pcl_mem_dma);
1157 case clear:
1158 /* do nothing - already freed */
1162 tasklet_kill(&lynx->iso_rcv.tq);
1164 if (lynx_dev)
1165 put_device(lynx_dev);
1169 static int __devinit add_card(struct pci_dev *dev,
1170 const struct pci_device_id *devid_is_unused)
1172 #define FAIL(fmt, args...) do { \
1173 PRINT_G(KERN_ERR, fmt , ## args); \
1174 remove_card(dev); \
1175 return error; \
1176 } while (0)
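/* add_card() advances lynx->state after each successful setup step; on
 * error FAIL() logs the message and relies on remove_card() to unwind
 * whatever has been allocated so far before returning the error code. */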
1178 char irq_buf[16];
1179 struct hpsb_host *host;
1180 struct ti_lynx *lynx; /* shortcut to currently handled device */
1181 struct ti_pcl pcl;
1182 u32 *pcli;
1183 int i;
1184 int error;
1186 error = -ENXIO;
1188 if (pci_set_dma_mask(dev, DMA_32BIT_MASK))
1189 FAIL("DMA address limits not supported for PCILynx hardware");
1190 if (pci_enable_device(dev))
1191 FAIL("failed to enable PCILynx hardware");
1192 pci_set_master(dev);
1194 error = -ENOMEM;
1196 host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
1197 if (!host) FAIL("failed to allocate control structure memory");
1199 lynx = host->hostdata;
1200 lynx->id = card_id++;
1201 lynx->dev = dev;
1202 lynx->state = clear;
1203 lynx->host = host;
1204 host->pdev = dev;
1205 pci_set_drvdata(dev, lynx);
1207 spin_lock_init(&lynx->lock);
1208 spin_lock_init(&lynx->phy_reg_lock);
1210 lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
1211 &lynx->pcl_mem_dma);
1213 if (lynx->pcl_mem != NULL) {
1214 lynx->state = have_pcl_mem;
1215 PRINT(KERN_INFO, lynx->id,
1216 "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
1217 lynx->pcl_mem);
1218 } else {
1219 FAIL("failed to allocate PCL memory area");
1222 lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
1223 &lynx->rcv_page_dma);
1224 if (lynx->rcv_page == NULL) {
1225 FAIL("failed to allocate receive buffer");
1227 lynx->state = have_1394_buffers;
1229 for (i = 0; i < ISORCV_PAGES; i++) {
1230 lynx->iso_rcv.page[i] =
1231 pci_alloc_consistent(dev, PAGE_SIZE,
1232 &lynx->iso_rcv.page_dma[i]);
1233 if (lynx->iso_rcv.page[i] == NULL) {
1234 FAIL("failed to allocate iso receive buffers");
1238 lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
1239 PCILYNX_MAX_REGISTER);
1240 lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
1241 lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
1242 lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
1243 PCILYNX_MAX_MEMORY);
1244 lynx->state = have_iomappings;
1246 if (lynx->registers == NULL) {
1247 FAIL("failed to remap registers - card not accessible");
1250 reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
1251 /* Fix buggy cards with autoboot pin not tied low: */
1252 reg_write(lynx, DMA0_CHAN_CTRL, 0);
1254 sprintf (irq_buf, "%d", dev->irq);
1256 if (!request_irq(dev->irq, lynx_irq_handler, IRQF_SHARED,
1257 PCILYNX_DRIVER_NAME, lynx)) {
1258 PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
1259 lynx->state = have_intr;
1260 } else {
1261 FAIL("failed to allocate shared interrupt %s", irq_buf);
1264 /* alloc_pcl return values are not checked; it is expected that the
1265 * provided PCL space is sufficient for the initial allocations */
1266 lynx->rcv_pcl = alloc_pcl(lynx);
1267 lynx->rcv_pcl_start = alloc_pcl(lynx);
1268 lynx->async.pcl = alloc_pcl(lynx);
1269 lynx->async.pcl_start = alloc_pcl(lynx);
1270 lynx->iso_send.pcl = alloc_pcl(lynx);
1271 lynx->iso_send.pcl_start = alloc_pcl(lynx);
1273 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1274 lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
1276 lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
1278 /* all allocations successful - simple init stuff follows */
1280 reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
1282 tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
1283 (unsigned long)lynx);
1285 spin_lock_init(&lynx->iso_rcv.lock);
1287 spin_lock_init(&lynx->async.queue_lock);
1288 lynx->async.channel = CHANNEL_ASYNC_SEND;
1289 spin_lock_init(&lynx->iso_send.queue_lock);
1290 lynx->iso_send.channel = CHANNEL_ISO_SEND;
1292 PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
1293 "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
1294 lynx->local_ram, lynx->aux_port);
1296 /* now check whether the PHY provides the 1394a extended register set */
1297 if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
1298 lynx->phyic.reg_1394a = 1;
1299 PRINT(KERN_INFO, lynx->id,
1300 "found 1394a conform PHY (using extended register set)");
1301 lynx->phyic.vendor = get_phy_vendorid(lynx);
1302 lynx->phyic.product = get_phy_productid(lynx);
1303 } else {
1304 lynx->phyic.reg_1394a = 0;
1305 PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
1308 lynx->selfid_size = -1;
1309 lynx->phy_reg0 = -1;
1311 INIT_LIST_HEAD(&lynx->async.queue);
1312 INIT_LIST_HEAD(&lynx->async.pcl_queue);
1313 INIT_LIST_HEAD(&lynx->iso_send.queue);
1314 INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);
1316 pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
1317 put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
1319 pcl.next = PCL_NEXT_INVALID;
1320 pcl.async_error_next = PCL_NEXT_INVALID;
1322 pcl.buffer[0].control = PCL_CMD_RCV | 16;
1323 #ifndef __BIG_ENDIAN
1324 pcl.buffer[0].control |= PCL_BIGENDIAN;
1325 #endif
1326 pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
1328 pcl.buffer[0].pointer = lynx->rcv_page_dma;
1329 pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
1330 put_pcl(lynx, lynx->rcv_pcl, &pcl);
1332 pcl.next = pcl_bus(lynx, lynx->async.pcl);
1333 pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
1334 put_pcl(lynx, lynx->async.pcl_start, &pcl);
1336 pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
1337 pcl.async_error_next = PCL_NEXT_INVALID;
1338 put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
1340 pcl.next = PCL_NEXT_INVALID;
1341 pcl.async_error_next = PCL_NEXT_INVALID;
1342 pcl.buffer[0].control = PCL_CMD_RCV | 4;
1343 #ifndef __BIG_ENDIAN
1344 pcl.buffer[0].control |= PCL_BIGENDIAN;
1345 #endif
1346 pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
1348 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1349 int page = i / ISORCV_PER_PAGE;
1350 int sec = i % ISORCV_PER_PAGE;
1352 pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
1353 + sec * MAX_ISORCV_SIZE;
1354 pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
1355 put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
1358 pcli = (u32 *)&pcl;
1359 for (i = 0; i < NUM_ISORCV_PCL; i++) {
1360 pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
1362 put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
1364 /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
1365 reg_write(lynx, FIFO_SIZES, 0x003030a0);
1366 /* 20 byte threshold before triggering PCI transfer */
1367 reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
1368 /* threshold on both send FIFOs before transmitting:
1369 FIFO size - cache line size - 1 */
1370 i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
1371 i = 0x30 - i - 1;
1372 reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
1374 reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
1376 reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
1377 | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
1378 | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
1379 | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
1380 | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
1381 | LINK_INT_ATF_UNDERFLOW);
1383 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1384 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
1385 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
1386 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
1387 DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
1388 | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
1389 | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
1391 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
1393 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1394 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
1395 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
1396 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1398 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
1400 reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
1401 | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
1402 | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
1403 | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);
1405 if (!lynx->phyic.reg_1394a) {
1406 if (!hpsb_disable_irm) {
1407 /* attempt to enable contender bit -FIXME- would this
1408 * work elsewhere? */
1409 reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
1410 reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
1412 } else {
1413 /* set the contender (if appropriate) and LCtrl bit in the
1414 * extended PHY register set. (Should check that PHY_02_EXTENDED
1415 * is set in register 2?)
1417 i = get_phy_reg(lynx, 4);
1418 i |= PHY_04_LCTRL;
1419 if (hpsb_disable_irm)
1420 i &= ~PHY_04_CONTENDER;
1421 else
1422 i |= PHY_04_CONTENDER;
1423 if (i != -1) set_phy_reg(lynx, 4, i);
1426 if (!skip_eeprom)
1428 /* needed for i2c communication with serial eeprom */
1429 struct i2c_adapter *i2c_ad;
1430 struct i2c_algo_bit_data i2c_adapter_data;
1432 error = -ENOMEM;
1433 i2c_ad = kmalloc(sizeof(*i2c_ad), SLAB_KERNEL);
1434 if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");
1436 memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
1437 i2c_adapter_data = bit_data;
1438 i2c_ad->algo_data = &i2c_adapter_data;
1439 i2c_adapter_data.data = lynx;
1441 PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
1442 reg_read(lynx, SERIAL_EEPROM_CONTROL));
1444 /* reset hardware to sane state */
1445 lynx->i2c_driven_state = 0x00000070;
1446 reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);
1448 if (i2c_bit_add_bus(i2c_ad) < 0)
1450 kfree(i2c_ad);
1451 error = -ENXIO;
1452 FAIL("unable to register i2c");
1454 else
1456 /* do i2c stuff */
1457 unsigned char i2c_cmd = 0x10;
1458 struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
1459 { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
1462 /* we use i2c_transfer because i2c_smbus_read_block_data does not work properly and we
1463 do it more efficiently in one transaction rather than using several reads */
1464 if (i2c_transfer(i2c_ad, msg, 2) < 0) {
1465 PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
1466 } else {
1467 int i;
1469 PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
1470 /* FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
1471 * generation(1394a) and link_spd(1394a) field and recalculate
1472 * the CRC */
1474 for (i = 0; i < 5 ; i++)
1475 PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
1476 i, be32_to_cpu(lynx->bus_info_block[i]));
1478 /* check info_length, crc_length and the 1394 magic number to see if it is really a bus info block */
1479 if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
1480 (lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
1482 PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from serial eeprom");
1483 } else {
1484 kfree(i2c_ad);
1485 error = -ENXIO;
1486 FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
1491 i2c_bit_del_bus(i2c_ad);
1492 kfree(i2c_ad);
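/* Whether read from the serial EEPROM above or not, lynx->bus_info_block
 * provides the GUID (quadlets 3 and 4) and the cyc_clk_acc/max_rec fields
 * (quadlet 2) that feed the CSR setup below. */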
1496 host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
1497 host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
1498 host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
1499 host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
1500 if (!lynx->phyic.reg_1394a)
1501 host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
1502 else
1503 host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;
1505 if (hpsb_add_host(host)) {
1506 error = -ENOMEM;
1507 FAIL("Failed to register host with highlevel");
1510 lynx->state = is_host;
1512 return 0;
1513 #undef FAIL
1517 static struct pci_device_id pci_table[] = {
1519 .vendor = PCI_VENDOR_ID_TI,
1520 .device = PCI_DEVICE_ID_TI_PCILYNX,
1521 .subvendor = PCI_ANY_ID,
1522 .subdevice = PCI_ANY_ID,
1524 { } /* Terminating entry */
1527 static struct pci_driver lynx_pci_driver = {
1528 .name = PCILYNX_DRIVER_NAME,
1529 .id_table = pci_table,
1530 .probe = add_card,
1531 .remove = remove_card,
1534 static struct hpsb_host_driver lynx_driver = {
1535 .owner = THIS_MODULE,
1536 .name = PCILYNX_DRIVER_NAME,
1537 .set_hw_config_rom = NULL,
1538 .transmit_packet = lynx_transmit,
1539 .devctl = lynx_devctl,
1540 .isoctl = NULL,
1543 MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
1544 MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
1545 MODULE_LICENSE("GPL");
1546 MODULE_SUPPORTED_DEVICE("pcilynx");
1547 MODULE_DEVICE_TABLE(pci, pci_table);
1549 static int __init pcilynx_init(void)
1551 int ret;
1553 ret = pci_register_driver(&lynx_pci_driver);
1554 if (ret < 0) {
1555 PRINT_G(KERN_ERR, "PCI module init failed");
1556 return ret;
1559 return 0;
1562 static void __exit pcilynx_cleanup(void)
1564 pci_unregister_driver(&lynx_pci_driver);
1568 module_init(pcilynx_init);
1569 module_exit(pcilynx_cleanup);