- Linus: more PageDirty / swapcache handling
[davej-history.git] / drivers / ieee1394 / pcilynx.c
blob9943c255cffad3d710288e3caeb479008956ea01
1 /*
2 * ti_pcilynx.c - Texas Instruments PCILynx driver
3 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
4 * Stephan Linz <linz@mazet.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/slab.h>
24 #include <linux/interrupt.h>
25 #include <linux/wait.h>
26 #include <linux/errno.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
29 #include <linux/fs.h>
30 #include <linux/poll.h>
31 #include <asm/byteorder.h>
32 #include <asm/atomic.h>
33 #include <asm/io.h>
34 #include <asm/uaccess.h>
36 #include "ieee1394.h"
37 #include "ieee1394_types.h"
38 #include "hosts.h"
39 #include "ieee1394_core.h"
40 #include "pcilynx.h"
/* Compile-time check: the char device minor number layout reserves one
 * minor per card below PCILYNX_MINOR_ROM_START, so more cards than that
 * cannot be addressed. */
43 #if MAX_PCILYNX_CARDS > PCILYNX_MINOR_ROM_START
44 #error Max number of cards is bigger than PCILYNX_MINOR_ROM_START - this does not work.
45 #endif
47 /* print general (card independent) information */
48 #define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
49 /* print card specific information */
50 #define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
/* Debug variants: compile to no-ops unless verbose debugging is configured. */
52 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
53 #define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
54 #define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
55 #else
56 #define PRINT_GD(level, fmt, args...) do {} while (0)
57 #define PRINTD(level, card, fmt, args...) do {} while (0)
58 #endif
/* Static per-card state table; num_of_cards counts the entries in use. */
60 static struct ti_lynx cards[MAX_PCILYNX_CARDS];
61 static int num_of_cards = 0;
65 * PCL handling functions.
68 static pcl_t alloc_pcl(struct ti_lynx *lynx)
70 u8 m;
71 int i, j;
73 spin_lock(&lynx->lock);
74 /* FIXME - use ffz() to make this readable */
75 for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
76 m = lynx->pcl_bmap[i];
77 for (j = 0; j < 8; j++) {
78 if (m & 1<<j) {
79 continue;
81 m |= 1<<j;
82 lynx->pcl_bmap[i] = m;
83 spin_unlock(&lynx->lock);
84 return 8 * i + j;
87 spin_unlock(&lynx->lock);
89 return -1;
/* The helpers below are compiled out; kept for future use and debugging. */
93 #if 0
/* Return a PCL slot to the bitmap; warns if the slot was not allocated. */
94 static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
96 int off, bit;
98 off = pclid / 8;
99 bit = pclid % 8;
101 if (pclid < 0) {
102 return;
105 spin_lock(&lynx->lock);
106 if (lynx->pcl_bmap[off] & 1<<bit) {
107 lynx->pcl_bmap[off] &= ~(1<<bit);
108 } else {
109 PRINT(KERN_ERR, lynx->id,
110 "attempted to free unallocated PCL %d", pclid);
112 spin_unlock(&lynx->lock);
115 /* functions useful for debugging */
/* Dump one PCL's header fields and its 13 buffer descriptors to the log. */
116 static void pretty_print_pcl(const struct ti_pcl *pcl)
118 int i;
120 printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
121 pcl->next, pcl->user_data, pcl->pcl_status,
122 pcl->remaining_transfer_count, pcl->next_data_buffer);
124 printk("PCL");
125 for (i=0; i<13; i++) {
126 printk(" c%x:%08x d%x:%08x",
127 i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
128 if (!(i & 0x3) && (i != 12)) printk("\nPCL");
130 printk("\n");
/* Fetch a PCL from card memory by id and pretty-print it. */
133 static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
135 struct ti_pcl pcl;
137 get_pcl(lynx, pclid, &pcl);
138 pretty_print_pcl(&pcl);
140 #endif
/* Forward declarations for the init/shutdown code further down the file. */
143 static int add_card(struct pci_dev *dev);
144 static void remove_card(struct ti_lynx *lynx);
145 static int init_driver(void);
150 /***********************************
151 * IEEE-1394 functionality section *
152 ***********************************/
155 static int get_phy_reg(struct ti_lynx *lynx, int addr)
157 int retval;
158 int i = 0;
160 unsigned long flags;
162 if (addr > 15) {
163 PRINT(KERN_ERR, lynx->id, __FUNCTION__
164 ": PHY register address %d out of range", addr);
165 return -1;
168 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
170 reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
171 do {
172 retval = reg_read(lynx, LINK_PHY);
174 if (i > 10000) {
175 PRINT(KERN_ERR, lynx->id, __FUNCTION__
176 ": runaway loop, aborting");
177 retval = -1;
178 break;
180 i++;
181 } while ((retval & 0xf00) != LINK_PHY_RADDR(addr));
183 reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
184 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
186 if (retval != -1) {
187 return retval & 0xff;
188 } else {
189 return -1;
193 static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
195 unsigned long flags;
197 if (addr > 15) {
198 PRINT(KERN_ERR, lynx->id, __FUNCTION__
199 ": PHY register address %d out of range", addr);
200 return -1;
203 if (val > 0xff) {
204 PRINT(KERN_ERR, lynx->id, __FUNCTION__
205 ": PHY register value %d out of range", val);
206 return -1;
209 spin_lock_irqsave(&lynx->phy_reg_lock, flags);
211 reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
212 | LINK_PHY_WDATA(val));
214 spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);
216 return 0;
219 static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
221 int reg;
223 if (page > 7) {
224 PRINT(KERN_ERR, lynx->id, __FUNCTION__
225 ": PHY page %d out of range", page);
226 return -1;
229 reg = get_phy_reg(lynx, 7);
230 if (reg != -1) {
231 reg &= 0x1f;
232 reg |= (page << 5);
233 set_phy_reg(lynx, 7, reg);
234 return 0;
235 } else {
236 return -1;
240 #if 0 /* not needed at this time */
/* Select PHY register port @port (0..15) via the low nibble of PHY
 * register 7.  Compiled out; retained for completeness. */
241 static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
243 int reg;
245 if (port > 15) {
246 PRINT(KERN_ERR, lynx->id, __FUNCTION__
247 ": PHY port %d out of range", port);
248 return -1;
251 reg = get_phy_reg(lynx, 7);
252 if (reg != -1) {
253 reg &= 0xf0;
254 reg |= port;
255 set_phy_reg(lynx, 7, reg);
256 return 0;
257 } else {
258 return -1;
261 #endif
263 static u32 get_phy_vendorid(struct ti_lynx *lynx)
265 u32 pvid = 0;
266 sel_phy_reg_page(lynx, 1);
267 pvid |= (get_phy_reg(lynx, 10) << 16);
268 pvid |= (get_phy_reg(lynx, 11) << 8);
269 pvid |= get_phy_reg(lynx, 12);
270 PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
271 return pvid;
274 static u32 get_phy_productid(struct ti_lynx *lynx)
276 u32 id = 0;
277 sel_phy_reg_page(lynx, 1);
278 id |= (get_phy_reg(lynx, 13) << 16);
279 id |= (get_phy_reg(lynx, 14) << 8);
280 id |= get_phy_reg(lynx, 15);
281 PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
282 return id;
/* Build a self-ID quadlet for the local node out of PHY registers 0..6.
 * Used for pre-1394a PHYs whose self-ID packet does not appear in the
 * received self-ID stream (see handle_selfid()).  Returns the quadlet
 * already converted to big-endian wire order. */
285 static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
286 struct hpsb_host *host)
288 quadlet_t lsid;
289 char phyreg[7];
290 int i;
/* snapshot PHY registers 0..6 */
292 for (i = 0; i < 7; i++) {
293 phyreg[i] = get_phy_reg(lynx, i);
296 /* FIXME? We assume a TSB21LV03A phy here. This code doesn't support
297 more than 3 ports on the PHY anyway. */
/* 0x80400000: self-ID marker plus fixed fields; phy id from reg 0 */
299 lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
300 lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
301 lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
302 lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
303 /* lsid |= 1 << 11; *//* set contender (hack) */
304 lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */
/* per-port status fields, two bits each starting at bit 6 */
306 for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
307 if (phyreg[3 + i] & 0x4) {
308 lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
309 << (6 - i*2);
310 } else {
311 lsid |= 1 << (6 - i*2);
315 cpu_to_be32s(&lsid);
316 PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
317 return lsid;
/* Process the self-ID data received into rcv_page after a bus reset:
 * byteswap the quadlets, hand each valid self-ID packet to the core via
 * hpsb_selfid_received(), then finish with hpsb_selfid_complete().
 * For pre-1394a PHYs (phyic.reg_1394a == 0) the local node's self-ID is
 * absent from the stream, so a locally generated one (lsid) is inserted
 * at the position matching our phy id. */
320 static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host, size_t size)
322 quadlet_t *q = lynx->rcv_page;
323 int phyid, isroot;
324 quadlet_t lsid = 0;
325 int i;
/* byteswap at most the first 16 bytes (4 quadlets) in place */
327 i = (size > 16 ? 16 : size) / 4 - 1;
328 while (i >= 0) {
329 cpu_to_be32s(&q[i]);
330 i--;
333 if (!lynx->phyic.reg_1394a) {
334 lsid = generate_own_selfid(lynx, host);
/* PHY register 0: bit 1 = root flag, bits 7:2 = our phy id */
337 phyid = get_phy_reg(lynx, 0);
338 isroot = (phyid & 2) != 0;
339 phyid >>= 2;
340 PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
341 phyid, (isroot ? "root" : "not root"));
342 reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);
/* empty stream: ours is the only self-ID to report */
344 if (!lynx->phyic.reg_1394a && !size) {
345 hpsb_selfid_received(host, lsid);
348 while (size > 0) {
349 struct selfid *sid = (struct selfid *)q;
/* insert our own self-ID just before the node with the next phy id */
351 if (!lynx->phyic.reg_1394a && !sid->extended
352 && (sid->phy_id == (phyid + 1))) {
353 hpsb_selfid_received(host, lsid);
/* each self-ID quadlet is followed by its bit inverse for validation */
356 if (q[0] == ~q[1]) {
357 PRINT(KERN_DEBUG, lynx->id, "selfid packet 0x%x rcvd",
358 q[0]);
359 hpsb_selfid_received(host, q[0]);
360 } else {
361 PRINT(KERN_INFO, lynx->id,
362 "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
364 q += 2;
365 size -= 8;
/* we hold the highest phy id: our self-ID goes last */
368 if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
369 hpsb_selfid_received(host, lsid);
372 if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER);
374 hpsb_selfid_complete(host, phyid, isroot);
379 /* This must be called with the respective queue_lock held. */
380 static void send_next(struct ti_lynx *lynx, int what)
382 struct ti_pcl pcl;
383 struct lynx_send_data *d;
384 struct hpsb_packet *packet;
386 d = (what == iso ? &lynx->iso_send : &lynx->async);
387 packet = d->queue;
389 d->header_dma = pci_map_single(lynx->dev, packet->header,
390 packet->header_size, PCI_DMA_TODEVICE);
391 if (packet->data_size) {
392 d->data_dma = pci_map_single(lynx->dev, packet->data,
393 packet->data_size,
394 PCI_DMA_TODEVICE);
395 } else {
396 d->data_dma = 0;
399 pcl.next = PCL_NEXT_INVALID;
400 pcl.async_error_next = PCL_NEXT_INVALID;
401 #ifdef __BIG_ENDIAN
402 pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
403 #else
404 pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size
405 | PCL_BIGENDIAN;
406 #endif
407 pcl.buffer[0].pointer = d->header_dma;
408 pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
409 pcl.buffer[1].pointer = d->data_dma;
411 switch (packet->type) {
412 case async:
413 pcl.buffer[0].control |= PCL_CMD_XMT;
414 break;
415 case iso:
416 pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
417 break;
418 case raw:
419 pcl.buffer[0].control |= PCL_CMD_UNFXMT;
420 break;
423 if (!packet->data_be) {
424 pcl.buffer[1].control |= PCL_BIGENDIAN;
427 put_pcl(lynx, d->pcl, &pcl);
428 run_pcl(lynx, d->pcl_start, d->channel);
432 static int lynx_detect(struct hpsb_host_template *tmpl)
434 struct hpsb_host *host;
435 int i;
437 init_driver();
439 for (i = 0; i < num_of_cards; i++) {
440 host = hpsb_get_host(tmpl, 0);
441 if (host == NULL) {
442 /* simply don't init more after out of mem */
443 return i;
445 host->hostdata = &cards[i];
446 cards[i].host = host;
449 return num_of_cards;
/* hpsb host template initialize callback: set up all receive/transmit
 * PCL chains, FIFO sizes, DMA word-compare filters and link control
 * bits, then start the async and iso receive channels.  The exact
 * register programming order below is kept as-is.  Returns 1. */
452 static int lynx_initialize(struct hpsb_host *host)
454 struct ti_lynx *lynx = host->hostdata;
455 struct ti_pcl pcl;
456 int i;
457 u32 *pcli;
459 lynx->async.queue = NULL;
460 spin_lock_init(&lynx->async.queue_lock);
461 spin_lock_init(&lynx->phy_reg_lock);
/* async receive: start PCL chains to the real receive PCL */
463 pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
464 put_pcl(lynx, lynx->rcv_pcl_start, &pcl);
466 pcl.next = PCL_NEXT_INVALID;
467 pcl.async_error_next = PCL_NEXT_INVALID;
/* 16-byte header buffer plus 4080-byte payload buffer (one page) */
468 #ifdef __BIG_ENDIAN
469 pcl.buffer[0].control = PCL_CMD_RCV | 16;
470 pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
471 #else
472 pcl.buffer[0].control = PCL_CMD_RCV | PCL_BIGENDIAN | 16;
473 pcl.buffer[1].control = PCL_LAST_BUFF | 4080;
474 #endif
475 pcl.buffer[0].pointer = lynx->rcv_page_dma;
476 pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
477 put_pcl(lynx, lynx->rcv_pcl, &pcl);
/* transmit start PCLs chain to the async / iso-send PCLs */
479 pcl.next = pcl_bus(lynx, lynx->async.pcl);
480 pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
481 put_pcl(lynx, lynx->async.pcl_start, &pcl);
483 pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
484 pcl.async_error_next = PCL_NEXT_INVALID;
485 put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);
/* iso receive PCLs: 4-byte header part plus up-to-2044-byte rest */
487 pcl.next = PCL_NEXT_INVALID;
488 pcl.async_error_next = PCL_NEXT_INVALID;
489 pcl.buffer[0].control = PCL_CMD_RCV | 4;
490 #ifndef __BIG_ENDIAN
491 pcl.buffer[0].control |= PCL_BIGENDIAN;
492 #endif
493 pcl.buffer[1].control = PCL_LAST_BUFF | 2044;
495 for (i = 0; i < NUM_ISORCV_PCL; i++) {
496 int page = i / ISORCV_PER_PAGE;
497 int sec = i % ISORCV_PER_PAGE;
499 pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
500 + sec * MAX_ISORCV_SIZE;
501 pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
502 put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
/* reuse the pcl struct as a table of bus addresses of all iso PCLs */
505 pcli = (u32 *)&pcl;
506 for (i = 0; i < NUM_ISORCV_PCL; i++) {
507 pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
509 put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);
511 /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
512 reg_write(lynx, FIFO_SIZES, 0x003030a0);
513 /* 20 byte threshold before triggering PCI transfer */
514 reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
515 /* threshold on both send FIFOs before transmitting:
516 FIFO size - cache line size - 1 */
517 i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
518 i = 0x30 - i - 1;
519 reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);
521 reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);
/* enable every link-layer interrupt source we handle in the IRQ handler */
523 reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
524 | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
525 | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
526 | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
527 | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
528 | LINK_INT_ATF_UNDERFLOW);
/* async receive channel: match packets addressed to us / broadcast */
530 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
531 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
532 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
533 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
534 DMA_WORD1_CMP_MATCH_NODE_BCAST | DMA_WORD1_CMP_MATCH_BROADCAST
535 | DMA_WORD1_CMP_MATCH_LOCAL | DMA_WORD1_CMP_MATCH_BUS_BCAST
536 | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);
538 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
/* iso receive channel filters start disabled (WORD1 enable = 0) */
540 reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
541 reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
542 reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
543 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
545 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);
547 reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
548 | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
549 | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
550 | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX
551 | LINK_CONTROL_CYCTIMEREN);
553 /* attempt to enable contender bit -FIXME- would this work elsewhere? */
554 reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
555 reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
557 return 1;
560 static void lynx_release(struct hpsb_host *host)
562 struct ti_lynx *lynx;
564 if (host != NULL) {
565 lynx = host->hostdata;
566 remove_card(lynx);
567 } else {
568 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
569 unregister_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME);
570 #endif
574 static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
576 struct ti_lynx *lynx = host->hostdata;
577 struct lynx_send_data *d;
578 unsigned long flags;
580 if (packet->data_size >= 4096) {
581 PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%d)",
582 packet->data_size);
583 return 0;
586 switch (packet->type) {
587 case async:
588 case raw:
589 d = &lynx->async;
590 break;
591 case iso:
592 d = &lynx->iso_send;
593 break;
594 default:
595 PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
596 packet->type);
597 return 0;
600 packet->xnext = NULL;
601 if (packet->tcode == TCODE_WRITEQ
602 || packet->tcode == TCODE_READQ_RESPONSE) {
603 cpu_to_be32s(&packet->header[3]);
606 spin_lock_irqsave(&d->queue_lock, flags);
608 if (d->queue == NULL) {
609 d->queue = packet;
610 d->queue_last = packet;
611 send_next(lynx, packet->type);
612 } else {
613 d->queue_last->xnext = packet;
614 d->queue_last = packet;
617 spin_unlock_irqrestore(&d->queue_lock, flags);
619 return 1;
/* hpsb host template devctl callback: dispatch miscellaneous host
 * control commands (bus reset, cycle counter, bus id, cycle master,
 * request cancellation, module use count, iso channel listen).
 * Returns a command-specific value, 0, or -1 for unknown commands. */
622 static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
624 struct ti_lynx *lynx = host->hostdata;
625 int retval = 0;
626 struct hpsb_packet *packet, *lastpacket;
627 unsigned long flags;
629 switch (cmd) {
630 case RESET_BUS:
/* arg selects long (3<<6) vs short (1<<6) reset in PHY register 1;
 * keep the current gap count, falling back to 63 on read failure */
631 if (arg) {
632 arg = 3 << 6;
633 } else {
634 arg = 1 << 6;
637 retval = get_phy_reg(lynx, 1);
638 arg |= (retval == -1 ? 63 : retval);
639 retval = 0;
641 PRINT(KERN_INFO, lynx->id, "resetting bus on request%s",
642 (host->attempt_root ? " and attempting to become root"
643 : ""));
645 set_phy_reg(lynx, 1, arg);
646 break;
648 case GET_CYCLE_COUNTER:
649 retval = reg_read(lynx, CYCLE_TIMER);
650 break;
652 case SET_CYCLE_COUNTER:
653 reg_write(lynx, CYCLE_TIMER, arg);
654 break;
656 case SET_BUS_ID:
657 reg_write(lynx, LINK_ID,
658 (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
659 break;
661 case ACT_CYCLE_MASTER:
662 if (arg) {
663 reg_set_bits(lynx, LINK_CONTROL,
664 LINK_CONTROL_CYCMASTER);
665 } else {
666 reg_clear_bits(lynx, LINK_CONTROL,
667 LINK_CONTROL_CYCMASTER);
669 break;
671 case CANCEL_REQUESTS:
/* stop the async send channel, detach the queue under the lock,
 * then complete all pending packets as aborted outside the lock */
672 spin_lock_irqsave(&lynx->async.queue_lock, flags);
674 reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
675 packet = lynx->async.queue;
676 lynx->async.queue = NULL;
678 spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
680 while (packet != NULL) {
681 lastpacket = packet;
682 packet = packet->xnext;
683 hpsb_packet_sent(host, lastpacket, ACKX_ABORTED);
686 break;
688 case MODIFY_USAGE:
689 if (arg) {
690 MOD_INC_USE_COUNT;
691 } else {
692 MOD_DEC_USE_COUNT;
694 break;
696 case ISO_LISTEN_CHANNEL:
/* first listener enables the iso receive compare unit */
697 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
699 if (lynx->iso_rcv.chan_count++ == 0) {
700 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
701 DMA_WORD1_CMP_ENABLE_MASTER);
704 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
705 break;
707 case ISO_UNLISTEN_CHANNEL:
/* last listener disables iso receive again */
708 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
710 if (--lynx->iso_rcv.chan_count == 0) {
/* NOTE(review): the second argument of this reg_write (0) appears
 * to have been lost in this copy of the file - verify upstream */
711 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
715 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
716 break;
718 default:
719 PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
720 retval = -1;
723 return retval;
727 /***************************************
728 * IEEE-1394 functionality section END *
729 ***************************************/
731 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
732 /* VFS functions for local bus / aux device access. Access to those
733 * is implemented as a character device instead of block devices
734 * because buffers are not wanted for this. Therefore llseek (from
735 * VFS) can be used for these char devices with obvious effects.
/* Prototypes for the char-device file operations defined below. */
737 static int mem_open(struct inode*, struct file*);
738 static int mem_release(struct inode*, struct file*);
739 static unsigned int aux_poll(struct file*, struct poll_table_struct*);
740 static loff_t mem_llseek(struct file*, loff_t, int);
741 static ssize_t mem_read (struct file*, char*, size_t, loff_t*);
742 static ssize_t mem_write(struct file*, const char*, size_t, loff_t*);
/* file_operations table shared by the aux/rom/ram character devices */
745 static struct file_operations aux_ops = {
746 OWNER_THIS_MODULE
747 read: mem_read,
748 write: mem_write,
749 poll: aux_poll,
750 llseek: mem_llseek,
751 open: mem_open,
752 release: mem_release,
756 static void aux_setup_pcls(struct ti_lynx *lynx)
758 struct ti_pcl pcl;
760 pcl.next = PCL_NEXT_INVALID;
761 pcl.user_data = pcl_bus(lynx, lynx->dmem_pcl);
762 put_pcl(lynx, lynx->dmem_pcl, &pcl);
/* open() for the aux/rom/ram character devices: decode the minor number
 * into a card index and region type, validate that the card exists and
 * has that region mapped, and attach a freshly allocated struct memdata
 * to the file.  Returns 0 or a negative errno. */
765 static int mem_open(struct inode *inode, struct file *file)
767 int cid = MINOR(inode->i_rdev);
768 enum { t_rom, t_aux, t_ram } type;
769 struct memdata *md;
771 V22_COMPAT_MOD_INC_USE_COUNT;
/* minor ranges: [AUX_START, ROM_START) aux, [ROM_START, RAM_START) rom,
 * [RAM_START, ...) ram; anything below AUX_START is invalid */
773 if (cid < PCILYNX_MINOR_AUX_START) {
774 /* just for completeness */
775 V22_COMPAT_MOD_DEC_USE_COUNT;
776 return -ENXIO;
777 } else if (cid < PCILYNX_MINOR_ROM_START) {
778 cid -= PCILYNX_MINOR_AUX_START;
779 if (cid >= num_of_cards || !cards[cid].aux_port) {
780 V22_COMPAT_MOD_DEC_USE_COUNT;
781 return -ENXIO;
783 type = t_aux;
784 } else if (cid < PCILYNX_MINOR_RAM_START) {
785 cid -= PCILYNX_MINOR_ROM_START;
786 if (cid >= num_of_cards || !cards[cid].local_rom) {
787 V22_COMPAT_MOD_DEC_USE_COUNT;
788 return -ENXIO;
790 type = t_rom;
791 } else {
792 /* WARNING: Know what you are doing when opening RAM.
793 * It is currently used inside the driver! */
794 cid -= PCILYNX_MINOR_RAM_START;
795 if (cid >= num_of_cards || !cards[cid].local_ram) {
796 V22_COMPAT_MOD_DEC_USE_COUNT;
797 return -ENXIO;
799 type = t_ram;
802 md = (struct memdata *)kmalloc(sizeof(struct memdata), SLAB_KERNEL);
803 if (md == NULL) {
804 V22_COMPAT_MOD_DEC_USE_COUNT;
805 return -ENOMEM;
808 md->lynx = &cards[cid];
809 md->cid = cid;
811 switch (type) {
812 case t_rom:
813 md->type = rom;
814 break;
815 case t_ram:
816 md->type = ram;
817 break;
818 case t_aux:
/* remember the interrupt count at open time so poll() can detect
 * aux interrupts that arrive afterwards */
819 atomic_set(&md->aux_intr_last_seen,
820 atomic_read(&cards[cid].aux_intr_seen));
821 md->type = aux;
822 break;
825 file->private_data = md;
827 return 0;
830 static int mem_release(struct inode *inode, struct file *file)
832 struct memdata *md = (struct memdata *)file->private_data;
834 kfree(md);
836 V22_COMPAT_MOD_DEC_USE_COUNT;
837 return 0;
840 static unsigned int aux_poll(struct file *file, poll_table *pt)
842 struct memdata *md = (struct memdata *)file->private_data;
843 int cid = md->cid;
844 unsigned int mask;
846 /* reading and writing is always allowed */
847 mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
849 if (md->type == aux) {
850 poll_wait(file, &cards[cid].aux_intr_wait, pt);
852 if (atomic_read(&md->aux_intr_last_seen)
853 != atomic_read(&cards[cid].aux_intr_seen)) {
854 mask |= POLLPRI;
855 atomic_inc(&md->aux_intr_last_seen);
859 return mask;
862 loff_t mem_llseek(struct file *file, loff_t offs, int orig)
864 loff_t newoffs;
866 switch (orig) {
867 case 0:
868 newoffs = offs;
869 break;
870 case 1:
871 newoffs = offs + file->f_pos;
872 break;
873 case 2:
874 newoffs = PCILYNX_MAX_MEMORY + 1 + offs;
875 break;
876 default:
877 return -EINVAL;
880 if (newoffs < 0 || newoffs > PCILYNX_MAX_MEMORY + 1) return -EINVAL;
882 file->f_pos = newoffs;
883 return newoffs;
887 * do not DMA if count is too small because this will have a serious impact
888 * on performance - the value 2400 was found by experiment and may not work
889 * everywhere as good as here - use mem_mindma option for modules to change
891 short mem_mindma = 2400;
892 MODULE_PARM(mem_mindma, "h");
/* DMA up to 53196 bytes from the local bus region (md->type | offset)
 * into the physical buffer at @physbuf using the shared dmem PCL and
 * the local-bus DMA channel.  Sleeps interruptibly until the channel
 * halts.  Returns the number of bytes programmed (count rounded down
 * to a multiple of 4) or -EINTR when interrupted by a signal. */
894 static ssize_t mem_dmaread(struct memdata *md, u32 physbuf, ssize_t count,
895 int offset)
897 pcltmp_t pcltmp;
898 struct ti_pcl *pcl;
899 size_t retval;
900 int i;
901 DECLARE_WAITQUEUE(wait, current);
/* whole quadlets only; 53196 = 13 buffers * 4092 bytes each */
903 count &= ~3;
904 count = MIN(count, 53196);
905 retval = count;
907 if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
908 & DMA_CHAN_CTRL_BUSY) {
909 PRINT(KERN_WARNING, md->lynx->id, "DMA ALREADY ACTIVE!");
/* program source region, then fill PCL buffers in 4092-byte chunks */
912 reg_write(md->lynx, LBUS_ADDR, md->type | offset);
914 pcl = edit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
915 pcl->buffer[0].control = PCL_CMD_LBUS_TO_PCI | MIN(count, 4092);
916 pcl->buffer[0].pointer = physbuf;
917 count -= 4092;
919 i = 0;
920 while (count > 0) {
921 i++;
922 pcl->buffer[i].control = MIN(count, 4092);
923 pcl->buffer[i].pointer = physbuf + i * 4092;
924 count -= 4092;
926 pcl->buffer[i].control |= PCL_LAST_BUFF;
927 commit_pcl(md->lynx, md->lynx->dmem_pcl, &pcltmp);
/* register on the wait queue before starting the DMA so the completion
 * interrupt cannot be missed; then sleep until the channel is idle */
929 set_current_state(TASK_INTERRUPTIBLE);
930 add_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
931 run_sub_pcl(md->lynx, md->lynx->dmem_pcl, 2, CHANNEL_LOCALBUS);
933 schedule();
934 while (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
935 & DMA_CHAN_CTRL_BUSY) {
936 if (signal_pending(current)) {
937 retval = -EINTR;
938 break;
940 schedule();
943 reg_write(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS), 0);
944 remove_wait_queue(&md->lynx->mem_dma_intr_wait, &wait);
946 if (reg_read(md->lynx, DMA_CHAN_CTRL(CHANNEL_LOCALBUS))
947 & DMA_CHAN_CTRL_BUSY) {
948 PRINT(KERN_ERR, md->lynx->id, "DMA STILL ACTIVE!");
951 return retval;
954 static ssize_t mem_read(struct file *file, char *buffer, size_t count,
955 loff_t *offset)
957 struct memdata *md = (struct memdata *)file->private_data;
958 ssize_t bcount;
959 size_t alignfix;
960 int off = (int)*offset; /* avoid useless 64bit-arithmetic */
961 ssize_t retval;
962 void *membase;
964 if ((off + count) > PCILYNX_MAX_MEMORY + 1) {
965 count = PCILYNX_MAX_MEMORY + 1 - off;
967 if (count == 0) {
968 return 0;
972 switch (md->type) {
973 case rom:
974 membase = md->lynx->local_rom;
975 break;
976 case ram:
977 membase = md->lynx->local_ram;
978 break;
979 case aux:
980 membase = md->lynx->aux_port;
981 break;
982 default:
983 panic("pcilynx%d: unsupported md->type %d in " __FUNCTION__,
984 md->lynx->id, md->type);
987 down(&md->lynx->mem_dma_mutex);
989 if (count < mem_mindma) {
990 memcpy_fromio(md->lynx->mem_dma_buffer, membase+off, count);
991 goto out;
994 bcount = count;
995 alignfix = 4 - (off % 4);
996 if (alignfix != 4) {
997 if (bcount < alignfix) {
998 alignfix = bcount;
1000 memcpy_fromio(md->lynx->mem_dma_buffer, membase+off,
1001 alignfix);
1002 if (bcount == alignfix) {
1003 goto out;
1005 bcount -= alignfix;
1006 off += alignfix;
1009 while (bcount >= 4) {
1010 retval = mem_dmaread(md, md->lynx->mem_dma_buffer_dma
1011 + count - bcount, bcount, off);
1012 if (retval < 0) return retval;
1014 bcount -= retval;
1015 off += retval;
1018 if (bcount) {
1019 memcpy_fromio(md->lynx->mem_dma_buffer + count - bcount,
1020 membase+off, bcount);
1023 out:
1024 retval = copy_to_user(buffer, md->lynx->mem_dma_buffer, count);
1025 up(&md->lynx->mem_dma_mutex);
1027 if (retval < 0) return retval;
1028 *offset += count;
1029 return count;
1033 static ssize_t mem_write(struct file *file, const char *buffer, size_t count,
1034 loff_t *offset)
1036 struct memdata *md = (struct memdata *)file->private_data;
1038 if (((*offset) + count) > PCILYNX_MAX_MEMORY+1) {
1039 count = PCILYNX_MAX_MEMORY+1 - *offset;
1041 if (count == 0 || *offset > PCILYNX_MAX_MEMORY) {
1042 return -ENOSPC;
1045 /* FIXME: dereferencing pointers to PCI mem doesn't work everywhere */
1046 switch (md->type) {
1047 case aux:
1048 copy_from_user(md->lynx->aux_port+(*offset), buffer, count);
1049 break;
1050 case ram:
1051 copy_from_user(md->lynx->local_ram+(*offset), buffer, count);
1052 break;
1053 case rom:
1054 /* the ROM may be writeable */
1055 copy_from_user(md->lynx->local_rom+(*offset), buffer, count);
1056 break;
1059 file->f_pos += count;
1060 return count;
1062 #endif /* CONFIG_IEEE1394_PCILYNX_PORTS */
1065 /********************************************************
1066 * Global stuff (interrupt handler, init/shutdown code) *
1067 ********************************************************/
/* Interrupt handler: read and acknowledge both the link-layer and PCI
 * interrupt status, then service each source in turn - aux/localbus
 * wakeups, link error conditions, iso receive completion (deferred to
 * iso_rcv_bh via task queue), async/iso send completion (dequeue and
 * report to the core, restart the queue) and async receive (self-ID
 * handling or normal packet delivery). */
1070 static void lynx_irq_handler(int irq, void *dev_id,
1071 struct pt_regs *regs_are_unused)
1073 struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
1074 struct hpsb_host *host = lynx->host;
1075 u32 intmask;
1076 u32 linkint;
1078 linkint = reg_read(lynx, LINK_INT_STATUS);
1079 intmask = reg_read(lynx, PCI_INT_STATUS);
1081 PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
1082 linkint);
/* write the read values back to acknowledge all pending interrupts */
1084 reg_write(lynx, LINK_INT_STATUS, linkint);
1085 reg_write(lynx, PCI_INT_STATUS, intmask);
1087 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1088 if (intmask & PCI_INT_AUX_INT) {
1089 atomic_inc(&lynx->aux_intr_seen);
1090 wake_up_interruptible(&lynx->aux_intr_wait);
1093 if (intmask & PCI_INT_DMA_HLT(CHANNEL_LOCALBUS)) {
1094 wake_up_interruptible(&lynx->mem_dma_intr_wait);
1096 #endif
/* link-layer conditions: mostly just logged */
1099 if (intmask & PCI_INT_1394) {
1100 if (linkint & LINK_INT_PHY_TIMEOUT) {
1101 PRINT(KERN_INFO, lynx->id, "PHY timeout occured");
1103 if (linkint & LINK_INT_PHY_BUSRESET) {
1104 PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
1105 if (!host->in_bus_reset) {
1106 hpsb_bus_reset(host);
1109 if (linkint & LINK_INT_PHY_REG_RCVD) {
1110 if (!host->in_bus_reset) {
1111 PRINT(KERN_INFO, lynx->id,
1112 "phy reg received without reset");
1115 if (linkint & LINK_INT_ISO_STUCK) {
1116 PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
1118 if (linkint & LINK_INT_ASYNC_STUCK) {
1119 PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
1121 if (linkint & LINK_INT_SENT_REJECT) {
1122 PRINT(KERN_INFO, lynx->id, "sent reject");
1124 if (linkint & LINK_INT_TX_INVALID_TC) {
1125 PRINT(KERN_INFO, lynx->id, "invalid transaction code");
1127 if (linkint & LINK_INT_GRF_OVERFLOW) {
1128 PRINT(KERN_INFO, lynx->id, "GRF overflow");
1130 if (linkint & LINK_INT_ITF_UNDERFLOW) {
1131 PRINT(KERN_INFO, lynx->id, "ITF underflow");
1133 if (linkint & LINK_INT_ATF_UNDERFLOW) {
1134 PRINT(KERN_INFO, lynx->id, "ATF underflow");
/* iso receive: record status, advance the ring, restart the channel
 * unless the ring is full or nobody listens; heavy work in bottom half */
1138 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
1139 PRINTD(KERN_DEBUG, lynx->id, "iso receive");
1141 spin_lock(&lynx->iso_rcv.lock);
1143 lynx->iso_rcv.stat[lynx->iso_rcv.next] =
1144 reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));
1146 lynx->iso_rcv.used++;
1147 lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;
1149 if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
1150 || !lynx->iso_rcv.chan_count) {
1151 PRINTD(KERN_DEBUG, lynx->id, "stopped");
1152 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
1155 run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
1156 CHANNEL_ISO_RCV);
1158 spin_unlock(&lynx->iso_rcv.lock);
1160 queue_task(&lynx->iso_rcv.tq, &tq_immediate);
1161 mark_bh(IMMEDIATE_BH);
/* async send complete: unmap, pull from queue, restart, report ack */
1164 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
1165 u32 ack;
1166 struct hpsb_packet *packet;
1168 spin_lock(&lynx->async.queue_lock);
1170 ack = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_SEND));
1171 packet = lynx->async.queue;
1172 lynx->async.queue = packet->xnext;
1174 pci_unmap_single(lynx->dev, lynx->async.header_dma,
1175 packet->header_size, PCI_DMA_TODEVICE);
1176 if (packet->data_size) {
1177 pci_unmap_single(lynx->dev, lynx->async.data_dma,
1178 packet->data_size, PCI_DMA_TODEVICE);
1181 if (lynx->async.queue != NULL) {
1182 send_next(lynx, async);
1185 spin_unlock(&lynx->async.queue_lock);
/* extract the ack code from the channel status word */
1187 if (ack & DMA_CHAN_STAT_SPECIALACK) {
1188 ack = (ack >> 15) & 0xf;
1189 PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
1190 ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
1191 } else {
1192 ack = (ack >> 15) & 0xf;
1195 hpsb_packet_sent(host, packet, ack);
/* iso send complete: same dequeue/restart dance as async */
1198 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
1199 struct hpsb_packet *packet;
1201 spin_lock(&lynx->iso_send.queue_lock);
1203 packet = lynx->iso_send.queue;
1204 lynx->iso_send.queue = packet->xnext;
1206 pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
1207 packet->header_size, PCI_DMA_TODEVICE);
1208 if (packet->data_size) {
1209 pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
1210 packet->data_size, PCI_DMA_TODEVICE);
1213 if (lynx->iso_send.queue != NULL) {
1214 send_next(lynx, iso);
1217 spin_unlock(&lynx->iso_send.queue_lock);
1219 hpsb_packet_sent(host, packet, ACK_COMPLETE);
1222 if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
1223 /* general receive DMA completed */
1224 int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));
1226 PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
1227 stat & 0x1fff);
1229 if (stat & DMA_CHAN_STAT_SELFID) {
1230 handle_selfid(lynx, host, stat & 0x1fff);
1231 reg_set_bits(lynx, LINK_CONTROL,
1232 LINK_CONTROL_RCV_CMP_VALID
1233 | LINK_CONTROL_TX_ASYNC_EN
1234 | LINK_CONTROL_RX_ASYNC_EN);
1235 } else {
1236 quadlet_t *q_data = lynx->rcv_page;
/* quadlet payloads travel in the 4th header quadlet: swap in place */
1237 if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
1238 || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
1239 cpu_to_be32s(q_data + 3);
1241 hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
/* re-arm the receive PCL for the next packet */
1244 run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
/* Bottom half for iso receive (scheduled from the IRQ handler): drain
 * the ring of completed iso PCL buffers, sanity-check each packet's
 * length against the DMA status, hand good packets to the core, and
 * finally re-enable the receive compare unit if listeners remain.
 * The lock is dropped while processing a packet - only ring bookkeeping
 * is done under iso_rcv.lock. */
1248 static void iso_rcv_bh(struct ti_lynx *lynx)
1250 unsigned int idx;
1251 quadlet_t *data;
1252 unsigned long flags;
1254 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1256 while (lynx->iso_rcv.used) {
1257 idx = lynx->iso_rcv.last;
1258 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
/* locate the buffer slot for this ring index */
1260 data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
1261 + (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;
/* packet's own length field must agree with the DMA byte count */
1263 if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
1264 PRINT(KERN_ERR, lynx->id,
1265 "iso length mismatch 0x%08x/0x%08x", *data,
1266 lynx->iso_rcv.stat[idx]);
1269 if (lynx->iso_rcv.stat[idx]
1270 & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
1271 PRINT(KERN_INFO, lynx->id,
1272 "iso receive error on %d to 0x%p", idx, data);
1273 } else {
1274 hpsb_packet_received(lynx->host, data,
1275 lynx->iso_rcv.stat[idx] & 0x1fff,
/* advance the consumer side of the ring under the lock */
1279 spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
1280 lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
1281 lynx->iso_rcv.used--;
1284 if (lynx->iso_rcv.chan_count) {
1285 reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
1286 DMA_WORD1_CMP_ENABLE_MASTER);
1288 spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
/* Probe and initialize one PCILynx card.  Returns 0 on success, 1 on
 * failure.  Resources are acquired in a fixed order tracked by
 * lynx->state so that remove_card() can unwind a partial init. */
static int add_card(struct pci_dev *dev)
/* FAIL: log, give the card slot back and unwind via remove_card().
 * Note it returns from add_card() - only usable inside this function. */
#define FAIL(fmt, args...) do { \
        PRINT_G(KERN_ERR, fmt , ## args); \
        num_of_cards--; \
        remove_card(lynx); \
        return 1; \
        } while (0)
        struct ti_lynx *lynx; /* shortcut to currently handled device */
        unsigned int i;
        if (num_of_cards == MAX_PCILYNX_CARDS) {
                PRINT_G(KERN_WARNING, "cannot handle more than %d cards. "
                        "Adjust MAX_PCILYNX_CARDS in pcilynx.h.",
                        MAX_PCILYNX_CARDS);
                return 1;
        /* claim the next slot in the static card table */
        lynx = &cards[num_of_cards++];
        lynx->id = num_of_cards-1;
        lynx->dev = dev;
        /* the chip must be able to DMA anywhere in 32-bit space */
        if (!pci_dma_supported(dev, 0xffffffff)) {
                FAIL("DMA address limits not supported for PCILynx hardware %d",
                     lynx->id);
        if (pci_enable_device(dev)) {
                FAIL("failed to enable PCILynx hardware %d", lynx->id);
        pci_set_master(dev);
#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
        /* no local RAM on the card - allocate host memory for the PCLs */
        lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
                                             &lynx->pcl_mem_dma);
        if (lynx->pcl_mem != NULL) {
                lynx->state = have_pcl_mem;
                PRINT(KERN_INFO, lynx->id,
                      "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
                      lynx->pcl_mem);
        } else {
                FAIL("failed to allocate PCL memory area");
#endif
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
        /* bounce buffer for aux port / local bus DMA transfers */
        lynx->mem_dma_buffer = pci_alloc_consistent(dev, 65536,
                                                    &lynx->mem_dma_buffer_dma);
        if (lynx->mem_dma_buffer == NULL) {
                FAIL("failed to allocate DMA buffer for aux");
        lynx->state = have_aux_buf;
#endif
        /* single page for asynchronous receive */
        lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
                                              &lynx->rcv_page_dma);
        if (lynx->rcv_page == NULL) {
                FAIL("failed to allocate receive buffer");
        lynx->state = have_1394_buffers;
        /* per-page buffers for isochronous receive slots */
        for (i = 0; i < ISORCV_PAGES; i++) {
                lynx->iso_rcv.page[i] =
                        pci_alloc_consistent(dev, PAGE_SIZE,
                                             &lynx->iso_rcv.page_dma[i]);
                if (lynx->iso_rcv.page[i] == NULL) {
                        FAIL("failed to allocate iso receive buffers");
        /* map the BARs: registers (uncached), local RAM, aux port, ROM */
        lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
                                          PCILYNX_MAX_REGISTER);
        lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
        lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
        lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
                                  PCILYNX_MAX_MEMORY);
        lynx->state = have_iomappings;
        if (lynx->registers == NULL) {
                FAIL("failed to remap registers - card not accessible");
#ifdef CONFIG_IEEE1394_PCILYNX_LOCALRAM
        /* PCLs live in card RAM in this configuration - mapping is mandatory */
        if (lynx->local_ram == NULL) {
                FAIL("failed to remap local RAM which is required for "
                     "operation");
#endif
        /* soft-reset the chip before hooking up the interrupt */
        reg_write(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
        if (!request_irq(dev->irq, lynx_irq_handler, SA_SHIRQ,
                         PCILYNX_DRIVER_NAME, lynx)) {
                PRINT(KERN_INFO, lynx->id, "allocated interrupt %d", dev->irq);
                lynx->state = have_intr;
        } else {
                FAIL("failed to allocate shared interrupt %d", dev->irq);
        /* alloc_pcl return values are not checked, it is expected that the
         * provided PCL space is sufficient for the initial allocations */
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
        if (lynx->aux_port != NULL) {
                lynx->dmem_pcl = alloc_pcl(lynx);
                aux_setup_pcls(lynx);
                sema_init(&lynx->mem_dma_mutex, 1);
#endif
        lynx->rcv_pcl = alloc_pcl(lynx);
        lynx->rcv_pcl_start = alloc_pcl(lynx);
        lynx->async.pcl = alloc_pcl(lynx);
        lynx->async.pcl_start = alloc_pcl(lynx);
        lynx->iso_send.pcl = alloc_pcl(lynx);
        lynx->iso_send.pcl_start = alloc_pcl(lynx);
        for (i = 0; i < NUM_ISORCV_PCL; i++) {
                lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
        lynx->iso_rcv.pcl_start = alloc_pcl(lynx);
        /* all allocations successful - simple init stuff follows */
        lynx->lock = SPIN_LOCK_UNLOCKED;
        reg_write(lynx, PCI_INT_ENABLE, PCI_INT_AUX_INT | PCI_INT_DMA_ALL);
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
        init_waitqueue_head(&lynx->mem_dma_intr_wait);
        init_waitqueue_head(&lynx->aux_intr_wait);
#endif
        /* iso receive delivery runs as a task-queue bottom half */
        lynx->iso_rcv.tq.routine = (void (*)(void*))iso_rcv_bh;
        lynx->iso_rcv.tq.data = lynx;
        lynx->iso_rcv.lock = SPIN_LOCK_UNLOCKED;
        lynx->async.queue_lock = SPIN_LOCK_UNLOCKED;
        lynx->async.channel = CHANNEL_ASYNC_SEND;
        lynx->iso_send.queue_lock = SPIN_LOCK_UNLOCKED;
        lynx->iso_send.channel = CHANNEL_ISO_SEND;
        PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
              "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
              lynx->local_ram, lynx->aux_port);
        /* now, looking for PHY register set */
        if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
                lynx->phyic.reg_1394a = 1;
                PRINT(KERN_INFO, lynx->id,
                      "found 1394a conform PHY (using extended register set)");
                lynx->phyic.vendor = get_phy_vendorid(lynx);
                lynx->phyic.product = get_phy_productid(lynx);
        } else {
                lynx->phyic.reg_1394a = 0;
                PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
        return 0;
#undef FAIL
/* Unwind the initialization of one card.  lynx->state records how far
 * add_card() got; every case intentionally falls through so all
 * resources acquired up to that state are released in reverse order. */
static void remove_card(struct ti_lynx *lynx)
        int i;
        switch (lynx->state) {
        case have_intr:
                /* mask all interrupts before giving the line back */
                reg_write(lynx, PCI_INT_ENABLE, 0);
                free_irq(lynx->dev->irq, lynx);
                /* fall through */
        case have_iomappings:
                /* reset the chip so it stops touching host memory */
                reg_write(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
                iounmap(lynx->registers);
                iounmap(lynx->local_rom);
                iounmap(lynx->local_ram);
                iounmap(lynx->aux_port);
                /* fall through */
        case have_1394_buffers:
                for (i = 0; i < ISORCV_PAGES; i++) {
                        if (lynx->iso_rcv.page[i]) {
                                pci_free_consistent(lynx->dev, PAGE_SIZE,
                                                    lynx->iso_rcv.page[i],
                                                    lynx->iso_rcv.page_dma[i]);
                pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
                                    lynx->rcv_page_dma);
                /* fall through */
        case have_aux_buf:
#ifdef CONFIG_IEEE1394_PCILYNX_PORTS
                pci_free_consistent(lynx->dev, 65536, lynx->mem_dma_buffer,
                                    lynx->mem_dma_buffer_dma);
#endif
                /* fall through */
        case have_pcl_mem:
#ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
                pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
                                    lynx->pcl_mem_dma);
#endif
                /* fall through */
        case clear:
                /* do nothing - already freed */
        lynx->state = clear;
1495 static int init_driver()
1497 struct pci_dev *dev = NULL;
1498 int success = 0;
1500 if (num_of_cards) {
1501 PRINT_G(KERN_DEBUG, __PRETTY_FUNCTION__ " called again");
1502 return 0;
1505 PRINT_G(KERN_INFO, "looking for PCILynx cards");
1507 while ((dev = pci_find_device(PCI_VENDOR_ID_TI,
1508 PCI_DEVICE_ID_TI_PCILYNX, dev))
1509 != NULL) {
1510 if (add_card(dev) == 0) {
1511 success = 1;
1515 if (success == 0) {
1516 PRINT_G(KERN_WARNING, "no operable PCILynx cards found");
1517 return -ENXIO;
1520 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1521 if (register_chrdev(PCILYNX_MAJOR, PCILYNX_DRIVER_NAME, &aux_ops)) {
1522 PRINT_G(KERN_ERR, "allocation of char major number %d failed",
1523 PCILYNX_MAJOR);
1524 return -EBUSY;
1526 #endif
1528 return 0;
1532 static size_t get_lynx_rom(struct hpsb_host *host, const quadlet_t **ptr)
1534 *ptr = lynx_csr_rom;
1535 return sizeof(lynx_csr_rom);
1538 struct hpsb_host_template *get_lynx_template(void)
1540 static struct hpsb_host_template tmpl = {
1541 name: "pcilynx",
1542 detect_hosts: lynx_detect,
1543 initialize_host: lynx_initialize,
1544 release_host: lynx_release,
1545 get_rom: get_lynx_rom,
1546 transmit_packet: lynx_transmit,
1547 devctl: lynx_devctl
1550 return &tmpl;
1554 #ifdef MODULE
1556 /* EXPORT_NO_SYMBOLS; */
1558 MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
1559 MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
1560 MODULE_SUPPORTED_DEVICE("pcilynx");
1562 void cleanup_module(void)
1564 hpsb_unregister_lowlevel(get_lynx_template());
1565 PRINT_G(KERN_INFO, "removed " PCILYNX_DRIVER_NAME " module");
1568 int init_module(void)
1570 if (hpsb_register_lowlevel(get_lynx_template())) {
1571 PRINT_G(KERN_ERR, "registering failed");
1572 return -ENXIO;
1573 } else {
1574 return 0;
1578 #endif /* MODULE */