2 * ti_pcilynx.c - Texas Instruments PCILynx driver
3 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
4 * Stephan Linz <linz@mazet.de>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 #include <linux/config.h>
22 #include <linux/kernel.h>
23 #include <linux/slab.h>
24 #include <linux/interrupt.h>
25 #include <linux/wait.h>
26 #include <linux/errno.h>
27 #include <linux/module.h>
28 #include <linux/pci.h>
30 #include <linux/poll.h>
31 #include <asm/byteorder.h>
32 #include <asm/atomic.h>
34 #include <asm/uaccess.h>
37 #include "ieee1394_types.h"
39 #include "ieee1394_core.h"
43 #if MAX_PCILYNX_CARDS > PCILYNX_MINOR_ROM_START
44 #error Max number of cards is bigger than PCILYNX_MINOR_ROM_START - this does not work.
47 /* print general (card independent) information */
48 #define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
49 /* print card specific information */
50 #define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
52 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
53 #define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
54 #define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
56 #define PRINT_GD(level, fmt, args...) do {} while (0)
57 #define PRINTD(level, card, fmt, args...) do {} while (0)
60 static struct ti_lynx cards
[MAX_PCILYNX_CARDS
];
61 static int num_of_cards
= 0;
65 * PCL handling functions.
68 static pcl_t
alloc_pcl(struct ti_lynx
*lynx
)
73 spin_lock(&lynx
->lock
);
74 /* FIXME - use ffz() to make this readable */
75 for (i
= 0; i
< (LOCALRAM_SIZE
/ 1024); i
++) {
76 m
= lynx
->pcl_bmap
[i
];
77 for (j
= 0; j
< 8; j
++) {
82 lynx
->pcl_bmap
[i
] = m
;
83 spin_unlock(&lynx
->lock
);
87 spin_unlock(&lynx
->lock
);
94 static void free_pcl(struct ti_lynx
*lynx
, pcl_t pclid
)
105 spin_lock(&lynx
->lock
);
106 if (lynx
->pcl_bmap
[off
] & 1<<bit
) {
107 lynx
->pcl_bmap
[off
] &= ~(1<<bit
);
109 PRINT(KERN_ERR
, lynx
->id
,
110 "attempted to free unallocated PCL %d", pclid
);
112 spin_unlock(&lynx
->lock
);
115 /* functions useful for debugging */
116 static void pretty_print_pcl(const struct ti_pcl
*pcl
)
120 printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
121 pcl
->next
, pcl
->user_data
, pcl
->pcl_status
,
122 pcl
->remaining_transfer_count
, pcl
->next_data_buffer
);
125 for (i
=0; i
<13; i
++) {
126 printk(" c%x:%08x d%x:%08x",
127 i
, pcl
->buffer
[i
].control
, i
, pcl
->buffer
[i
].pointer
);
128 if (!(i
& 0x3) && (i
!= 12)) printk("\nPCL");
133 static void print_pcl(const struct ti_lynx
*lynx
, pcl_t pclid
)
137 get_pcl(lynx
, pclid
, &pcl
);
138 pretty_print_pcl(&pcl
);
143 static int add_card(struct pci_dev
*dev
);
144 static void remove_card(struct ti_lynx
*lynx
);
145 static int init_driver(void);
150 /***********************************
151 * IEEE-1394 functionality section *
152 ***********************************/
155 static int get_phy_reg(struct ti_lynx
*lynx
, int addr
)
163 PRINT(KERN_ERR
, lynx
->id
, __FUNCTION__
164 ": PHY register address %d out of range", addr
);
168 spin_lock_irqsave(&lynx
->phy_reg_lock
, flags
);
170 reg_write(lynx
, LINK_PHY
, LINK_PHY_READ
| LINK_PHY_ADDR(addr
));
172 retval
= reg_read(lynx
, LINK_PHY
);
175 PRINT(KERN_ERR
, lynx
->id
, __FUNCTION__
176 ": runaway loop, aborting");
181 } while ((retval
& 0xf00) != LINK_PHY_RADDR(addr
));
183 reg_write(lynx
, LINK_INT_STATUS
, LINK_INT_PHY_REG_RCVD
);
184 spin_unlock_irqrestore(&lynx
->phy_reg_lock
, flags
);
187 return retval
& 0xff;
193 static int set_phy_reg(struct ti_lynx
*lynx
, int addr
, int val
)
198 PRINT(KERN_ERR
, lynx
->id
, __FUNCTION__
199 ": PHY register address %d out of range", addr
);
204 PRINT(KERN_ERR
, lynx
->id
, __FUNCTION__
205 ": PHY register value %d out of range", val
);
209 spin_lock_irqsave(&lynx
->phy_reg_lock
, flags
);
211 reg_write(lynx
, LINK_PHY
, LINK_PHY_WRITE
| LINK_PHY_ADDR(addr
)
212 | LINK_PHY_WDATA(val
));
214 spin_unlock_irqrestore(&lynx
->phy_reg_lock
, flags
);
219 static int sel_phy_reg_page(struct ti_lynx
*lynx
, int page
)
224 PRINT(KERN_ERR
, lynx
->id
, __FUNCTION__
225 ": PHY page %d out of range", page
);
229 reg
= get_phy_reg(lynx
, 7);
233 set_phy_reg(lynx
, 7, reg
);
240 #if 0 /* not needed at this time */
241 static int sel_phy_reg_port(struct ti_lynx
*lynx
, int port
)
246 PRINT(KERN_ERR
, lynx
->id
, __FUNCTION__
247 ": PHY port %d out of range", port
);
251 reg
= get_phy_reg(lynx
, 7);
255 set_phy_reg(lynx
, 7, reg
);
263 static u32
get_phy_vendorid(struct ti_lynx
*lynx
)
266 sel_phy_reg_page(lynx
, 1);
267 pvid
|= (get_phy_reg(lynx
, 10) << 16);
268 pvid
|= (get_phy_reg(lynx
, 11) << 8);
269 pvid
|= get_phy_reg(lynx
, 12);
270 PRINT(KERN_INFO
, lynx
->id
, "PHY vendor id 0x%06x", pvid
);
274 static u32
get_phy_productid(struct ti_lynx
*lynx
)
277 sel_phy_reg_page(lynx
, 1);
278 id
|= (get_phy_reg(lynx
, 13) << 16);
279 id
|= (get_phy_reg(lynx
, 14) << 8);
280 id
|= get_phy_reg(lynx
, 15);
281 PRINT(KERN_INFO
, lynx
->id
, "PHY product id 0x%06x", id
);
285 static quadlet_t
generate_own_selfid(struct ti_lynx
*lynx
,
286 struct hpsb_host
*host
)
292 for (i
= 0; i
< 7; i
++) {
293 phyreg
[i
] = get_phy_reg(lynx
, i
);
296 /* FIXME? We assume a TSB21LV03A phy here. This code doesn't support
297 more than 3 ports on the PHY anyway. */
299 lsid
= 0x80400000 | ((phyreg
[0] & 0xfc) << 22);
300 lsid
|= (phyreg
[1] & 0x3f) << 16; /* gap count */
301 lsid
|= (phyreg
[2] & 0xc0) << 8; /* max speed */
302 lsid
|= (phyreg
[6] & 0x01) << 11; /* contender (phy dependent) */
303 /* lsid |= 1 << 11; *//* set contender (hack) */
304 lsid
|= (phyreg
[6] & 0x10) >> 3; /* initiated reset */
306 for (i
= 0; i
< (phyreg
[2] & 0xf); i
++) { /* ports */
307 if (phyreg
[3 + i
] & 0x4) {
308 lsid
|= (((phyreg
[3 + i
] & 0x8) | 0x10) >> 3)
311 lsid
|= 1 << (6 - i
*2);
316 PRINT(KERN_DEBUG
, lynx
->id
, "generated own selfid 0x%x", lsid
);
320 static void handle_selfid(struct ti_lynx
*lynx
, struct hpsb_host
*host
, size_t size
)
322 quadlet_t
*q
= lynx
->rcv_page
;
327 i
= (size
> 16 ? 16 : size
) / 4 - 1;
333 if (!lynx
->phyic
.reg_1394a
) {
334 lsid
= generate_own_selfid(lynx
, host
);
337 phyid
= get_phy_reg(lynx
, 0);
338 isroot
= (phyid
& 2) != 0;
340 PRINT(KERN_INFO
, lynx
->id
, "SelfID process finished (phyid %d, %s)",
341 phyid
, (isroot
? "root" : "not root"));
342 reg_write(lynx
, LINK_ID
, (0xffc0 | phyid
) << 16);
344 if (!lynx
->phyic
.reg_1394a
&& !size
) {
345 hpsb_selfid_received(host
, lsid
);
349 struct selfid
*sid
= (struct selfid
*)q
;
351 if (!lynx
->phyic
.reg_1394a
&& !sid
->extended
352 && (sid
->phy_id
== (phyid
+ 1))) {
353 hpsb_selfid_received(host
, lsid
);
357 PRINT(KERN_DEBUG
, lynx
->id
, "selfid packet 0x%x rcvd",
359 hpsb_selfid_received(host
, q
[0]);
361 PRINT(KERN_INFO
, lynx
->id
,
362 "inconsistent selfid 0x%x/0x%x", q
[0], q
[1]);
368 if (!lynx
->phyic
.reg_1394a
&& isroot
&& phyid
!= 0) {
369 hpsb_selfid_received(host
, lsid
);
372 if (isroot
) reg_set_bits(lynx
, LINK_CONTROL
, LINK_CONTROL_CYCMASTER
);
374 hpsb_selfid_complete(host
, phyid
, isroot
);
379 /* This must be called with the respective queue_lock held. */
380 static void send_next(struct ti_lynx
*lynx
, int what
)
383 struct lynx_send_data
*d
;
384 struct hpsb_packet
*packet
;
386 d
= (what
== iso
? &lynx
->iso_send
: &lynx
->async
);
389 d
->header_dma
= pci_map_single(lynx
->dev
, packet
->header
,
390 packet
->header_size
, PCI_DMA_TODEVICE
);
391 if (packet
->data_size
) {
392 d
->data_dma
= pci_map_single(lynx
->dev
, packet
->data
,
399 pcl
.next
= PCL_NEXT_INVALID
;
400 pcl
.async_error_next
= PCL_NEXT_INVALID
;
402 pcl
.buffer
[0].control
= packet
->speed_code
<< 14 | packet
->header_size
;
404 pcl
.buffer
[0].control
= packet
->speed_code
<< 14 | packet
->header_size
407 pcl
.buffer
[0].pointer
= d
->header_dma
;
408 pcl
.buffer
[1].control
= PCL_LAST_BUFF
| packet
->data_size
;
409 pcl
.buffer
[1].pointer
= d
->data_dma
;
411 switch (packet
->type
) {
413 pcl
.buffer
[0].control
|= PCL_CMD_XMT
;
416 pcl
.buffer
[0].control
|= PCL_CMD_XMT
| PCL_ISOMODE
;
419 pcl
.buffer
[0].control
|= PCL_CMD_UNFXMT
;
423 if (!packet
->data_be
) {
424 pcl
.buffer
[1].control
|= PCL_BIGENDIAN
;
427 put_pcl(lynx
, d
->pcl
, &pcl
);
428 run_pcl(lynx
, d
->pcl_start
, d
->channel
);
432 static int lynx_detect(struct hpsb_host_template
*tmpl
)
434 struct hpsb_host
*host
;
439 for (i
= 0; i
< num_of_cards
; i
++) {
440 host
= hpsb_get_host(tmpl
, 0);
442 /* simply don't init more after out of mem */
445 host
->hostdata
= &cards
[i
];
446 cards
[i
].host
= host
;
452 static int lynx_initialize(struct hpsb_host
*host
)
454 struct ti_lynx
*lynx
= host
->hostdata
;
459 lynx
->async
.queue
= NULL
;
460 spin_lock_init(&lynx
->async
.queue_lock
);
461 spin_lock_init(&lynx
->phy_reg_lock
);
463 pcl
.next
= pcl_bus(lynx
, lynx
->rcv_pcl
);
464 put_pcl(lynx
, lynx
->rcv_pcl_start
, &pcl
);
466 pcl
.next
= PCL_NEXT_INVALID
;
467 pcl
.async_error_next
= PCL_NEXT_INVALID
;
469 pcl
.buffer
[0].control
= PCL_CMD_RCV
| 16;
470 pcl
.buffer
[1].control
= PCL_LAST_BUFF
| 4080;
472 pcl
.buffer
[0].control
= PCL_CMD_RCV
| PCL_BIGENDIAN
| 16;
473 pcl
.buffer
[1].control
= PCL_LAST_BUFF
| 4080;
475 pcl
.buffer
[0].pointer
= lynx
->rcv_page_dma
;
476 pcl
.buffer
[1].pointer
= lynx
->rcv_page_dma
+ 16;
477 put_pcl(lynx
, lynx
->rcv_pcl
, &pcl
);
479 pcl
.next
= pcl_bus(lynx
, lynx
->async
.pcl
);
480 pcl
.async_error_next
= pcl_bus(lynx
, lynx
->async
.pcl
);
481 put_pcl(lynx
, lynx
->async
.pcl_start
, &pcl
);
483 pcl
.next
= pcl_bus(lynx
, lynx
->iso_send
.pcl
);
484 pcl
.async_error_next
= PCL_NEXT_INVALID
;
485 put_pcl(lynx
, lynx
->iso_send
.pcl_start
, &pcl
);
487 pcl
.next
= PCL_NEXT_INVALID
;
488 pcl
.async_error_next
= PCL_NEXT_INVALID
;
489 pcl
.buffer
[0].control
= PCL_CMD_RCV
| 4;
491 pcl
.buffer
[0].control
|= PCL_BIGENDIAN
;
493 pcl
.buffer
[1].control
= PCL_LAST_BUFF
| 2044;
495 for (i
= 0; i
< NUM_ISORCV_PCL
; i
++) {
496 int page
= i
/ ISORCV_PER_PAGE
;
497 int sec
= i
% ISORCV_PER_PAGE
;
499 pcl
.buffer
[0].pointer
= lynx
->iso_rcv
.page_dma
[page
]
500 + sec
* MAX_ISORCV_SIZE
;
501 pcl
.buffer
[1].pointer
= pcl
.buffer
[0].pointer
+ 4;
502 put_pcl(lynx
, lynx
->iso_rcv
.pcl
[i
], &pcl
);
506 for (i
= 0; i
< NUM_ISORCV_PCL
; i
++) {
507 pcli
[i
] = pcl_bus(lynx
, lynx
->iso_rcv
.pcl
[i
]);
509 put_pcl(lynx
, lynx
->iso_rcv
.pcl_start
, &pcl
);
511 /* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
512 reg_write(lynx
, FIFO_SIZES
, 0x003030a0);
513 /* 20 byte threshold before triggering PCI transfer */
514 reg_write(lynx
, DMA_GLOBAL_REGISTER
, 0x2<<24);
515 /* threshold on both send FIFOs before transmitting:
516 FIFO size - cache line size - 1 */
517 i
= reg_read(lynx
, PCI_LATENCY_CACHELINE
) & 0xff;
519 reg_write(lynx
, FIFO_XMIT_THRESHOLD
, (i
<< 8) | i
);
521 reg_set_bits(lynx
, PCI_INT_ENABLE
, PCI_INT_1394
);
523 reg_write(lynx
, LINK_INT_ENABLE
, LINK_INT_PHY_TIMEOUT
524 | LINK_INT_PHY_REG_RCVD
| LINK_INT_PHY_BUSRESET
525 | LINK_INT_ISO_STUCK
| LINK_INT_ASYNC_STUCK
526 | LINK_INT_SENT_REJECT
| LINK_INT_TX_INVALID_TC
527 | LINK_INT_GRF_OVERFLOW
| LINK_INT_ITF_UNDERFLOW
528 | LINK_INT_ATF_UNDERFLOW
);
530 reg_write(lynx
, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV
), 0);
531 reg_write(lynx
, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV
), 0xa<<4);
532 reg_write(lynx
, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV
), 0);
533 reg_write(lynx
, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV
),
534 DMA_WORD1_CMP_MATCH_NODE_BCAST
| DMA_WORD1_CMP_MATCH_BROADCAST
535 | DMA_WORD1_CMP_MATCH_LOCAL
| DMA_WORD1_CMP_MATCH_BUS_BCAST
536 | DMA_WORD1_CMP_ENABLE_SELF_ID
| DMA_WORD1_CMP_ENABLE_MASTER
);
538 run_pcl(lynx
, lynx
->rcv_pcl_start
, CHANNEL_ASYNC_RCV
);
540 reg_write(lynx
, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV
), 0);
541 reg_write(lynx
, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV
), 0x9<<4);
542 reg_write(lynx
, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV
), 0);
543 reg_write(lynx
, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV
), 0);
545 run_sub_pcl(lynx
, lynx
->iso_rcv
.pcl_start
, 0, CHANNEL_ISO_RCV
);
547 reg_write(lynx
, LINK_CONTROL
, LINK_CONTROL_RCV_CMP_VALID
548 | LINK_CONTROL_TX_ISO_EN
| LINK_CONTROL_RX_ISO_EN
549 | LINK_CONTROL_TX_ASYNC_EN
| LINK_CONTROL_RX_ASYNC_EN
550 | LINK_CONTROL_RESET_TX
| LINK_CONTROL_RESET_RX
551 | LINK_CONTROL_CYCTIMEREN
);
553 /* attempt to enable contender bit -FIXME- would this work elsewhere? */
554 reg_set_bits(lynx
, GPIO_CTRL_A
, 0x1);
555 reg_write(lynx
, GPIO_DATA_BASE
+ 0x3c, 0x1);
560 static void lynx_release(struct hpsb_host
*host
)
562 struct ti_lynx
*lynx
;
565 lynx
= host
->hostdata
;
568 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
569 unregister_chrdev(PCILYNX_MAJOR
, PCILYNX_DRIVER_NAME
);
574 static int lynx_transmit(struct hpsb_host
*host
, struct hpsb_packet
*packet
)
576 struct ti_lynx
*lynx
= host
->hostdata
;
577 struct lynx_send_data
*d
;
580 if (packet
->data_size
>= 4096) {
581 PRINT(KERN_ERR
, lynx
->id
, "transmit packet data too big (%d)",
586 switch (packet
->type
) {
595 PRINT(KERN_ERR
, lynx
->id
, "invalid packet type %d",
600 packet
->xnext
= NULL
;
601 if (packet
->tcode
== TCODE_WRITEQ
602 || packet
->tcode
== TCODE_READQ_RESPONSE
) {
603 cpu_to_be32s(&packet
->header
[3]);
606 spin_lock_irqsave(&d
->queue_lock
, flags
);
608 if (d
->queue
== NULL
) {
610 d
->queue_last
= packet
;
611 send_next(lynx
, packet
->type
);
613 d
->queue_last
->xnext
= packet
;
614 d
->queue_last
= packet
;
617 spin_unlock_irqrestore(&d
->queue_lock
, flags
);
622 static int lynx_devctl(struct hpsb_host
*host
, enum devctl_cmd cmd
, int arg
)
624 struct ti_lynx
*lynx
= host
->hostdata
;
626 struct hpsb_packet
*packet
, *lastpacket
;
637 retval
= get_phy_reg(lynx
, 1);
638 arg
|= (retval
== -1 ? 63 : retval
);
641 PRINT(KERN_INFO
, lynx
->id
, "resetting bus on request%s",
642 (host
->attempt_root
? " and attempting to become root"
645 set_phy_reg(lynx
, 1, arg
);
648 case GET_CYCLE_COUNTER
:
649 retval
= reg_read(lynx
, CYCLE_TIMER
);
652 case SET_CYCLE_COUNTER
:
653 reg_write(lynx
, CYCLE_TIMER
, arg
);
657 reg_write(lynx
, LINK_ID
,
658 (arg
<< 22) | (reg_read(lynx
, LINK_ID
) & 0x003f0000));
661 case ACT_CYCLE_MASTER
:
663 reg_set_bits(lynx
, LINK_CONTROL
,
664 LINK_CONTROL_CYCMASTER
);
666 reg_clear_bits(lynx
, LINK_CONTROL
,
667 LINK_CONTROL_CYCMASTER
);
671 case CANCEL_REQUESTS
:
672 spin_lock_irqsave(&lynx
->async
.queue_lock
, flags
);
674 reg_write(lynx
, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND
), 0);
675 packet
= lynx
->async
.queue
;
676 lynx
->async
.queue
= NULL
;
678 spin_unlock_irqrestore(&lynx
->async
.queue_lock
, flags
);
680 while (packet
!= NULL
) {
682 packet
= packet
->xnext
;
683 hpsb_packet_sent(host
, lastpacket
, ACKX_ABORTED
);
696 case ISO_LISTEN_CHANNEL
:
697 spin_lock_irqsave(&lynx
->iso_rcv
.lock
, flags
);
699 if (lynx
->iso_rcv
.chan_count
++ == 0) {
700 reg_write(lynx
, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV
),
701 DMA_WORD1_CMP_ENABLE_MASTER
);
704 spin_unlock_irqrestore(&lynx
->iso_rcv
.lock
, flags
);
707 case ISO_UNLISTEN_CHANNEL
:
708 spin_lock_irqsave(&lynx
->iso_rcv
.lock
, flags
);
710 if (--lynx
->iso_rcv
.chan_count
== 0) {
711 reg_write(lynx
, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV
),
715 spin_unlock_irqrestore(&lynx
->iso_rcv
.lock
, flags
);
719 PRINT(KERN_ERR
, lynx
->id
, "unknown devctl command %d", cmd
);
727 /***************************************
728 * IEEE-1394 functionality section END *
729 ***************************************/
731 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
732 /* VFS functions for local bus / aux device access. Access to those
733 * is implemented as a character device instead of block devices
734 * because buffers are not wanted for this. Therefore llseek (from
735 * VFS) can be used for these char devices with obvious effects.
737 static int mem_open(struct inode
*, struct file
*);
738 static int mem_release(struct inode
*, struct file
*);
739 static unsigned int aux_poll(struct file
*, struct poll_table_struct
*);
740 static loff_t
mem_llseek(struct file
*, loff_t
, int);
741 static ssize_t
mem_read (struct file
*, char*, size_t, loff_t
*);
742 static ssize_t
mem_write(struct file
*, const char*, size_t, loff_t
*);
745 static struct file_operations aux_ops
= {
752 release
: mem_release
,
756 static void aux_setup_pcls(struct ti_lynx
*lynx
)
760 pcl
.next
= PCL_NEXT_INVALID
;
761 pcl
.user_data
= pcl_bus(lynx
, lynx
->dmem_pcl
);
762 put_pcl(lynx
, lynx
->dmem_pcl
, &pcl
);
765 static int mem_open(struct inode
*inode
, struct file
*file
)
767 int cid
= MINOR(inode
->i_rdev
);
768 enum { t_rom
, t_aux
, t_ram
} type
;
771 V22_COMPAT_MOD_INC_USE_COUNT
;
773 if (cid
< PCILYNX_MINOR_AUX_START
) {
774 /* just for completeness */
775 V22_COMPAT_MOD_DEC_USE_COUNT
;
777 } else if (cid
< PCILYNX_MINOR_ROM_START
) {
778 cid
-= PCILYNX_MINOR_AUX_START
;
779 if (cid
>= num_of_cards
|| !cards
[cid
].aux_port
) {
780 V22_COMPAT_MOD_DEC_USE_COUNT
;
784 } else if (cid
< PCILYNX_MINOR_RAM_START
) {
785 cid
-= PCILYNX_MINOR_ROM_START
;
786 if (cid
>= num_of_cards
|| !cards
[cid
].local_rom
) {
787 V22_COMPAT_MOD_DEC_USE_COUNT
;
792 /* WARNING: Know what you are doing when opening RAM.
793 * It is currently used inside the driver! */
794 cid
-= PCILYNX_MINOR_RAM_START
;
795 if (cid
>= num_of_cards
|| !cards
[cid
].local_ram
) {
796 V22_COMPAT_MOD_DEC_USE_COUNT
;
802 md
= (struct memdata
*)kmalloc(sizeof(struct memdata
), SLAB_KERNEL
);
804 V22_COMPAT_MOD_DEC_USE_COUNT
;
808 md
->lynx
= &cards
[cid
];
819 atomic_set(&md
->aux_intr_last_seen
,
820 atomic_read(&cards
[cid
].aux_intr_seen
));
825 file
->private_data
= md
;
830 static int mem_release(struct inode
*inode
, struct file
*file
)
832 struct memdata
*md
= (struct memdata
*)file
->private_data
;
836 V22_COMPAT_MOD_DEC_USE_COUNT
;
840 static unsigned int aux_poll(struct file
*file
, poll_table
*pt
)
842 struct memdata
*md
= (struct memdata
*)file
->private_data
;
846 /* reading and writing is always allowed */
847 mask
= POLLIN
| POLLRDNORM
| POLLOUT
| POLLWRNORM
;
849 if (md
->type
== aux
) {
850 poll_wait(file
, &cards
[cid
].aux_intr_wait
, pt
);
852 if (atomic_read(&md
->aux_intr_last_seen
)
853 != atomic_read(&cards
[cid
].aux_intr_seen
)) {
855 atomic_inc(&md
->aux_intr_last_seen
);
862 loff_t
mem_llseek(struct file
*file
, loff_t offs
, int orig
)
871 newoffs
= offs
+ file
->f_pos
;
874 newoffs
= PCILYNX_MAX_MEMORY
+ 1 + offs
;
880 if (newoffs
< 0 || newoffs
> PCILYNX_MAX_MEMORY
+ 1) return -EINVAL
;
882 file
->f_pos
= newoffs
;
887 * do not DMA if count is too small because this will have a serious impact
888 * on performance - the value 2400 was found by experiment and may not work
889 * everywhere as good as here - use mem_mindma option for modules to change
891 short mem_mindma
= 2400;
892 MODULE_PARM(mem_mindma
, "h");
894 static ssize_t
mem_dmaread(struct memdata
*md
, u32 physbuf
, ssize_t count
,
901 DECLARE_WAITQUEUE(wait
, current
);
904 count
= MIN(count
, 53196);
907 if (reg_read(md
->lynx
, DMA_CHAN_CTRL(CHANNEL_LOCALBUS
))
908 & DMA_CHAN_CTRL_BUSY
) {
909 PRINT(KERN_WARNING
, md
->lynx
->id
, "DMA ALREADY ACTIVE!");
912 reg_write(md
->lynx
, LBUS_ADDR
, md
->type
| offset
);
914 pcl
= edit_pcl(md
->lynx
, md
->lynx
->dmem_pcl
, &pcltmp
);
915 pcl
->buffer
[0].control
= PCL_CMD_LBUS_TO_PCI
| MIN(count
, 4092);
916 pcl
->buffer
[0].pointer
= physbuf
;
922 pcl
->buffer
[i
].control
= MIN(count
, 4092);
923 pcl
->buffer
[i
].pointer
= physbuf
+ i
* 4092;
926 pcl
->buffer
[i
].control
|= PCL_LAST_BUFF
;
927 commit_pcl(md
->lynx
, md
->lynx
->dmem_pcl
, &pcltmp
);
929 set_current_state(TASK_INTERRUPTIBLE
);
930 add_wait_queue(&md
->lynx
->mem_dma_intr_wait
, &wait
);
931 run_sub_pcl(md
->lynx
, md
->lynx
->dmem_pcl
, 2, CHANNEL_LOCALBUS
);
934 while (reg_read(md
->lynx
, DMA_CHAN_CTRL(CHANNEL_LOCALBUS
))
935 & DMA_CHAN_CTRL_BUSY
) {
936 if (signal_pending(current
)) {
943 reg_write(md
->lynx
, DMA_CHAN_CTRL(CHANNEL_LOCALBUS
), 0);
944 remove_wait_queue(&md
->lynx
->mem_dma_intr_wait
, &wait
);
946 if (reg_read(md
->lynx
, DMA_CHAN_CTRL(CHANNEL_LOCALBUS
))
947 & DMA_CHAN_CTRL_BUSY
) {
948 PRINT(KERN_ERR
, md
->lynx
->id
, "DMA STILL ACTIVE!");
954 static ssize_t
mem_read(struct file
*file
, char *buffer
, size_t count
,
957 struct memdata
*md
= (struct memdata
*)file
->private_data
;
960 int off
= (int)*offset
; /* avoid useless 64bit-arithmetic */
964 if ((off
+ count
) > PCILYNX_MAX_MEMORY
+ 1) {
965 count
= PCILYNX_MAX_MEMORY
+ 1 - off
;
974 membase
= md
->lynx
->local_rom
;
977 membase
= md
->lynx
->local_ram
;
980 membase
= md
->lynx
->aux_port
;
983 panic("pcilynx%d: unsupported md->type %d in " __FUNCTION__
,
984 md
->lynx
->id
, md
->type
);
987 down(&md
->lynx
->mem_dma_mutex
);
989 if (count
< mem_mindma
) {
990 memcpy_fromio(md
->lynx
->mem_dma_buffer
, membase
+off
, count
);
995 alignfix
= 4 - (off
% 4);
997 if (bcount
< alignfix
) {
1000 memcpy_fromio(md
->lynx
->mem_dma_buffer
, membase
+off
,
1002 if (bcount
== alignfix
) {
1009 while (bcount
>= 4) {
1010 retval
= mem_dmaread(md
, md
->lynx
->mem_dma_buffer_dma
1011 + count
- bcount
, bcount
, off
);
1012 if (retval
< 0) return retval
;
1019 memcpy_fromio(md
->lynx
->mem_dma_buffer
+ count
- bcount
,
1020 membase
+off
, bcount
);
1024 retval
= copy_to_user(buffer
, md
->lynx
->mem_dma_buffer
, count
);
1025 up(&md
->lynx
->mem_dma_mutex
);
1027 if (retval
< 0) return retval
;
1033 static ssize_t
mem_write(struct file
*file
, const char *buffer
, size_t count
,
1036 struct memdata
*md
= (struct memdata
*)file
->private_data
;
1038 if (((*offset
) + count
) > PCILYNX_MAX_MEMORY
+1) {
1039 count
= PCILYNX_MAX_MEMORY
+1 - *offset
;
1041 if (count
== 0 || *offset
> PCILYNX_MAX_MEMORY
) {
1045 /* FIXME: dereferencing pointers to PCI mem doesn't work everywhere */
1048 copy_from_user(md
->lynx
->aux_port
+(*offset
), buffer
, count
);
1051 copy_from_user(md
->lynx
->local_ram
+(*offset
), buffer
, count
);
1054 /* the ROM may be writeable */
1055 copy_from_user(md
->lynx
->local_rom
+(*offset
), buffer
, count
);
1059 file
->f_pos
+= count
;
1062 #endif /* CONFIG_IEEE1394_PCILYNX_PORTS */
1065 /********************************************************
1066 * Global stuff (interrupt handler, init/shutdown code) *
1067 ********************************************************/
1070 static void lynx_irq_handler(int irq
, void *dev_id
,
1071 struct pt_regs
*regs_are_unused
)
1073 struct ti_lynx
*lynx
= (struct ti_lynx
*)dev_id
;
1074 struct hpsb_host
*host
= lynx
->host
;
1078 linkint
= reg_read(lynx
, LINK_INT_STATUS
);
1079 intmask
= reg_read(lynx
, PCI_INT_STATUS
);
1081 PRINTD(KERN_DEBUG
, lynx
->id
, "interrupt: 0x%08x / 0x%08x", intmask
,
1084 reg_write(lynx
, LINK_INT_STATUS
, linkint
);
1085 reg_write(lynx
, PCI_INT_STATUS
, intmask
);
1087 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1088 if (intmask
& PCI_INT_AUX_INT
) {
1089 atomic_inc(&lynx
->aux_intr_seen
);
1090 wake_up_interruptible(&lynx
->aux_intr_wait
);
1093 if (intmask
& PCI_INT_DMA_HLT(CHANNEL_LOCALBUS
)) {
1094 wake_up_interruptible(&lynx
->mem_dma_intr_wait
);
1099 if (intmask
& PCI_INT_1394
) {
1100 if (linkint
& LINK_INT_PHY_TIMEOUT
) {
1101 PRINT(KERN_INFO
, lynx
->id
, "PHY timeout occured");
1103 if (linkint
& LINK_INT_PHY_BUSRESET
) {
1104 PRINT(KERN_INFO
, lynx
->id
, "bus reset interrupt");
1105 if (!host
->in_bus_reset
) {
1106 hpsb_bus_reset(host
);
1109 if (linkint
& LINK_INT_PHY_REG_RCVD
) {
1110 if (!host
->in_bus_reset
) {
1111 PRINT(KERN_INFO
, lynx
->id
,
1112 "phy reg received without reset");
1115 if (linkint
& LINK_INT_ISO_STUCK
) {
1116 PRINT(KERN_INFO
, lynx
->id
, "isochronous transmitter stuck");
1118 if (linkint
& LINK_INT_ASYNC_STUCK
) {
1119 PRINT(KERN_INFO
, lynx
->id
, "asynchronous transmitter stuck");
1121 if (linkint
& LINK_INT_SENT_REJECT
) {
1122 PRINT(KERN_INFO
, lynx
->id
, "sent reject");
1124 if (linkint
& LINK_INT_TX_INVALID_TC
) {
1125 PRINT(KERN_INFO
, lynx
->id
, "invalid transaction code");
1127 if (linkint
& LINK_INT_GRF_OVERFLOW
) {
1128 PRINT(KERN_INFO
, lynx
->id
, "GRF overflow");
1130 if (linkint
& LINK_INT_ITF_UNDERFLOW
) {
1131 PRINT(KERN_INFO
, lynx
->id
, "ITF underflow");
1133 if (linkint
& LINK_INT_ATF_UNDERFLOW
) {
1134 PRINT(KERN_INFO
, lynx
->id
, "ATF underflow");
1138 if (intmask
& PCI_INT_DMA_HLT(CHANNEL_ISO_RCV
)) {
1139 PRINTD(KERN_DEBUG
, lynx
->id
, "iso receive");
1141 spin_lock(&lynx
->iso_rcv
.lock
);
1143 lynx
->iso_rcv
.stat
[lynx
->iso_rcv
.next
] =
1144 reg_read(lynx
, DMA_CHAN_STAT(CHANNEL_ISO_RCV
));
1146 lynx
->iso_rcv
.used
++;
1147 lynx
->iso_rcv
.next
= (lynx
->iso_rcv
.next
+ 1) % NUM_ISORCV_PCL
;
1149 if ((lynx
->iso_rcv
.next
== lynx
->iso_rcv
.last
)
1150 || !lynx
->iso_rcv
.chan_count
) {
1151 PRINTD(KERN_DEBUG
, lynx
->id
, "stopped");
1152 reg_write(lynx
, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV
), 0);
1155 run_sub_pcl(lynx
, lynx
->iso_rcv
.pcl_start
, lynx
->iso_rcv
.next
,
1158 spin_unlock(&lynx
->iso_rcv
.lock
);
1160 queue_task(&lynx
->iso_rcv
.tq
, &tq_immediate
);
1161 mark_bh(IMMEDIATE_BH
);
1164 if (intmask
& PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND
)) {
1166 struct hpsb_packet
*packet
;
1168 spin_lock(&lynx
->async
.queue_lock
);
1170 ack
= reg_read(lynx
, DMA_CHAN_STAT(CHANNEL_ASYNC_SEND
));
1171 packet
= lynx
->async
.queue
;
1172 lynx
->async
.queue
= packet
->xnext
;
1174 pci_unmap_single(lynx
->dev
, lynx
->async
.header_dma
,
1175 packet
->header_size
, PCI_DMA_TODEVICE
);
1176 if (packet
->data_size
) {
1177 pci_unmap_single(lynx
->dev
, lynx
->async
.data_dma
,
1178 packet
->data_size
, PCI_DMA_TODEVICE
);
1181 if (lynx
->async
.queue
!= NULL
) {
1182 send_next(lynx
, async
);
1185 spin_unlock(&lynx
->async
.queue_lock
);
1187 if (ack
& DMA_CHAN_STAT_SPECIALACK
) {
1188 ack
= (ack
>> 15) & 0xf;
1189 PRINTD(KERN_INFO
, lynx
->id
, "special ack %d", ack
);
1190 ack
= (ack
== 1 ? ACKX_TIMEOUT
: ACKX_SEND_ERROR
);
1192 ack
= (ack
>> 15) & 0xf;
1195 hpsb_packet_sent(host
, packet
, ack
);
1198 if (intmask
& PCI_INT_DMA_HLT(CHANNEL_ISO_SEND
)) {
1199 struct hpsb_packet
*packet
;
1201 spin_lock(&lynx
->iso_send
.queue_lock
);
1203 packet
= lynx
->iso_send
.queue
;
1204 lynx
->iso_send
.queue
= packet
->xnext
;
1206 pci_unmap_single(lynx
->dev
, lynx
->iso_send
.header_dma
,
1207 packet
->header_size
, PCI_DMA_TODEVICE
);
1208 if (packet
->data_size
) {
1209 pci_unmap_single(lynx
->dev
, lynx
->iso_send
.data_dma
,
1210 packet
->data_size
, PCI_DMA_TODEVICE
);
1213 if (lynx
->iso_send
.queue
!= NULL
) {
1214 send_next(lynx
, iso
);
1217 spin_unlock(&lynx
->iso_send
.queue_lock
);
1219 hpsb_packet_sent(host
, packet
, ACK_COMPLETE
);
1222 if (intmask
& PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV
)) {
1223 /* general receive DMA completed */
1224 int stat
= reg_read(lynx
, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV
));
1226 PRINTD(KERN_DEBUG
, lynx
->id
, "received packet size %d",
1229 if (stat
& DMA_CHAN_STAT_SELFID
) {
1230 handle_selfid(lynx
, host
, stat
& 0x1fff);
1231 reg_set_bits(lynx
, LINK_CONTROL
,
1232 LINK_CONTROL_RCV_CMP_VALID
1233 | LINK_CONTROL_TX_ASYNC_EN
1234 | LINK_CONTROL_RX_ASYNC_EN
);
1236 quadlet_t
*q_data
= lynx
->rcv_page
;
1237 if ((*q_data
>> 4 & 0xf) == TCODE_READQ_RESPONSE
1238 || (*q_data
>> 4 & 0xf) == TCODE_WRITEQ
) {
1239 cpu_to_be32s(q_data
+ 3);
1241 hpsb_packet_received(host
, q_data
, stat
& 0x1fff, 0);
1244 run_pcl(lynx
, lynx
->rcv_pcl_start
, CHANNEL_ASYNC_RCV
);
1248 static void iso_rcv_bh(struct ti_lynx
*lynx
)
1252 unsigned long flags
;
1254 spin_lock_irqsave(&lynx
->iso_rcv
.lock
, flags
);
1256 while (lynx
->iso_rcv
.used
) {
1257 idx
= lynx
->iso_rcv
.last
;
1258 spin_unlock_irqrestore(&lynx
->iso_rcv
.lock
, flags
);
1260 data
= lynx
->iso_rcv
.page
[idx
/ ISORCV_PER_PAGE
]
1261 + (idx
% ISORCV_PER_PAGE
) * MAX_ISORCV_SIZE
;
1263 if ((*data
>> 16) + 4 != (lynx
->iso_rcv
.stat
[idx
] & 0x1fff)) {
1264 PRINT(KERN_ERR
, lynx
->id
,
1265 "iso length mismatch 0x%08x/0x%08x", *data
,
1266 lynx
->iso_rcv
.stat
[idx
]);
1269 if (lynx
->iso_rcv
.stat
[idx
]
1270 & (DMA_CHAN_STAT_PCIERR
| DMA_CHAN_STAT_PKTERR
)) {
1271 PRINT(KERN_INFO
, lynx
->id
,
1272 "iso receive error on %d to 0x%p", idx
, data
);
1274 hpsb_packet_received(lynx
->host
, data
,
1275 lynx
->iso_rcv
.stat
[idx
] & 0x1fff,
1279 spin_lock_irqsave(&lynx
->iso_rcv
.lock
, flags
);
1280 lynx
->iso_rcv
.last
= (idx
+ 1) % NUM_ISORCV_PCL
;
1281 lynx
->iso_rcv
.used
--;
1284 if (lynx
->iso_rcv
.chan_count
) {
1285 reg_write(lynx
, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV
),
1286 DMA_WORD1_CMP_ENABLE_MASTER
);
1288 spin_unlock_irqrestore(&lynx
->iso_rcv
.lock
, flags
);
1292 static int add_card(struct pci_dev
*dev
)
1294 #define FAIL(fmt, args...) do { \
1295 PRINT_G(KERN_ERR, fmt , ## args); \
1297 remove_card(lynx); \
1301 struct ti_lynx
*lynx
; /* shortcut to currently handled device */
1304 if (num_of_cards
== MAX_PCILYNX_CARDS
) {
1305 PRINT_G(KERN_WARNING
, "cannot handle more than %d cards. "
1306 "Adjust MAX_PCILYNX_CARDS in pcilynx.h.",
1311 lynx
= &cards
[num_of_cards
++];
1313 lynx
->id
= num_of_cards
-1;
1316 if (!pci_dma_supported(dev
, 0xffffffff)) {
1317 FAIL("DMA address limits not supported for PCILynx hardware %d",
1320 if (pci_enable_device(dev
)) {
1321 FAIL("failed to enable PCILynx hardware %d", lynx
->id
);
1323 pci_set_master(dev
);
1325 #ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1326 lynx
->pcl_mem
= pci_alloc_consistent(dev
, LOCALRAM_SIZE
,
1327 &lynx
->pcl_mem_dma
);
1329 if (lynx
->pcl_mem
!= NULL
) {
1330 lynx
->state
= have_pcl_mem
;
1331 PRINT(KERN_INFO
, lynx
->id
,
1332 "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE
,
1335 FAIL("failed to allocate PCL memory area");
1339 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1340 lynx
->mem_dma_buffer
= pci_alloc_consistent(dev
, 65536,
1341 &lynx
->mem_dma_buffer_dma
);
1342 if (lynx
->mem_dma_buffer
== NULL
) {
1343 FAIL("failed to allocate DMA buffer for aux");
1345 lynx
->state
= have_aux_buf
;
1348 lynx
->rcv_page
= pci_alloc_consistent(dev
, PAGE_SIZE
,
1349 &lynx
->rcv_page_dma
);
1350 if (lynx
->rcv_page
== NULL
) {
1351 FAIL("failed to allocate receive buffer");
1353 lynx
->state
= have_1394_buffers
;
1355 for (i
= 0; i
< ISORCV_PAGES
; i
++) {
1356 lynx
->iso_rcv
.page
[i
] =
1357 pci_alloc_consistent(dev
, PAGE_SIZE
,
1358 &lynx
->iso_rcv
.page_dma
[i
]);
1359 if (lynx
->iso_rcv
.page
[i
] == NULL
) {
1360 FAIL("failed to allocate iso receive buffers");
1364 lynx
->registers
= ioremap_nocache(pci_resource_start(dev
,0),
1365 PCILYNX_MAX_REGISTER
);
1366 lynx
->local_ram
= ioremap(pci_resource_start(dev
,1), PCILYNX_MAX_MEMORY
);
1367 lynx
->aux_port
= ioremap(pci_resource_start(dev
,2), PCILYNX_MAX_MEMORY
);
1368 lynx
->local_rom
= ioremap(pci_resource_start(dev
,PCI_ROM_RESOURCE
),
1369 PCILYNX_MAX_MEMORY
);
1370 lynx
->state
= have_iomappings
;
1372 if (lynx
->registers
== NULL
) {
1373 FAIL("failed to remap registers - card not accessible");
1376 #ifdef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1377 if (lynx
->local_ram
== NULL
) {
1378 FAIL("failed to remap local RAM which is required for "
1383 reg_write(lynx
, MISC_CONTROL
, MISC_CONTROL_SWRESET
);
1385 if (!request_irq(dev
->irq
, lynx_irq_handler
, SA_SHIRQ
,
1386 PCILYNX_DRIVER_NAME
, lynx
)) {
1387 PRINT(KERN_INFO
, lynx
->id
, "allocated interrupt %d", dev
->irq
);
1388 lynx
->state
= have_intr
;
1390 FAIL("failed to allocate shared interrupt %d", dev
->irq
);
1393 /* alloc_pcl return values are not checked, it is expected that the
1394 * provided PCL space is sufficient for the initial allocations */
1395 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1396 if (lynx
->aux_port
!= NULL
) {
1397 lynx
->dmem_pcl
= alloc_pcl(lynx
);
1398 aux_setup_pcls(lynx
);
1399 sema_init(&lynx
->mem_dma_mutex
, 1);
1402 lynx
->rcv_pcl
= alloc_pcl(lynx
);
1403 lynx
->rcv_pcl_start
= alloc_pcl(lynx
);
1404 lynx
->async
.pcl
= alloc_pcl(lynx
);
1405 lynx
->async
.pcl_start
= alloc_pcl(lynx
);
1406 lynx
->iso_send
.pcl
= alloc_pcl(lynx
);
1407 lynx
->iso_send
.pcl_start
= alloc_pcl(lynx
);
1409 for (i
= 0; i
< NUM_ISORCV_PCL
; i
++) {
1410 lynx
->iso_rcv
.pcl
[i
] = alloc_pcl(lynx
);
1412 lynx
->iso_rcv
.pcl_start
= alloc_pcl(lynx
);
1414 /* all allocations successful - simple init stuff follows */
1416 lynx
->lock
= SPIN_LOCK_UNLOCKED
;
1418 reg_write(lynx
, PCI_INT_ENABLE
, PCI_INT_AUX_INT
| PCI_INT_DMA_ALL
);
1420 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1421 init_waitqueue_head(&lynx
->mem_dma_intr_wait
);
1422 init_waitqueue_head(&lynx
->aux_intr_wait
);
1425 lynx
->iso_rcv
.tq
.routine
= (void (*)(void*))iso_rcv_bh
;
1426 lynx
->iso_rcv
.tq
.data
= lynx
;
1427 lynx
->iso_rcv
.lock
= SPIN_LOCK_UNLOCKED
;
1429 lynx
->async
.queue_lock
= SPIN_LOCK_UNLOCKED
;
1430 lynx
->async
.channel
= CHANNEL_ASYNC_SEND
;
1431 lynx
->iso_send
.queue_lock
= SPIN_LOCK_UNLOCKED
;
1432 lynx
->iso_send
.channel
= CHANNEL_ISO_SEND
;
1434 PRINT(KERN_INFO
, lynx
->id
, "remapped memory spaces reg 0x%p, rom 0x%p, "
1435 "ram 0x%p, aux 0x%p", lynx
->registers
, lynx
->local_rom
,
1436 lynx
->local_ram
, lynx
->aux_port
);
1438 /* now, looking for PHY register set */
1439 if ((get_phy_reg(lynx
, 2) & 0xe0) == 0xe0) {
1440 lynx
->phyic
.reg_1394a
= 1;
1441 PRINT(KERN_INFO
, lynx
->id
,
1442 "found 1394a conform PHY (using extended register set)");
1443 lynx
->phyic
.vendor
= get_phy_vendorid(lynx
);
1444 lynx
->phyic
.product
= get_phy_productid(lynx
);
1446 lynx
->phyic
.reg_1394a
= 0;
1447 PRINT(KERN_INFO
, lynx
->id
, "found old 1394 PHY");
1454 static void remove_card(struct ti_lynx
*lynx
)
1458 switch (lynx
->state
) {
1460 reg_write(lynx
, PCI_INT_ENABLE
, 0);
1461 free_irq(lynx
->dev
->irq
, lynx
);
1462 case have_iomappings
:
1463 reg_write(lynx
, MISC_CONTROL
, MISC_CONTROL_SWRESET
);
1464 iounmap(lynx
->registers
);
1465 iounmap(lynx
->local_rom
);
1466 iounmap(lynx
->local_ram
);
1467 iounmap(lynx
->aux_port
);
1468 case have_1394_buffers
:
1469 for (i
= 0; i
< ISORCV_PAGES
; i
++) {
1470 if (lynx
->iso_rcv
.page
[i
]) {
1471 pci_free_consistent(lynx
->dev
, PAGE_SIZE
,
1472 lynx
->iso_rcv
.page
[i
],
1473 lynx
->iso_rcv
.page_dma
[i
]);
1476 pci_free_consistent(lynx
->dev
, PAGE_SIZE
, lynx
->rcv_page
,
1477 lynx
->rcv_page_dma
);
1479 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1480 pci_free_consistent(lynx
->dev
, 65536, lynx
->mem_dma_buffer
,
1481 lynx
->mem_dma_buffer_dma
);
1484 #ifndef CONFIG_IEEE1394_PCILYNX_LOCALRAM
1485 pci_free_consistent(lynx
->dev
, LOCALRAM_SIZE
, lynx
->pcl_mem
,
1489 /* do nothing - already freed */
1492 lynx
->state
= clear
;
1495 static int init_driver()
1497 struct pci_dev
*dev
= NULL
;
1501 PRINT_G(KERN_DEBUG
, __PRETTY_FUNCTION__
" called again");
1505 PRINT_G(KERN_INFO
, "looking for PCILynx cards");
1507 while ((dev
= pci_find_device(PCI_VENDOR_ID_TI
,
1508 PCI_DEVICE_ID_TI_PCILYNX
, dev
))
1510 if (add_card(dev
) == 0) {
1516 PRINT_G(KERN_WARNING
, "no operable PCILynx cards found");
1520 #ifdef CONFIG_IEEE1394_PCILYNX_PORTS
1521 if (register_chrdev(PCILYNX_MAJOR
, PCILYNX_DRIVER_NAME
, &aux_ops
)) {
1522 PRINT_G(KERN_ERR
, "allocation of char major number %d failed",
1532 static size_t get_lynx_rom(struct hpsb_host
*host
, const quadlet_t
**ptr
)
1534 *ptr
= lynx_csr_rom
;
1535 return sizeof(lynx_csr_rom
);
/* Build (once, via function-static storage) and return the
 * hpsb_host_template that advertises this low-level driver's entry
 * points to the ieee1394 core.  Uses the old GNU "field:" designated
 * initializer syntax.
 * NOTE(review): this extraction is truncated -- any initializer fields
 * after transmit_packet, the closing brace, and the final
 * "return &tmpl;" are not visible here; confirm against the full file
 * before treating this listing as complete. */
1538 struct hpsb_host_template
*get_lynx_template(void)
1540 static struct hpsb_host_template tmpl
= {
/* bus-scan hook */
1542 detect_hosts
: lynx_detect
,
/* per-host bring-up */
1543 initialize_host
: lynx_initialize
,
/* per-host teardown */
1544 release_host
: lynx_release
,
/* config ROM provider (see get_lynx_rom above) */
1545 get_rom
: get_lynx_rom
,
/* async/iso packet transmit entry point */
1546 transmit_packet
: lynx_transmit
,
1556 /* EXPORT_NO_SYMBOLS; */
/* Standard module metadata, shown by modinfo(8). */
1558 MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
1559 MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
1560 MODULE_SUPPORTED_DEVICE("pcilynx");
1562 void cleanup_module(void)
1564 hpsb_unregister_lowlevel(get_lynx_template());
1565 PRINT_G(KERN_INFO
, "removed " PCILYNX_DRIVER_NAME
" module");
1568 int init_module(void)
1570 if (hpsb_register_lowlevel(get_lynx_template())) {
1571 PRINT_G(KERN_ERR
, "registering failed");