2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
8 * Copyright (C) 1999, 2000 Ralf Baechle
9 * Copyright (C) 1995, 1999, 2000 by Silicon Graphics, Inc.
13 * If you find problems with this driver, then if possible do the
14 * following. Hook up a terminal to the MSC port, send an NMI to the CPUs
15 * by typing ^Tnmi (where ^T stands for <CTRL>-T). You'll see something
18 * 1A 000: *** NMI while in Kernel and no NMI vector installed on node 0
19 * 1A 000: *** Error EPC: 0xffffffff800265e4 (0xffffffff800265e4)
20 * 1A 000: *** Press ENTER to continue.
22 * Next enter the command ``lw i:0x86000f0 0x18'' and include this
23 * command's output, which will look like below, with your bug report.
25 * 1A 000: POD MSC Dex> lw i:0x86000f0 0x18
26 * 1A 000: 92000000086000f0: 0021f28c 00000000 00000000 00000000
27 * 1A 000: 9200000008600100: a5000000 01cde000 00000000 000004e0
28 * 1A 000: 9200000008600110: 00000650 00000000 00110b15 00000000
29 * 1A 000: 9200000008600120: 006d0005 77bbca0a a5000000 01ce0000
30 * 1A 000: 9200000008600130: 80000500 00000500 00002538 05690008
31 * 1A 000: 9200000008600140: 00000000 00000000 000003e1 0000786d
35 * - Handle allocation failures in ioc3_alloc_skb() more gracefully.
36 * - Handle allocation failures in ioc3_init_rings().
37 * - Use prefetching for large packets. What is a good lower limit for
39 * - We're probably allocating a bit too much memory.
40 * - Workarounds for various PHYs.
41 * - Proper autonegotiation.
42 * - What exactly is net_device_stats.tx_dropped supposed to count?
43 * - Use hardware checksums.
44 * - Convert to using the PCI infrastructure / IOC3 meta driver.
46 #include <linux/init.h>
47 #include <linux/delay.h>
48 #include <linux/kernel.h>
50 #include <linux/errno.h>
51 #include <linux/module.h>
52 #include <linux/pci.h>
53 #include <linux/pci_ids.h>
55 #include <linux/netdevice.h>
56 #include <linux/etherdevice.h>
57 #include <linux/skbuff.h>
59 #include <asm/byteorder.h>
61 #include <asm/pgtable.h>
62 #include <asm/sn/types.h>
63 #include <asm/sn/sn0/addrs.h>
64 #include <asm/sn/sn0/hubni.h>
65 #include <asm/sn/sn0/hubio.h>
66 #include <asm/sn/klconfig.h>
68 #include <asm/sn/sn0/ip27.h>
69 #include <asm/pci/bridge.h>
71 /* 32 RX buffers. This is tunable in the range of 16 <= x < 512. */
74 /* Private ioctls that de facto are well known and used for example
76 #define SIOCGMIIPHY (SIOCDEVPRIVATE) /* Read from current PHY */
77 #define SIOCGMIIREG (SIOCDEVPRIVATE+1) /* Read any PHY register */
78 #define SIOCSMIIREG (SIOCDEVPRIVATE+2) /* Write any PHY register */
80 /* These exist in other drivers; we don't use them at this time. */
81 #define SIOCGPARAMS (SIOCDEVPRIVATE+3) /* Read operational parameters */
82 #define SIOCSPARAMS (SIOCDEVPRIVATE+4) /* Set operational parameters */
84 static int ioc3_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
);
85 static void ioc3_set_multicast_list(struct net_device
*dev
);
86 static int ioc3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
);
87 static void ioc3_timeout(struct net_device
*dev
);
88 static inline unsigned int ioc3_hash(const unsigned char *addr
);
90 static const char ioc3_str
[] = "IOC3 Ethernet";
92 /* Private per NIC data of the driver. */
96 unsigned long rxr
; /* pointer to receiver ring */
97 struct ioc3_etxd
*txr
;
98 struct sk_buff
*rx_skbs
[512];
99 struct sk_buff
*tx_skbs
[128];
100 struct net_device_stats stats
;
101 int rx_ci
; /* RX consumer index */
102 int rx_pi
; /* RX producer index */
103 int tx_ci
; /* TX consumer index */
104 int tx_pi
; /* TX producer index */
106 u32 emcr
, ehar_h
, ehar_l
;
107 spinlock_t ioc3_lock
;
110 /* We use this to acquire receive skb's that we can DMA directly into. */
111 #define ALIGNED_RX_SKB_ADDR(addr) \
112 ((((unsigned long)(addr) + (128 - 1)) & ~(128 - 1)) - (unsigned long)(addr))
114 #define ioc3_alloc_skb(__length, __gfp_flags) \
115 ({ struct sk_buff *__skb; \
116 __skb = alloc_skb((__length) + 128, (__gfp_flags)); \
118 int __offset = ALIGNED_RX_SKB_ADDR(__skb->data); \
120 skb_reserve(__skb, __offset); \
125 /* BEWARE: The IOC3 documentation documents the size of rx buffers as
126 1644 while it's actually 1664. This one was nasty to track down ... */
128 #define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + 128)
130 /* DMA barrier to separate cached and uncached accesses. */
132 __asm__("sync" ::: "memory")
135 #define IOC3_SIZE 0x100000
137 #define ioc3_r(reg) \
144 #define ioc3_w(reg,val) \
146 (ioc3->reg = (val)); \
150 mcr_pack(u32 pulse
, u32 sample
)
152 return (pulse
<< 10) | (sample
<< 2);
156 nic_wait(struct ioc3
*ioc3
)
162 } while (!(mcr
& 2));
168 nic_reset(struct ioc3
*ioc3
)
172 ioc3_w(mcr
, mcr_pack(500, 65));
173 presence
= nic_wait(ioc3
);
175 ioc3_w(mcr
, mcr_pack(0, 500));
182 nic_read_bit(struct ioc3
*ioc3
)
186 ioc3_w(mcr
, mcr_pack(6, 13));
187 result
= nic_wait(ioc3
);
188 ioc3_w(mcr
, mcr_pack(0, 100));
195 nic_write_bit(struct ioc3
*ioc3
, int bit
)
198 ioc3_w(mcr
, mcr_pack(6, 110));
200 ioc3_w(mcr
, mcr_pack(80, 30));
206 * Read a byte from an iButton device
209 nic_read_byte(struct ioc3
*ioc3
)
214 for (i
= 0; i
< 8; i
++)
215 result
= (result
>> 1) | (nic_read_bit(ioc3
) << 7);
/*
 * Write a byte to an iButton device, LSB first.
 */
static void nic_write_byte(struct ioc3 *ioc3, int byte)
{
        int i, bit;

        for (i = 8; i; i--) {
                bit = byte & 1;
                byte >>= 1;

                nic_write_bit(ioc3, bit);
        }
}
237 nic_find(struct ioc3
*ioc3
, int *last
)
239 int a
, b
, index
, disc
;
244 nic_write_byte(ioc3
, 0xf0);
246 /* Algorithm from ``Book of iButton Standards''. */
247 for (index
= 0, disc
= 0; index
< 64; index
++) {
248 a
= nic_read_bit(ioc3
);
249 b
= nic_read_bit(ioc3
);
252 printk("NIC search failed.\n");
258 if (index
== *last
) {
259 address
|= 1UL << index
;
260 } else if (index
> *last
) {
261 address
&= ~(1UL << index
);
263 } else if ((address
& (1UL << index
)) == 0)
265 nic_write_bit(ioc3
, address
& (1UL << index
));
269 address
|= 1UL << index
;
271 address
&= ~(1UL << index
);
272 nic_write_bit(ioc3
, a
);
282 static void nic_init(struct ioc3
*ioc3
)
293 reg
= nic_find(ioc3
, &save
);
295 switch (reg
& 0xff) {
301 printk("No NIC connected.\n");
310 nic_write_byte(ioc3
, 0x55);
311 for (i
= 0; i
< 8; i
++)
312 nic_write_byte(ioc3
, (reg
>> (i
<< 3)) & 0xff);
314 reg
>>= 8; /* Shift out type. */
315 for (i
= 0; i
< 6; i
++) {
316 serial
[i
] = reg
& 0xff;
322 printk("Found %s NIC", type
);
323 if (type
!= "unknown") {
324 printk (" registration number %02x:%02x:%02x:%02x:%02x:%02x,"
325 " CRC %02x", serial
[0], serial
[1], serial
[2],
326 serial
[3], serial
[4], serial
[5], crc
);
332 * Read the NIC (Number-In-a-Can) device.
334 static void ioc3_get_eaddr(struct net_device
*dev
, struct ioc3
*ioc3
)
339 ioc3_w(gpcr_s
, (1 << 21));
344 nic_write_byte(ioc3
, 0xf0);
345 nic_write_byte(ioc3
, 0x00);
346 nic_write_byte(ioc3
, 0x00);
348 for (i
= 13; i
>= 0; i
--)
349 nic
[i
] = nic_read_byte(ioc3
);
351 printk("Ethernet address is ");
352 for (i
= 2; i
< 8; i
++) {
353 dev
->dev_addr
[i
- 2] = nic
[i
];
354 printk("%02x", nic
[i
]);
361 /* Caller must hold the ioc3_lock ever for MII readers. This is also
362 used to protect the transmitter side but it's low contention. */
363 static u16
mii_read(struct ioc3
*ioc3
, int phy
, int reg
)
365 while (ioc3
->micr
& MICR_BUSY
);
366 ioc3
->micr
= (phy
<< MICR_PHYADDR_SHIFT
) | reg
| MICR_READTRIG
;
367 while (ioc3
->micr
& MICR_BUSY
);
369 return ioc3
->midr
& MIDR_DATA_MASK
;
372 static void mii_write(struct ioc3
*ioc3
, int phy
, int reg
, u16 data
)
374 while (ioc3
->micr
& MICR_BUSY
);
376 ioc3
->micr
= (phy
<< MICR_PHYADDR_SHIFT
) | reg
;
377 while (ioc3
->micr
& MICR_BUSY
);
380 static struct net_device_stats
*ioc3_get_stats(struct net_device
*dev
)
382 struct ioc3_private
*ip
= (struct ioc3_private
*) dev
->priv
;
383 struct ioc3
*ioc3
= ip
->regs
;
385 ip
->stats
.collisions
+= (ioc3
->etcdc
& ETCDC_COLLCNT_MASK
);
390 ioc3_rx(struct net_device
*dev
, struct ioc3_private
*ip
, struct ioc3
*ioc3
)
392 struct sk_buff
*skb
, *new_skb
;
393 int rx_entry
, n_entry
, len
;
394 struct ioc3_erxbuf
*rxb
;
398 rxr
= (unsigned long *) ip
->rxr
; /* Ring base */
399 rx_entry
= ip
->rx_ci
; /* RX consume index */
402 skb
= ip
->rx_skbs
[rx_entry
];
403 rxb
= (struct ioc3_erxbuf
*) (skb
->data
- RX_OFFSET
);
406 while (w0
& ERXBUF_V
) {
407 err
= rxb
->err
; /* It's valid ... */
408 if (err
& ERXBUF_GOODPKT
) {
409 len
= (w0
>> ERXBUF_BYTECNT_SHIFT
) & 0x7ff;
411 skb
->protocol
= eth_type_trans(skb
, dev
);
414 new_skb
= ioc3_alloc_skb(RX_BUF_ALLOC_SIZE
, GFP_ATOMIC
);
416 /* Ouch, drop packet and just recycle packet
417 to keep the ring filled. */
418 ip
->stats
.rx_dropped
++;
425 /* Because we reserve afterwards. */
426 skb_put(new_skb
, (1664 + RX_OFFSET
));
427 rxb
= (struct ioc3_erxbuf
*) new_skb
->data
;
428 skb_reserve(new_skb
, RX_OFFSET
);
430 ip
->stats
.rx_packets
++; /* Statistics */
431 ip
->stats
.rx_bytes
+= len
;
435 if (err
& (ERXBUF_CRCERR
| ERXBUF_FRAMERR
| ERXBUF_CODERR
|
436 ERXBUF_INVPREAMB
| ERXBUF_BADPKT
| ERXBUF_CARRIER
)) {
437 /* We don't send the skbuf to the network layer, so
441 if (err
& ERXBUF_CRCERR
) /* Statistics */
442 ip
->stats
.rx_crc_errors
++;
443 if (err
& ERXBUF_FRAMERR
)
444 ip
->stats
.rx_frame_errors
++;
445 ip
->stats
.rx_errors
++;
449 ip
->rx_skbs
[n_entry
] = new_skb
;
450 rxr
[n_entry
] = (0xa5UL
<< 56) |
451 ((unsigned long) rxb
& TO_PHYS_MASK
);
452 rxb
->w0
= 0; /* Clear valid flag */
453 n_entry
= (n_entry
+ 1) & 511; /* Update erpir */
454 ioc3
->erpir
= (n_entry
<< 3) | ERPIR_ARM
;
456 /* Now go on to the next ring entry. */
457 rx_entry
= (rx_entry
+ 1) & 511;
458 skb
= ip
->rx_skbs
[rx_entry
];
459 rxb
= (struct ioc3_erxbuf
*) (skb
->data
- RX_OFFSET
);
463 ip
->rx_ci
= rx_entry
;
467 ioc3_tx(struct net_device
*dev
, struct ioc3_private
*ip
, struct ioc3
*ioc3
)
469 unsigned long packets
, bytes
;
470 int tx_entry
, o_entry
;
474 spin_lock(&ip
->ioc3_lock
);
477 tx_entry
= (etcir
>> 7) & 127;
482 while (o_entry
!= tx_entry
) {
484 skb
= ip
->tx_skbs
[o_entry
];
486 dev_kfree_skb_irq(skb
);
487 ip
->tx_skbs
[o_entry
] = NULL
;
489 o_entry
= (o_entry
+ 1) & 127; /* Next */
491 etcir
= ioc3
->etcir
; /* More pkts sent? */
492 tx_entry
= (etcir
>> 7) & 127;
495 ip
->stats
.tx_packets
+= packets
;
496 ip
->stats
.tx_bytes
+= bytes
;
497 ip
->txqlen
-= packets
;
499 if (ip
->txqlen
< 128)
500 netif_wake_queue(dev
);
503 spin_unlock(&ip
->ioc3_lock
);
507 * Deal with fatal IOC3 errors. This condition might be caused by a hard or
508 * software problems, so we should try to recover
509 * more gracefully if this ever happens. In theory we might be flooded
510 * with such error interrupts if something really goes wrong, so we might
511 * also consider to take the interface down.
514 ioc3_error(struct net_device
*dev
, struct ioc3_private
*ip
,
515 struct ioc3
*ioc3
, u32 eisr
)
517 if (eisr
& (EISR_RXMEMERR
| EISR_TXMEMERR
)) {
518 if (eisr
& EISR_RXMEMERR
) {
519 printk(KERN_ERR
"%s: RX PCI error.\n", dev
->name
);
521 if (eisr
& EISR_TXMEMERR
) {
522 printk(KERN_ERR
"%s: TX PCI error.\n", dev
->name
);
527 ioc3_clean_tx_ring(dev
->priv
);
530 dev
->trans_start
= jiffies
;
531 netif_wake_queue(dev
);
534 /* The interrupt handler does all of the Rx thread work and cleans up
535 after the Tx thread. */
536 static void ioc3_interrupt(int irq
, void *_dev
, struct pt_regs
*regs
)
538 struct net_device
*dev
= (struct net_device
*)_dev
;
539 struct ioc3_private
*ip
= dev
->priv
;
540 struct ioc3
*ioc3
= ip
->regs
;
541 const u32 enabled
= EISR_RXTIMERINT
| EISR_TXEXPLICIT
|
542 EISR_RXMEMERR
| EISR_TXMEMERR
;
545 eisr
= ioc3
->eisr
& enabled
;
548 ioc3
->eisr
; /* Flush */
550 if (eisr
& EISR_RXTIMERINT
)
551 ioc3_rx(dev
, ip
, ioc3
);
552 if (eisr
& EISR_TXEXPLICIT
)
553 ioc3_tx(dev
, ip
, ioc3
);
554 if (eisr
& (EISR_RXMEMERR
| EISR_TXMEMERR
))
555 ioc3_error(dev
, ip
, ioc3
, eisr
);
556 eisr
= ioc3
->eisr
& enabled
;
560 /* One day this will do the autonegotiation. */
561 int ioc3_mii_init(struct net_device
*dev
, struct ioc3_private
*ip
,
564 u16 word
, mii0
, mii_status
, mii2
, mii3
, mii4
;
565 u32 vendor
, model
, rev
;
568 spin_lock_irq(&ip
->ioc3_lock
);
570 for (i
= 0; i
< 32; i
++) {
571 word
= mii_read(ioc3
, i
, 2);
572 if ((word
!= 0xffff) & (word
!= 0x0000)) {
574 break; /* Found a PHY */
578 spin_unlock_irq(&ip
->ioc3_lock
);
579 printk("Didn't find a PHY, goodbye.\n");
584 mii0
= mii_read(ioc3
, phy
, 0);
585 mii_status
= mii_read(ioc3
, phy
, 1);
586 mii2
= mii_read(ioc3
, phy
, 2);
587 mii3
= mii_read(ioc3
, phy
, 3);
588 mii4
= mii_read(ioc3
, phy
, 4);
589 vendor
= (mii2
<< 12) | (mii3
>> 4);
590 model
= (mii3
>> 4) & 0x3f;
592 printk("Ok, using PHY %d, vendor 0x%x, model %d, rev %d.\n",
593 phy
, vendor
, model
, rev
);
594 printk(KERN_INFO
"%s: MII transceiver found at MDIO address "
595 "%d, config %4.4x status %4.4x.\n",
596 dev
->name
, phy
, mii0
, mii_status
);
598 /* Autonegotiate 100mbit and fullduplex. */
599 mii_write(ioc3
, phy
, 0, mii0
| 0x3100);
601 spin_unlock_irq(&ip
->ioc3_lock
);
602 mdelay(1000); /* XXX Yikes XXX */
603 spin_lock_irq(&ip
->ioc3_lock
);
605 mii_status
= mii_read(ioc3
, phy
, 1);
606 spin_unlock_irq(&ip
->ioc3_lock
);
612 ioc3_init_rings(struct net_device
*dev
, struct ioc3_private
*ip
,
615 struct ioc3_erxbuf
*rxb
;
620 /* Allocate and initialize rx ring. 4kb = 512 entries */
621 ip
->rxr
= get_free_page(GFP_KERNEL
);
622 rxr
= (unsigned long *) ip
->rxr
;
624 /* Now the rx buffers. The RX ring may be larger but we only
625 allocate 16 buffers for now. Need to tune this for performance
627 for (i
= 0; i
< RX_BUFFS
; i
++) {
630 skb
= ioc3_alloc_skb(RX_BUF_ALLOC_SIZE
, 0);
636 ip
->rx_skbs
[i
] = skb
;
639 /* Because we reserve afterwards. */
640 skb_put(skb
, (1664 + RX_OFFSET
));
641 rxb
= (struct ioc3_erxbuf
*) skb
->data
;
642 rxb
->w0
= 0; /* Clear valid bit */
643 rxr
[i
] = (0xa5UL
<< 56) | ((unsigned long) rxb
& TO_PHYS_MASK
);
644 skb_reserve(skb
, RX_OFFSET
);
647 /* Now the rx ring base, consume & produce registers. */
648 ring
= (0xa5UL
<< 56) | (ip
->rxr
& TO_PHYS_MASK
);
649 ioc3
->erbr_h
= ring
>> 32;
650 ioc3
->erbr_l
= ring
& 0xffffffff;
652 ioc3
->ercir
= (ip
->rx_ci
<< 3);
653 ip
->rx_pi
= RX_BUFFS
;
654 ioc3
->erpir
= (ip
->rx_pi
<< 3) | ERPIR_ARM
;
656 /* Allocate and initialize tx rings. 16kb = 128 bufs. */
657 ip
->txr
= (struct ioc3_etxd
*)__get_free_pages(GFP_KERNEL
, 2);
658 ring
= (0xa5UL
<< 56) | ((unsigned long)ip
->txr
& TO_PHYS_MASK
);
660 ip
->txqlen
= 0; /* nothing queued */
662 /* Now the tx ring base, consume & produce registers. */
663 ioc3
->etbr_h
= ring
>> 32;
664 ioc3
->etbr_l
= ring
& 0xffffffff;
666 ioc3
->etpir
= (ip
->tx_pi
<< 7);
668 ioc3
->etcir
= (ip
->tx_pi
<< 7);
669 ioc3
->etcir
; /* Flush */
673 ioc3_clean_tx_ring(struct ioc3_private
*ip
)
678 for (i
=0; i
< 128; i
++) {
679 skb
= ip
->tx_skbs
[i
];
681 ip
->tx_skbs
[i
] = NULL
;
682 dev_kfree_skb_any(skb
);
688 ioc3_free_rings(struct ioc3_private
*ip
)
693 ioc3_clean_tx_ring(ip
);
694 free_pages((unsigned long)ip
->txr
, 2);
696 for (i
=0; i
< 512; i
++) {
697 skb
= ip
->rx_skbs
[i
];
699 dev_kfree_skb_any(skb
);
701 free_page((unsigned long)ip
->rxr
);
705 ioc3_ssram_disc(struct ioc3_private
*ip
)
707 struct ioc3
*ioc3
= ip
->regs
;
708 volatile u32
*ssram0
= &ioc3
->ssram
[0x0000];
709 volatile u32
*ssram1
= &ioc3
->ssram
[0x4000];
710 unsigned int pattern
= 0x5555;
712 /* Assume the larger size SSRAM and enable parity checking */
713 ioc3
->emcr
|= (EMCR_BUFSIZ
| EMCR_RAMPAR
);
716 *ssram1
= ~pattern
& IOC3_SSRAM_DM
;
718 if ((*ssram0
& IOC3_SSRAM_DM
) != pattern
||
719 (*ssram1
& IOC3_SSRAM_DM
) != (~pattern
& IOC3_SSRAM_DM
)) {
720 /* set ssram size to 64 KB */
721 ip
->emcr
= EMCR_RAMPAR
;
722 ioc3
->emcr
&= ~EMCR_BUFSIZ
;
724 ip
->emcr
= EMCR_BUFSIZ
| EMCR_RAMPAR
;
728 static void ioc3_init(struct net_device
*dev
)
730 struct ioc3_private
*ip
= dev
->priv
;
731 struct ioc3
*ioc3
= ip
->regs
;
733 ioc3
->emcr
= EMCR_RST
; /* Reset */
734 ioc3
->emcr
; /* flush WB */
735 udelay(4); /* Give it time ... */
736 ioc3
->emcr
= ip
->emcr
;
740 ioc3
->etcsr
= (17<<ETCSR_IPGR2_SHIFT
) | (11<<ETCSR_IPGR1_SHIFT
) | 21;
741 ioc3
->etcdc
; /* Clear on read */
742 ioc3
->ercsr
= 15; /* RX low watermark */
743 ioc3
->ertr
= 0; /* Interrupt immediately */
744 ioc3
->emar_h
= (dev
->dev_addr
[5] << 8) | dev
->dev_addr
[4];
745 ioc3
->emar_l
= (dev
->dev_addr
[3] << 24) | (dev
->dev_addr
[2] << 16) |
746 (dev
->dev_addr
[1] << 8) | dev
->dev_addr
[0];
747 ioc3
->ehar_h
= ioc3
->ehar_l
= 0;
748 ioc3
->ersr
= 42; /* XXX should be random */
749 //ioc3->erpir = ERPIR_ARM;
751 ioc3_init_rings(dev
, ip
, ioc3
);
753 ip
->emcr
|= ((RX_OFFSET
/ 2) << EMCR_RXOFF_SHIFT
) | EMCR_TXDMAEN
|
754 EMCR_TXEN
| EMCR_RXDMAEN
| EMCR_RXEN
;
755 ioc3
->emcr
= ip
->emcr
;
756 ioc3
->eier
= EISR_RXTIMERINT
| EISR_TXEXPLICIT
| /* Interrupts ... */
757 EISR_RXMEMERR
| EISR_TXMEMERR
;
760 static void ioc3_stop(struct net_device
*dev
)
762 struct ioc3_private
*ip
= dev
->priv
;
763 struct ioc3
*ioc3
= ip
->regs
;
765 ioc3
->emcr
= 0; /* Shutup */
766 ioc3
->eier
= 0; /* Disable interrupts */
767 ioc3
->eier
; /* Flush */
771 ioc3_open(struct net_device
*dev
)
773 if (request_irq(dev
->irq
, ioc3_interrupt
, 0, ioc3_str
, dev
)) {
774 printk(KERN_ERR
"%s: Can't get irq %d\n", dev
->name
, dev
->irq
);
779 ((struct ioc3_private
*)dev
->priv
)->ehar_h
= 0;
780 ((struct ioc3_private
*)dev
->priv
)->ehar_l
= 0;
783 netif_start_queue(dev
);
791 ioc3_close(struct net_device
*dev
)
793 struct ioc3_private
*ip
= dev
->priv
;
795 netif_stop_queue(dev
);
797 ioc3_stop(dev
); /* Flush */
798 free_irq(dev
->irq
, dev
);
807 static void ioc3_pci_init(struct pci_dev
*pdev
)
809 struct net_device
*dev
= NULL
; // XXX
810 struct ioc3_private
*ip
;
812 unsigned long ioc3_base
, ioc3_size
;
814 dev
= init_etherdev(dev
, 0);
817 * This probably needs to be register_netdevice, or call
818 * init_etherdev so that it calls register_netdevice. Quick
821 netif_device_attach(dev
);
823 ip
= (struct ioc3_private
*) kmalloc(sizeof(*ip
), GFP_KERNEL
);
824 memset(ip
, 0, sizeof(*ip
));
826 dev
->irq
= pdev
->irq
;
828 ioc3_base
= pdev
->resource
[0].start
;
829 ioc3_size
= pdev
->resource
[0].end
- ioc3_base
;
830 ioc3
= (struct ioc3
*) ioremap(ioc3_base
, ioc3_size
);
833 spin_lock_init(&ip
->ioc3_lock
);
838 ioc3_mii_init(dev
, ip
, ioc3
);
841 printk("IOC3 SSRAM has %d kbyte.\n", ip
->emcr
& EMCR_BUFSIZ
? 128 : 64);
843 ioc3_get_eaddr(dev
, ioc3
);
845 /* The IOC3-specific entries in the device structure. */
846 dev
->open
= ioc3_open
;
847 dev
->hard_start_xmit
= ioc3_start_xmit
;
848 dev
->tx_timeout
= ioc3_timeout
;
849 dev
->watchdog_timeo
= 5 * HZ
;
850 dev
->stop
= ioc3_close
;
851 dev
->get_stats
= ioc3_get_stats
;
852 dev
->do_ioctl
= ioc3_ioctl
;
853 dev
->set_multicast_list
= ioc3_set_multicast_list
;
856 static int __init
ioc3_probe(void)
858 static int called
= 0;
866 struct pci_dev
*pdev
= NULL
;
868 while ((pdev
= pci_find_device(PCI_VENDOR_ID_SGI
,
869 PCI_DEVICE_ID_SGI_IOC3
, pdev
))) {
875 return cards
? -ENODEV
: 0;
878 static void __exit
ioc3_cleanup_module(void)
880 /* Later, when we really support modules. */
884 ioc3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
887 struct ioc3_private
*ip
= dev
->priv
;
888 struct ioc3
*ioc3
= ip
->regs
;
890 struct ioc3_etxd
*desc
;
893 spin_lock_irq(&ip
->ioc3_lock
);
895 data
= (unsigned long) skb
->data
;
899 desc
= &ip
->txr
[produce
];
902 /* Short packet, let's copy it directly into the ring. */
903 memcpy(desc
->data
, skb
->data
, skb
->len
);
904 if (len
< ETH_ZLEN
) {
905 /* Very short packet, pad with zeros at the end. */
906 memset(desc
->data
+ len
, 0, ETH_ZLEN
- len
);
909 desc
->cmd
= len
| ETXD_INTWHENDONE
| ETXD_D0V
;
911 } else if ((data
^ (data
+ len
)) & 0x4000) {
912 unsigned long b2
, s1
, s2
;
914 b2
= (data
| 0x3fffUL
) + 1UL;
916 s2
= data
+ len
- b2
;
918 desc
->cmd
= len
| ETXD_INTWHENDONE
| ETXD_B1V
| ETXD_B2V
;
919 desc
->bufcnt
= (s1
<< ETXD_B1CNT_SHIFT
) |
920 (s2
<< ETXD_B2CNT_SHIFT
);
921 desc
->p1
= (0xa5UL
<< 56) | (data
& TO_PHYS_MASK
);
922 desc
->p2
= (0xa5UL
<< 56) | (data
& TO_PHYS_MASK
);
924 /* Normal sized packet that doesn't cross a page boundary. */
925 desc
->cmd
= len
| ETXD_INTWHENDONE
| ETXD_B1V
;
926 desc
->bufcnt
= len
<< ETXD_B1CNT_SHIFT
;
927 desc
->p1
= (0xa5UL
<< 56) | (data
& TO_PHYS_MASK
);
932 dev
->trans_start
= jiffies
;
933 ip
->tx_skbs
[produce
] = skb
; /* Remember skb */
934 produce
= (produce
+ 1) & 127;
936 ioc3
->etpir
= produce
<< 7; /* Fire ... */
940 if (ip
->txqlen
> 127)
941 netif_stop_queue(dev
);
943 spin_unlock_irq(&ip
->ioc3_lock
);
948 static void ioc3_timeout(struct net_device
*dev
)
950 printk(KERN_ERR
"%s: transmit timed out, resetting\n", dev
->name
);
953 ioc3_clean_tx_ring(dev
->priv
);
956 dev
->trans_start
= jiffies
;
957 netif_wake_queue(dev
);
961 * Given a multicast ethernet address, this routine calculates the
962 * address's bit index in the logical address filter mask
964 #define CRC_MASK 0xEDB88320
966 static inline unsigned int
967 ioc3_hash(const unsigned char *addr
)
969 unsigned int temp
= 0;
975 for (crc
= ~0; --len
>= 0; addr
++) {
977 for (bits
= 8; --bits
>= 0; ) {
978 if ((byte
^ crc
) & 1)
979 crc
= (crc
>> 1) ^ CRC_MASK
;
986 crc
&= 0x3f; /* bit reverse lowest 6 bits for hash index */
987 for (bits
= 6; --bits
>= 0; ) {
/* NOTE(review): this chunk is missing the original lines 998, 1002-1010,
   1015-1016, 1019 and 1023-1030 of ioc3_ioctl -- including the entire body
   of the SIOCGMIIPHY case, the switch header, the per-case returns and the
   default case.  The fragments below are kept verbatim; reconstruct from
   the full source before editing behavior. */
996 /* Provide ioctl() calls to examine the MII xcvr state. */
997 static int ioc3_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
999 struct ioc3_private
*ip
= (struct ioc3_private
*) dev
->priv
;
1000 u16
*data
= (u16
*)&rq
->ifr_data
;
1001 struct ioc3
*ioc3
= ip
->regs
;
/* SIOCGMIIPHY: body missing from this chunk -- presumably returns the
   active PHY address; TODO confirm against full source. */
1005 case SIOCGMIIPHY
: /* Get the address of the PHY in use. */
/* SIOCGMIIREG: read PHY register data[1] of PHY data[0] into data[3],
   under ioc3_lock. */
1011 case SIOCGMIIREG
: /* Read any PHY register. */
1012 spin_lock_irq(&ip
->ioc3_lock
);
1013 data
[3] = mii_read(ioc3
, data
[0], data
[1]);
1014 spin_unlock_irq(&ip
->ioc3_lock
);
/* SIOCSMIIREG: privileged write of data[2] to PHY data[0], register
   data[1]; the CAP_NET_ADMIN failure branch (line 1019) is missing. */
1017 case SIOCSMIIREG
: /* Write any PHY register. */
1018 if (!capable(CAP_NET_ADMIN
))
1020 spin_lock_irq(&ip
->ioc3_lock
);
1021 mii_write(ioc3
, data
[0], data
[1], data
[2]);
1022 spin_unlock_irq(&ip
->ioc3_lock
);
1032 static void ioc3_set_multicast_list(struct net_device
*dev
)
1034 struct dev_mc_list
*dmi
= dev
->mc_list
;
1035 struct ioc3_private
*ip
= dev
->priv
;
1036 struct ioc3
*ioc3
= ip
->regs
;
1037 char *addr
= dmi
->dmi_addr
;
1041 if (dev
->flags
& IFF_PROMISC
) { /* Set promiscuous. */
1042 /* Unconditionally log net taps. */
1043 printk(KERN_INFO
"%s: Promiscuous mode enabled.\n", dev
->name
);
1044 ip
->emcr
|= EMCR_PROMISC
;
1045 ioc3
->emcr
= ip
->emcr
;
1048 ip
->emcr
&= ~EMCR_PROMISC
;
1049 ioc3
->emcr
= ip
->emcr
; /* Clear promiscuous. */
1052 if ((dev
->flags
& IFF_ALLMULTI
) || (dev
->mc_count
> 64)) {
1053 /* Too many for hashing to make sense or we want all
1054 multicast packets anyway, so skip computing all the
1055 hashes and just accept all packets. */
1056 ip
->ehar_h
= 0xffffffff;
1057 ip
->ehar_l
= 0xffffffff;
1059 for (i
= 0; i
< dev
->mc_count
; i
++) {
1065 ehar
|= (1 << ioc3_hash(addr
));
1067 ip
->ehar_h
= ehar
>> 32;
1068 ip
->ehar_l
= ehar
& 0xffffffff;
1070 ioc3
->ehar_h
= ip
->ehar_h
;
1071 ioc3
->ehar_l
= ip
->ehar_l
;
1076 MODULE_AUTHOR("Ralf Baechle <ralf@oss.sgi.com>");
1077 MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
1080 module_init(ioc3_probe
);
1081 module_exit(ioc3_cleanup_module
);