Fix comment.
[linux-2.6/linux-mips.git] / drivers / net / ioc3-eth.c
blob7bac3627744ce2eadc83de123f380e9c2d4ba4c4
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
6 * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
8 * Copyright (C) 1999, 2000 Ralf Baechle
9 * Copyright (C) 1995, 1999, 2000 by Silicon Graphics, Inc.
11 * Reporting bugs:
13 * If you find problems with this driver, then if possible do the
14 * following. Hook up a terminal to the MSC port, send an NMI to the CPUs
15 * by typing ^Tnmi (where ^T stands for <CTRL>-T). You'll see something
16 * like:
17 * 1A 000:
18 * 1A 000: *** NMI while in Kernel and no NMI vector installed on node 0
19 * 1A 000: *** Error EPC: 0xffffffff800265e4 (0xffffffff800265e4)
20 * 1A 000: *** Press ENTER to continue.
22 * Next enter the command ``lw i:0x86000f0 0x18'' and include this
23 * commands output which will look like below with your bugreport.
25 * 1A 000: POD MSC Dex> lw i:0x86000f0 0x18
26 * 1A 000: 92000000086000f0: 0021f28c 00000000 00000000 00000000
27 * 1A 000: 9200000008600100: a5000000 01cde000 00000000 000004e0
28 * 1A 000: 9200000008600110: 00000650 00000000 00110b15 00000000
29 * 1A 000: 9200000008600120: 006d0005 77bbca0a a5000000 01ce0000
30 * 1A 000: 9200000008600130: 80000500 00000500 00002538 05690008
31 * 1A 000: 9200000008600140: 00000000 00000000 000003e1 0000786d
33 * To do:
35 * - Handle allocation failures in ioc3_alloc_skb() more gracefully.
36 * - Handle allocation failures in ioc3_init_rings().
37 * - Use prefetching for large packets. What is a good lower limit for
38 * prefetching?
39 * - We're probably allocating a bit too much memory.
40 * - Workarounds for various PHYs.
41 * - Proper autonegotiation.
42 * - What exactly is net_device_stats.tx_dropped supposed to count?
43 * - Use hardware checksums.
44 * - Convert to using the PCI infrastructure / IOC3 meta driver.
46 #include <linux/init.h>
47 #include <linux/delay.h>
48 #include <linux/kernel.h>
49 #include <linux/mm.h>
50 #include <linux/errno.h>
51 #include <linux/module.h>
52 #include <linux/pci.h>
53 #include <linux/pci_ids.h>
55 #include <linux/netdevice.h>
56 #include <linux/etherdevice.h>
57 #include <linux/skbuff.h>
59 #include <asm/byteorder.h>
60 #include <asm/io.h>
61 #include <asm/pgtable.h>
62 #include <asm/sn/types.h>
63 #include <asm/sn/sn0/addrs.h>
64 #include <asm/sn/sn0/hubni.h>
65 #include <asm/sn/sn0/hubio.h>
66 #include <asm/sn/klconfig.h>
67 #include <asm/ioc3.h>
68 #include <asm/sn/sn0/ip27.h>
69 #include <asm/pci/bridge.h>
71 /* 32 RX buffers. This is tunable in the range of 16 <= x < 512. */
72 #define RX_BUFFS 32
74 /* Private ioctls that de facto are well known and used for example
75 by mii-tool. */
76 #define SIOCGMIIPHY (SIOCDEVPRIVATE) /* Read from current PHY */
77 #define SIOCGMIIREG (SIOCDEVPRIVATE+1) /* Read any PHY register */
78 #define SIOCSMIIREG (SIOCDEVPRIVATE+2) /* Write any PHY register */
80 /* These exist in other drivers; we don't use them at this time. */
81 #define SIOCGPARAMS (SIOCDEVPRIVATE+3) /* Read operational parameters */
82 #define SIOCSPARAMS (SIOCDEVPRIVATE+4) /* Set operational parameters */
84 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
85 static void ioc3_set_multicast_list(struct net_device *dev);
86 static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
87 static void ioc3_timeout(struct net_device *dev);
88 static inline unsigned int ioc3_hash(const unsigned char *addr);
90 static const char ioc3_str[] = "IOC3 Ethernet";
92 /* Private per NIC data of the driver. */
93 struct ioc3_private {
94 struct ioc3 *regs; /* mapped IOC3 register block */
95 int phy; /* MDIO address of the PHY, -1 if none was found */
96 unsigned long rxr; /* pointer to receiver ring */
97 struct ioc3_etxd *txr; /* transmit descriptor ring (128 entries) */
98 struct sk_buff *rx_skbs[512]; /* skbs posted to the 512-entry rx ring */
99 struct sk_buff *tx_skbs[128]; /* skbs awaiting transmit completion */
100 struct net_device_stats stats;
101 int rx_ci; /* RX consumer index */
102 int rx_pi; /* RX producer index */
103 int tx_ci; /* TX consumer index */
104 int tx_pi; /* TX producer index */
105 int txqlen; /* packets queued for tx; queue is stopped above 127 */
106 u32 emcr, ehar_h, ehar_l; /* soft copies of EMCR and the mcast hash regs */
107 spinlock_t ioc3_lock; /* guards MII access and the transmit side */
110 /* We use this to acquire receive skb's that we can DMA directly into. */
111 #define ALIGNED_RX_SKB_ADDR(addr) \
112 ((((unsigned long)(addr) + (128 - 1)) & ~(128 - 1)) - (unsigned long)(addr))
114 #define ioc3_alloc_skb(__length, __gfp_flags) \
115 ({ struct sk_buff *__skb; \
116 __skb = alloc_skb((__length) + 128, (__gfp_flags)); \
117 if (__skb) { \
118 int __offset = ALIGNED_RX_SKB_ADDR(__skb->data); \
119 if(__offset) \
120 skb_reserve(__skb, __offset); \
122 __skb; \
125 /* BEWARE: The IOC3 documentation documents the size of rx buffers as
126 1644 while it's actually 1664. This one was nasty to track down ... */
127 #define RX_OFFSET 10
128 #define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + 128)
130 /* DMA barrier to separate cached and uncached accesses. */
131 #define BARRIER() \
132 __asm__("sync" ::: "memory")
135 #define IOC3_SIZE 0x100000
137 #define ioc3_r(reg) \
138 ({ \
139 u32 __res; \
140 __res = ioc3->reg; \
141 __res; \
144 #define ioc3_w(reg,val) \
145 do { \
146 (ioc3->reg = (val)); \
147 } while(0)
149 static inline u32
150 mcr_pack(u32 pulse, u32 sample)
152 return (pulse << 10) | (sample << 2);
155 static int
156 nic_wait(struct ioc3 *ioc3)
158 u32 mcr;
160 do {
161 mcr = ioc3_r(mcr);
162 } while (!(mcr & 2));
164 return mcr & 1;
167 static int
168 nic_reset(struct ioc3 *ioc3)
170 int presence;
172 ioc3_w(mcr, mcr_pack(500, 65));
173 presence = nic_wait(ioc3);
175 ioc3_w(mcr, mcr_pack(0, 500));
176 nic_wait(ioc3);
178 return presence;
181 static inline int
182 nic_read_bit(struct ioc3 *ioc3)
184 int result;
186 ioc3_w(mcr, mcr_pack(6, 13));
187 result = nic_wait(ioc3);
188 ioc3_w(mcr, mcr_pack(0, 100));
189 nic_wait(ioc3);
191 return result;
194 static inline void
195 nic_write_bit(struct ioc3 *ioc3, int bit)
197 if (bit)
198 ioc3_w(mcr, mcr_pack(6, 110));
199 else
200 ioc3_w(mcr, mcr_pack(80, 30));
202 nic_wait(ioc3);
206 * Read a byte from an iButton device
208 static u32
209 nic_read_byte(struct ioc3 *ioc3)
211 u32 result = 0;
212 int i;
214 for (i = 0; i < 8; i++)
215 result = (result >> 1) | (nic_read_bit(ioc3) << 7);
217 return result;
/*
 * Write a byte to an iButton device, least significant bit first.
 */
static void nic_write_byte(struct ioc3 *ioc3, int byte)
{
	int i;

	for (i = 0; i < 8; i++) {
		nic_write_bit(ioc3, byte & 1);
		byte >>= 1;
	}
}
236 static u64
237 nic_find(struct ioc3 *ioc3, int *last)
239 int a, b, index, disc;
240 u64 address = 0;
242 nic_reset(ioc3);
243 /* Search ROM. */
244 nic_write_byte(ioc3, 0xf0);
246 /* Algorithm from ``Book of iButton Standards''. */
247 for (index = 0, disc = 0; index < 64; index++) {
248 a = nic_read_bit(ioc3);
249 b = nic_read_bit(ioc3);
251 if (a && b) {
252 printk("NIC search failed.\n");
253 *last = 0;
254 return 0;
257 if (!a && !b) {
258 if (index == *last) {
259 address |= 1UL << index;
260 } else if (index > *last) {
261 address &= ~(1UL << index);
262 disc = index;
263 } else if ((address & (1UL << index)) == 0)
264 disc = index;
265 nic_write_bit(ioc3, address & (1UL << index));
266 continue;
267 } else {
268 if (a)
269 address |= 1UL << index;
270 else
271 address &= ~(1UL << index);
272 nic_write_bit(ioc3, a);
273 continue;
277 *last = disc;
279 return address;
282 static void nic_init(struct ioc3 *ioc3)
284 const char *type;
285 u8 crc;
286 u8 serial[6];
287 int save = 0, i;
289 type = "unknown";
291 do {
292 u64 reg;
293 reg = nic_find(ioc3, &save);
295 switch (reg & 0xff) {
296 case 0x91:
297 type = "DS1981U";
298 break;
299 default:
300 if (save == 0) {
301 printk("No NIC connected.\n");
302 return;
304 continue;
307 nic_reset(ioc3);
309 /* Match ROM. */
310 nic_write_byte(ioc3, 0x55);
311 for (i = 0; i < 8; i++)
312 nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
314 reg >>= 8; /* Shift out type. */
315 for (i = 0; i < 6; i++) {
316 serial[i] = reg & 0xff;
317 reg >>= 8;
319 crc = reg & 0xff;
320 } while (0);
322 printk("Found %s NIC", type);
323 if (type != "unknown") {
324 printk (" registration number %02x:%02x:%02x:%02x:%02x:%02x,"
325 " CRC %02x", serial[0], serial[1], serial[2],
326 serial[3], serial[4], serial[5], crc);
328 printk(".\n");
332 * Read the NIC (Number-In-a-Can) device.
334 static void ioc3_get_eaddr(struct net_device *dev, struct ioc3 *ioc3)
336 u8 nic[14];
337 int i;
339 ioc3_w(gpcr_s, (1 << 21));
341 nic_init(ioc3);
343 /* Read Memory. */
344 nic_write_byte(ioc3, 0xf0);
345 nic_write_byte(ioc3, 0x00);
346 nic_write_byte(ioc3, 0x00);
348 for (i = 13; i >= 0; i--)
349 nic[i] = nic_read_byte(ioc3);
351 printk("Ethernet address is ");
352 for (i = 2; i < 8; i++) {
353 dev->dev_addr[i - 2] = nic[i];
354 printk("%02x", nic[i]);
355 if (i < 7)
356 printk(":");
358 printk(".\n");
361 /* Caller must hold the ioc3_lock ever for MII readers. This is also
362 used to protect the transmitter side but it's low contention. */
363 static u16 mii_read(struct ioc3 *ioc3, int phy, int reg)
365 while (ioc3->micr & MICR_BUSY);
366 ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG;
367 while (ioc3->micr & MICR_BUSY);
369 return ioc3->midr & MIDR_DATA_MASK;
372 static void mii_write(struct ioc3 *ioc3, int phy, int reg, u16 data)
374 while (ioc3->micr & MICR_BUSY);
375 ioc3->midr = data;
376 ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg;
377 while (ioc3->micr & MICR_BUSY);
380 static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
382 struct ioc3_private *ip = (struct ioc3_private *) dev->priv;
383 struct ioc3 *ioc3 = ip->regs;
385 ip->stats.collisions += (ioc3->etcdc & ETCDC_COLLCNT_MASK);
386 return &ip->stats;
/*
 * Receive-side interrupt work: walk the rx ring from the consumer
 * index, hand good packets to the stack, recycle bad ones, and repost
 * a buffer at the producer slot, re-arming erpir as we go.
 */
389 static inline void
390 ioc3_rx(struct net_device *dev, struct ioc3_private *ip, struct ioc3 *ioc3)
392 struct sk_buff *skb, *new_skb;
393 int rx_entry, n_entry, len;
394 struct ioc3_erxbuf *rxb;
395 unsigned long *rxr;
396 u32 w0, err;
398 rxr = (unsigned long *) ip->rxr; /* Ring base */
399 rx_entry = ip->rx_ci; /* RX consume index */
400 n_entry = ip->rx_pi;
402 skb = ip->rx_skbs[rx_entry];
403 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
404 w0 = rxb->w0;
/* The valid bit in w0 tells us whether the chip has filled this entry. */
406 while (w0 & ERXBUF_V) {
407 err = rxb->err; /* It's valid ... */
408 if (err & ERXBUF_GOODPKT) {
409 len = (w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff;
410 skb_trim(skb, len);
411 skb->protocol = eth_type_trans(skb, dev);
412 netif_rx(skb);
414 new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
415 if (!new_skb) {
416 /* Ouch, drop packet and just recycle packet
417 to keep the ring filled. */
418 ip->stats.rx_dropped++;
419 new_skb = skb;
420 goto next;
423 new_skb->dev = dev;
425 /* Because we reserve afterwards. */
426 skb_put(new_skb, (1664 + RX_OFFSET));
427 rxb = (struct ioc3_erxbuf *) new_skb->data;
428 skb_reserve(new_skb, RX_OFFSET);
430 ip->stats.rx_packets++; /* Statistics */
431 ip->stats.rx_bytes += len;
433 goto next;
/* NOTE(review): if err has neither ERXBUF_GOODPKT nor any of the error
   bits below, new_skb reaches `next:' uninitialized -- confirm the
   hardware always sets one of these bits for a valid entry. */
435 if (err & (ERXBUF_CRCERR | ERXBUF_FRAMERR | ERXBUF_CODERR |
436 ERXBUF_INVPREAMB | ERXBUF_BADPKT | ERXBUF_CARRIER)) {
437 /* We don't send the skbuf to the network layer, so
438 just recycle it. */
439 new_skb = skb;
441 if (err & ERXBUF_CRCERR) /* Statistics */
442 ip->stats.rx_crc_errors++;
443 if (err & ERXBUF_FRAMERR)
444 ip->stats.rx_frame_errors++;
445 ip->stats.rx_errors++;
/* Repost a buffer (fresh or recycled) at the producer slot. */
448 next:
449 ip->rx_skbs[n_entry] = new_skb;
450 rxr[n_entry] = (0xa5UL << 56) |
451 ((unsigned long) rxb & TO_PHYS_MASK);
452 rxb->w0 = 0; /* Clear valid flag */
453 n_entry = (n_entry + 1) & 511; /* Update erpir */
454 ioc3->erpir = (n_entry << 3) | ERPIR_ARM;
456 /* Now go on to the next ring entry. */
457 rx_entry = (rx_entry + 1) & 511;
458 skb = ip->rx_skbs[rx_entry];
459 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
460 w0 = rxb->w0;
462 ip->rx_pi = n_entry;
463 ip->rx_ci = rx_entry;
466 static inline void
467 ioc3_tx(struct net_device *dev, struct ioc3_private *ip, struct ioc3 *ioc3)
469 unsigned long packets, bytes;
470 int tx_entry, o_entry;
471 struct sk_buff *skb;
472 u32 etcir;
474 spin_lock(&ip->ioc3_lock);
475 etcir = ioc3->etcir;
477 tx_entry = (etcir >> 7) & 127;
478 o_entry = ip->tx_ci;
479 packets = 0;
480 bytes = 0;
482 while (o_entry != tx_entry) {
483 packets++;
484 skb = ip->tx_skbs[o_entry];
485 bytes += skb->len;
486 dev_kfree_skb_irq(skb);
487 ip->tx_skbs[o_entry] = NULL;
489 o_entry = (o_entry + 1) & 127; /* Next */
491 etcir = ioc3->etcir; /* More pkts sent? */
492 tx_entry = (etcir >> 7) & 127;
495 ip->stats.tx_packets += packets;
496 ip->stats.tx_bytes += bytes;
497 ip->txqlen -= packets;
499 if (ip->txqlen < 128)
500 netif_wake_queue(dev);
502 ip->tx_ci = o_entry;
503 spin_unlock(&ip->ioc3_lock);
507 * Deal with fatal IOC3 errors. This condition might be caused by a hard or
508 * software problems, so we should try to recover
509 * more gracefully if this ever happens. In theory we might be flooded
510 * with such error interrupts if something really goes wrong, so we might
511 * also consider to take the interface down.
513 static void
514 ioc3_error(struct net_device *dev, struct ioc3_private *ip,
515 struct ioc3 *ioc3, u32 eisr)
517 if (eisr & (EISR_RXMEMERR | EISR_TXMEMERR)) {
518 if (eisr & EISR_RXMEMERR) {
519 printk(KERN_ERR "%s: RX PCI error.\n", dev->name);
521 if (eisr & EISR_TXMEMERR) {
522 printk(KERN_ERR "%s: TX PCI error.\n", dev->name);
526 ioc3_stop(dev);
527 ioc3_clean_tx_ring(dev->priv);
528 ioc3_init(dev);
530 dev->trans_start = jiffies;
531 netif_wake_queue(dev);
534 /* The interrupt handler does all of the Rx thread work and cleans up
535 after the Tx thread. */
536 static void ioc3_interrupt(int irq, void *_dev, struct pt_regs *regs)
538 struct net_device *dev = (struct net_device *)_dev;
539 struct ioc3_private *ip = dev->priv;
540 struct ioc3 *ioc3 = ip->regs;
541 const u32 enabled = EISR_RXTIMERINT | EISR_TXEXPLICIT |
542 EISR_RXMEMERR | EISR_TXMEMERR;
543 u32 eisr;
545 eisr = ioc3->eisr & enabled;
546 while (eisr) {
547 ioc3->eisr = eisr;
548 ioc3->eisr; /* Flush */
550 if (eisr & EISR_RXTIMERINT)
551 ioc3_rx(dev, ip, ioc3);
552 if (eisr & EISR_TXEXPLICIT)
553 ioc3_tx(dev, ip, ioc3);
554 if (eisr & (EISR_RXMEMERR | EISR_TXMEMERR))
555 ioc3_error(dev, ip, ioc3, eisr);
556 eisr = ioc3->eisr & enabled;
560 /* One day this will do the autonegotiation. */
561 int ioc3_mii_init(struct net_device *dev, struct ioc3_private *ip,
562 struct ioc3 *ioc3)
564 u16 word, mii0, mii_status, mii2, mii3, mii4;
565 u32 vendor, model, rev;
566 int i, phy;
568 spin_lock_irq(&ip->ioc3_lock);
569 phy = -1;
570 for (i = 0; i < 32; i++) {
571 word = mii_read(ioc3, i, 2);
572 if ((word != 0xffff) & (word != 0x0000)) {
573 phy = i;
574 break; /* Found a PHY */
577 if (phy == -1) {
578 spin_unlock_irq(&ip->ioc3_lock);
579 printk("Didn't find a PHY, goodbye.\n");
580 return -ENODEV;
582 ip->phy = phy;
584 mii0 = mii_read(ioc3, phy, 0);
585 mii_status = mii_read(ioc3, phy, 1);
586 mii2 = mii_read(ioc3, phy, 2);
587 mii3 = mii_read(ioc3, phy, 3);
588 mii4 = mii_read(ioc3, phy, 4);
589 vendor = (mii2 << 12) | (mii3 >> 4);
590 model = (mii3 >> 4) & 0x3f;
591 rev = mii3 & 0xf;
592 printk("Ok, using PHY %d, vendor 0x%x, model %d, rev %d.\n",
593 phy, vendor, model, rev);
594 printk(KERN_INFO "%s: MII transceiver found at MDIO address "
595 "%d, config %4.4x status %4.4x.\n",
596 dev->name, phy, mii0, mii_status);
598 /* Autonegotiate 100mbit and fullduplex. */
599 mii_write(ioc3, phy, 0, mii0 | 0x3100);
601 spin_unlock_irq(&ip->ioc3_lock);
602 mdelay(1000); /* XXX Yikes XXX */
603 spin_lock_irq(&ip->ioc3_lock);
605 mii_status = mii_read(ioc3, phy, 1);
606 spin_unlock_irq(&ip->ioc3_lock);
608 return 0;
/*
 * Allocate and program the receive and transmit rings and prime the
 * rx ring with RX_BUFFS buffers.  NOTE(review): neither get_free_page
 * nor __get_free_pages nor ioc3_alloc_skb failures are handled here --
 * the file's "To do" list already records this.
 */
611 static void
612 ioc3_init_rings(struct net_device *dev, struct ioc3_private *ip,
613 struct ioc3 *ioc3)
615 struct ioc3_erxbuf *rxb;
616 unsigned long *rxr;
617 unsigned long ring;
618 int i;
620 /* Allocate and initialize rx ring. 4kb = 512 entries */
621 ip->rxr = get_free_page(GFP_KERNEL);
622 rxr = (unsigned long *) ip->rxr;
624 /* Now the rx buffers. The RX ring may be larger but we only
625 allocate 16 buffers for now. Need to tune this for performance
626 and memory later. */
627 for (i = 0; i < RX_BUFFS; i++) {
628 struct sk_buff *skb;
630 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, 0);
631 if (!skb) {
632 show_free_areas();
633 continue;
636 ip->rx_skbs[i] = skb;
637 skb->dev = dev;
639 /* Because we reserve afterwards. */
640 skb_put(skb, (1664 + RX_OFFSET));
641 rxb = (struct ioc3_erxbuf *) skb->data;
642 rxb->w0 = 0; /* Clear valid bit */
643 rxr[i] = (0xa5UL << 56) | ((unsigned long) rxb & TO_PHYS_MASK);
644 skb_reserve(skb, RX_OFFSET);
647 /* Now the rx ring base, consume & produce registers. */
648 ring = (0xa5UL << 56) | (ip->rxr & TO_PHYS_MASK);
649 ioc3->erbr_h = ring >> 32;
650 ioc3->erbr_l = ring & 0xffffffff;
651 ip->rx_ci = 0;
652 ioc3->ercir = (ip->rx_ci << 3);
653 ip->rx_pi = RX_BUFFS;
654 ioc3->erpir = (ip->rx_pi << 3) | ERPIR_ARM;
656 /* Allocate and initialize tx rings. 16kb = 128 bufs. */
657 ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
658 ring = (0xa5UL << 56) | ((unsigned long)ip->txr & TO_PHYS_MASK);
660 ip->txqlen = 0; /* nothing queued */
662 /* Now the tx ring base, consume & produce registers. */
663 ioc3->etbr_h = ring >> 32;
664 ioc3->etbr_l = ring & 0xffffffff;
665 ip->tx_pi = 0;
666 ioc3->etpir = (ip->tx_pi << 7);
667 ip->tx_ci = 0;
668 ioc3->etcir = (ip->tx_pi << 7);
669 ioc3->etcir; /* Flush */
672 static void
673 ioc3_clean_tx_ring(struct ioc3_private *ip)
675 struct sk_buff *skb;
676 int i;
678 for (i=0; i < 128; i++) {
679 skb = ip->tx_skbs[i];
680 if (skb) {
681 ip->tx_skbs[i] = NULL;
682 dev_kfree_skb_any(skb);
687 static void
688 ioc3_free_rings(struct ioc3_private *ip)
690 struct sk_buff *skb;
691 int i;
693 ioc3_clean_tx_ring(ip);
694 free_pages((unsigned long)ip->txr, 2);
696 for (i=0; i < 512; i++) {
697 skb = ip->rx_skbs[i];
698 if (skb)
699 dev_kfree_skb_any(skb);
701 free_page((unsigned long)ip->rxr);
704 static inline void
705 ioc3_ssram_disc(struct ioc3_private *ip)
707 struct ioc3 *ioc3 = ip->regs;
708 volatile u32 *ssram0 = &ioc3->ssram[0x0000];
709 volatile u32 *ssram1 = &ioc3->ssram[0x4000];
710 unsigned int pattern = 0x5555;
712 /* Assume the larger size SSRAM and enable parity checking */
713 ioc3->emcr |= (EMCR_BUFSIZ | EMCR_RAMPAR);
715 *ssram0 = pattern;
716 *ssram1 = ~pattern & IOC3_SSRAM_DM;
718 if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
719 (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
720 /* set ssram size to 64 KB */
721 ip->emcr = EMCR_RAMPAR;
722 ioc3->emcr &= ~EMCR_BUFSIZ;
723 } else {
724 ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
/*
 * Reset the chip and program it back to an operational state: misc
 * registers, station address, rings, DMA/engine enables and interrupt
 * enables.  NOTE(review): ehar_h/ehar_l are cleared here, wiping any
 * multicast filter until set_multicast_list runs again -- confirm
 * callers rely on that.
 */
728 static void ioc3_init(struct net_device *dev)
730 struct ioc3_private *ip = dev->priv;
731 struct ioc3 *ioc3 = ip->regs;
733 ioc3->emcr = EMCR_RST; /* Reset */
734 ioc3->emcr; /* flush WB */
735 udelay(4); /* Give it time ... */
736 ioc3->emcr = ip->emcr;
738 /* Misc registers */
739 ioc3->erbar = 0;
740 ioc3->etcsr = (17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21;
741 ioc3->etcdc; /* Clear on read */
742 ioc3->ercsr = 15; /* RX low watermark */
743 ioc3->ertr = 0; /* Interrupt immediately */
744 ioc3->emar_h = (dev->dev_addr[5] << 8) | dev->dev_addr[4];
745 ioc3->emar_l = (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
746 (dev->dev_addr[1] << 8) | dev->dev_addr[0];
747 ioc3->ehar_h = ioc3->ehar_l = 0;
748 ioc3->ersr = 42; /* XXX should be random */
749 //ioc3->erpir = ERPIR_ARM;
751 ioc3_init_rings(dev, ip, ioc3);
/* Enable rx/tx engines and DMA only after the rings are in place. */
753 ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
754 EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN;
755 ioc3->emcr = ip->emcr;
756 ioc3->eier = EISR_RXTIMERINT | EISR_TXEXPLICIT | /* Interrupts ... */
757 EISR_RXMEMERR | EISR_TXMEMERR;
760 static void ioc3_stop(struct net_device *dev)
762 struct ioc3_private *ip = dev->priv;
763 struct ioc3 *ioc3 = ip->regs;
765 ioc3->emcr = 0; /* Shutup */
766 ioc3->eier = 0; /* Disable interrupts */
767 ioc3->eier; /* Flush */
770 static int
771 ioc3_open(struct net_device *dev)
773 if (request_irq(dev->irq, ioc3_interrupt, 0, ioc3_str, dev)) {
774 printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
776 return -EAGAIN;
779 ((struct ioc3_private *)dev->priv)->ehar_h = 0;
780 ((struct ioc3_private *)dev->priv)->ehar_l = 0;
781 ioc3_init(dev);
783 netif_start_queue(dev);
785 MOD_INC_USE_COUNT;
787 return 0;
790 static int
791 ioc3_close(struct net_device *dev)
793 struct ioc3_private *ip = dev->priv;
795 netif_stop_queue(dev);
797 ioc3_stop(dev); /* Flush */
798 free_irq(dev->irq, dev);
800 ioc3_free_rings(ip);
802 MOD_DEC_USE_COUNT;
804 return 0;
807 static void ioc3_pci_init(struct pci_dev *pdev)
809 struct net_device *dev = NULL; // XXX
810 struct ioc3_private *ip;
811 struct ioc3 *ioc3;
812 unsigned long ioc3_base, ioc3_size;
814 dev = init_etherdev(dev, 0);
817 * This probably needs to be register_netdevice, or call
818 * init_etherdev so that it calls register_netdevice. Quick
819 * hack for now.
821 netif_device_attach(dev);
823 ip = (struct ioc3_private *) kmalloc(sizeof(*ip), GFP_KERNEL);
824 memset(ip, 0, sizeof(*ip));
825 dev->priv = ip;
826 dev->irq = pdev->irq;
828 ioc3_base = pdev->resource[0].start;
829 ioc3_size = pdev->resource[0].end - ioc3_base;
830 ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
831 ip->regs = ioc3;
833 spin_lock_init(&ip->ioc3_lock);
835 ioc3_stop(dev);
836 ip->emcr = 0;
837 ioc3_init(dev);
838 ioc3_mii_init(dev, ip, ioc3);
840 ioc3_ssram_disc(ip);
841 printk("IOC3 SSRAM has %d kbyte.\n", ip->emcr & EMCR_BUFSIZ ? 128 : 64);
843 ioc3_get_eaddr(dev, ioc3);
845 /* The IOC3-specific entries in the device structure. */
846 dev->open = ioc3_open;
847 dev->hard_start_xmit = ioc3_start_xmit;
848 dev->tx_timeout = ioc3_timeout;
849 dev->watchdog_timeo = 5 * HZ;
850 dev->stop = ioc3_close;
851 dev->get_stats = ioc3_get_stats;
852 dev->do_ioctl = ioc3_ioctl;
853 dev->set_multicast_list = ioc3_set_multicast_list;
856 static int __init ioc3_probe(void)
858 static int called = 0;
859 int cards = 0;
861 if (called)
862 return -ENODEV;
863 called = 1;
865 if (pci_present()) {
866 struct pci_dev *pdev = NULL;
868 while ((pdev = pci_find_device(PCI_VENDOR_ID_SGI,
869 PCI_DEVICE_ID_SGI_IOC3, pdev))) {
870 ioc3_pci_init(pdev);
871 cards++;
875 return cards ? -ENODEV : 0;
878 static void __exit ioc3_cleanup_module(void)
880 /* Later, when we really support modules. */
883 static int
884 ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
886 unsigned long data;
887 struct ioc3_private *ip = dev->priv;
888 struct ioc3 *ioc3 = ip->regs;
889 unsigned int len;
890 struct ioc3_etxd *desc;
891 int produce;
893 spin_lock_irq(&ip->ioc3_lock);
895 data = (unsigned long) skb->data;
896 len = skb->len;
898 produce = ip->tx_pi;
899 desc = &ip->txr[produce];
901 if (len <= 104) {
902 /* Short packet, let's copy it directly into the ring. */
903 memcpy(desc->data, skb->data, skb->len);
904 if (len < ETH_ZLEN) {
905 /* Very short packet, pad with zeros at the end. */
906 memset(desc->data + len, 0, ETH_ZLEN - len);
907 len = ETH_ZLEN;
909 desc->cmd = len | ETXD_INTWHENDONE | ETXD_D0V;
910 desc->bufcnt = len;
911 } else if ((data ^ (data + len)) & 0x4000) {
912 unsigned long b2, s1, s2;
914 b2 = (data | 0x3fffUL) + 1UL;
915 s1 = b2 - data;
916 s2 = data + len - b2;
918 desc->cmd = len | ETXD_INTWHENDONE | ETXD_B1V | ETXD_B2V;
919 desc->bufcnt = (s1 << ETXD_B1CNT_SHIFT) |
920 (s2 << ETXD_B2CNT_SHIFT);
921 desc->p1 = (0xa5UL << 56) | (data & TO_PHYS_MASK);
922 desc->p2 = (0xa5UL << 56) | (data & TO_PHYS_MASK);
923 } else {
924 /* Normal sized packet that doesn't cross a page boundary. */
925 desc->cmd = len | ETXD_INTWHENDONE | ETXD_B1V;
926 desc->bufcnt = len << ETXD_B1CNT_SHIFT;
927 desc->p1 = (0xa5UL << 56) | (data & TO_PHYS_MASK);
930 BARRIER();
932 dev->trans_start = jiffies;
933 ip->tx_skbs[produce] = skb; /* Remember skb */
934 produce = (produce + 1) & 127;
935 ip->tx_pi = produce;
936 ioc3->etpir = produce << 7; /* Fire ... */
938 ip->txqlen++;
940 if (ip->txqlen > 127)
941 netif_stop_queue(dev);
943 spin_unlock_irq(&ip->ioc3_lock);
945 return 0;
948 static void ioc3_timeout(struct net_device *dev)
950 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
952 ioc3_stop(dev);
953 ioc3_clean_tx_ring(dev->priv);
954 ioc3_init(dev);
956 dev->trans_start = jiffies;
957 netif_wake_queue(dev);
961 * Given a multicast ethernet address, this routine calculates the
962 * address's bit index in the logical address filter mask
964 #define CRC_MASK 0xEDB88320
966 static inline unsigned int
967 ioc3_hash(const unsigned char *addr)
969 unsigned int temp = 0;
970 unsigned char byte;
971 unsigned int crc;
972 int bits, len;
974 len = ETH_ALEN;
975 for (crc = ~0; --len >= 0; addr++) {
976 byte = *addr;
977 for (bits = 8; --bits >= 0; ) {
978 if ((byte ^ crc) & 1)
979 crc = (crc >> 1) ^ CRC_MASK;
980 else
981 crc >>= 1;
982 byte >>= 1;
986 crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */
987 for (bits = 6; --bits >= 0; ) {
988 temp <<= 1;
989 temp |= (crc & 0x1);
990 crc >>= 1;
993 return temp;
996 /* Provide ioctl() calls to examine the MII xcvr state. */
997 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
999 struct ioc3_private *ip = (struct ioc3_private *) dev->priv;
1000 u16 *data = (u16 *)&rq->ifr_data;
1001 struct ioc3 *ioc3 = ip->regs;
1002 int phy = ip->phy;
1004 switch (cmd) {
1005 case SIOCGMIIPHY: /* Get the address of the PHY in use. */
1006 if (phy == -1)
1007 return -ENODEV;
1008 data[0] = phy;
1009 return 0;
1011 case SIOCGMIIREG: /* Read any PHY register. */
1012 spin_lock_irq(&ip->ioc3_lock);
1013 data[3] = mii_read(ioc3, data[0], data[1]);
1014 spin_unlock_irq(&ip->ioc3_lock);
1015 return 0;
1017 case SIOCSMIIREG: /* Write any PHY register. */
1018 if (!capable(CAP_NET_ADMIN))
1019 return -EPERM;
1020 spin_lock_irq(&ip->ioc3_lock);
1021 mii_write(ioc3, data[0], data[1], data[2]);
1022 spin_unlock_irq(&ip->ioc3_lock);
1023 return 0;
1025 default:
1026 return -EOPNOTSUPP;
1029 return -EOPNOTSUPP;
1032 static void ioc3_set_multicast_list(struct net_device *dev)
1034 struct dev_mc_list *dmi = dev->mc_list;
1035 struct ioc3_private *ip = dev->priv;
1036 struct ioc3 *ioc3 = ip->regs;
1037 char *addr = dmi->dmi_addr;
1038 u64 ehar = 0;
1039 int i;
1041 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1042 /* Unconditionally log net taps. */
1043 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1044 ip->emcr |= EMCR_PROMISC;
1045 ioc3->emcr = ip->emcr;
1046 ioc3->emcr;
1047 } else {
1048 ip->emcr &= ~EMCR_PROMISC;
1049 ioc3->emcr = ip->emcr; /* Clear promiscuous. */
1050 ioc3->emcr;
1052 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
1053 /* Too many for hashing to make sense or we want all
1054 multicast packets anyway, so skip computing all the
1055 hashes and just accept all packets. */
1056 ip->ehar_h = 0xffffffff;
1057 ip->ehar_l = 0xffffffff;
1058 } else {
1059 for (i = 0; i < dev->mc_count; i++) {
1060 dmi = dmi->next;
1062 if (!(*addr & 1))
1063 continue;
1065 ehar |= (1 << ioc3_hash(addr));
1067 ip->ehar_h = ehar >> 32;
1068 ip->ehar_l = ehar & 0xffffffff;
1070 ioc3->ehar_h = ip->ehar_h;
1071 ioc3->ehar_l = ip->ehar_l;
1075 #ifdef MODULE
1076 MODULE_AUTHOR("Ralf Baechle <ralf@oss.sgi.com>");
1077 MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
1078 #endif /* MODULE */
1080 module_init(ioc3_probe);
1081 module_exit(ioc3_cleanup_module);