More meth updates.
[linux-2.6/linux-mips.git] / drivers / net / ioc3-eth.c
blob1d0417760cedb6ce145dc23c24b9022eb5916730
1 /*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
6 * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
8 * Copyright (C) 1999, 2000, 2001, 2003 Ralf Baechle
9 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
11 * References:
12 * o IOC3 ASIC specification 4.51, 1996-04-18
13 * o IEEE 802.3 specification, 2000 edition
14 * o DP38840A Specification, National Semiconductor, March 1997
16 * To do:
18 * o Handle allocation failures in ioc3_alloc_skb() more gracefully.
19 * o Handle allocation failures in ioc3_init_rings().
20 * o Use prefetching for large packets. What is a good lower limit for
21 * prefetching?
22 * o We're probably allocating a bit too much memory.
23 * o Use hardware checksums.
24 * o Convert to using a IOC3 meta driver.
25 * o Which PHYs might possibly be attached to the IOC3 in real live,
26 * which workarounds are required for them? Do we ever have Lucent's?
27 * o For the 2.5 branch kill the mii-tool ioctls.
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/delay.h>
32 #include <linux/kernel.h>
33 #include <linux/mm.h>
34 #include <linux/errno.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/crc32.h>
39 #ifdef CONFIG_SERIAL_8250
40 #include <linux/serial.h>
41 #include <asm/serial.h>
42 #define IOC3_BAUD (22000000 / (3*16))
43 #define IOC3_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
44 #endif
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/ethtool.h>
49 #include <linux/skbuff.h>
50 #include <linux/dp83840.h>
52 #include <asm/byteorder.h>
53 #include <asm/io.h>
54 #include <asm/pgtable.h>
55 #include <asm/uaccess.h>
56 #include <asm/sn/types.h>
57 #include <asm/sn/sn0/addrs.h>
58 #include <asm/sn/sn0/hubni.h>
59 #include <asm/sn/sn0/hubio.h>
60 #include <asm/sn/klconfig.h>
61 #include <asm/sn/ioc3.h>
62 #include <asm/sn/sn0/ip27.h>
63 #include <asm/pci/bridge.h>
/*
 * 64 RX buffers.  This is tunable in the range of 16 <= x < 512.  The
 * value must be a power of two.
 */
#define RX_BUFFS 64
/* Timer state engine for the link auto-negotiation state machine. */
enum ioc3_timer_state {
	arbwait  = 0,	/* Waiting for auto negotiation to complete. */
	lupwait  = 1,	/* Auto-neg complete, awaiting link-up status. */
	ltrywait = 2,	/* Forcing try of all modes, from fastest to slowest. */
	asleep   = 3,	/* Timer inactive. */
};
/* Private per NIC data of the driver. */
struct ioc3_private {
	struct ioc3 *regs;		/* Memory-mapped IOC3 register block. */
	int phy;			/* MII address of the attached PHY. */
	unsigned long *rxr;		/* pointer to receiver ring */
	struct ioc3_etxd *txr;		/* Transmit descriptor ring. */
	struct sk_buff *rx_skbs[512];	/* skbs posted to the RX ring. */
	struct sk_buff *tx_skbs[128];	/* skbs awaiting TX completion. */
	struct net_device_stats stats;
	int rx_ci;			/* RX consumer index */
	int rx_pi;			/* RX producer index */
	int tx_ci;			/* TX consumer index */
	int tx_pi;			/* TX producer index */
	int txqlen;			/* Packets currently queued for TX. */
	u32 emcr, ehar_h, ehar_l;	/* Soft copies of MAC control / hash regs. */
	spinlock_t ioc3_lock;		/* Protects MII access and the TX side. */
	struct net_device *dev;

	/* Members used by autonegotiation */
	struct timer_list ioc3_timer;
	enum ioc3_timer_state timer_state; /* State of auto-neg timer. */
	unsigned int timer_ticks;	/* Number of clicks at each state  */
	unsigned short sw_bmcr;		/* sw copy of MII config register  */
	unsigned short sw_bmsr;		/* sw copy of MII status register  */
	unsigned short sw_physid1;	/* sw copy of PHYSID1		   */
	unsigned short sw_physid2;	/* sw copy of PHYSID2		   */
	unsigned short sw_advertise;	/* sw copy of ADVERTISE		   */
	unsigned short sw_lpa;		/* sw copy of LPA		   */
	unsigned short sw_csconfig;	/* sw copy of CSCONFIG		   */
};
110 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
111 static void ioc3_set_multicast_list(struct net_device *dev);
112 static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
113 static void ioc3_timeout(struct net_device *dev);
114 static inline unsigned int ioc3_hash(const unsigned char *addr);
115 static inline void ioc3_stop(struct ioc3_private *ip);
116 static void ioc3_init(struct ioc3_private *ip);
118 static const char ioc3_str[] = "IOC3 Ethernet";
/* We use this to acquire receive skb's that we can DMA directly into. */

/* Bytes needed to advance `addr' to the next 128-byte boundary. */
#define ALIGNED_RX_SKB_ADDR(addr) \
	((((unsigned long)(addr) + (128 - 1)) & ~(128 - 1)) - (unsigned long)(addr))

/* Allocate an skb and align its data area to a 128-byte boundary. */
#define ioc3_alloc_skb(__length, __gfp_flags) \
({	struct sk_buff *__skb; \
	__skb = alloc_skb((__length) + 128, (__gfp_flags)); \
	if (__skb) { \
		int __offset = ALIGNED_RX_SKB_ADDR(__skb->data); \
		if(__offset) \
			skb_reserve(__skb, __offset); \
	} \
	__skb; \
})

/* BEWARE: The IOC3 documentation documents the size of rx buffers as
   1644 while it's actually 1664.  This one was nasty to track down ...  */
#define RX_OFFSET	10
#define RX_BUF_ALLOC_SIZE	(1664 + RX_OFFSET + 128)

/* DMA barrier to separate cached and uncached accesses. */
#define BARRIER() \
	__asm__("sync" ::: "memory")

#define IOC3_SIZE 0x100000

/* MMIO register accessors; an `ioc3' pointer must be in scope at the
   call site. */
#define ioc3_r(reg) \
({ \
	u32 __res; \
	__res = ioc3->reg; \
	__res; \
})

#define ioc3_w(reg,val) \
do { \
	(ioc3->reg = (val)); \
} while(0)
159 static inline u32
160 mcr_pack(u32 pulse, u32 sample)
162 return (pulse << 10) | (sample << 2);
/*
 * Spin until the one-wire cycle programmed into MCR has completed
 * (bit 1 set), then return the sampled data bit (bit 0).
 */
static int
nic_wait(struct ioc3 *ioc3)
{
	u32 mcr;

	do {
		mcr = ioc3_r(mcr);
	} while (!(mcr & 2));

	return mcr & 1;
}
/*
 * Issue a one-wire reset pulse and return the presence status sampled
 * from the bus.
 */
static int
nic_reset(struct ioc3 *ioc3)
{
	int presence;

	ioc3_w(mcr, mcr_pack(500, 65));
	presence = nic_wait(ioc3);

	ioc3_w(mcr, mcr_pack(0, 500));
	nic_wait(ioc3);

	return presence;
}
/* Clock one bit in from the one-wire bus. */
static inline int
nic_read_bit(struct ioc3 *ioc3)
{
	int result;

	ioc3_w(mcr, mcr_pack(6, 13));
	result = nic_wait(ioc3);
	/* Second cycle with zero pulse width -- presumably bus recovery
	   time between bit slots; confirm against the IOC3 spec. */
	ioc3_w(mcr, mcr_pack(0, 100));
	nic_wait(ioc3);

	return result;
}
/* Clock one bit out onto the one-wire bus; the pulse width encodes the
   bit value. */
static inline void
nic_write_bit(struct ioc3 *ioc3, int bit)
{
	if (bit)
		ioc3_w(mcr, mcr_pack(6, 110));
	else
		ioc3_w(mcr, mcr_pack(80, 30));

	nic_wait(ioc3);
}
216 * Read a byte from an iButton device
218 static u32
219 nic_read_byte(struct ioc3 *ioc3)
221 u32 result = 0;
222 int i;
224 for (i = 0; i < 8; i++)
225 result = (result >> 1) | (nic_read_bit(ioc3) << 7);
227 return result;
/*
 * Write a byte to an iButton device, least significant bit first.
 */
static void
nic_write_byte(struct ioc3 *ioc3, int byte)
{
	int i;

	for (i = 0; i < 8; i++, byte >>= 1)
		nic_write_bit(ioc3, byte & 1);
}
/*
 * One pass of the iButton Search ROM algorithm.  Each pass resolves one
 * 64-bit ROM address; *last carries the bit position of the most recent
 * discrepancy so that successive calls enumerate every device on the
 * bus.  Returns 0 (and clears *last) if the search fails.
 */
static u64
nic_find(struct ioc3 *ioc3, int *last)
{
	int a, b, index, disc;
	u64 address = 0;

	nic_reset(ioc3);
	/* Search ROM. */
	nic_write_byte(ioc3, 0xf0);

	/* Algorithm from ``Book of iButton Standards''. */
	for (index = 0, disc = 0; index < 64; index++) {
		a = nic_read_bit(ioc3);
		b = nic_read_bit(ioc3);

		if (a && b) {
			/* No device responded with this bit pattern. */
			printk("NIC search failed (not fatal).\n");
			*last = 0;
			return 0;
		}

		if (!a && !b) {
			/* Discrepancy: devices disagree at this bit. */
			if (index == *last) {
				address |= 1UL << index;
			} else if (index > *last) {
				address &= ~(1UL << index);
				disc = index;
			} else if ((address & (1UL << index)) == 0)
				disc = index;
			nic_write_bit(ioc3, address & (1UL << index));
			continue;
		} else {
			/* All responding devices agree on this bit. */
			if (a)
				address |= 1UL << index;
			else
				address &= ~(1UL << index);
			nic_write_bit(ioc3, a);
			continue;
		}
	}

	*last = disc;

	return address;
}
292 static int nic_init(struct ioc3 *ioc3)
294 const char *type;
295 u8 crc;
296 u8 serial[6];
297 int save = 0, i;
299 type = "unknown";
301 while (1) {
302 u64 reg;
303 reg = nic_find(ioc3, &save);
305 switch (reg & 0xff) {
306 case 0x91:
307 type = "DS1981U";
308 break;
309 default:
310 if (save == 0) {
311 /* Let the caller try again. */
312 return -1;
314 continue;
317 nic_reset(ioc3);
319 /* Match ROM. */
320 nic_write_byte(ioc3, 0x55);
321 for (i = 0; i < 8; i++)
322 nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
324 reg >>= 8; /* Shift out type. */
325 for (i = 0; i < 6; i++) {
326 serial[i] = reg & 0xff;
327 reg >>= 8;
329 crc = reg & 0xff;
330 break;
333 printk("Found %s NIC", type);
334 if (type != "unknown") {
335 printk (" registration number %02x:%02x:%02x:%02x:%02x:%02x,"
336 " CRC %02x", serial[0], serial[1], serial[2],
337 serial[3], serial[4], serial[5], crc);
339 printk(".\n");
341 return 0;
/*
 * Read the NIC (Number-In-a-Can) device used to store the MAC address on
 * SN0 / SN00 nodeboards and PCI cards.
 */
static void ioc3_get_eaddr_nic(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	u8 nic[14];
	int tries = 2; /* There may be some problem with the battery?  */
	int i;

	/* NOTE(review): presumably routes the GPIO pin to the NIC bus --
	   confirm against the IOC3 spec. */
	ioc3_w(gpcr_s, (1 << 21));

	while (tries--) {
		if (!nic_init(ioc3))
			break;
		udelay(500);
	}

	/* tries is -1 only if every attempt failed (while loop exhausted). */
	if (tries < 0) {
		printk("Failed to read MAC address\n");
		return;
	}

	/* Read Memory. */
	nic_write_byte(ioc3, 0xf0);
	nic_write_byte(ioc3, 0x00);
	nic_write_byte(ioc3, 0x00);

	for (i = 13; i >= 0; i--)
		nic[i] = nic_read_byte(ioc3);

	/* Bytes 2..7 of the NIC data hold the station address. */
	for (i = 2; i < 8; i++)
		ip->dev->dev_addr[i - 2] = nic[i];
}
/*
 * Ok, this is hosed by design.  It's necessary to know what machine the
 * NIC is in in order to know how to read the NIC address.  We also have
 * to know if it's a PCI card or a NIC in on the node board ...
 */
static void ioc3_get_eaddr(struct ioc3_private *ip)
{
	int i;

	ioc3_get_eaddr_nic(ip);

	/* Log the address in the usual colon-separated notation. */
	printk("Ethernet address is ");
	for (i = 0; i < 6; i++) {
		printk("%02x", ip->dev->dev_addr[i]);
		if (i < 5)
			printk(":");
	}
	printk(".\n");
}
/*
 * Caller must hold the ioc3_lock ever for MII readers.  This is also
 * used to protect the transmitter side but it's low contention.
 */
static u16 mii_read(struct ioc3_private *ip, int reg)
{
	struct ioc3 *ioc3 = ip->regs;
	int phy = ip->phy;

	while (ioc3->micr & MICR_BUSY);		/* Wait out previous cycle. */
	ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG;
	while (ioc3->micr & MICR_BUSY);		/* Wait for the read to finish. */

	return ioc3->midr_r & MIDR_DATA_MASK;
}
/* Write a PHY register; caller must hold ioc3_lock (see mii_read). */
static void mii_write(struct ioc3_private *ip, int reg, u16 data)
{
	struct ioc3 *ioc3 = ip->regs;
	int phy = ip->phy;

	while (ioc3->micr & MICR_BUSY);		/* Wait out previous cycle. */
	ioc3->midr_w = data;
	ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg;
	while (ioc3->micr & MICR_BUSY);		/* Wait for write to complete. */
}
429 static int ioc3_mii_init(struct ioc3_private *ip);
/* Fold the hardware collision counter into the stats and return them. */
static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
{
	struct ioc3_private *ip = dev->priv;
	struct ioc3 *ioc3 = ip->regs;

	ip->stats.collisions += (ioc3->etcdc & ETCDC_COLLCNT_MASK);
	return &ip->stats;
}
/*
 * Receive handler: walk the RX ring from the consumer index, hand good
 * frames to the network stack, recycle or replace the buffers and
 * finally re-arm the produce register.
 */
static inline void
ioc3_rx(struct ioc3_private *ip)
{
	struct sk_buff *skb, *new_skb;
	struct ioc3 *ioc3 = ip->regs;
	int rx_entry, n_entry, len;
	struct ioc3_erxbuf *rxb;
	unsigned long *rxr;
	u32 w0, err;

	rxr = (unsigned long *) ip->rxr;		/* Ring base */
	rx_entry = ip->rx_ci;				/* RX consume index */
	n_entry = ip->rx_pi;

	skb = ip->rx_skbs[rx_entry];
	rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
	w0 = be32_to_cpu(rxb->w0);

	while (w0 & ERXBUF_V) {
		err = be32_to_cpu(rxb->err);		/* It's valid ...  */
		if (err & ERXBUF_GOODPKT) {
			/* Byte count includes the 4-byte FCS; strip it. */
			len = ((w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff) - 4;
			skb_trim(skb, len);
			skb->protocol = eth_type_trans(skb, ip->dev);

			new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (!new_skb) {
				/* Ouch, drop packet and just recycle packet
				   to keep the ring filled.  */
				ip->stats.rx_dropped++;
				new_skb = skb;
				goto next;
			}
			netif_rx(skb);

			ip->rx_skbs[rx_entry] = NULL;	/* Poison  */

			new_skb->dev = ip->dev;

			/* Because we reserve afterwards. */
			skb_put(new_skb, (1664 + RX_OFFSET));
			rxb = (struct ioc3_erxbuf *) new_skb->data;
			skb_reserve(new_skb, RX_OFFSET);

			ip->dev->last_rx = jiffies;
			ip->stats.rx_packets++;		/* Statistics */
			ip->stats.rx_bytes += len;
		} else {
			/* The frame is invalid and the skb never
			   reached the network layer so we can just
			   recycle it.  */
			new_skb = skb;
			ip->stats.rx_errors++;
		}
		if (err & ERXBUF_CRCERR)	/* Statistics */
			ip->stats.rx_crc_errors++;
		if (err & ERXBUF_FRAMERR)
			ip->stats.rx_frame_errors++;
next:
		/* Repost (or newly post) a buffer at the produce index. */
		ip->rx_skbs[n_entry] = new_skb;
		rxr[n_entry] = cpu_to_be64((0xa5UL << 56) |
		                           ((unsigned long) rxb & TO_PHYS_MASK));
		rxb->w0 = 0;				/* Clear valid flag */
		n_entry = (n_entry + 1) & 511;		/* Update erpir */

		/* Now go on to the next ring entry.  */
		rx_entry = (rx_entry + 1) & 511;
		skb = ip->rx_skbs[rx_entry];
		rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
		w0 = be32_to_cpu(rxb->w0);
	}

	ioc3->erpir = (n_entry << 3) | ERPIR_ARM;
	ip->rx_pi = n_entry;
	ip->rx_ci = rx_entry;
}
/*
 * Reclaim transmitted skbs: walk from the driver's consume index up to
 * the hardware's, free the skbs, update the statistics and wake the
 * queue once it has drained below the high-water mark.
 */
static inline void
ioc3_tx(struct ioc3_private *ip)
{
	unsigned long packets, bytes;
	struct ioc3 *ioc3 = ip->regs;
	int tx_entry, o_entry;
	struct sk_buff *skb;
	u32 etcir;

	spin_lock(&ip->ioc3_lock);
	etcir = ioc3->etcir;

	tx_entry = (etcir >> 7) & 127;		/* Hardware consume index. */
	o_entry = ip->tx_ci;
	packets = 0;
	bytes = 0;

	while (o_entry != tx_entry) {
		packets++;
		skb = ip->tx_skbs[o_entry];
		bytes += skb->len;
		dev_kfree_skb_irq(skb);
		ip->tx_skbs[o_entry] = NULL;

		o_entry = (o_entry + 1) & 127;		/* Next */

		etcir = ioc3->etcir;			/* More pkts sent?  */
		tx_entry = (etcir >> 7) & 127;
	}

	ip->stats.tx_packets += packets;
	ip->stats.tx_bytes += bytes;
	ip->txqlen -= packets;

	if (ip->txqlen < 128)
		netif_wake_queue(ip->dev);

	ip->tx_ci = o_entry;
	spin_unlock(&ip->ioc3_lock);
}
558 * Deal with fatal IOC3 errors. This condition might be caused by a hard or
559 * software problems, so we should try to recover
560 * more gracefully if this ever happens. In theory we might be flooded
561 * with such error interrupts if something really goes wrong, so we might
562 * also consider to take the interface down.
564 static void
565 ioc3_error(struct ioc3_private *ip, u32 eisr)
567 struct net_device *dev = ip->dev;
568 unsigned char *iface = dev->name;
570 if (eisr & EISR_RXOFLO)
571 printk(KERN_ERR "%s: RX overflow.\n", iface);
572 if (eisr & EISR_RXBUFOFLO)
573 printk(KERN_ERR "%s: RX buffer overflow.\n", iface);
574 if (eisr & EISR_RXMEMERR)
575 printk(KERN_ERR "%s: RX PCI error.\n", iface);
576 if (eisr & EISR_RXPARERR)
577 printk(KERN_ERR "%s: RX SSRAM parity error.\n", iface);
578 if (eisr & EISR_TXBUFUFLO)
579 printk(KERN_ERR "%s: TX buffer underflow.\n", iface);
580 if (eisr & EISR_TXMEMERR)
581 printk(KERN_ERR "%s: TX PCI error.\n", iface);
583 ioc3_stop(ip);
584 ioc3_init(ip);
585 ioc3_mii_init(ip);
587 dev->trans_start = jiffies;
588 netif_wake_queue(dev);
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.  */
static irqreturn_t ioc3_interrupt(int irq, void *_dev, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)_dev;
	struct ioc3_private *ip = dev->priv;
	struct ioc3 *ioc3 = ip->regs;
	const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
	                    EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
	                    EISR_TXEXPLICIT | EISR_TXMEMERR;
	u32 eisr;

	eisr = ioc3->eisr & enabled;

	while (eisr) {
		/* Acknowledge the interrupt sources we are about to handle. */
		ioc3->eisr = eisr;
		ioc3->eisr;				/* Flush */

		if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
		            EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
			ioc3_error(ip, eisr);
		if (eisr & EISR_RXTIMERINT)
			ioc3_rx(ip);
		if (eisr & EISR_TXEXPLICIT)
			ioc3_tx(ip);

		eisr = ioc3->eisr & enabled;
	}

	return IRQ_HANDLED;
}
623 * Auto negotiation. The scheme is very simple. We have a timer routine that
624 * keeps watching the auto negotiation process as it progresses. The DP83840
625 * is first told to start doing it's thing, we set up the time and place the
626 * timer state machine in it's initial state.
628 * Here the timer peeks at the DP83840 status registers at each click to see
629 * if the auto negotiation has completed, we assume here that the DP83840 PHY
630 * will time out at some point and just tell us what (didn't) happen. For
631 * complete coverage we only allow so many of the ticks at this level to run,
632 * when this has expired we print a warning message and try another strategy.
633 * This "other" strategy is to force the interface into various speed/duplex
634 * configurations and we stop when we see a link-up condition before the
635 * maximum number of "peek" ticks have occurred.
637 * Once a valid link status has been detected we configure the IOC3 to speak
638 * the most efficient protocol we could get a clean link for. The priority
639 * for link configurations, highest first is:
641 * 100 Base-T Full Duplex
642 * 100 Base-T Half Duplex
643 * 10 Base-T Full Duplex
644 * 10 Base-T Half Duplex
646 * We start a new timer now, after a successful auto negotiation status has
647 * been detected. This timer just waits for the link-up bit to get set in
648 * the BMCR of the DP83840. When this occurs we print a kernel log message
649 * describing the link type in use and the fact that it is up.
651 * If a fatal error of some sort is signalled and detected in the interrupt
652 * service routine, and the chip is reset, or the link is ifconfig'd down
653 * and then back up, this entire process repeats itself all over again.
655 static int ioc3_try_next_permutation(struct ioc3_private *ip)
657 ip->sw_bmcr = mii_read(ip, MII_BMCR);
659 /* Downgrade from full to half duplex. Only possible via ethtool. */
660 if (ip->sw_bmcr & BMCR_FULLDPLX) {
661 ip->sw_bmcr &= ~BMCR_FULLDPLX;
662 mii_write(ip, MII_BMCR, ip->sw_bmcr);
664 return 0;
667 /* Downgrade from 100 to 10. */
668 if (ip->sw_bmcr & BMCR_SPEED100) {
669 ip->sw_bmcr &= ~BMCR_SPEED100;
670 mii_write(ip, MII_BMCR, ip->sw_bmcr);
672 return 0;
675 /* We've tried everything. */
676 return -1;
679 static void
680 ioc3_display_link_mode(struct ioc3_private *ip)
682 char *tmode = "";
684 ip->sw_lpa = mii_read(ip, MII_LPA);
686 if (ip->sw_lpa & (LPA_100HALF | LPA_100FULL)) {
687 if (ip->sw_lpa & LPA_100FULL)
688 tmode = "100Mb/s, Full Duplex";
689 else
690 tmode = "100Mb/s, Half Duplex";
691 } else {
692 if (ip->sw_lpa & LPA_10FULL)
693 tmode = "10Mb/s, Full Duplex";
694 else
695 tmode = "10Mb/s, Half Duplex";
698 printk(KERN_INFO "%s: Link is up at %s.\n", ip->dev->name, tmode);
701 static void
702 ioc3_display_forced_link_mode(struct ioc3_private *ip)
704 char *speed = "", *duplex = "";
706 ip->sw_bmcr = mii_read(ip, MII_BMCR);
707 if (ip->sw_bmcr & BMCR_SPEED100)
708 speed = "100Mb/s, ";
709 else
710 speed = "10Mb/s, ";
711 if (ip->sw_bmcr & BMCR_FULLDPLX)
712 duplex = "Full Duplex.\n";
713 else
714 duplex = "Half Duplex.\n";
716 printk(KERN_INFO "%s: Link has been forced up at %s%s", ip->dev->name,
717 speed, duplex);
/*
 * Program the MAC duplex bit to match what was negotiated (arbwait
 * state) or forced (any other state).  Returns 0 on success, 1 if the
 * link partner advertised nothing we can use.
 */
static int ioc3_set_link_modes(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	int full;

	/*
	 * All we care about is making sure the bigmac tx_cfg has a
	 * proper duplex setting.
	 */
	if (ip->timer_state == arbwait) {
		ip->sw_lpa = mii_read(ip, MII_LPA);
		if (!(ip->sw_lpa & (LPA_10HALF | LPA_10FULL |
		                    LPA_100HALF | LPA_100FULL)))
			goto no_response;
		/* Pick duplex of the best mode the partner offers. */
		if (ip->sw_lpa & LPA_100FULL)
			full = 1;
		else if (ip->sw_lpa & LPA_100HALF)
			full = 0;
		else if (ip->sw_lpa & LPA_10FULL)
			full = 1;
		else
			full = 0;
	} else {
		/* Forcing a link mode. */
		ip->sw_bmcr = mii_read(ip, MII_BMCR);
		if (ip->sw_bmcr & BMCR_FULLDPLX)
			full = 1;
		else
			full = 0;
	}

	if (full)
		ip->emcr |= EMCR_DUPLEX;
	else
		ip->emcr &= ~EMCR_DUPLEX;

	ioc3->emcr = ip->emcr;
	ioc3->emcr;		/* Flush posted write. */

	return 0;

no_response:

	return 1;
}
766 static int is_lucent_phy(struct ioc3_private *ip)
768 unsigned short mr2, mr3;
769 int ret = 0;
771 mr2 = mii_read(ip, MII_PHYSID1);
772 mr3 = mii_read(ip, MII_PHYSID2);
773 if ((mr2 & 0xffff) == 0x0180 && ((mr3 & 0xffff) >> 10) == 0x1d) {
774 ret = 1;
777 return ret;
/*
 * Link state machine tick, run every 1.2s while negotiation or forced
 * mode probing is in progress; goes to sleep once a stable link state
 * is reached.  See the long comment above for the overall design.
 */
static void ioc3_timer(unsigned long data)
{
	struct ioc3_private *ip = (struct ioc3_private *) data;
	int restart_timer = 0;

	ip->timer_ticks++;
	switch (ip->timer_state) {
	case arbwait:
		/*
		 * Only allow for 5 ticks, thats 10 seconds and much too
		 * long to wait for arbitration to complete.
		 */
		if (ip->timer_ticks >= 10) {
			/* Enter force mode. */
	do_force_mode:
			ip->sw_bmcr = mii_read(ip, MII_BMCR);
			printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful,"
			       " trying force link mode\n", ip->dev->name);
			ip->sw_bmcr = BMCR_SPEED100;
			mii_write(ip, MII_BMCR, ip->sw_bmcr);

			if (!is_lucent_phy(ip)) {
				/*
				 * OK, seems we need do disable the transceiver
				 * for the first tick to make sure we get an
				 * accurate link state at the second tick.
				 */
				ip->sw_csconfig = mii_read(ip, MII_CSCONFIG);
				ip->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
			}
			ip->timer_state = ltrywait;
			ip->timer_ticks = 0;
			restart_timer = 1;
		} else {
			/* Anything interesting happen? */
			ip->sw_bmsr = mii_read(ip, MII_BMSR);
			if (ip->sw_bmsr & BMSR_ANEGCOMPLETE) {
				int ret;

				/* Just what we've been waiting for... */
				ret = ioc3_set_link_modes(ip);
				if (ret) {
					/* Ooops, something bad happened, go to
					 * force mode.
					 *
					 * XXX Broken hubs which don't support
					 * XXX 802.3u auto-negotiation make this
					 * XXX happen as well.
					 */
					goto do_force_mode;
				}

				/*
				 * Success, at least so far, advance our state
				 * engine.
				 */
				ip->timer_state = lupwait;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case lupwait:
		/*
		 * Auto negotiation was successful and we are awaiting a
		 * link up status.  I have decided to let this timer run
		 * forever until some sort of error is signalled, reporting
		 * a message to the user at 10 second intervals.
		 */
		ip->sw_bmsr = mii_read(ip, MII_BMSR);
		if (ip->sw_bmsr & BMSR_LSTATUS) {
			/*
			 * Wheee, it's up, display the link mode in use and put
			 * the timer to sleep.
			 */
			ioc3_display_link_mode(ip);
			ip->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (ip->timer_ticks >= 10) {
				printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
				       "not completely up.\n", ip->dev->name);
				ip->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case ltrywait:
		/*
		 * Making the timeout here too long can make it take
		 * annoyingly long to attempt all of the link mode
		 * permutations, but then again this is essentially
		 * error recovery code for the most part.
		 */
		ip->sw_bmsr = mii_read(ip, MII_BMSR);
		ip->sw_csconfig = mii_read(ip, MII_CSCONFIG);
		if (ip->timer_ticks == 1) {
			if (!is_lucent_phy(ip)) {
				/*
				 * Re-enable transceiver, we'll re-enable the
				 * transceiver next tick, then check link state
				 * on the following tick.
				 */
				ip->sw_csconfig |= CSCONFIG_TCVDISAB;
				mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (ip->timer_ticks == 2) {
			if (!is_lucent_phy(ip)) {
				ip->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (ip->sw_bmsr & BMSR_LSTATUS) {
			/* Force mode selection success. */
			ioc3_display_forced_link_mode(ip);
			ioc3_set_link_modes(ip);  /* XXX error? then what? */
			ip->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (ip->timer_ticks >= 4) { /* 6 seconds or so... */
				int ret;

				ret = ioc3_try_next_permutation(ip);
				if (ret == -1) {
					/*
					 * Aieee, tried them all, reset the
					 * chip and try all over again.
					 */
					printk(KERN_NOTICE "%s: Link down, "
					       "cable problem?\n",
					       ip->dev->name);

					ioc3_init(ip);
					return;
				}
				if (!is_lucent_phy(ip)) {
					ip->sw_csconfig = mii_read(ip,
					                           MII_CSCONFIG);
					ip->sw_csconfig |= CSCONFIG_TCVDISAB;
					mii_write(ip, MII_CSCONFIG,
					          ip->sw_csconfig);
				}
				ip->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case asleep:
	default:
		/* Can't happens....  */
		printk(KERN_ERR "%s: Aieee, link timer is asleep but we got "
		       "one anyways!\n", ip->dev->name);
		restart_timer = 0;
		ip->timer_ticks = 0;
		ip->timer_state = asleep; /* foo on you */
		break;
	}

	if (restart_timer) {
		ip->ioc3_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2s */
		add_timer(&ip->ioc3_timer);
	}
}
/*
 * Kick off link configuration.  With ep == NULL or autoneg enabled we
 * advertise everything the PHY supports and restart auto negotiation;
 * otherwise (or when the PHY refuses to start autoneg) a fixed
 * speed/duplex is forced.  Finally the link timer is (re)armed.
 * Caller holds ioc3_lock (see mii_read).
 */
static void
ioc3_start_auto_negotiation(struct ioc3_private *ip, struct ethtool_cmd *ep)
{
	int timeout;

	/* Read all of the registers we are interested in now. */
	ip->sw_bmsr = mii_read(ip, MII_BMSR);
	ip->sw_bmcr = mii_read(ip, MII_BMCR);
	ip->sw_physid1 = mii_read(ip, MII_PHYSID1);
	ip->sw_physid2 = mii_read(ip, MII_PHYSID2);

	/* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */

	ip->sw_advertise = mii_read(ip, MII_ADVERTISE);
	if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
		/* Advertise everything we can support. */
		if (ip->sw_bmsr & BMSR_10HALF)
			ip->sw_advertise |= ADVERTISE_10HALF;
		else
			ip->sw_advertise &= ~ADVERTISE_10HALF;
		if (ip->sw_bmsr & BMSR_10FULL)
			ip->sw_advertise |= ADVERTISE_10FULL;
		else
			ip->sw_advertise &= ~ADVERTISE_10FULL;
		if (ip->sw_bmsr & BMSR_100HALF)
			ip->sw_advertise |= ADVERTISE_100HALF;
		else
			ip->sw_advertise &= ~ADVERTISE_100HALF;
		if (ip->sw_bmsr & BMSR_100FULL)
			ip->sw_advertise |= ADVERTISE_100FULL;
		else
			ip->sw_advertise &= ~ADVERTISE_100FULL;
		mii_write(ip, MII_ADVERTISE, ip->sw_advertise);

		/*
		 * XXX Currently no IOC3 card I know off supports 100BaseT4,
		 * XXX and this is because the DP83840 does not support it,
		 * XXX changes XXX would need to be made to the tx/rx logic in
		 * XXX the driver as well so I completely skip checking for it
		 * XXX in the BMSR for now.
		 */

#ifdef AUTO_SWITCH_DEBUG
		ASD(("%s: Advertising [ ", ip->dev->name));
		if (ip->sw_advertise & ADVERTISE_10HALF)
			ASD(("10H "));
		if (ip->sw_advertise & ADVERTISE_10FULL)
			ASD(("10F "));
		if (ip->sw_advertise & ADVERTISE_100HALF)
			ASD(("100H "));
		if (ip->sw_advertise & ADVERTISE_100FULL)
			ASD(("100F "));
#endif

		/* Enable Auto-Negotiation, this is usually on already... */
		ip->sw_bmcr |= BMCR_ANENABLE;
		mii_write(ip, MII_BMCR, ip->sw_bmcr);

		/* Restart it to make sure it is going. */
		ip->sw_bmcr |= BMCR_ANRESTART;
		mii_write(ip, MII_BMCR, ip->sw_bmcr);

		/* BMCR_ANRESTART self clears when the process has begun. */

		timeout = 64;  /* More than enough. */
		while (--timeout) {
			ip->sw_bmcr = mii_read(ip, MII_BMCR);
			if (!(ip->sw_bmcr & BMCR_ANRESTART))
				break; /* got it. */
			udelay(10);
		}
		if (!timeout) {
			printk(KERN_ERR "%s: IOC3 would not start auto "
			       "negotiation BMCR=0x%04x\n",
			       ip->dev->name, ip->sw_bmcr);
			printk(KERN_NOTICE "%s: Performing force link "
			       "detection.\n", ip->dev->name);
			goto force_link;
		} else {
			ip->timer_state = arbwait;
		}
	} else {
force_link:
		/*
		 * Force the link up, trying first a particular mode.  Either
		 * we are here at the request of ethtool or because the IOC3
		 * would not start to autoneg.
		 */

		/*
		 * Disable auto-negotiation in BMCR, enable the duplex and
		 * speed setting, init the timer state machine, and fire it off.
		 */
		if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
			ip->sw_bmcr = BMCR_SPEED100;
		} else {
			if (ep->speed == SPEED_100)
				ip->sw_bmcr = BMCR_SPEED100;
			else
				ip->sw_bmcr = 0;
			if (ep->duplex == DUPLEX_FULL)
				ip->sw_bmcr |= BMCR_FULLDPLX;
		}
		mii_write(ip, MII_BMCR, ip->sw_bmcr);

		if (!is_lucent_phy(ip)) {
			/*
			 * OK, seems we need do disable the transceiver for the
			 * first tick to make sure we get an accurate link
			 * state at the second tick.
			 */
			ip->sw_csconfig = mii_read(ip, MII_CSCONFIG);
			ip->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
			mii_write(ip, MII_CSCONFIG, ip->sw_csconfig);
		}
		ip->timer_state = ltrywait;
	}

	del_timer(&ip->ioc3_timer);
	ip->timer_ticks = 0;
	ip->ioc3_timer.expires = jiffies + (12 * HZ)/10;  /* 1.2 sec. */
	ip->ioc3_timer.data = (unsigned long) ip;
	ip->ioc3_timer.function = &ioc3_timer;
	add_timer(&ip->ioc3_timer);
}
/*
 * Probe the MII bus for the first responding PHY and start auto
 * negotiation on it.  Returns 0 on success, -ENODEV if no PHY answers.
 */
static int ioc3_mii_init(struct ioc3_private *ip)
{
	int i, found;
	u16 word;

	found = 0;
	spin_lock_irq(&ip->ioc3_lock);
	for (i = 0; i < 32; i++) {
		ip->phy = i;
		word = mii_read(ip, 2);	/* PHYSID1; 0x0000/0xffff mean no PHY. */
		if ((word != 0xffff) && (word != 0x0000)) {
			found = 1;
			break;			/* Found a PHY		*/
		}
	}
	if (!found) {
		spin_unlock_irq(&ip->ioc3_lock);
		return -ENODEV;
	}

	ioc3_start_auto_negotiation(ip, NULL);		// XXX ethtool

	spin_unlock_irq(&ip->ioc3_lock);

	return 0;
}
/*
 * Compact the RX ring so the consume index lands on a 16-entry
 * boundary (presumably a hardware alignment requirement -- confirm
 * against the IOC3 spec), then clear the valid flag of every posted
 * buffer.
 */
static inline void
ioc3_clean_rx_ring(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int i;

	/* Slide outstanding entries along until rx_ci is a multiple of 16. */
	for (i = ip->rx_ci; i & 15; i++) {
		ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
		ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
	}
	ip->rx_pi &= 511;
	ip->rx_ci &= 511;

	for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
		struct ioc3_erxbuf *rxb;

		skb = ip->rx_skbs[i];
		rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
		rxb->w0 = 0;			/* Clear valid flag. */
	}
}
1133 static inline void
1134 ioc3_clean_tx_ring(struct ioc3_private *ip)
1136 struct sk_buff *skb;
1137 int i;
1139 for (i=0; i < 128; i++) {
1140 skb = ip->tx_skbs[i];
1141 if (skb) {
1142 ip->tx_skbs[i] = NULL;
1143 dev_kfree_skb_any(skb);
1145 ip->txr[i].cmd = 0;
1147 ip->tx_pi = 0;
1148 ip->tx_ci = 0;
/* Release both descriptor rings and any skbs still attached to them. */
static void
ioc3_free_rings(struct ioc3_private *ip)
{
	struct sk_buff *skb;
	int rx_entry, n_entry;

	if (ip->txr) {
		ioc3_clean_tx_ring(ip);
		free_pages((unsigned long)ip->txr, 2);	/* 16kB TX ring. */
		ip->txr = NULL;
	}

	if (ip->rxr) {
		n_entry = ip->rx_ci;
		rx_entry = ip->rx_pi;

		/* Free every buffer between consume and produce index. */
		while (n_entry != rx_entry) {
			skb = ip->rx_skbs[n_entry];
			if (skb)
				dev_kfree_skb_any(skb);

			n_entry = (n_entry + 1) & 511;
		}
		free_page((unsigned long)ip->rxr);
		ip->rxr = NULL;
	}
}
1179 static void
1180 ioc3_alloc_rings(struct net_device *dev, struct ioc3_private *ip,
1181 struct ioc3 *ioc3)
1183 struct ioc3_erxbuf *rxb;
1184 unsigned long *rxr;
1185 int i;
1187 if (ip->rxr == NULL) {
1188 /* Allocate and initialize rx ring. 4kb = 512 entries */
1189 ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1190 rxr = (unsigned long *) ip->rxr;
1191 if (!rxr)
1192 printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
1194 /* Now the rx buffers. The RX ring may be larger but
1195 we only allocate 16 buffers for now. Need to tune
1196 this for performance and memory later. */
1197 for (i = 0; i < RX_BUFFS; i++) {
1198 struct sk_buff *skb;
1200 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1201 if (!skb) {
1202 show_free_areas();
1203 continue;
1206 ip->rx_skbs[i] = skb;
1207 skb->dev = dev;
1209 /* Because we reserve afterwards. */
1210 skb_put(skb, (1664 + RX_OFFSET));
1211 rxb = (struct ioc3_erxbuf *) skb->data;
1212 rxr[i] = cpu_to_be64((0xa5UL << 56) |
1213 ((unsigned long) rxb & TO_PHYS_MASK));
1214 skb_reserve(skb, RX_OFFSET);
1216 ip->rx_ci = 0;
1217 ip->rx_pi = RX_BUFFS;
1220 if (ip->txr == NULL) {
1221 /* Allocate and initialize tx rings. 16kb = 128 bufs. */
1222 ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
1223 if (!ip->txr)
1224 printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
1225 ip->tx_pi = 0;
1226 ip->tx_ci = 0;
/*
 * (Re)build both rings and program their base, consume and produce
 * registers into the chip.  The 0xa5 pattern placed in the top byte of
 * the ring address presumably forms the xtalk attribute required by
 * the IOC3 -- confirm against the hardware spec.
 */
static void
ioc3_init_rings(struct net_device *dev, struct ioc3_private *ip,
	        struct ioc3 *ioc3)
{
	unsigned long ring;

	ioc3_free_rings(ip);
	ioc3_alloc_rings(dev, ip, ioc3);

	ioc3_clean_rx_ring(ip);
	ioc3_clean_tx_ring(ip);

	/* Now the rx ring base, consume & produce registers.  */
	ring = (0xa5UL << 56) | ((unsigned long)ip->rxr & TO_PHYS_MASK);
	ioc3->erbr_h = ring >> 32;
	ioc3->erbr_l = ring & 0xffffffff;
	ioc3->ercir = (ip->rx_ci << 3);
	ioc3->erpir = (ip->rx_pi << 3) | ERPIR_ARM;

	ring = (0xa5UL << 56) | ((unsigned long)ip->txr & TO_PHYS_MASK);

	ip->txqlen = 0;					/* nothing queued  */

	/* Now the tx ring base, consume & produce registers.  */
	ioc3->etbr_h = ring >> 32;
	ioc3->etbr_l = ring & 0xffffffff;
	ioc3->etpir = (ip->tx_pi << 7);
	ioc3->etcir = (ip->tx_ci << 7);
	ioc3->etcir;					/* Flush */
}
/*
 * Discover the size of the on-chip SSRAM packet buffer: write two
 * distinct patterns 16K words apart and read them back.  If either
 * pattern does not survive (presumably because the two locations
 * alias on a small part — confirm against the IOC3 spec), fall back
 * to the 64 KB setting.  Leaves the chosen BUFSIZ/RAMPAR bits in
 * ip->emcr for ioc3_init() to reuse.
 */
static inline void
ioc3_ssram_disc(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;
	volatile u32 *ssram0 = &ioc3->ssram[0x0000];
	volatile u32 *ssram1 = &ioc3->ssram[0x4000];
	unsigned int pattern = 0x5555;

	/* Assume the larger size SSRAM and enable parity checking */
	ioc3->emcr |= (EMCR_BUFSIZ | EMCR_RAMPAR);

	*ssram0 = pattern;
	*ssram1 = ~pattern & IOC3_SSRAM_DM;

	if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
	    (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
		/* set ssram size to 64 KB */
		ip->emcr = EMCR_RAMPAR;
		ioc3->emcr &= ~EMCR_BUFSIZ;
	} else {
		ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
	}
}
/*
 * Full (re-)initialisation of the ethernet part of the chip: reset,
 * reprogram station address and multicast hash, rebuild the DMA rings,
 * then enable rx/tx DMA and unmask the interrupts we service.
 * ip->emcr must already hold the SSRAM bits (see ioc3_ssram_disc()).
 * Dummy reads of write-only sequences flush posted PIO writes.
 */
static void ioc3_init(struct ioc3_private *ip)
{
	struct net_device *dev = ip->dev;
	struct ioc3 *ioc3 = ip->regs;

	del_timer(&ip->ioc3_timer);		/* Kill if running	*/

	ioc3->emcr = EMCR_RST;			/* Reset		*/
	ioc3->emcr;				/* Flush WB		*/
	udelay(4);				/* Give it time ...	*/
	ioc3->emcr = 0;
	ioc3->emcr;

	/* Misc registers  */
	ioc3->erbar = 0;
	/* NOTE(review): inter-packet gap values presumably from the
	   IOC3 spec / 802.3 — confirm before changing.  */
	ioc3->etcsr = (17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21;
	ioc3->etcdc;				/* Clear on read */
	ioc3->ercsr = 15;			/* RX low watermark  */
	ioc3->ertr = 0;				/* Interrupt immediately  */
	/* Station address, least significant byte in emar_l.  */
	ioc3->emar_h = (dev->dev_addr[5] << 8) | dev->dev_addr[4];
	ioc3->emar_l = (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
	               (dev->dev_addr[1] << 8) | dev->dev_addr[0];
	/* Restore the software copy of the multicast hash filter.  */
	ioc3->ehar_h = ip->ehar_h;
	ioc3->ehar_l = ip->ehar_l;
	ioc3->ersr = 42;			/* XXX should be random */

	ioc3_init_rings(ip->dev, ip, ioc3);

	/* Everything is set up — turn the engines on.  */
	ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
	            EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN;
	ioc3->emcr = ip->emcr;
	ioc3->eier = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
	             EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
	             EISR_TXEXPLICIT | EISR_TXMEMERR;
	ioc3->eier;				/* Flush */
}
/*
 * Quiesce the chip: disable the rx/tx engines and mask every
 * interrupt source.  The trailing read of eier flushes the posted
 * writes so the chip is quiet when we return.
 */
static inline void ioc3_stop(struct ioc3_private *ip)
{
	struct ioc3 *ioc3 = ip->regs;

	ioc3->emcr = 0;			/* Shutup */
	ioc3->eier = 0;			/* Disable interrupts */
	ioc3->eier;			/* Flush */
}
1331 static int
1332 ioc3_open(struct net_device *dev)
1334 struct ioc3_private *ip = dev->priv;
1336 if (request_irq(dev->irq, ioc3_interrupt, SA_SHIRQ, ioc3_str, dev)) {
1337 printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
1339 return -EAGAIN;
1342 ip->ehar_h = 0;
1343 ip->ehar_l = 0;
1344 ioc3_init(ip);
1346 netif_start_queue(dev);
1347 return 0;
/*
 * Take the interface down: stop the PHY poll timer and the tx queue,
 * quiesce the chip, release the shared interrupt and free both rings.
 */
static int
ioc3_close(struct net_device *dev)
{
	struct ioc3_private *ip = dev->priv;

	del_timer(&ip->ioc3_timer);

	netif_stop_queue(dev);
	ioc3_stop(ip);			/* masks interrupts before free_irq */
	free_irq(dev->irq, dev);

	ioc3_free_rings(ip);
	return 0;
}
1367 * MENET cards have four IOC3 chips, which are attached to two sets of
1368 * PCI slot resources each: the primary connections are on slots
1369 * 0..3 and the secondaries are on 4..7
1371 * All four ethernets are brought out to connectors; six serial ports
1372 * (a pair from each of the first three IOC3s) are brought out to
1373 * MiniDINs; all other subdevices are left swinging in the wind, leave
1374 * them disabled.
1376 static inline int ioc3_is_menet(struct pci_dev *pdev)
1378 struct pci_dev *dev;
1380 return pdev->bus->parent == NULL
1381 && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(0, 0)))
1382 && dev->vendor == PCI_VENDOR_ID_SGI
1383 && dev->device == PCI_DEVICE_ID_SGI_IOC3
1384 && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(1, 0)))
1385 && dev->vendor == PCI_VENDOR_ID_SGI
1386 && dev->device == PCI_DEVICE_ID_SGI_IOC3
1387 && (dev = pci_find_slot(pdev->bus->number, PCI_DEVFN(2, 0)))
1388 && dev->vendor == PCI_VENDOR_ID_SGI
1389 && dev->device == PCI_DEVICE_ID_SGI_IOC3;
1392 static inline void ioc3_serial_probe(struct pci_dev *pdev,
1393 struct ioc3 *ioc3)
1395 struct serial_struct req;
1398 * We need to recognice and treat the fourth MENET serial as it
1399 * does not have an SuperIO chip attached to it, therefore attempting
1400 * to access it will result in bus errors. We call something an
1401 * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3
1402 * in it. This is paranoid but we want to avoid blowing up on a
1403 * showhorn PCI box that happens to have 4 IOC3 cards in it so it's
1404 * not paranoid enough ...
1406 if (ioc3_is_menet(pdev) && PCI_SLOT(pdev->devfn) == 3)
1407 return;
1409 /* Register to interrupt zero because we share the interrupt with
1410 the serial driver which we don't properly support yet. */
1411 memset(&req, 0, sizeof(req));
1412 req.irq = 0;
1413 req.flags = IOC3_COM_FLAGS;
1414 req.io_type = SERIAL_IO_MEM;
1415 req.iomem_reg_shift = 0;
1416 req.baud_base = IOC3_BAUD;
1418 req.iomem_base = (unsigned char *) &ioc3->sregs.uarta;
1419 register_serial(&req);
1421 req.iomem_base = (unsigned char *) &ioc3->sregs.uartb;
1422 register_serial(&req);
1425 static int __devinit ioc3_probe(struct pci_dev *pdev,
1426 const struct pci_device_id *ent)
1428 struct net_device *dev = NULL;
1429 struct ioc3_private *ip;
1430 struct ioc3 *ioc3;
1431 unsigned long ioc3_base, ioc3_size;
1432 u32 vendor, model, rev;
1433 int err;
1435 dev = alloc_etherdev(sizeof(struct ioc3_private));
1436 if (!dev)
1437 return -ENOMEM;
1439 err = pci_request_regions(pdev, "ioc3");
1440 if (err)
1441 goto out_free;
1443 SET_MODULE_OWNER(dev);
1444 SET_NETDEV_DEV(dev, &pdev->dev);
1446 ip = dev->priv;
1447 ip->dev = dev;
1449 dev->irq = pdev->irq;
1451 ioc3_base = pci_resource_start(pdev, 0);
1452 ioc3_size = pci_resource_len(pdev, 0);
1453 ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
1454 if (!ioc3) {
1455 printk(KERN_CRIT "ioc3eth(%s): ioremap failed, goodbye.\n",
1456 pdev->slot_name);
1457 err = -ENOMEM;
1458 goto out_res;
1460 ip->regs = ioc3;
1462 #ifdef CONFIG_SERIAL_8250
1463 ioc3_serial_probe(pdev, ioc3);
1464 #endif
1466 spin_lock_init(&ip->ioc3_lock);
1468 ioc3_stop(ip);
1469 ioc3_init(ip);
1471 init_timer(&ip->ioc3_timer);
1472 ioc3_mii_init(ip);
1474 if (ip->phy == -1) {
1475 printk(KERN_CRIT "ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
1476 pdev->slot_name);
1477 err = -ENODEV;
1478 goto out_stop;
1481 ioc3_ssram_disc(ip);
1482 ioc3_get_eaddr(ip);
1484 /* The IOC3-specific entries in the device structure. */
1485 dev->open = ioc3_open;
1486 dev->hard_start_xmit = ioc3_start_xmit;
1487 dev->tx_timeout = ioc3_timeout;
1488 dev->watchdog_timeo = 5 * HZ;
1489 dev->stop = ioc3_close;
1490 dev->get_stats = ioc3_get_stats;
1491 dev->do_ioctl = ioc3_ioctl;
1492 dev->set_multicast_list = ioc3_set_multicast_list;
1494 err = register_netdev(dev);
1495 if (err)
1496 goto out_stop;
1498 vendor = (ip->sw_physid1 << 12) | (ip->sw_physid2 >> 4);
1499 model = (ip->sw_physid2 >> 4) & 0x3f;
1500 rev = ip->sw_physid2 & 0xf;
1501 printk(KERN_INFO "%s: Using PHY %d, vendor 0x%x, model %d, "
1502 "rev %d.\n", dev->name, ip->phy, vendor, model, rev);
1503 printk(KERN_INFO "%s: IOC3 SSRAM has %d kbyte.\n", dev->name,
1504 ip->emcr & EMCR_BUFSIZ ? 128 : 64);
1506 return 0;
1508 out_stop:
1509 ioc3_stop(ip);
1510 free_irq(dev->irq, dev);
1511 ioc3_free_rings(ip);
1512 out_res:
1513 pci_release_regions(pdev);
1514 out_free:
1515 kfree(dev);
1516 return err;
1519 static void __devexit ioc3_remove_one (struct pci_dev *pdev)
1521 struct net_device *dev = pci_get_drvdata(pdev);
1522 struct ioc3_private *ip = dev->priv;
1523 struct ioc3 *ioc3 = ip->regs;
1525 unregister_netdev(dev);
1526 iounmap(ioc3);
1527 pci_release_regions(pdev);
1528 kfree(dev);
/* PCI IDs we bind to: any SGI IOC3, regardless of subsystem IDs.  */
static struct pci_device_id ioc3_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ioc3_pci_tbl);
/* PCI driver glue; one instance serves every IOC3 in the system.  */
static struct pci_driver ioc3_driver = {
	.name		= "ioc3-eth",
	.id_table	= ioc3_pci_tbl,
	.probe		= ioc3_probe,
	.remove		= __devexit_p(ioc3_remove_one),
};
/* Module entry point: hand the driver to the PCI core.  */
static int __init ioc3_init_module(void)
{
	return pci_module_init(&ioc3_driver);
}
/* Module exit point: detach from the PCI core.  */
static void __exit ioc3_cleanup_module(void)
{
	pci_unregister_driver(&ioc3_driver);
}
1554 static int
1555 ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1557 unsigned long data;
1558 struct ioc3_private *ip = dev->priv;
1559 struct ioc3 *ioc3 = ip->regs;
1560 unsigned int len;
1561 struct ioc3_etxd *desc;
1562 int produce;
1564 spin_lock_irq(&ip->ioc3_lock);
1566 data = (unsigned long) skb->data;
1567 len = skb->len;
1569 produce = ip->tx_pi;
1570 desc = &ip->txr[produce];
1572 if (len <= 104) {
1573 /* Short packet, let's copy it directly into the ring. */
1574 memcpy(desc->data, skb->data, skb->len);
1575 if (len < ETH_ZLEN) {
1576 /* Very short packet, pad with zeros at the end. */
1577 memset(desc->data + len, 0, ETH_ZLEN - len);
1578 len = ETH_ZLEN;
1580 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_D0V);
1581 desc->bufcnt = cpu_to_be32(len);
1582 } else if ((data ^ (data + len)) & 0x4000) {
1583 unsigned long b2, s1, s2;
1585 b2 = (data | 0x3fffUL) + 1UL;
1586 s1 = b2 - data;
1587 s2 = data + len - b2;
1589 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE |
1590 ETXD_B1V | ETXD_B2V);
1591 desc->bufcnt = cpu_to_be32((s1 << ETXD_B1CNT_SHIFT)
1592 | (s2 << ETXD_B2CNT_SHIFT));
1593 desc->p1 = cpu_to_be64((0xa5UL << 56) |
1594 (data & TO_PHYS_MASK));
1595 desc->p2 = cpu_to_be64((0xa5UL << 56) |
1596 (data & TO_PHYS_MASK));
1597 } else {
1598 /* Normal sized packet that doesn't cross a page boundary. */
1599 desc->cmd = cpu_to_be32(len | ETXD_INTWHENDONE | ETXD_B1V);
1600 desc->bufcnt = cpu_to_be32(len << ETXD_B1CNT_SHIFT);
1601 desc->p1 = cpu_to_be64((0xa5UL << 56) |
1602 (data & TO_PHYS_MASK));
1605 BARRIER();
1607 dev->trans_start = jiffies;
1608 ip->tx_skbs[produce] = skb; /* Remember skb */
1609 produce = (produce + 1) & 127;
1610 ip->tx_pi = produce;
1611 ioc3->etpir = produce << 7; /* Fire ... */
1613 ip->txqlen++;
1615 if (ip->txqlen > 127)
1616 netif_stop_queue(dev);
1618 spin_unlock_irq(&ip->ioc3_lock);
1620 return 0;
/*
 * Transmit watchdog, called by the core when the tx queue stalls for
 * longer than dev->watchdog_timeo.  Recover with a full chip reset
 * followed by PHY re-initialisation, then restart the queue.
 */
static void ioc3_timeout(struct net_device *dev)
{
	struct ioc3_private *ip = dev->priv;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);

	ioc3_stop(ip);
	ioc3_init(ip);
	ioc3_mii_init(ip);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}
1638 * Given a multicast ethernet address, this routine calculates the
1639 * address's bit index in the logical address filter mask
1642 static inline unsigned int
1643 ioc3_hash(const unsigned char *addr)
1645 unsigned int temp = 0;
1646 unsigned char byte;
1647 u32 crc;
1648 int bits;
1650 crc = ether_crc_le(ETH_ALEN, addr);
1652 crc &= 0x3f; /* bit reverse lowest 6 bits for hash index */
1653 for (bits = 6; --bits >= 0; ) {
1654 temp <<= 1;
1655 temp |= (crc & 0x1);
1656 crc >>= 1;
1659 return temp;
1663 /* We provide both the mii-tools and the ethtool ioctls. */
1664 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1666 struct ioc3_private *ip = dev->priv;
1667 struct ethtool_cmd *ep_user = (struct ethtool_cmd *) rq->ifr_data;
1668 u16 *data = (u16 *)&rq->ifr_data;
1669 struct ioc3 *ioc3 = ip->regs;
1670 struct ethtool_cmd ecmd;
1672 switch (cmd) {
1673 case SIOCGMIIPHY: /* Get the address of the PHY in use. */
1674 if (ip->phy == -1)
1675 return -ENODEV;
1676 data[0] = ip->phy;
1677 return 0;
1679 case SIOCGMIIREG: { /* Read a PHY register. */
1680 unsigned int phy = data[0];
1681 unsigned int reg = data[1];
1683 if (phy > 0x1f || reg > 0x1f)
1684 return -EINVAL;
1686 spin_lock_irq(&ip->ioc3_lock);
1687 while (ioc3->micr & MICR_BUSY);
1688 ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG;
1689 while (ioc3->micr & MICR_BUSY);
1690 data[3] = (ioc3->midr_r & MIDR_DATA_MASK);
1691 spin_unlock_irq(&ip->ioc3_lock);
1693 return 0;
1695 case SIOCSMIIREG: /* Write a PHY register. */
1696 phy = data[0];
1697 reg = data[1];
1699 if (!capable(CAP_NET_ADMIN))
1700 return -EPERM;
1702 if (phy > 0x1f || reg > 0x1f)
1703 return -EINVAL;
1705 spin_lock_irq(&ip->ioc3_lock);
1706 while (ioc3->micr & MICR_BUSY);
1707 ioc3->midr_w = data[2];
1708 ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg;
1709 while (ioc3->micr & MICR_BUSY);
1710 spin_unlock_irq(&ip->ioc3_lock);
1712 return 0;
1714 case SIOCETHTOOL:
1715 if (copy_from_user(&ecmd, ep_user, sizeof(ecmd)))
1716 return -EFAULT;
1718 if (ecmd.cmd == ETHTOOL_GSET) {
1719 ecmd.supported =
1720 (SUPPORTED_10baseT_Half |
1721 SUPPORTED_10baseT_Full |
1722 SUPPORTED_100baseT_Half |
1723 SUPPORTED_100baseT_Full | SUPPORTED_Autoneg |
1724 SUPPORTED_TP | SUPPORTED_MII);
1726 ecmd.port = PORT_TP;
1727 ecmd.transceiver = XCVR_INTERNAL;
1728 ecmd.phy_address = ip->phy;
1730 /* Record PHY settings. */
1731 spin_lock_irq(&ip->ioc3_lock);
1732 ip->sw_bmcr = mii_read(ip, MII_BMCR);
1733 ip->sw_lpa = mii_read(ip, MII_LPA);
1734 spin_unlock_irq(&ip->ioc3_lock);
1735 if (ip->sw_bmcr & BMCR_ANENABLE) {
1736 ecmd.autoneg = AUTONEG_ENABLE;
1737 ecmd.speed = (ip->sw_lpa &
1738 (LPA_100HALF | LPA_100FULL)) ?
1739 SPEED_100 : SPEED_10;
1740 if (ecmd.speed == SPEED_100)
1741 ecmd.duplex = (ip->sw_lpa & (LPA_100FULL)) ?
1742 DUPLEX_FULL : DUPLEX_HALF;
1743 else
1744 ecmd.duplex = (ip->sw_lpa & (LPA_10FULL)) ?
1745 DUPLEX_FULL : DUPLEX_HALF;
1746 } else {
1747 ecmd.autoneg = AUTONEG_DISABLE;
1748 ecmd.speed = (ip->sw_bmcr & BMCR_SPEED100) ?
1749 SPEED_100 : SPEED_10;
1750 ecmd.duplex = (ip->sw_bmcr & BMCR_FULLDPLX) ?
1751 DUPLEX_FULL : DUPLEX_HALF;
1753 if (copy_to_user(ep_user, &ecmd, sizeof(ecmd)))
1754 return -EFAULT;
1755 return 0;
1756 } else if (ecmd.cmd == ETHTOOL_SSET) {
1757 if (!capable(CAP_NET_ADMIN))
1758 return -EPERM;
1760 /* Verify the settings we care about. */
1761 if (ecmd.autoneg != AUTONEG_ENABLE &&
1762 ecmd.autoneg != AUTONEG_DISABLE)
1763 return -EINVAL;
1765 if (ecmd.autoneg == AUTONEG_DISABLE &&
1766 ((ecmd.speed != SPEED_100 &&
1767 ecmd.speed != SPEED_10) ||
1768 (ecmd.duplex != DUPLEX_HALF &&
1769 ecmd.duplex != DUPLEX_FULL)))
1770 return -EINVAL;
1772 /* Ok, do it to it. */
1773 del_timer(&ip->ioc3_timer);
1774 spin_lock_irq(&ip->ioc3_lock);
1775 ioc3_start_auto_negotiation(ip, &ecmd);
1776 spin_unlock_irq(&ip->ioc3_lock);
1778 return 0;
1779 } else
1780 default:
1781 return -EOPNOTSUPP;
1784 return -EOPNOTSUPP;
/*
 * Install the current rx filter state (promiscuous flag and multicast
 * hash) into the chip.  The tx queue is stopped for the duration to
 * serialise against the transmit path; dummy reads of emcr flush the
 * posted register writes.
 */
static void ioc3_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list *dmi = dev->mc_list;
	struct ioc3_private *ip = dev->priv;
	struct ioc3 *ioc3 = ip->regs;
	u64 ehar = 0;
	int i;

	netif_stop_queue(dev);				/* Lock out others. */

	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous.  */
		/* Unconditionally log net taps. */
		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
		ip->emcr |= EMCR_PROMISC;
		ioc3->emcr = ip->emcr;
		ioc3->emcr;
	} else {
		ip->emcr &= ~EMCR_PROMISC;
		ioc3->emcr = ip->emcr;			/* Clear promiscuous. */
		ioc3->emcr;

		if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
			/* Too many for hashing to make sense or we want all
			   multicast packets anyway,  so skip computing all the
			   hashes and just accept all packets.  */
			ip->ehar_h = 0xffffffff;
			ip->ehar_l = 0xffffffff;
		} else {
			for (i = 0; i < dev->mc_count; i++) {
				char *addr = dmi->dmi_addr;
				dmi = dmi->next;

				/* Skip unicast entries in the list.  */
				if (!(*addr & 1))
					continue;

				ehar |= (1UL << ioc3_hash(addr));
			}
			ip->ehar_h = ehar >> 32;
			ip->ehar_l = ehar & 0xffffffff;
		}
		ioc3->ehar_h = ip->ehar_h;
		ioc3->ehar_l = ip->ehar_l;
	}

	netif_wake_queue(dev);			/* Let us get going again. */
}
/* Module metadata and the init/exit entry points.  */
MODULE_AUTHOR("Ralf Baechle <ralf@oss.sgi.com>");
MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
MODULE_LICENSE("GPL");

module_init(ioc3_init_module);
module_exit(ioc3_cleanup_module);