2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
8 * Copyright (C) 1999, 2000, 2001, 2003 Ralf Baechle
9 * Copyright (C) 1995, 1999, 2000, 2001 by Silicon Graphics, Inc.
12 * o IOC3 ASIC specification 4.51, 1996-04-18
13 * o IEEE 802.3 specification, 2000 edition
14 * o DP38840A Specification, National Semiconductor, March 1997
18 * o Handle allocation failures in ioc3_alloc_skb() more gracefully.
19 * o Handle allocation failures in ioc3_init_rings().
20 * o Use prefetching for large packets. What is a good lower limit for
22 * o We're probably allocating a bit too much memory.
23 * o Use hardware checksums.
24 * o Convert to using a IOC3 meta driver.
25 * o Which PHYs might possibly be attached to the IOC3 in real live,
26 * which workarounds are required for them? Do we ever have Lucent's?
27 * o For the 2.5 branch kill the mii-tool ioctls.
29 #include <linux/config.h>
30 #include <linux/init.h>
31 #include <linux/delay.h>
32 #include <linux/kernel.h>
34 #include <linux/errno.h>
35 #include <linux/module.h>
36 #include <linux/pci.h>
37 #include <linux/crc32.h>
39 #ifdef CONFIG_SERIAL_8250
40 #include <linux/serial.h>
41 #include <asm/serial.h>
42 #define IOC3_BAUD (22000000 / (3*16))
43 #define IOC3_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/ethtool.h>
49 #include <linux/skbuff.h>
50 #include <linux/dp83840.h>
52 #include <asm/byteorder.h>
54 #include <asm/pgtable.h>
55 #include <asm/uaccess.h>
56 #include <asm/sn/types.h>
57 #include <asm/sn/sn0/addrs.h>
58 #include <asm/sn/sn0/hubni.h>
59 #include <asm/sn/sn0/hubio.h>
60 #include <asm/sn/klconfig.h>
61 #include <asm/sn/ioc3.h>
62 #include <asm/sn/sn0/ip27.h>
63 #include <asm/pci/bridge.h>
66 * 64 RX buffers. This is tunable in the range of 16 <= x < 512. The
67 * value must be a power of two.
/* Timer state engine: states of the PHY auto-negotiation watchdog timer. */
enum ioc3_timer_state {
	arbwait  = 0,	/* Waiting for auto negotiation to complete.          */
	lupwait  = 1,	/* Auto-neg complete, awaiting link-up status.        */
	ltrywait = 2,	/* Forcing try of all modes, from fastest to slowest. */
	asleep   = 3,	/* Time inactive.                                     */
};
79 /* Private per NIC data of the driver. */
83 unsigned long *rxr
; /* pointer to receiver ring */
84 struct ioc3_etxd
*txr
;
85 struct sk_buff
*rx_skbs
[512];
86 struct sk_buff
*tx_skbs
[128];
87 struct net_device_stats stats
;
88 int rx_ci
; /* RX consumer index */
89 int rx_pi
; /* RX producer index */
90 int tx_ci
; /* TX consumer index */
91 int tx_pi
; /* TX producer index */
93 u32 emcr
, ehar_h
, ehar_l
;
95 struct net_device
*dev
;
97 /* Members used by autonegotiation */
98 struct timer_list ioc3_timer
;
99 enum ioc3_timer_state timer_state
; /* State of auto-neg timer. */
100 unsigned int timer_ticks
; /* Number of clicks at each state */
101 unsigned short sw_bmcr
; /* sw copy of MII config register */
102 unsigned short sw_bmsr
; /* sw copy of MII status register */
103 unsigned short sw_physid1
; /* sw copy of PHYSID1 */
104 unsigned short sw_physid2
; /* sw copy of PHYSID2 */
105 unsigned short sw_advertise
; /* sw copy of ADVERTISE */
106 unsigned short sw_lpa
; /* sw copy of LPA */
107 unsigned short sw_csconfig
; /* sw copy of CSCONFIG */
110 static int ioc3_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
);
111 static void ioc3_set_multicast_list(struct net_device
*dev
);
112 static int ioc3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
);
113 static void ioc3_timeout(struct net_device
*dev
);
114 static inline unsigned int ioc3_hash(const unsigned char *addr
);
115 static inline void ioc3_stop(struct ioc3_private
*ip
);
116 static void ioc3_init(struct ioc3_private
*ip
);
118 static const char ioc3_str
[] = "IOC3 Ethernet";
120 /* We use this to acquire receive skb's that we can DMA directly into. */
121 #define ALIGNED_RX_SKB_ADDR(addr) \
122 ((((unsigned long)(addr) + (128 - 1)) & ~(128 - 1)) - (unsigned long)(addr))
124 #define ioc3_alloc_skb(__length, __gfp_flags) \
125 ({ struct sk_buff *__skb; \
126 __skb = alloc_skb((__length) + 128, (__gfp_flags)); \
128 int __offset = ALIGNED_RX_SKB_ADDR(__skb->data); \
130 skb_reserve(__skb, __offset); \
135 /* BEWARE: The IOC3 documentation documents the size of rx buffers as
136 1644 while it's actually 1664. This one was nasty to track down ... */
138 #define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + 128)
140 /* DMA barrier to separate cached and uncached accesses. */
142 __asm__("sync" ::: "memory")
145 #define IOC3_SIZE 0x100000
147 #define ioc3_r(reg) \
154 #define ioc3_w(reg,val) \
156 (ioc3->reg = (val)); \
160 mcr_pack(u32 pulse
, u32 sample
)
162 return (pulse
<< 10) | (sample
<< 2);
166 nic_wait(struct ioc3
*ioc3
)
172 } while (!(mcr
& 2));
178 nic_reset(struct ioc3
*ioc3
)
182 ioc3_w(mcr
, mcr_pack(500, 65));
183 presence
= nic_wait(ioc3
);
185 ioc3_w(mcr
, mcr_pack(0, 500));
192 nic_read_bit(struct ioc3
*ioc3
)
196 ioc3_w(mcr
, mcr_pack(6, 13));
197 result
= nic_wait(ioc3
);
198 ioc3_w(mcr
, mcr_pack(0, 100));
205 nic_write_bit(struct ioc3
*ioc3
, int bit
)
208 ioc3_w(mcr
, mcr_pack(6, 110));
210 ioc3_w(mcr
, mcr_pack(80, 30));
216 * Read a byte from an iButton device
219 nic_read_byte(struct ioc3
*ioc3
)
224 for (i
= 0; i
< 8; i
++)
225 result
= (result
>> 1) | (nic_read_bit(ioc3
) << 7);
/*
 * Write a byte to an iButton device, LSB first.
 */
static void nic_write_byte(struct ioc3 *ioc3, int byte)
{
	int i, bit;

	for (i = 8; i; i--) {
		bit = byte & 1;
		byte >>= 1;

		nic_write_bit(ioc3, bit);
	}
}
247 nic_find(struct ioc3
*ioc3
, int *last
)
249 int a
, b
, index
, disc
;
254 nic_write_byte(ioc3
, 0xf0);
256 /* Algorithm from ``Book of iButton Standards''. */
257 for (index
= 0, disc
= 0; index
< 64; index
++) {
258 a
= nic_read_bit(ioc3
);
259 b
= nic_read_bit(ioc3
);
262 printk("NIC search failed (not fatal).\n");
268 if (index
== *last
) {
269 address
|= 1UL << index
;
270 } else if (index
> *last
) {
271 address
&= ~(1UL << index
);
273 } else if ((address
& (1UL << index
)) == 0)
275 nic_write_bit(ioc3
, address
& (1UL << index
));
279 address
|= 1UL << index
;
281 address
&= ~(1UL << index
);
282 nic_write_bit(ioc3
, a
);
292 static int nic_init(struct ioc3
*ioc3
)
303 reg
= nic_find(ioc3
, &save
);
305 switch (reg
& 0xff) {
311 /* Let the caller try again. */
320 nic_write_byte(ioc3
, 0x55);
321 for (i
= 0; i
< 8; i
++)
322 nic_write_byte(ioc3
, (reg
>> (i
<< 3)) & 0xff);
324 reg
>>= 8; /* Shift out type. */
325 for (i
= 0; i
< 6; i
++) {
326 serial
[i
] = reg
& 0xff;
333 printk("Found %s NIC", type
);
334 if (type
!= "unknown") {
335 printk (" registration number %02x:%02x:%02x:%02x:%02x:%02x,"
336 " CRC %02x", serial
[0], serial
[1], serial
[2],
337 serial
[3], serial
[4], serial
[5], crc
);
345 * Read the NIC (Number-In-a-Can) device used to store the MAC address on
346 * SN0 / SN00 nodeboards and PCI cards.
348 static void ioc3_get_eaddr_nic(struct ioc3_private
*ip
)
350 struct ioc3
*ioc3
= ip
->regs
;
352 int tries
= 2; /* There may be some problem with the battery? */
355 ioc3_w(gpcr_s
, (1 << 21));
364 printk("Failed to read MAC address\n");
369 nic_write_byte(ioc3
, 0xf0);
370 nic_write_byte(ioc3
, 0x00);
371 nic_write_byte(ioc3
, 0x00);
373 for (i
= 13; i
>= 0; i
--)
374 nic
[i
] = nic_read_byte(ioc3
);
376 for (i
= 2; i
< 8; i
++)
377 ip
->dev
->dev_addr
[i
- 2] = nic
[i
];
381 * Ok, this is hosed by design. It's necessary to know what machine the
382 * NIC is in in order to know how to read the NIC address. We also have
383 * to know if it's a PCI card or a NIC in on the node board ...
385 static void ioc3_get_eaddr(struct ioc3_private
*ip
)
390 ioc3_get_eaddr_nic(ip
);
392 printk("Ethernet address is ");
393 for (i
= 0; i
< 6; i
++) {
394 printk("%02x", ip
->dev
->dev_addr
[i
]);
403 * Caller must hold the ioc3_lock ever for MII readers. This is also
404 * used to protect the transmitter side but it's low contention.
406 static u16
mii_read(struct ioc3_private
*ip
, int reg
)
408 struct ioc3
*ioc3
= ip
->regs
;
411 while (ioc3
->micr
& MICR_BUSY
);
412 ioc3
->micr
= (phy
<< MICR_PHYADDR_SHIFT
) | reg
| MICR_READTRIG
;
413 while (ioc3
->micr
& MICR_BUSY
);
415 return ioc3
->midr_r
& MIDR_DATA_MASK
;
418 static void mii_write(struct ioc3_private
*ip
, int reg
, u16 data
)
420 struct ioc3
*ioc3
= ip
->regs
;
423 while (ioc3
->micr
& MICR_BUSY
);
425 ioc3
->micr
= (phy
<< MICR_PHYADDR_SHIFT
) | reg
;
426 while (ioc3
->micr
& MICR_BUSY
);
429 static int ioc3_mii_init(struct ioc3_private
*ip
);
431 static struct net_device_stats
*ioc3_get_stats(struct net_device
*dev
)
433 struct ioc3_private
*ip
= dev
->priv
;
434 struct ioc3
*ioc3
= ip
->regs
;
436 ip
->stats
.collisions
+= (ioc3
->etcdc
& ETCDC_COLLCNT_MASK
);
441 ioc3_rx(struct ioc3_private
*ip
)
443 struct sk_buff
*skb
, *new_skb
;
444 struct ioc3
*ioc3
= ip
->regs
;
445 int rx_entry
, n_entry
, len
;
446 struct ioc3_erxbuf
*rxb
;
450 rxr
= (unsigned long *) ip
->rxr
; /* Ring base */
451 rx_entry
= ip
->rx_ci
; /* RX consume index */
454 skb
= ip
->rx_skbs
[rx_entry
];
455 rxb
= (struct ioc3_erxbuf
*) (skb
->data
- RX_OFFSET
);
456 w0
= be32_to_cpu(rxb
->w0
);
458 while (w0
& ERXBUF_V
) {
459 err
= be32_to_cpu(rxb
->err
); /* It's valid ... */
460 if (err
& ERXBUF_GOODPKT
) {
461 len
= ((w0
>> ERXBUF_BYTECNT_SHIFT
) & 0x7ff) - 4;
463 skb
->protocol
= eth_type_trans(skb
, ip
->dev
);
465 new_skb
= ioc3_alloc_skb(RX_BUF_ALLOC_SIZE
, GFP_ATOMIC
);
467 /* Ouch, drop packet and just recycle packet
468 to keep the ring filled. */
469 ip
->stats
.rx_dropped
++;
475 ip
->rx_skbs
[rx_entry
] = NULL
; /* Poison */
477 new_skb
->dev
= ip
->dev
;
479 /* Because we reserve afterwards. */
480 skb_put(new_skb
, (1664 + RX_OFFSET
));
481 rxb
= (struct ioc3_erxbuf
*) new_skb
->data
;
482 skb_reserve(new_skb
, RX_OFFSET
);
484 ip
->dev
->last_rx
= jiffies
;
485 ip
->stats
.rx_packets
++; /* Statistics */
486 ip
->stats
.rx_bytes
+= len
;
488 /* The frame is invalid and the skb never
489 reached the network layer so we can just
492 ip
->stats
.rx_errors
++;
494 if (err
& ERXBUF_CRCERR
) /* Statistics */
495 ip
->stats
.rx_crc_errors
++;
496 if (err
& ERXBUF_FRAMERR
)
497 ip
->stats
.rx_frame_errors
++;
499 ip
->rx_skbs
[n_entry
] = new_skb
;
500 rxr
[n_entry
] = cpu_to_be64((0xa5UL
<< 56) |
501 ((unsigned long) rxb
& TO_PHYS_MASK
));
502 rxb
->w0
= 0; /* Clear valid flag */
503 n_entry
= (n_entry
+ 1) & 511; /* Update erpir */
505 /* Now go on to the next ring entry. */
506 rx_entry
= (rx_entry
+ 1) & 511;
507 skb
= ip
->rx_skbs
[rx_entry
];
508 rxb
= (struct ioc3_erxbuf
*) (skb
->data
- RX_OFFSET
);
509 w0
= be32_to_cpu(rxb
->w0
);
511 ioc3
->erpir
= (n_entry
<< 3) | ERPIR_ARM
;
513 ip
->rx_ci
= rx_entry
;
517 ioc3_tx(struct ioc3_private
*ip
)
519 unsigned long packets
, bytes
;
520 struct ioc3
*ioc3
= ip
->regs
;
521 int tx_entry
, o_entry
;
525 spin_lock(&ip
->ioc3_lock
);
528 tx_entry
= (etcir
>> 7) & 127;
533 while (o_entry
!= tx_entry
) {
535 skb
= ip
->tx_skbs
[o_entry
];
537 dev_kfree_skb_irq(skb
);
538 ip
->tx_skbs
[o_entry
] = NULL
;
540 o_entry
= (o_entry
+ 1) & 127; /* Next */
542 etcir
= ioc3
->etcir
; /* More pkts sent? */
543 tx_entry
= (etcir
>> 7) & 127;
546 ip
->stats
.tx_packets
+= packets
;
547 ip
->stats
.tx_bytes
+= bytes
;
548 ip
->txqlen
-= packets
;
550 if (ip
->txqlen
< 128)
551 netif_wake_queue(ip
->dev
);
554 spin_unlock(&ip
->ioc3_lock
);
558 * Deal with fatal IOC3 errors. This condition might be caused by a hard or
559 * software problems, so we should try to recover
560 * more gracefully if this ever happens. In theory we might be flooded
561 * with such error interrupts if something really goes wrong, so we might
562 * also consider to take the interface down.
565 ioc3_error(struct ioc3_private
*ip
, u32 eisr
)
567 struct net_device
*dev
= ip
->dev
;
568 unsigned char *iface
= dev
->name
;
570 if (eisr
& EISR_RXOFLO
)
571 printk(KERN_ERR
"%s: RX overflow.\n", iface
);
572 if (eisr
& EISR_RXBUFOFLO
)
573 printk(KERN_ERR
"%s: RX buffer overflow.\n", iface
);
574 if (eisr
& EISR_RXMEMERR
)
575 printk(KERN_ERR
"%s: RX PCI error.\n", iface
);
576 if (eisr
& EISR_RXPARERR
)
577 printk(KERN_ERR
"%s: RX SSRAM parity error.\n", iface
);
578 if (eisr
& EISR_TXBUFUFLO
)
579 printk(KERN_ERR
"%s: TX buffer underflow.\n", iface
);
580 if (eisr
& EISR_TXMEMERR
)
581 printk(KERN_ERR
"%s: TX PCI error.\n", iface
);
587 dev
->trans_start
= jiffies
;
588 netif_wake_queue(dev
);
591 /* The interrupt handler does all of the Rx thread work and cleans up
592 after the Tx thread. */
593 static irqreturn_t
ioc3_interrupt(int irq
, void *_dev
, struct pt_regs
*regs
)
595 struct net_device
*dev
= (struct net_device
*)_dev
;
596 struct ioc3_private
*ip
= dev
->priv
;
597 struct ioc3
*ioc3
= ip
->regs
;
598 const u32 enabled
= EISR_RXTIMERINT
| EISR_RXOFLO
| EISR_RXBUFOFLO
|
599 EISR_RXMEMERR
| EISR_RXPARERR
| EISR_TXBUFUFLO
|
600 EISR_TXEXPLICIT
| EISR_TXMEMERR
;
603 eisr
= ioc3
->eisr
& enabled
;
607 ioc3
->eisr
; /* Flush */
609 if (eisr
& (EISR_RXOFLO
| EISR_RXBUFOFLO
| EISR_RXMEMERR
|
610 EISR_RXPARERR
| EISR_TXBUFUFLO
| EISR_TXMEMERR
))
611 ioc3_error(ip
, eisr
);
612 if (eisr
& EISR_RXTIMERINT
)
614 if (eisr
& EISR_TXEXPLICIT
)
617 eisr
= ioc3
->eisr
& enabled
;
623 * Auto negotiation. The scheme is very simple. We have a timer routine that
624 * keeps watching the auto negotiation process as it progresses. The DP83840
625 * is first told to start doing it's thing, we set up the time and place the
626 * timer state machine in it's initial state.
628 * Here the timer peeks at the DP83840 status registers at each click to see
629 * if the auto negotiation has completed, we assume here that the DP83840 PHY
630 * will time out at some point and just tell us what (didn't) happen. For
631 * complete coverage we only allow so many of the ticks at this level to run,
632 * when this has expired we print a warning message and try another strategy.
633 * This "other" strategy is to force the interface into various speed/duplex
634 * configurations and we stop when we see a link-up condition before the
635 * maximum number of "peek" ticks have occurred.
637 * Once a valid link status has been detected we configure the IOC3 to speak
638 * the most efficient protocol we could get a clean link for. The priority
639 * for link configurations, highest first is:
641 * 100 Base-T Full Duplex
642 * 100 Base-T Half Duplex
643 * 10 Base-T Full Duplex
644 * 10 Base-T Half Duplex
646 * We start a new timer now, after a successful auto negotiation status has
647 * been detected. This timer just waits for the link-up bit to get set in
648 * the BMCR of the DP83840. When this occurs we print a kernel log message
649 * describing the link type in use and the fact that it is up.
651 * If a fatal error of some sort is signalled and detected in the interrupt
652 * service routine, and the chip is reset, or the link is ifconfig'd down
653 * and then back up, this entire process repeats itself all over again.
655 static int ioc3_try_next_permutation(struct ioc3_private
*ip
)
657 ip
->sw_bmcr
= mii_read(ip
, MII_BMCR
);
659 /* Downgrade from full to half duplex. Only possible via ethtool. */
660 if (ip
->sw_bmcr
& BMCR_FULLDPLX
) {
661 ip
->sw_bmcr
&= ~BMCR_FULLDPLX
;
662 mii_write(ip
, MII_BMCR
, ip
->sw_bmcr
);
667 /* Downgrade from 100 to 10. */
668 if (ip
->sw_bmcr
& BMCR_SPEED100
) {
669 ip
->sw_bmcr
&= ~BMCR_SPEED100
;
670 mii_write(ip
, MII_BMCR
, ip
->sw_bmcr
);
675 /* We've tried everything. */
680 ioc3_display_link_mode(struct ioc3_private
*ip
)
684 ip
->sw_lpa
= mii_read(ip
, MII_LPA
);
686 if (ip
->sw_lpa
& (LPA_100HALF
| LPA_100FULL
)) {
687 if (ip
->sw_lpa
& LPA_100FULL
)
688 tmode
= "100Mb/s, Full Duplex";
690 tmode
= "100Mb/s, Half Duplex";
692 if (ip
->sw_lpa
& LPA_10FULL
)
693 tmode
= "10Mb/s, Full Duplex";
695 tmode
= "10Mb/s, Half Duplex";
698 printk(KERN_INFO
"%s: Link is up at %s.\n", ip
->dev
->name
, tmode
);
702 ioc3_display_forced_link_mode(struct ioc3_private
*ip
)
704 char *speed
= "", *duplex
= "";
706 ip
->sw_bmcr
= mii_read(ip
, MII_BMCR
);
707 if (ip
->sw_bmcr
& BMCR_SPEED100
)
711 if (ip
->sw_bmcr
& BMCR_FULLDPLX
)
712 duplex
= "Full Duplex.\n";
714 duplex
= "Half Duplex.\n";
716 printk(KERN_INFO
"%s: Link has been forced up at %s%s", ip
->dev
->name
,
720 static int ioc3_set_link_modes(struct ioc3_private
*ip
)
722 struct ioc3
*ioc3
= ip
->regs
;
726 * All we care about is making sure the bigmac tx_cfg has a
727 * proper duplex setting.
729 if (ip
->timer_state
== arbwait
) {
730 ip
->sw_lpa
= mii_read(ip
, MII_LPA
);
731 if (!(ip
->sw_lpa
& (LPA_10HALF
| LPA_10FULL
|
732 LPA_100HALF
| LPA_100FULL
)))
734 if (ip
->sw_lpa
& LPA_100FULL
)
736 else if (ip
->sw_lpa
& LPA_100HALF
)
738 else if (ip
->sw_lpa
& LPA_10FULL
)
743 /* Forcing a link mode. */
744 ip
->sw_bmcr
= mii_read(ip
, MII_BMCR
);
745 if (ip
->sw_bmcr
& BMCR_FULLDPLX
)
752 ip
->emcr
|= EMCR_DUPLEX
;
754 ip
->emcr
&= ~EMCR_DUPLEX
;
756 ioc3
->emcr
= ip
->emcr
;
766 static int is_lucent_phy(struct ioc3_private
*ip
)
768 unsigned short mr2
, mr3
;
771 mr2
= mii_read(ip
, MII_PHYSID1
);
772 mr3
= mii_read(ip
, MII_PHYSID2
);
773 if ((mr2
& 0xffff) == 0x0180 && ((mr3
& 0xffff) >> 10) == 0x1d) {
780 static void ioc3_timer(unsigned long data
)
782 struct ioc3_private
*ip
= (struct ioc3_private
*) data
;
783 int restart_timer
= 0;
786 switch (ip
->timer_state
) {
789 * Only allow for 5 ticks, thats 10 seconds and much too
790 * long to wait for arbitration to complete.
792 if (ip
->timer_ticks
>= 10) {
793 /* Enter force mode. */
795 ip
->sw_bmcr
= mii_read(ip
, MII_BMCR
);
796 printk(KERN_NOTICE
"%s: Auto-Negotiation unsuccessful,"
797 " trying force link mode\n", ip
->dev
->name
);
798 ip
->sw_bmcr
= BMCR_SPEED100
;
799 mii_write(ip
, MII_BMCR
, ip
->sw_bmcr
);
801 if (!is_lucent_phy(ip
)) {
803 * OK, seems we need do disable the transceiver
804 * for the first tick to make sure we get an
805 * accurate link state at the second tick.
807 ip
->sw_csconfig
= mii_read(ip
, MII_CSCONFIG
);
808 ip
->sw_csconfig
&= ~(CSCONFIG_TCVDISAB
);
809 mii_write(ip
, MII_CSCONFIG
, ip
->sw_csconfig
);
811 ip
->timer_state
= ltrywait
;
815 /* Anything interesting happen? */
816 ip
->sw_bmsr
= mii_read(ip
, MII_BMSR
);
817 if (ip
->sw_bmsr
& BMSR_ANEGCOMPLETE
) {
820 /* Just what we've been waiting for... */
821 ret
= ioc3_set_link_modes(ip
);
823 /* Ooops, something bad happened, go to
826 * XXX Broken hubs which don't support
827 * XXX 802.3u auto-negotiation make this
828 * XXX happen as well.
834 * Success, at least so far, advance our state
837 ip
->timer_state
= lupwait
;
847 * Auto negotiation was successful and we are awaiting a
848 * link up status. I have decided to let this timer run
849 * forever until some sort of error is signalled, reporting
850 * a message to the user at 10 second intervals.
852 ip
->sw_bmsr
= mii_read(ip
, MII_BMSR
);
853 if (ip
->sw_bmsr
& BMSR_LSTATUS
) {
855 * Wheee, it's up, display the link mode in use and put
856 * the timer to sleep.
858 ioc3_display_link_mode(ip
);
859 ip
->timer_state
= asleep
;
862 if (ip
->timer_ticks
>= 10) {
863 printk(KERN_NOTICE
"%s: Auto negotiation successful, link still "
864 "not completely up.\n", ip
->dev
->name
);
875 * Making the timeout here too long can make it take
876 * annoyingly long to attempt all of the link mode
877 * permutations, but then again this is essentially
878 * error recovery code for the most part.
880 ip
->sw_bmsr
= mii_read(ip
, MII_BMSR
);
881 ip
->sw_csconfig
= mii_read(ip
, MII_CSCONFIG
);
882 if (ip
->timer_ticks
== 1) {
883 if (!is_lucent_phy(ip
)) {
885 * Re-enable transceiver, we'll re-enable the
886 * transceiver next tick, then check link state
887 * on the following tick.
889 ip
->sw_csconfig
|= CSCONFIG_TCVDISAB
;
890 mii_write(ip
, MII_CSCONFIG
, ip
->sw_csconfig
);
895 if (ip
->timer_ticks
== 2) {
896 if (!is_lucent_phy(ip
)) {
897 ip
->sw_csconfig
&= ~(CSCONFIG_TCVDISAB
);
898 mii_write(ip
, MII_CSCONFIG
, ip
->sw_csconfig
);
903 if (ip
->sw_bmsr
& BMSR_LSTATUS
) {
904 /* Force mode selection success. */
905 ioc3_display_forced_link_mode(ip
);
906 ioc3_set_link_modes(ip
); /* XXX error? then what? */
907 ip
->timer_state
= asleep
;
910 if (ip
->timer_ticks
>= 4) { /* 6 seconds or so... */
913 ret
= ioc3_try_next_permutation(ip
);
916 * Aieee, tried them all, reset the
917 * chip and try all over again.
919 printk(KERN_NOTICE
"%s: Link down, "
926 if (!is_lucent_phy(ip
)) {
927 ip
->sw_csconfig
= mii_read(ip
,
929 ip
->sw_csconfig
|= CSCONFIG_TCVDISAB
;
930 mii_write(ip
, MII_CSCONFIG
,
943 /* Can't happens.... */
944 printk(KERN_ERR
"%s: Aieee, link timer is asleep but we got "
945 "one anyways!\n", ip
->dev
->name
);
948 ip
->timer_state
= asleep
; /* foo on you */
953 ip
->ioc3_timer
.expires
= jiffies
+ ((12 * HZ
)/10); /* 1.2s */
954 add_timer(&ip
->ioc3_timer
);
959 ioc3_start_auto_negotiation(struct ioc3_private
*ip
, struct ethtool_cmd
*ep
)
963 /* Read all of the registers we are interested in now. */
964 ip
->sw_bmsr
= mii_read(ip
, MII_BMSR
);
965 ip
->sw_bmcr
= mii_read(ip
, MII_BMCR
);
966 ip
->sw_physid1
= mii_read(ip
, MII_PHYSID1
);
967 ip
->sw_physid2
= mii_read(ip
, MII_PHYSID2
);
969 /* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */
971 ip
->sw_advertise
= mii_read(ip
, MII_ADVERTISE
);
972 if (ep
== NULL
|| ep
->autoneg
== AUTONEG_ENABLE
) {
973 /* Advertise everything we can support. */
974 if (ip
->sw_bmsr
& BMSR_10HALF
)
975 ip
->sw_advertise
|= ADVERTISE_10HALF
;
977 ip
->sw_advertise
&= ~ADVERTISE_10HALF
;
979 if (ip
->sw_bmsr
& BMSR_10FULL
)
980 ip
->sw_advertise
|= ADVERTISE_10FULL
;
982 ip
->sw_advertise
&= ~ADVERTISE_10FULL
;
983 if (ip
->sw_bmsr
& BMSR_100HALF
)
984 ip
->sw_advertise
|= ADVERTISE_100HALF
;
986 ip
->sw_advertise
&= ~ADVERTISE_100HALF
;
987 if (ip
->sw_bmsr
& BMSR_100FULL
)
988 ip
->sw_advertise
|= ADVERTISE_100FULL
;
990 ip
->sw_advertise
&= ~ADVERTISE_100FULL
;
991 mii_write(ip
, MII_ADVERTISE
, ip
->sw_advertise
);
994 * XXX Currently no IOC3 card I know off supports 100BaseT4,
995 * XXX and this is because the DP83840 does not support it,
996 * XXX changes XXX would need to be made to the tx/rx logic in
997 * XXX the driver as well so I completely skip checking for it
998 * XXX in the BMSR for now.
1001 #ifdef AUTO_SWITCH_DEBUG
1002 ASD(("%s: Advertising [ ", ip
->dev
->name
));
1003 if (ip
->sw_advertise
& ADVERTISE_10HALF
)
1005 if (ip
->sw_advertise
& ADVERTISE_10FULL
)
1007 if (ip
->sw_advertise
& ADVERTISE_100HALF
)
1009 if (ip
->sw_advertise
& ADVERTISE_100FULL
)
1013 /* Enable Auto-Negotiation, this is usually on already... */
1014 ip
->sw_bmcr
|= BMCR_ANENABLE
;
1015 mii_write(ip
, MII_BMCR
, ip
->sw_bmcr
);
1017 /* Restart it to make sure it is going. */
1018 ip
->sw_bmcr
|= BMCR_ANRESTART
;
1019 mii_write(ip
, MII_BMCR
, ip
->sw_bmcr
);
1021 /* BMCR_ANRESTART self clears when the process has begun. */
1023 timeout
= 64; /* More than enough. */
1025 ip
->sw_bmcr
= mii_read(ip
, MII_BMCR
);
1026 if (!(ip
->sw_bmcr
& BMCR_ANRESTART
))
1027 break; /* got it. */
1031 printk(KERN_ERR
"%s: IOC3 would not start auto "
1032 "negotiation BMCR=0x%04x\n",
1033 ip
->dev
->name
, ip
->sw_bmcr
);
1034 printk(KERN_NOTICE
"%s: Performing force link "
1035 "detection.\n", ip
->dev
->name
);
1038 ip
->timer_state
= arbwait
;
1043 * Force the link up, trying first a particular mode. Either
1044 * we are here at the request of ethtool or because the IOC3
1045 * would not start to autoneg.
1049 * Disable auto-negotiation in BMCR, enable the duplex and
1050 * speed setting, init the timer state machine, and fire it off.
1052 if (ep
== NULL
|| ep
->autoneg
== AUTONEG_ENABLE
) {
1053 ip
->sw_bmcr
= BMCR_SPEED100
;
1055 if (ep
->speed
== SPEED_100
)
1056 ip
->sw_bmcr
= BMCR_SPEED100
;
1059 if (ep
->duplex
== DUPLEX_FULL
)
1060 ip
->sw_bmcr
|= BMCR_FULLDPLX
;
1062 mii_write(ip
, MII_BMCR
, ip
->sw_bmcr
);
1064 if (!is_lucent_phy(ip
)) {
1066 * OK, seems we need do disable the transceiver for the
1067 * first tick to make sure we get an accurate link
1068 * state at the second tick.
1070 ip
->sw_csconfig
= mii_read(ip
, MII_CSCONFIG
);
1071 ip
->sw_csconfig
&= ~(CSCONFIG_TCVDISAB
);
1072 mii_write(ip
, MII_CSCONFIG
, ip
->sw_csconfig
);
1074 ip
->timer_state
= ltrywait
;
1077 del_timer(&ip
->ioc3_timer
);
1078 ip
->timer_ticks
= 0;
1079 ip
->ioc3_timer
.expires
= jiffies
+ (12 * HZ
)/10; /* 1.2 sec. */
1080 ip
->ioc3_timer
.data
= (unsigned long) ip
;
1081 ip
->ioc3_timer
.function
= &ioc3_timer
;
1082 add_timer(&ip
->ioc3_timer
);
1085 static int ioc3_mii_init(struct ioc3_private
*ip
)
1091 spin_lock_irq(&ip
->ioc3_lock
);
1092 for (i
= 0; i
< 32; i
++) {
1094 word
= mii_read(ip
, 2);
1095 if ((word
!= 0xffff) && (word
!= 0x0000)) {
1097 break; /* Found a PHY */
1101 spin_unlock_irq(&ip
->ioc3_lock
);
1105 ioc3_start_auto_negotiation(ip
, NULL
); // XXX ethtool
1107 spin_unlock_irq(&ip
->ioc3_lock
);
1113 ioc3_clean_rx_ring(struct ioc3_private
*ip
)
1115 struct sk_buff
*skb
;
1118 for (i
= ip
->rx_ci
; i
& 15; i
++) {
1119 ip
->rx_skbs
[ip
->rx_pi
] = ip
->rx_skbs
[ip
->rx_ci
];
1120 ip
->rxr
[ip
->rx_pi
++] = ip
->rxr
[ip
->rx_ci
++];
1125 for (i
= ip
->rx_ci
; i
!= ip
->rx_pi
; i
= (i
+1) & 511) {
1126 struct ioc3_erxbuf
*rxb
;
1127 skb
= ip
->rx_skbs
[i
];
1128 rxb
= (struct ioc3_erxbuf
*) (skb
->data
- RX_OFFSET
);
1134 ioc3_clean_tx_ring(struct ioc3_private
*ip
)
1136 struct sk_buff
*skb
;
1139 for (i
=0; i
< 128; i
++) {
1140 skb
= ip
->tx_skbs
[i
];
1142 ip
->tx_skbs
[i
] = NULL
;
1143 dev_kfree_skb_any(skb
);
1152 ioc3_free_rings(struct ioc3_private
*ip
)
1154 struct sk_buff
*skb
;
1155 int rx_entry
, n_entry
;
1158 ioc3_clean_tx_ring(ip
);
1159 free_pages((unsigned long)ip
->txr
, 2);
1164 n_entry
= ip
->rx_ci
;
1165 rx_entry
= ip
->rx_pi
;
1167 while (n_entry
!= rx_entry
) {
1168 skb
= ip
->rx_skbs
[n_entry
];
1170 dev_kfree_skb_any(skb
);
1172 n_entry
= (n_entry
+ 1) & 511;
1174 free_page((unsigned long)ip
->rxr
);
1180 ioc3_alloc_rings(struct net_device
*dev
, struct ioc3_private
*ip
,
1183 struct ioc3_erxbuf
*rxb
;
1187 if (ip
->rxr
== NULL
) {
1188 /* Allocate and initialize rx ring. 4kb = 512 entries */
1189 ip
->rxr
= (unsigned long *) get_zeroed_page(GFP_ATOMIC
);
1190 rxr
= (unsigned long *) ip
->rxr
;
1192 printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
1194 /* Now the rx buffers. The RX ring may be larger but
1195 we only allocate 16 buffers for now. Need to tune
1196 this for performance and memory later. */
1197 for (i
= 0; i
< RX_BUFFS
; i
++) {
1198 struct sk_buff
*skb
;
1200 skb
= ioc3_alloc_skb(RX_BUF_ALLOC_SIZE
, GFP_ATOMIC
);
1206 ip
->rx_skbs
[i
] = skb
;
1209 /* Because we reserve afterwards. */
1210 skb_put(skb
, (1664 + RX_OFFSET
));
1211 rxb
= (struct ioc3_erxbuf
*) skb
->data
;
1212 rxr
[i
] = cpu_to_be64((0xa5UL
<< 56) |
1213 ((unsigned long) rxb
& TO_PHYS_MASK
));
1214 skb_reserve(skb
, RX_OFFSET
);
1217 ip
->rx_pi
= RX_BUFFS
;
1220 if (ip
->txr
== NULL
) {
1221 /* Allocate and initialize tx rings. 16kb = 128 bufs. */
1222 ip
->txr
= (struct ioc3_etxd
*)__get_free_pages(GFP_KERNEL
, 2);
1224 printk("ioc3_alloc_rings(): __get_free_pages() failed!\n");
1231 ioc3_init_rings(struct net_device
*dev
, struct ioc3_private
*ip
,
1236 ioc3_free_rings(ip
);
1237 ioc3_alloc_rings(dev
, ip
, ioc3
);
1239 ioc3_clean_rx_ring(ip
);
1240 ioc3_clean_tx_ring(ip
);
1242 /* Now the rx ring base, consume & produce registers. */
1243 ring
= (0xa5UL
<< 56) | ((unsigned long)ip
->rxr
& TO_PHYS_MASK
);
1244 ioc3
->erbr_h
= ring
>> 32;
1245 ioc3
->erbr_l
= ring
& 0xffffffff;
1246 ioc3
->ercir
= (ip
->rx_ci
<< 3);
1247 ioc3
->erpir
= (ip
->rx_pi
<< 3) | ERPIR_ARM
;
1249 ring
= (0xa5UL
<< 56) | ((unsigned long)ip
->txr
& TO_PHYS_MASK
);
1251 ip
->txqlen
= 0; /* nothing queued */
1253 /* Now the tx ring base, consume & produce registers. */
1254 ioc3
->etbr_h
= ring
>> 32;
1255 ioc3
->etbr_l
= ring
& 0xffffffff;
1256 ioc3
->etpir
= (ip
->tx_pi
<< 7);
1257 ioc3
->etcir
= (ip
->tx_ci
<< 7);
1258 ioc3
->etcir
; /* Flush */
1262 ioc3_ssram_disc(struct ioc3_private
*ip
)
1264 struct ioc3
*ioc3
= ip
->regs
;
1265 volatile u32
*ssram0
= &ioc3
->ssram
[0x0000];
1266 volatile u32
*ssram1
= &ioc3
->ssram
[0x4000];
1267 unsigned int pattern
= 0x5555;
1269 /* Assume the larger size SSRAM and enable parity checking */
1270 ioc3
->emcr
|= (EMCR_BUFSIZ
| EMCR_RAMPAR
);
1273 *ssram1
= ~pattern
& IOC3_SSRAM_DM
;
1275 if ((*ssram0
& IOC3_SSRAM_DM
) != pattern
||
1276 (*ssram1
& IOC3_SSRAM_DM
) != (~pattern
& IOC3_SSRAM_DM
)) {
1277 /* set ssram size to 64 KB */
1278 ip
->emcr
= EMCR_RAMPAR
;
1279 ioc3
->emcr
&= ~EMCR_BUFSIZ
;
1281 ip
->emcr
= EMCR_BUFSIZ
| EMCR_RAMPAR
;
1285 static void ioc3_init(struct ioc3_private
*ip
)
1287 struct net_device
*dev
= ip
->dev
;
1288 struct ioc3
*ioc3
= ip
->regs
;
1290 del_timer(&ip
->ioc3_timer
); /* Kill if running */
1292 ioc3
->emcr
= EMCR_RST
; /* Reset */
1293 ioc3
->emcr
; /* Flush WB */
1294 udelay(4); /* Give it time ... */
1298 /* Misc registers */
1300 ioc3
->etcsr
= (17<<ETCSR_IPGR2_SHIFT
) | (11<<ETCSR_IPGR1_SHIFT
) | 21;
1301 ioc3
->etcdc
; /* Clear on read */
1302 ioc3
->ercsr
= 15; /* RX low watermark */
1303 ioc3
->ertr
= 0; /* Interrupt immediately */
1304 ioc3
->emar_h
= (dev
->dev_addr
[5] << 8) | dev
->dev_addr
[4];
1305 ioc3
->emar_l
= (dev
->dev_addr
[3] << 24) | (dev
->dev_addr
[2] << 16) |
1306 (dev
->dev_addr
[1] << 8) | dev
->dev_addr
[0];
1307 ioc3
->ehar_h
= ip
->ehar_h
;
1308 ioc3
->ehar_l
= ip
->ehar_l
;
1309 ioc3
->ersr
= 42; /* XXX should be random */
1311 ioc3_init_rings(ip
->dev
, ip
, ioc3
);
1313 ip
->emcr
|= ((RX_OFFSET
/ 2) << EMCR_RXOFF_SHIFT
) | EMCR_TXDMAEN
|
1314 EMCR_TXEN
| EMCR_RXDMAEN
| EMCR_RXEN
;
1315 ioc3
->emcr
= ip
->emcr
;
1316 ioc3
->eier
= EISR_RXTIMERINT
| EISR_RXOFLO
| EISR_RXBUFOFLO
|
1317 EISR_RXMEMERR
| EISR_RXPARERR
| EISR_TXBUFUFLO
|
1318 EISR_TXEXPLICIT
| EISR_TXMEMERR
;
1322 static inline void ioc3_stop(struct ioc3_private
*ip
)
1324 struct ioc3
*ioc3
= ip
->regs
;
1326 ioc3
->emcr
= 0; /* Shutup */
1327 ioc3
->eier
= 0; /* Disable interrupts */
1328 ioc3
->eier
; /* Flush */
1332 ioc3_open(struct net_device
*dev
)
1334 struct ioc3_private
*ip
= dev
->priv
;
1336 if (request_irq(dev
->irq
, ioc3_interrupt
, SA_SHIRQ
, ioc3_str
, dev
)) {
1337 printk(KERN_ERR
"%s: Can't get irq %d\n", dev
->name
, dev
->irq
);
1346 netif_start_queue(dev
);
1351 ioc3_close(struct net_device
*dev
)
1353 struct ioc3_private
*ip
= dev
->priv
;
1355 del_timer(&ip
->ioc3_timer
);
1357 netif_stop_queue(dev
);
1360 free_irq(dev
->irq
, dev
);
1362 ioc3_free_rings(ip
);
1367 * MENET cards have four IOC3 chips, which are attached to two sets of
1368 * PCI slot resources each: the primary connections are on slots
1369 * 0..3 and the secondaries are on 4..7
1371 * All four ethernets are brought out to connectors; six serial ports
1372 * (a pair from each of the first three IOC3s) are brought out to
1373 * MiniDINs; all other subdevices are left swinging in the wind, leave
1376 static inline int ioc3_is_menet(struct pci_dev
*pdev
)
1378 struct pci_dev
*dev
;
1380 return pdev
->bus
->parent
== NULL
1381 && (dev
= pci_find_slot(pdev
->bus
->number
, PCI_DEVFN(0, 0)))
1382 && dev
->vendor
== PCI_VENDOR_ID_SGI
1383 && dev
->device
== PCI_DEVICE_ID_SGI_IOC3
1384 && (dev
= pci_find_slot(pdev
->bus
->number
, PCI_DEVFN(1, 0)))
1385 && dev
->vendor
== PCI_VENDOR_ID_SGI
1386 && dev
->device
== PCI_DEVICE_ID_SGI_IOC3
1387 && (dev
= pci_find_slot(pdev
->bus
->number
, PCI_DEVFN(2, 0)))
1388 && dev
->vendor
== PCI_VENDOR_ID_SGI
1389 && dev
->device
== PCI_DEVICE_ID_SGI_IOC3
;
1392 static inline void ioc3_serial_probe(struct pci_dev
*pdev
,
1395 struct serial_struct req
;
1398 * We need to recognice and treat the fourth MENET serial as it
1399 * does not have an SuperIO chip attached to it, therefore attempting
1400 * to access it will result in bus errors. We call something an
1401 * MENET if PCI slot 0, 1, 2 and 3 of a master PCI bus all have an IOC3
1402 * in it. This is paranoid but we want to avoid blowing up on a
1403 * showhorn PCI box that happens to have 4 IOC3 cards in it so it's
1404 * not paranoid enough ...
1406 if (ioc3_is_menet(pdev
) && PCI_SLOT(pdev
->devfn
) == 3)
1409 /* Register to interrupt zero because we share the interrupt with
1410 the serial driver which we don't properly support yet. */
1411 memset(&req
, 0, sizeof(req
));
1413 req
.flags
= IOC3_COM_FLAGS
;
1414 req
.io_type
= SERIAL_IO_MEM
;
1415 req
.iomem_reg_shift
= 0;
1416 req
.baud_base
= IOC3_BAUD
;
1418 req
.iomem_base
= (unsigned char *) &ioc3
->sregs
.uarta
;
1419 register_serial(&req
);
1421 req
.iomem_base
= (unsigned char *) &ioc3
->sregs
.uartb
;
1422 register_serial(&req
);
1425 static int __devinit
ioc3_probe(struct pci_dev
*pdev
,
1426 const struct pci_device_id
*ent
)
1428 struct net_device
*dev
= NULL
;
1429 struct ioc3_private
*ip
;
1431 unsigned long ioc3_base
, ioc3_size
;
1432 u32 vendor
, model
, rev
;
1435 dev
= alloc_etherdev(sizeof(struct ioc3_private
));
1439 err
= pci_request_regions(pdev
, "ioc3");
1443 SET_MODULE_OWNER(dev
);
1444 SET_NETDEV_DEV(dev
, &pdev
->dev
);
1449 dev
->irq
= pdev
->irq
;
1451 ioc3_base
= pci_resource_start(pdev
, 0);
1452 ioc3_size
= pci_resource_len(pdev
, 0);
1453 ioc3
= (struct ioc3
*) ioremap(ioc3_base
, ioc3_size
);
1455 printk(KERN_CRIT
"ioc3eth(%s): ioremap failed, goodbye.\n",
1462 #ifdef CONFIG_SERIAL_8250
1463 ioc3_serial_probe(pdev
, ioc3
);
1466 spin_lock_init(&ip
->ioc3_lock
);
1471 init_timer(&ip
->ioc3_timer
);
1474 if (ip
->phy
== -1) {
1475 printk(KERN_CRIT
"ioc3-eth(%s): Didn't find a PHY, goodbye.\n",
1481 ioc3_ssram_disc(ip
);
1484 /* The IOC3-specific entries in the device structure. */
1485 dev
->open
= ioc3_open
;
1486 dev
->hard_start_xmit
= ioc3_start_xmit
;
1487 dev
->tx_timeout
= ioc3_timeout
;
1488 dev
->watchdog_timeo
= 5 * HZ
;
1489 dev
->stop
= ioc3_close
;
1490 dev
->get_stats
= ioc3_get_stats
;
1491 dev
->do_ioctl
= ioc3_ioctl
;
1492 dev
->set_multicast_list
= ioc3_set_multicast_list
;
1494 err
= register_netdev(dev
);
1498 vendor
= (ip
->sw_physid1
<< 12) | (ip
->sw_physid2
>> 4);
1499 model
= (ip
->sw_physid2
>> 4) & 0x3f;
1500 rev
= ip
->sw_physid2
& 0xf;
1501 printk(KERN_INFO
"%s: Using PHY %d, vendor 0x%x, model %d, "
1502 "rev %d.\n", dev
->name
, ip
->phy
, vendor
, model
, rev
);
1503 printk(KERN_INFO
"%s: IOC3 SSRAM has %d kbyte.\n", dev
->name
,
1504 ip
->emcr
& EMCR_BUFSIZ
? 128 : 64);
1510 free_irq(dev
->irq
, dev
);
1511 ioc3_free_rings(ip
);
1513 pci_release_regions(pdev
);
1519 static void __devexit
ioc3_remove_one (struct pci_dev
*pdev
)
1521 struct net_device
*dev
= pci_get_drvdata(pdev
);
1522 struct ioc3_private
*ip
= dev
->priv
;
1523 struct ioc3
*ioc3
= ip
->regs
;
1525 unregister_netdev(dev
);
1527 pci_release_regions(pdev
);
1531 static struct pci_device_id ioc3_pci_tbl
[] __devinitdata
= {
1532 { PCI_VENDOR_ID_SGI
, PCI_DEVICE_ID_SGI_IOC3
, PCI_ANY_ID
, PCI_ANY_ID
},
1535 MODULE_DEVICE_TABLE(pci
, ioc3_pci_tbl
);
1537 static struct pci_driver ioc3_driver
= {
1539 .id_table
= ioc3_pci_tbl
,
1540 .probe
= ioc3_probe
,
1541 .remove
= __devexit_p(ioc3_remove_one
),
1544 static int __init
ioc3_init_module(void)
1546 return pci_module_init(&ioc3_driver
);
1549 static void __exit
ioc3_cleanup_module(void)
1551 pci_unregister_driver(&ioc3_driver
);
1555 ioc3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1558 struct ioc3_private
*ip
= dev
->priv
;
1559 struct ioc3
*ioc3
= ip
->regs
;
1561 struct ioc3_etxd
*desc
;
1564 spin_lock_irq(&ip
->ioc3_lock
);
1566 data
= (unsigned long) skb
->data
;
1569 produce
= ip
->tx_pi
;
1570 desc
= &ip
->txr
[produce
];
1573 /* Short packet, let's copy it directly into the ring. */
1574 memcpy(desc
->data
, skb
->data
, skb
->len
);
1575 if (len
< ETH_ZLEN
) {
1576 /* Very short packet, pad with zeros at the end. */
1577 memset(desc
->data
+ len
, 0, ETH_ZLEN
- len
);
1580 desc
->cmd
= cpu_to_be32(len
| ETXD_INTWHENDONE
| ETXD_D0V
);
1581 desc
->bufcnt
= cpu_to_be32(len
);
1582 } else if ((data
^ (data
+ len
)) & 0x4000) {
1583 unsigned long b2
, s1
, s2
;
1585 b2
= (data
| 0x3fffUL
) + 1UL;
1587 s2
= data
+ len
- b2
;
1589 desc
->cmd
= cpu_to_be32(len
| ETXD_INTWHENDONE
|
1590 ETXD_B1V
| ETXD_B2V
);
1591 desc
->bufcnt
= cpu_to_be32((s1
<< ETXD_B1CNT_SHIFT
)
1592 | (s2
<< ETXD_B2CNT_SHIFT
));
1593 desc
->p1
= cpu_to_be64((0xa5UL
<< 56) |
1594 (data
& TO_PHYS_MASK
));
1595 desc
->p2
= cpu_to_be64((0xa5UL
<< 56) |
1596 (data
& TO_PHYS_MASK
));
1598 /* Normal sized packet that doesn't cross a page boundary. */
1599 desc
->cmd
= cpu_to_be32(len
| ETXD_INTWHENDONE
| ETXD_B1V
);
1600 desc
->bufcnt
= cpu_to_be32(len
<< ETXD_B1CNT_SHIFT
);
1601 desc
->p1
= cpu_to_be64((0xa5UL
<< 56) |
1602 (data
& TO_PHYS_MASK
));
1607 dev
->trans_start
= jiffies
;
1608 ip
->tx_skbs
[produce
] = skb
; /* Remember skb */
1609 produce
= (produce
+ 1) & 127;
1610 ip
->tx_pi
= produce
;
1611 ioc3
->etpir
= produce
<< 7; /* Fire ... */
1615 if (ip
->txqlen
> 127)
1616 netif_stop_queue(dev
);
1618 spin_unlock_irq(&ip
->ioc3_lock
);
1623 static void ioc3_timeout(struct net_device
*dev
)
1625 struct ioc3_private
*ip
= dev
->priv
;
1627 printk(KERN_ERR
"%s: transmit timed out, resetting\n", dev
->name
);
1633 dev
->trans_start
= jiffies
;
1634 netif_wake_queue(dev
);
/*
 * Given a multicast ethernet address, this routine calculates the
 * address's bit index in the logical address filter mask.
 */
1642 static inline unsigned int
1643 ioc3_hash(const unsigned char *addr
)
1645 unsigned int temp
= 0;
1650 crc
= ether_crc_le(ETH_ALEN
, addr
);
1652 crc
&= 0x3f; /* bit reverse lowest 6 bits for hash index */
1653 for (bits
= 6; --bits
>= 0; ) {
1655 temp
|= (crc
& 0x1);
1663 /* We provide both the mii-tools and the ethtool ioctls. */
1664 static int ioc3_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
1666 struct ioc3_private
*ip
= dev
->priv
;
1667 struct ethtool_cmd
*ep_user
= (struct ethtool_cmd
*) rq
->ifr_data
;
1668 u16
*data
= (u16
*)&rq
->ifr_data
;
1669 struct ioc3
*ioc3
= ip
->regs
;
1670 struct ethtool_cmd ecmd
;
1673 case SIOCGMIIPHY
: /* Get the address of the PHY in use. */
1679 case SIOCGMIIREG
: { /* Read a PHY register. */
1680 unsigned int phy
= data
[0];
1681 unsigned int reg
= data
[1];
1683 if (phy
> 0x1f || reg
> 0x1f)
1686 spin_lock_irq(&ip
->ioc3_lock
);
1687 while (ioc3
->micr
& MICR_BUSY
);
1688 ioc3
->micr
= (phy
<< MICR_PHYADDR_SHIFT
) | reg
| MICR_READTRIG
;
1689 while (ioc3
->micr
& MICR_BUSY
);
1690 data
[3] = (ioc3
->midr_r
& MIDR_DATA_MASK
);
1691 spin_unlock_irq(&ip
->ioc3_lock
);
1695 case SIOCSMIIREG
: /* Write a PHY register. */
1699 if (!capable(CAP_NET_ADMIN
))
1702 if (phy
> 0x1f || reg
> 0x1f)
1705 spin_lock_irq(&ip
->ioc3_lock
);
1706 while (ioc3
->micr
& MICR_BUSY
);
1707 ioc3
->midr_w
= data
[2];
1708 ioc3
->micr
= (phy
<< MICR_PHYADDR_SHIFT
) | reg
;
1709 while (ioc3
->micr
& MICR_BUSY
);
1710 spin_unlock_irq(&ip
->ioc3_lock
);
1715 if (copy_from_user(&ecmd
, ep_user
, sizeof(ecmd
)))
1718 if (ecmd
.cmd
== ETHTOOL_GSET
) {
1720 (SUPPORTED_10baseT_Half
|
1721 SUPPORTED_10baseT_Full
|
1722 SUPPORTED_100baseT_Half
|
1723 SUPPORTED_100baseT_Full
| SUPPORTED_Autoneg
|
1724 SUPPORTED_TP
| SUPPORTED_MII
);
1726 ecmd
.port
= PORT_TP
;
1727 ecmd
.transceiver
= XCVR_INTERNAL
;
1728 ecmd
.phy_address
= ip
->phy
;
1730 /* Record PHY settings. */
1731 spin_lock_irq(&ip
->ioc3_lock
);
1732 ip
->sw_bmcr
= mii_read(ip
, MII_BMCR
);
1733 ip
->sw_lpa
= mii_read(ip
, MII_LPA
);
1734 spin_unlock_irq(&ip
->ioc3_lock
);
1735 if (ip
->sw_bmcr
& BMCR_ANENABLE
) {
1736 ecmd
.autoneg
= AUTONEG_ENABLE
;
1737 ecmd
.speed
= (ip
->sw_lpa
&
1738 (LPA_100HALF
| LPA_100FULL
)) ?
1739 SPEED_100
: SPEED_10
;
1740 if (ecmd
.speed
== SPEED_100
)
1741 ecmd
.duplex
= (ip
->sw_lpa
& (LPA_100FULL
)) ?
1742 DUPLEX_FULL
: DUPLEX_HALF
;
1744 ecmd
.duplex
= (ip
->sw_lpa
& (LPA_10FULL
)) ?
1745 DUPLEX_FULL
: DUPLEX_HALF
;
1747 ecmd
.autoneg
= AUTONEG_DISABLE
;
1748 ecmd
.speed
= (ip
->sw_bmcr
& BMCR_SPEED100
) ?
1749 SPEED_100
: SPEED_10
;
1750 ecmd
.duplex
= (ip
->sw_bmcr
& BMCR_FULLDPLX
) ?
1751 DUPLEX_FULL
: DUPLEX_HALF
;
1753 if (copy_to_user(ep_user
, &ecmd
, sizeof(ecmd
)))
1756 } else if (ecmd
.cmd
== ETHTOOL_SSET
) {
1757 if (!capable(CAP_NET_ADMIN
))
1760 /* Verify the settings we care about. */
1761 if (ecmd
.autoneg
!= AUTONEG_ENABLE
&&
1762 ecmd
.autoneg
!= AUTONEG_DISABLE
)
1765 if (ecmd
.autoneg
== AUTONEG_DISABLE
&&
1766 ((ecmd
.speed
!= SPEED_100
&&
1767 ecmd
.speed
!= SPEED_10
) ||
1768 (ecmd
.duplex
!= DUPLEX_HALF
&&
1769 ecmd
.duplex
!= DUPLEX_FULL
)))
1772 /* Ok, do it to it. */
1773 del_timer(&ip
->ioc3_timer
);
1774 spin_lock_irq(&ip
->ioc3_lock
);
1775 ioc3_start_auto_negotiation(ip
, &ecmd
);
1776 spin_unlock_irq(&ip
->ioc3_lock
);
1787 static void ioc3_set_multicast_list(struct net_device
*dev
)
1789 struct dev_mc_list
*dmi
= dev
->mc_list
;
1790 struct ioc3_private
*ip
= dev
->priv
;
1791 struct ioc3
*ioc3
= ip
->regs
;
1795 netif_stop_queue(dev
); /* Lock out others. */
1797 if (dev
->flags
& IFF_PROMISC
) { /* Set promiscuous. */
1798 /* Unconditionally log net taps. */
1799 printk(KERN_INFO
"%s: Promiscuous mode enabled.\n", dev
->name
);
1800 ip
->emcr
|= EMCR_PROMISC
;
1801 ioc3
->emcr
= ip
->emcr
;
1804 ip
->emcr
&= ~EMCR_PROMISC
;
1805 ioc3
->emcr
= ip
->emcr
; /* Clear promiscuous. */
1808 if ((dev
->flags
& IFF_ALLMULTI
) || (dev
->mc_count
> 64)) {
1809 /* Too many for hashing to make sense or we want all
1810 multicast packets anyway, so skip computing all the
1811 hashes and just accept all packets. */
1812 ip
->ehar_h
= 0xffffffff;
1813 ip
->ehar_l
= 0xffffffff;
1815 for (i
= 0; i
< dev
->mc_count
; i
++) {
1816 char *addr
= dmi
->dmi_addr
;
1822 ehar
|= (1UL << ioc3_hash(addr
));
1824 ip
->ehar_h
= ehar
>> 32;
1825 ip
->ehar_l
= ehar
& 0xffffffff;
1827 ioc3
->ehar_h
= ip
->ehar_h
;
1828 ioc3
->ehar_l
= ip
->ehar_l
;
1831 netif_wake_queue(dev
); /* Let us get going again. */
1834 MODULE_AUTHOR("Ralf Baechle <ralf@oss.sgi.com>");
1835 MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
1836 MODULE_LICENSE("GPL");
1838 module_init(ioc3_init_module
);
1839 module_exit(ioc3_cleanup_module
);