MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
drivers/net/sunqe.c
/* $Id: sunqe.c,v 1.55 2002/01/15 06:48:55 davem Exp $
 * sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003 David S. Miller (davem@redhat.com)
 */

static char version[] =
        "sunqe.c:v3.0 8/24/03 David S. Miller (davem@redhat.com)\n";
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"
static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200
static inline int qec_global_reset(void __iomem *gregs)
{
        int tries = QEC_RESET_TRIES;

        sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
        while (--tries) {
                u32 tmp = sbus_readl(gregs + GLOB_CTRL);
                if (tmp & GLOB_CTRL_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (tries)
                return 0;
        printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
        return -1;
}
#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200
static inline int qe_stop(struct sunqe *qep)
{
        void __iomem *cregs = qep->qcregs;
        void __iomem *mregs = qep->mregs;
        int tries;

        /* Reset the MACE, then the QEC channel. */
        sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
        tries = MACE_RESET_RETRIES;
        while (--tries) {
                u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
                if (tmp & MREGS_BCONFIG_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (!tries) {
                printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
                return -1;
        }

        sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
        tries = QE_RESET_RETRIES;
        while (--tries) {
                u32 tmp = sbus_readl(cregs + CREG_CTRL);
                if (tmp & CREG_CTRL_RESET) {
                        udelay(20);
                        continue;
                }
                break;
        }
        if (!tries) {
                printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
                return -1;
        }
        return 0;
}
static void qe_init_rings(struct sunqe *qep)
{
        struct qe_init_block *qb = qep->qe_block;
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 qbufs_dvma = qep->buffers_dvma;
        int i;

        qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
        memset(qb, 0, sizeof(struct qe_init_block));
        memset(qbufs, 0, sizeof(struct sunqe_buffers));
        for (i = 0; i < RX_RING_SIZE; i++) {
                qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
                qb->qe_rxd[i].rx_flags =
                        (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
        }
}
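/* Ownership note (illustrative summary, not from the original source):
 * setting RXD_OWN hands a descriptor to the QEC; the chip clears the bit
 * once a frame has been DMA'd into the buffer, which is what qe_rx()
 * polls for below.  (RXD_PKT_SZ & RXD_LENGTH) encodes the buffer size
 * the hardware may fill.
 */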
static int qe_init(struct sunqe *qep, int from_irq)
{
        struct sunqec *qecp = qep->parent;
        void __iomem *cregs = qep->qcregs;
        void __iomem *mregs = qep->mregs;
        void __iomem *gregs = qecp->gregs;
        unsigned char *e = &qep->dev->dev_addr[0];
        u32 tmp;
        int i;

        /* Shut it up. */
        if (qe_stop(qep))
                return -EAGAIN;

        /* Setup initial rx/tx init block pointers. */
        sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
        sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

        /* Enable/mask the various irq's. */
        sbus_writel(0, cregs + CREG_RIMASK);
        sbus_writel(1, cregs + CREG_TIMASK);

        sbus_writel(0, cregs + CREG_QMASK);
        sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

        /* Setup the FIFO pointers into QEC local memory. */
        tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
        sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
        sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

        tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
                sbus_readl(gregs + GLOB_RSIZE);
        sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
        sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

        /* Clear the channel collision counter. */
        sbus_writel(0, cregs + CREG_CCNT);

        /* For 10baseT, neither inter-frame space nor throttle seems to be necessary. */
        sbus_writel(0, cregs + CREG_PIPG);

        /* Now dork with the AMD MACE. */
        sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
        sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
        sbus_writeb(0, mregs + MREGS_RXFCNTL);

        /* The QEC dma's the rx'd packets from local memory out to main memory,
         * and therefore it interrupts when the packet reception is "complete".
         * So don't listen for the MACE talking about it.
         */
        sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
        sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
        sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
                     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
                    mregs + MREGS_FCONFIG);

        /* Only usable interface on QuadEther is twisted pair. */
        sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

        /* Tell MACE we are changing the ether address. */
        sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
                    mregs + MREGS_IACONFIG);
        while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                barrier();
        sbus_writeb(e[0], mregs + MREGS_ETHADDR);
        sbus_writeb(e[1], mregs + MREGS_ETHADDR);
        sbus_writeb(e[2], mregs + MREGS_ETHADDR);
        sbus_writeb(e[3], mregs + MREGS_ETHADDR);
        sbus_writeb(e[4], mregs + MREGS_ETHADDR);
        sbus_writeb(e[5], mregs + MREGS_ETHADDR);

        /* Clear out the address filter. */
        sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                    mregs + MREGS_IACONFIG);
        while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                barrier();
        for (i = 0; i < 8; i++)
                sbus_writeb(0, mregs + MREGS_FILTER);

        /* Address changes are now complete. */
        sbus_writeb(0, mregs + MREGS_IACONFIG);

        qe_init_rings(qep);

        /* Wait a little bit for the link to come up... */
        mdelay(5);
        if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
                int tries = 50;

                while (tries--) {
                        u8 tmp;

                        mdelay(5);
                        barrier();
                        tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
                        if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
                                break;
                }
                if (tries == 0)
                        printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
        }

        /* Missed packet counter is cleared on a read. */
        sbus_readb(mregs + MREGS_MPCNT);

        /* Reload multicast information; this will enable the receiver
         * and transmitter.
         */
        qe_set_multicast(qep->dev);

        /* QEC should now start to show interrupts. */
        return 0;
}
/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
        struct net_device *dev = qep->dev;
        int mace_hwbug_workaround = 0;

        if (qe_status & CREG_STAT_EDEFER) {
                printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
                qep->net_stats.tx_errors++;
        }

        if (qe_status & CREG_STAT_CLOSS) {
                printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
                qep->net_stats.tx_errors++;
                qep->net_stats.tx_carrier_errors++;
        }

        if (qe_status & CREG_STAT_ERETRIES) {
                printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
                qep->net_stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_LCOLL) {
                printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
                qep->net_stats.tx_errors++;
                qep->net_stats.collisions++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_FUFLOW) {
                printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
                qep->net_stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_JERROR) {
                printk(KERN_ERR "%s: Jabber error.\n", dev->name);
        }

        if (qe_status & CREG_STAT_BERROR) {
                printk(KERN_ERR "%s: Babble error.\n", dev->name);
        }

        if (qe_status & CREG_STAT_CCOFLOW) {
                qep->net_stats.tx_errors += 256;
                qep->net_stats.collisions += 256;
        }

        if (qe_status & CREG_STAT_TXDERROR) {
                printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
                qep->net_stats.tx_errors++;
                qep->net_stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXLERR) {
                printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
                qep->net_stats.tx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXPERR) {
                printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
                qep->net_stats.tx_errors++;
                qep->net_stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_TXSERR) {
                printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
                qep->net_stats.tx_errors++;
                qep->net_stats.tx_aborted_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RCCOFLOW) {
                qep->net_stats.rx_errors += 256;
                qep->net_stats.collisions += 256;
        }

        if (qe_status & CREG_STAT_RUOFLOW) {
                qep->net_stats.rx_errors += 256;
                qep->net_stats.rx_over_errors += 256;
        }

        if (qe_status & CREG_STAT_MCOFLOW) {
                qep->net_stats.rx_errors += 256;
                qep->net_stats.rx_missed_errors += 256;
        }

        if (qe_status & CREG_STAT_RXFOFLOW) {
                printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
                qep->net_stats.rx_errors++;
                qep->net_stats.rx_over_errors++;
        }

        if (qe_status & CREG_STAT_RLCOLL) {
                printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
                qep->net_stats.rx_errors++;
                qep->net_stats.collisions++;
        }

        if (qe_status & CREG_STAT_FCOFLOW) {
                qep->net_stats.rx_errors += 256;
                qep->net_stats.rx_frame_errors += 256;
        }

        if (qe_status & CREG_STAT_CECOFLOW) {
                qep->net_stats.rx_errors += 256;
                qep->net_stats.rx_crc_errors += 256;
        }

        if (qe_status & CREG_STAT_RXDROP) {
                printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
                qep->net_stats.rx_errors++;
                qep->net_stats.rx_dropped++;
                qep->net_stats.rx_missed_errors++;
        }

        if (qe_status & CREG_STAT_RXSMALL) {
                printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
                qep->net_stats.rx_errors++;
                qep->net_stats.rx_length_errors++;
        }

        if (qe_status & CREG_STAT_RXLERR) {
                printk(KERN_ERR "%s: Receive late error.\n", dev->name);
                qep->net_stats.rx_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RXPERR) {
                printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
                qep->net_stats.rx_errors++;
                qep->net_stats.rx_missed_errors++;
                mace_hwbug_workaround = 1;
        }

        if (qe_status & CREG_STAT_RXSERR) {
                printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
                qep->net_stats.rx_errors++;
                qep->net_stats.rx_missed_errors++;
                mace_hwbug_workaround = 1;
        }

        if (mace_hwbug_workaround)
                qe_init(qep, 1);
        return mace_hwbug_workaround;
}
/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
        struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
        struct qe_rxd *this;
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 qbufs_dvma = qep->buffers_dvma;
        int elem = qep->rx_new, drops = 0;
        u32 flags;

        this = &rxbase[elem];
        while (!((flags = this->rx_flags) & RXD_OWN)) {
                struct sk_buff *skb;
                unsigned char *this_qbuf =
                        &qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
                __u32 this_qbuf_dvma = qbufs_dvma +
                        qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
                struct qe_rxd *end_rxd =
                        &rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
                int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

                /* Check for errors. */
                if (len < ETH_ZLEN) {
                        qep->net_stats.rx_errors++;
                        qep->net_stats.rx_length_errors++;
                        qep->net_stats.rx_dropped++;
                } else {
                        skb = dev_alloc_skb(len + 2);
                        if (skb == NULL) {
                                drops++;
                                qep->net_stats.rx_dropped++;
                        } else {
                                skb->dev = qep->dev;
                                skb_reserve(skb, 2);
                                skb_put(skb, len);
                                eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
                                                 len, 0);
                                skb->protocol = eth_type_trans(skb, qep->dev);
                                netif_rx(skb);
                                qep->dev->last_rx = jiffies;
                                qep->net_stats.rx_packets++;
                                qep->net_stats.rx_bytes += len;
                        }
                }
                end_rxd->rx_addr = this_qbuf_dvma;
                end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
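                /* Note (illustrative): the descriptor RX_RING_SIZE slots
                 * ahead (modulo RX_RING_MAXSIZE) is re-armed with this
                 * buffer, so the hardware always sees RX_RING_SIZE live
                 * descriptors while the driver trails behind reclaiming.
                 */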
                elem = NEXT_RX(elem);
                this = &rxbase[elem];
        }
        qep->rx_new = elem;
        if (drops)
                printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}
static void qe_tx_reclaim(struct sunqe *qep);
/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct sunqec *qecp = (struct sunqec *) dev_id;
        u32 qec_status;
        int channel = 0;

        /* Latch the status now. */
        qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
        while (channel < 4) {
                if (qec_status & 0xf) {
                        struct sunqe *qep = qecp->qes[channel];
                        u32 qe_status;

                        qe_status = sbus_readl(qep->qcregs + CREG_STAT);
                        if (qe_status & CREG_STAT_ERRORS) {
                                if (qe_is_bolixed(qep, qe_status))
                                        goto next;
                        }
                        if (qe_status & CREG_STAT_RXIRQ)
                                qe_rx(qep);
                        if (netif_queue_stopped(qep->dev) &&
                            (qe_status & CREG_STAT_TXIRQ)) {
                                spin_lock(&qep->lock);
                                qe_tx_reclaim(qep);
                                if (TX_BUFFS_AVAIL(qep) > 0) {
                                        /* Wake net queue and return to
                                         * lazy tx reclaim.
                                         */
                                        netif_wake_queue(qep->dev);
                                        sbus_writel(1, qep->qcregs + CREG_TIMASK);
                                }
                                spin_unlock(&qep->lock);
                        }
        next:
                        ;
                }
                qec_status >>= 4;
                channel++;
        }

        return IRQ_HANDLED;
}
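/* Note (illustrative): GLOB_STAT packs one 4-bit status nibble per QE
 * channel, channel 0 in the lowest nibble, which is why qec_interrupt()
 * tests (qec_status & 0xf) and shifts right by four on each iteration.
 */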
static int qe_open(struct net_device *dev)
{
        struct sunqe *qep = (struct sunqe *) dev->priv;

        qep->mconfig = (MREGS_MCONFIG_TXENAB |
                        MREGS_MCONFIG_RXENAB |
                        MREGS_MCONFIG_MBAENAB);
        return qe_init(qep, 0);
}
static int qe_close(struct net_device *dev)
{
        struct sunqe *qep = (struct sunqe *) dev->priv;

        qe_stop(qep);
        return 0;
}
/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
        struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
        int elem = qep->tx_old;

        while (elem != qep->tx_new) {
                u32 flags = txbase[elem].tx_flags;

                if (flags & TXD_OWN)
                        break;
                elem = NEXT_TX(elem);
        }
        qep->tx_old = elem;
}
static void qe_tx_timeout(struct net_device *dev)
{
        struct sunqe *qep = (struct sunqe *) dev->priv;
        int tx_full;

        spin_lock_irq(&qep->lock);

        /* Try to reclaim; if that frees up some tx
         * entries, we're fine.
         */
        qe_tx_reclaim(qep);
        tx_full = TX_BUFFS_AVAIL(qep) <= 0;

        spin_unlock_irq(&qep->lock);

        if (!tx_full)
                goto out;

        printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
        qe_init(qep, 1);

out:
        netif_wake_queue(dev);
}
/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sunqe *qep = (struct sunqe *) dev->priv;
        struct sunqe_buffers *qbufs = qep->buffers;
        __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
        unsigned char *txbuf;
        int len, entry;

        spin_lock_irq(&qep->lock);

        qe_tx_reclaim(qep);

        len = skb->len;
        entry = qep->tx_new;

        txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
        txbuf_dvma = qbufs_dvma +
                qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

        /* Avoid a race... */
        qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

        memcpy(txbuf, skb->data, len);

        qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
        qep->qe_block->qe_txd[entry].tx_flags =
                (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
        qep->tx_new = NEXT_TX(entry);

        /* Get it going. */
        dev->trans_start = jiffies;
        sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

        qep->net_stats.tx_packets++;
        qep->net_stats.tx_bytes += len;

        if (TX_BUFFS_AVAIL(qep) <= 0) {
                /* Halt the net queue and enable tx interrupts.
                 * When the tx queue empties the tx irq handler
                 * will wake up the queue and return us back to
                 * the lazy tx reclaim scheme.
                 */
                netif_stop_queue(dev);
                sbus_writel(0, qep->qcregs + CREG_TIMASK);
        }
        spin_unlock_irq(&qep->lock);

        dev_kfree_skb(skb);

        return 0;
}
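/* Note (illustrative): CREG_TIMASK is the per-channel tx irq mask --
 * writing 1 masks tx completion interrupts (the default "lazy" reclaim
 * via qe_tx_reclaim()), writing 0 unmasks them, which qe_start_xmit()
 * only does once the ring fills and the queue has been stopped.
 */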
static struct net_device_stats *qe_get_stats(struct net_device *dev)
{
        struct sunqe *qep = (struct sunqe *) dev->priv;

        return &qep->net_stats;
}
static void qe_set_multicast(struct net_device *dev)
{
        struct sunqe *qep = (struct sunqe *) dev->priv;
        struct dev_mc_list *dmi = dev->mc_list;
        u8 new_mconfig = qep->mconfig;
        char *addrs;
        int i;
        u32 crc;

        /* Lock out others. */
        netif_stop_queue(dev);

        if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
                sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                            qep->mregs + MREGS_IACONFIG);
                while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                        barrier();
                for (i = 0; i < 8; i++)
                        sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
                sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
        } else if (dev->flags & IFF_PROMISC) {
                new_mconfig |= MREGS_MCONFIG_PROMISC;
        } else {
                u16 hash_table[4];
                u8 *hbytes = (unsigned char *) &hash_table[0];

                for (i = 0; i < 4; i++)
                        hash_table[i] = 0;

                for (i = 0; i < dev->mc_count; i++) {
                        addrs = dmi->dmi_addr;
                        dmi = dmi->next;

                        if (!(*addrs & 1))
                                continue;
                        crc = ether_crc_le(6, addrs);
                        crc >>= 26;
                        hash_table[crc >> 4] |= 1 << (crc & 0xf);
                }
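                /* Worked example (illustrative): ether_crc_le() returns a
                 * 32-bit CRC whose top six bits (after crc >>= 26) select
                 * one of 64 filter bits; crc >> 4 picks one of the four
                 * 16-bit words and crc & 0xf the bit within it.  A CRC of
                 * 0xfc000000 becomes 63 and sets bit 15 of hash_table[3].
                 */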
                /* Program the qe with the new filter value. */
                sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
                            qep->mregs + MREGS_IACONFIG);
                while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
                        barrier();
                for (i = 0; i < 8; i++) {
                        u8 tmp = *hbytes++;
                        sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
                }
                sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
        }

        /* Any change of the logical address filter, the physical address,
         * or enabling/disabling promiscuous mode causes the MACE to disable
         * the receiver.  So we must re-enable them here or else the MACE
         * refuses to listen to anything on the network.  Sheesh, took
         * me a day or two to find this bug.
         */
        qep->mconfig = new_mconfig;
        sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

        /* Let us get going again. */
        netif_wake_queue(dev);
}
/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct sunqe *qep = dev->priv;

        strcpy(info->driver, "sunqe");
        strcpy(info->version, "3.0");
        sprintf(info->bus_info, "SBUS:%d",
                qep->qe_sdev->slot);
}
static u32 qe_get_link(struct net_device *dev)
{
        struct sunqe *qep = dev->priv;
        void __iomem *mregs = qep->mregs;
        u8 phyconfig;

        spin_lock_irq(&qep->lock);
        phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
        spin_unlock_irq(&qep->lock);

        return (phyconfig & MREGS_PHYCONFIG_LSTAT);
}
static struct ethtool_ops qe_ethtool_ops = {
        .get_drvinfo    = qe_get_drvinfo,
        .get_link       = qe_get_link,
};
/* This is only called once at boot time for each card probed. */
static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
{
        u8 bsizes = qecp->qec_bursts;

        if (sbus_can_burst64(qsdev) && (bsizes & DMA_BURST64)) {
                sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
        } else if (bsizes & DMA_BURST32) {
                sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
        } else {
                sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
        }

        /* Packetsize only used in 100baseT BigMAC configurations,
         * set it to zero just to be on the safe side.
         */
        sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

        /* Set the local memsize register, divided up to one piece per QE channel. */
        sbus_writel((qsdev->reg_addrs[1].reg_size >> 2),
                    qecp->gregs + GLOB_MSIZE);

        /* Divide up the local QEC memory amongst the 4 QE receiver and
         * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
         */
        sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
                    qecp->gregs + GLOB_TSIZE);
        sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
                    qecp->gregs + GLOB_RSIZE);
}
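/* Worked example for qec_init_once() (illustrative): with 64KB of QEC
 * local memory, GLOB_MSIZE is 64KB >> 2 = 16KB per channel, and
 * GLOB_TSIZE = GLOB_RSIZE = 8KB, i.e. each channel's slice is split
 * evenly between its transmit and receive FIFOs (total / 2 / channels).
 */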
/* Four QE's per QEC card. */
static int __init qec_ether_init(struct net_device *dev, struct sbus_dev *sdev)
{
        static unsigned version_printed;
        struct net_device *qe_devs[4];
        struct sunqe *qeps[4];
        struct sbus_dev *qesdevs[4];
        struct sbus_dev *child;
        struct sunqec *qecp = NULL;
        u8 bsizes, bsizes_more;
        int i, j, res = -ENOMEM;

        for (i = 0; i < 4; i++) {
                qe_devs[i] = alloc_etherdev(sizeof(struct sunqe));
                if (!qe_devs[i])
                        goto out;
        }

        if (version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        for (i = 0; i < 4; i++) {
                qeps[i] = (struct sunqe *) qe_devs[i]->priv;
                for (j = 0; j < 6; j++)
                        qe_devs[i]->dev_addr[j] = idprom->id_ethaddr[j];
                qeps[i]->channel = i;
                spin_lock_init(&qeps[i]->lock);
        }

        qecp = kmalloc(sizeof(struct sunqec), GFP_KERNEL);
        if (qecp == NULL)
                goto out1;
        qecp->qec_sdev = sdev;

        for (i = 0; i < 4; i++) {
                qecp->qes[i] = qeps[i];
                qeps[i]->dev = qe_devs[i];
                qeps[i]->parent = qecp;
        }

        res = -ENODEV;

        for (i = 0, child = sdev->child; i < 4; i++, child = child->next) {
                /* Link in channel */
                j = prom_getintdefault(child->prom_node, "channel#", -1);
                if (j == -1)
                        goto out2;
                qesdevs[j] = child;
        }

        for (i = 0; i < 4; i++)
                qeps[i]->qe_sdev = qesdevs[i];

        /* Now map in the registers, QEC globals first. */
        qecp->gregs = sbus_ioremap(&sdev->resource[0], 0,
                                   GLOB_REG_SIZE, "QEC Global Registers");
        if (!qecp->gregs) {
                printk(KERN_ERR "QuadEther: Cannot map QEC global registers.\n");
                goto out2;
        }

        /* Make sure the QEC is in MACE mode. */
        if ((sbus_readl(qecp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_MMODE) {
                printk(KERN_ERR "QuadEther: AIEEE, QEC is not in MACE mode!\n");
                goto out3;
        }

        /* Reset the QEC. */
        if (qec_global_reset(qecp->gregs))
                goto out3;

        /* Find and set the burst sizes for the QEC, since it does
         * the actual dma for all 4 channels.
         */
        bsizes = prom_getintdefault(sdev->prom_node, "burst-sizes", 0xff);
        bsizes &= 0xff;
        bsizes_more = prom_getintdefault(sdev->bus->prom_node, "burst-sizes", 0xff);

        if (bsizes_more != 0xff)
                bsizes &= bsizes_more;
        if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
            (bsizes & DMA_BURST32) == 0)
                bsizes = (DMA_BURST32 - 1);
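        /* Illustrative note, assuming the usual sparc DMA_BURST bit values
         * (DMA_BURST16 == 0x10, DMA_BURST32 == 0x20): the fallback
         * (DMA_BURST32 - 1) == 0x1f advertises every burst size below 32
         * bytes, so qec_init_once() will program GLOB_CTRL_B16.
         */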
        qecp->qec_bursts = bsizes;

        /* Perform one time QEC initialization, we never touch the QEC
         * globals again after this.
         */
        qec_init_once(qecp, sdev);

        for (i = 0; i < 4; i++) {
                struct sunqe *qe = qeps[i];
                /* Map in QEC per-channel control registers. */
                qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
                                          CREG_REG_SIZE, "QEC Channel Registers");
                if (!qe->qcregs) {
                        printk(KERN_ERR "QuadEther: Cannot map QE %d's channel registers.\n", i);
                        goto out4;
                }

                /* Map in per-channel AMD MACE registers. */
                qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
                                         MREGS_REG_SIZE, "QE MACE Registers");
                if (!qe->mregs) {
                        printk(KERN_ERR "QuadEther: Cannot map QE %d's MACE registers.\n", i);
                        goto out4;
                }

                qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
                                                     PAGE_SIZE,
                                                     &qe->qblock_dvma);
                qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
                                                    sizeof(struct sunqe_buffers),
                                                    &qe->buffers_dvma);
                if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
                    qe->buffers == NULL || qe->buffers_dvma == 0) {
                        goto out4;
                }

                /* Stop this QE. */
                qe_stop(qe);
        }

        for (i = 0; i < 4; i++) {
                SET_MODULE_OWNER(qe_devs[i]);
                qe_devs[i]->open = qe_open;
                qe_devs[i]->stop = qe_close;
                qe_devs[i]->hard_start_xmit = qe_start_xmit;
                qe_devs[i]->get_stats = qe_get_stats;
                qe_devs[i]->set_multicast_list = qe_set_multicast;
                qe_devs[i]->tx_timeout = qe_tx_timeout;
                qe_devs[i]->watchdog_timeo = 5*HZ;
                qe_devs[i]->irq = sdev->irqs[0];
                qe_devs[i]->dma = 0;
                qe_devs[i]->ethtool_ops = &qe_ethtool_ops;
        }

        /* QEC receives interrupts from each QE, then it sends the actual
         * IRQ to the cpu itself.  Since QEC is the single point of
         * interrupt for all QE channels we register the IRQ handler
         * for it now.
         */
        if (request_irq(sdev->irqs[0], &qec_interrupt,
                        SA_SHIRQ, "QuadEther", (void *) qecp)) {
                printk(KERN_ERR "QuadEther: Can't register QEC master irq handler.\n");
                res = -EAGAIN;
                goto out4;
        }

        for (i = 0; i < 4; i++) {
                if (register_netdev(qe_devs[i]) != 0)
                        goto out5;
        }

        /* Report the QE channels. */
        for (i = 0; i < 4; i++) {
                printk(KERN_INFO "%s: QuadEthernet channel[%d] ", qe_devs[i]->name, i);
                for (j = 0; j < 6; j++)
                        printk("%2.2x%c",
                               qe_devs[i]->dev_addr[j],
                               j == 5 ? ' ' : ':');
                printk("\n");
        }

        /* We are home free at this point, link the qe's into
         * the master list for later driver exit.
         */
        qecp->next_module = root_qec_dev;
        root_qec_dev = qecp;

        return 0;
out5:
        while (i--)
                unregister_netdev(qe_devs[i]);
        free_irq(sdev->irqs[0], (void *) qecp);
out4:
        for (i = 0; i < 4; i++) {
                struct sunqe *qe = (struct sunqe *) qe_devs[i]->priv;

                if (qe->qcregs)
                        sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
                if (qe->mregs)
                        sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
                if (qe->qe_block)
                        sbus_free_consistent(qe->qe_sdev,
                                             PAGE_SIZE,
                                             qe->qe_block,
                                             qe->qblock_dvma);
                if (qe->buffers)
                        sbus_free_consistent(qe->qe_sdev,
                                             sizeof(struct sunqe_buffers),
                                             qe->buffers,
                                             qe->buffers_dvma);
        }
out3:
        sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
out2:
        kfree(qecp);
out1:
        i = 4;
out:
        while (i--)
                free_netdev(qe_devs[i]);
        return res;
}
static int __init qec_match(struct sbus_dev *sdev)
{
        struct sbus_dev *sibling;
        int i;

        if (strcmp(sdev->prom_name, "qec") != 0)
                return 0;

        /* QEC can be parent of either QuadEthernet or BigMAC
         * children.  Do not confuse this with qfe/SUNW,qfe
         * which is a quad-happymeal card and handled by
         * a different driver.
         */
        sibling = sdev->child;
        for (i = 0; i < 4; i++) {
                if (sibling == NULL)
                        return 0;
                if (strcmp(sibling->prom_name, "qe") != 0)
                        return 0;
                sibling = sibling->next;
        }
        return 1;
}
static int __init qec_probe(void)
{
        struct net_device *dev = NULL;
        struct sbus_bus *bus;
        struct sbus_dev *sdev = NULL;
        static int called;
        int cards = 0, v;

        root_qec_dev = NULL;

        if (called)
                return -ENODEV;
        called++;

        for_each_sbus(bus) {
                for_each_sbusdev(sdev, bus) {
                        if (cards)
                                dev = NULL;

                        if (qec_match(sdev)) {
                                cards++;
                                if ((v = qec_ether_init(dev, sdev)))
                                        return v;
                        }
                }
        }
        if (!cards)
                return -ENODEV;
        return 0;
}
static void __exit qec_cleanup(void)
{
        struct sunqec *next_qec;
        int i;

        while (root_qec_dev) {
                next_qec = root_qec_dev->next_module;

                /* Release all four QE channels, then the QEC itself. */
                for (i = 0; i < 4; i++) {
                        unregister_netdev(root_qec_dev->qes[i]->dev);
                        sbus_iounmap(root_qec_dev->qes[i]->qcregs, CREG_REG_SIZE);
                        sbus_iounmap(root_qec_dev->qes[i]->mregs, MREGS_REG_SIZE);
                        sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
                                             PAGE_SIZE,
                                             root_qec_dev->qes[i]->qe_block,
                                             root_qec_dev->qes[i]->qblock_dvma);
                        sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
                                             sizeof(struct sunqe_buffers),
                                             root_qec_dev->qes[i]->buffers,
                                             root_qec_dev->qes[i]->buffers_dvma);
                        free_netdev(root_qec_dev->qes[i]->dev);
                }
                free_irq(root_qec_dev->qec_sdev->irqs[0], (void *) root_qec_dev);
                sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);
                kfree(root_qec_dev);
                root_qec_dev = next_qec;
        }
}
module_init(qec_probe);
module_exit(qec_cleanup);
MODULE_LICENSE("GPL");