/* $Id: sunqe.c,v 1.52.2.1 2001/12/21 00:52:47 davem Exp $
 * sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999 David S. Miller (davem@redhat.com)
 */

static char version[] =
	"sunqe.c:v2.9 9/11/99 David S. Miller (davem@redhat.com)\n";

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/byteorder.h>

#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "sunqe.h"

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(unsigned long gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES 200

static inline int qe_stop(struct sunqe *qep)
{
	unsigned long cregs = qep->qcregs;
	unsigned long mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}
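
/* A note on the descriptor handshake above: setting RXD_OWN hands a ring
 * slot to the chip, and (RXD_PKT_SZ & RXD_LENGTH) seeds the length field
 * with the largest frame the fixed buffer can hold.  The chip clears the
 * OWN bit once it has filled the slot, which is exactly what qe_rx()
 * polls for below.
 */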

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	unsigned long cregs = qep->qcregs;
	unsigned long mregs = qep->mregs;
	unsigned long gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
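
	/* The buffer pointers just programmed carve the QEC's local SRAM
	 * into per-channel slices: GLOB_MSIZE holds the slice size and
	 * GLOB_RSIZE the receive half of it, both set up once in
	 * qec_init_once() below; the transmit FIFO simply starts where
	 * the receive half ends.
	 */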

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter frame space nor throttle seems to be necessary. */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			u8 tmp;

			mdelay(5);
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (!tries)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		qep->net_stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		qep->net_stats.tx_errors += 256;
		qep->net_stats.collisions += 256;
	}
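
	/* The "+= 256" updates here and below assume the MACE keeps these
	 * counts in 8-bit hardware counters, so each overflow interrupt
	 * stands for 256 of the underlying events.
	 */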

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_dropped++;
		qep->net_stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		qep->net_stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new, drops = 0;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;
			qep->net_stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if (skb == NULL) {
				drops++;
				qep->net_stats.rx_dropped++;
			} else {
				skb->dev = qep->dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);
				eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
						 len, 0);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				qep->dev->last_rx = jiffies;
				qep->net_stats.rx_packets++;
				qep->net_stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}
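
/* A note on the receive path above: the QE always DMAs into the fixed
 * per-slot buffers in qep->buffers, so every good frame is copied into a
 * freshly allocated skb; the dev_alloc_skb(len + 2)/skb_reserve(skb, 2)
 * pair keeps the IP header word-aligned behind the 14-byte ethernet
 * header.
 */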

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static void qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
		next:
			;
		}
		qec_status >>= 4;
		channel++;
	}
}
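
/* GLOB_STAT packs one 4-bit status nibble per channel, channel 0 in the
 * low bits, which is why the loop above tests (qec_status & 0xf) and
 * shifts right by four for each successive channel.
 */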

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}
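
/* TX_BUFFS_AVAIL() (see sunqe.h) is derived from the tx_old/tx_new ring
 * indices, so advancing tx_old here is what lets qe_start_xmit() and the
 * interrupt handler see free slots again; the TXD_OWN test stops the scan
 * at the first descriptor the chip has not finished with.
 */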

static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	memcpy(txbuf, skb->data, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	dev->trans_start = jiffies;
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	qep->net_stats.tx_packets++;
	qep->net_stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return 0;
}
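
/* Like the receive side, transmit always copies: the skb is memcpy'd into
 * a fixed DMA buffer and freed right away, so nothing needs to be unmapped
 * at reclaim time.  The TXD_UPDATE store before the memcpy is the "avoid
 * a race" step noted above, retiring the descriptor's old OWN bit before
 * its fields are rewritten.
 */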

static struct net_device_stats *qe_get_stats(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	return &qep->net_stats;
}

static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct dev_mc_list *dmi = dev->mc_list;
	u8 new_mconfig = qep->mconfig;
	unsigned char *addrs;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;

		for (i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;
			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}
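
/* A worked example of the hash above (values for illustration only):
 * ether_crc_le(6, addrs) yields a 32-bit CRC and "crc >>= 26" keeps its
 * top six bits, a value 0..63.  Bit (crc & 0xf) of half-word (crc >> 4)
 * then selects one of the 64 bits in the MACE's 8-byte logical address
 * filter, which is loaded a byte at a time through MREGS_FILTER.
 */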

/* This is only called once at boot time for each card probed. */
static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64(qsdev) && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packetsize only used in 100baseT BigMAC configurations,
	 * set it to zero just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}
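
/* A worked example, assuming a hypothetical card with 256KB of QEC local
 * SRAM: reg_size >> 2 programs GLOB_MSIZE to 64KB per channel, and
 * (reg_size >> 2) >> 1 programs GLOB_TSIZE/GLOB_RSIZE to 32KB each, so
 * channel n's receive FIFO starts at n*64KB with its transmit FIFO 32KB
 * after that (see the CREG_*BUFPTR setup in qe_init()).
 */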

/* Four QE's per QEC card. */
static int __init qec_ether_init(struct net_device *dev, struct sbus_dev *sdev)
{
	static unsigned version_printed;
	struct net_device *qe_devs[4];
	struct sunqe *qeps[4];
	struct sbus_dev *qesdevs[4];
	struct sunqec *qecp = NULL;
	u8 bsizes, bsizes_more;
	int i, j, res = ENOMEM;

	dev = init_etherdev(0, sizeof(struct sunqe));
	qe_devs[0] = dev;
	qeps[0] = (struct sunqe *) dev->priv;
	qeps[0]->channel = 0;
	spin_lock_init(&qeps[0]->lock);
	for (j = 0; j < 6; j++)
		qe_devs[0]->dev_addr[j] = idprom->id_ethaddr[j];

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	qe_devs[1] = qe_devs[2] = qe_devs[3] = NULL;
	for (i = 1; i < 4; i++) {
		qe_devs[i] = init_etherdev(0, sizeof(struct sunqe));
		if (qe_devs[i] == NULL || qe_devs[i]->priv == NULL)
			goto qec_free_devs;
		qeps[i] = (struct sunqe *) qe_devs[i]->priv;
		for (j = 0; j < 6; j++)
			qe_devs[i]->dev_addr[j] = idprom->id_ethaddr[j];
		qeps[i]->channel = i;
		spin_lock_init(&qeps[i]->lock);
	}

	qecp = kmalloc(sizeof(struct sunqec), GFP_KERNEL);
	if (qecp == NULL)
		goto qec_free_devs;
	qecp->qec_sdev = sdev;

	for (i = 0; i < 4; i++) {
		qecp->qes[i] = qeps[i];
		qeps[i]->dev = qe_devs[i];
		qeps[i]->parent = qecp;
	}

	/* Link in channel 0. */
	i = prom_getintdefault(sdev->child->prom_node, "channel#", -1);
	if (i == -1) { res = ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child;

	/* Link in channel 1. */
	i = prom_getintdefault(sdev->child->next->prom_node, "channel#", -1);
	if (i == -1) { res = ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child->next;

	/* Link in channel 2. */
	i = prom_getintdefault(sdev->child->next->next->prom_node, "channel#", -1);
	if (i == -1) { res = ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child->next->next;

	/* Link in channel 3. */
	i = prom_getintdefault(sdev->child->next->next->next->prom_node, "channel#", -1);
	if (i == -1) { res = ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child->next->next->next;

	for (i = 0; i < 4; i++)
		qeps[i]->qe_sdev = qesdevs[i];

	/* Now map in the registers, QEC globals first. */
	qecp->gregs = sbus_ioremap(&sdev->resource[0], 0,
				   GLOB_REG_SIZE, "QEC Global Registers");
	if (!qecp->gregs) {
		printk(KERN_ERR "QuadEther: Cannot map QEC global registers.\n");
		res = ENODEV;
		goto qec_free_devs;
	}

	/* Make sure the QEC is in MACE mode. */
	if ((sbus_readl(qecp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_MMODE) {
		printk(KERN_ERR "QuadEther: AIEEE, QEC is not in MACE mode!\n");
		res = ENODEV;
		goto qec_free_devs;
	}

	/* Reset the QEC. */
	if (qec_global_reset(qecp->gregs)) {
		res = ENODEV;
		goto qec_free_devs;
	}

	/* Find and set the burst sizes for the QEC, since it does
	 * the actual dma for all 4 channels.
	 */
	bsizes = prom_getintdefault(sdev->prom_node, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = prom_getintdefault(sdev->bus->prom_node, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	qecp->qec_bursts = bsizes;

	/* Perform one time QEC initialization, we never touch the QEC
	 * globals again after this.
	 */
	qec_init_once(qecp, sdev);

	for (i = 0; i < 4; i++) {
		/* Map in QEC per-channel control registers. */
		qeps[i]->qcregs = sbus_ioremap(&qesdevs[i]->resource[0], 0,
					       CREG_REG_SIZE, "QEC Channel Registers");
		if (!qeps[i]->qcregs) {
			printk(KERN_ERR "QuadEther: Cannot map QE %d's channel registers.\n", i);
			res = ENODEV;
			goto qec_free_devs;
		}

		/* Map in per-channel AMD MACE registers. */
		qeps[i]->mregs = sbus_ioremap(&qesdevs[i]->resource[1], 0,
					      MREGS_REG_SIZE, "QE MACE Registers");
		if (!qeps[i]->mregs) {
			printk(KERN_ERR "QuadEther: Cannot map QE %d's MACE registers.\n", i);
			res = ENODEV;
			goto qec_free_devs;
		}

		qeps[i]->qe_block = sbus_alloc_consistent(qesdevs[i],
							  PAGE_SIZE,
							  &qeps[i]->qblock_dvma);
		qeps[i]->buffers = sbus_alloc_consistent(qesdevs[i],
							 sizeof(struct sunqe_buffers),
							 &qeps[i]->buffers_dvma);
		if (qeps[i]->qe_block == NULL ||
		    qeps[i]->qblock_dvma == 0 ||
		    qeps[i]->buffers == NULL ||
		    qeps[i]->buffers_dvma == 0) {
			res = ENODEV;
			goto qec_free_devs;
		}

		/* Stop this QE. */
		qe_stop(qeps[i]);
	}

	for (i = 0; i < 4; i++) {
		SET_MODULE_OWNER(qe_devs[i]);
		qe_devs[i]->open = qe_open;
		qe_devs[i]->stop = qe_close;
		qe_devs[i]->hard_start_xmit = qe_start_xmit;
		qe_devs[i]->get_stats = qe_get_stats;
		qe_devs[i]->set_multicast_list = qe_set_multicast;
		qe_devs[i]->tx_timeout = qe_tx_timeout;
		qe_devs[i]->watchdog_timeo = 5*HZ;
		qe_devs[i]->irq = sdev->irqs[0];
		qe_devs[i]->dma = 0;
		ether_setup(qe_devs[i]);
	}

	/* QEC receives interrupts from each QE, then it sends the actual
	 * IRQ to the cpu itself.  Since QEC is the single point of
	 * interrupt for all QE channels we register the IRQ handler
	 * for it now.
	 */
	if (request_irq(sdev->irqs[0], &qec_interrupt,
			SA_SHIRQ, "QuadEther", (void *) qecp)) {
		printk(KERN_ERR "QuadEther: Can't register QEC master irq handler.\n");
		res = EAGAIN;
		goto qec_free_devs;
	}

	/* Report the QE channels. */
	for (i = 0; i < 4; i++) {
		printk(KERN_INFO "%s: QuadEthernet channel[%d] ", qe_devs[i]->name, i);
		for (j = 0; j < 6; j++)
			printk("%2.2x%c",
			       qe_devs[i]->dev_addr[j],
			       j == 5 ? ' ' : ':');
		printk("\n");
	}

	/* We are home free at this point, link the qe's into
	 * the master list for later driver exit.
	 */
	for (i = 0; i < 4; i++)
		qe_devs[i]->ifindex = dev_new_index();
	qecp->next_module = root_qec_dev;
	root_qec_dev = qecp;

	return 0;

qec_free_devs:
	for (i = 0; i < 4; i++) {
		if (qe_devs[i] != NULL) {
			if (qe_devs[i]->priv) {
				struct sunqe *qe = (struct sunqe *) qe_devs[i]->priv;

				if (qe->qcregs)
					sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
				if (qe->mregs)
					sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
				if (qe->qe_block != NULL)
					sbus_free_consistent(qe->qe_sdev,
							     PAGE_SIZE,
							     qe->qe_block,
							     qe->qblock_dvma);
				if (qe->buffers != NULL)
					sbus_free_consistent(qe->qe_sdev,
							     sizeof(struct sunqe_buffers),
							     qe->buffers,
							     qe->buffers_dvma);
			}
			kfree(qe_devs[i]);
		}
	}
	if (qecp != NULL) {
		if (qecp->gregs)
			sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
		kfree(qecp);
	}
	return res;
}

static int __init qec_match(struct sbus_dev *sdev)
{
	struct sbus_dev *sibling;
	int i;

	if (strcmp(sdev->prom_name, "qec") != 0)
		return 0;

	/* QEC can be parent of either QuadEthernet or BigMAC
	 * children.  Do not confuse this with qfe/SUNW,qfe
	 * which is a quad-happymeal card and handled by
	 * a different driver.
	 */
	sibling = sdev->child;
	for (i = 0; i < 4; i++) {
		if (sibling == NULL)
			return 0;
		if (strcmp(sibling->prom_name, "qe") != 0)
			return 0;
		sibling = sibling->next;
	}
	return 1;
}

static int __init qec_probe(void)
{
	struct net_device *dev = NULL;
	struct sbus_bus *bus;
	struct sbus_dev *sdev = 0;
	static int called;
	int cards = 0, v;

	root_qec_dev = NULL;

	if (called)
		return -ENODEV;
	called++;

	for_each_sbus(bus) {
		for_each_sbusdev(sdev, bus) {
			if (cards)
				dev = NULL;
			if (qec_match(sdev)) {
				cards++;
				if ((v = qec_ether_init(dev, sdev)))
					return v;
			}
		}
	}
	if (!cards)
		return -ENODEV;
	return 0;
}

static void __exit qec_cleanup(void)
{
	struct sunqec *next_qec;
	int i;

	while (root_qec_dev) {
		next_qec = root_qec_dev->next_module;

		/* Release all four QE channels, then the QEC itself. */
		for (i = 0; i < 4; i++) {
			unregister_netdev(root_qec_dev->qes[i]->dev);
			sbus_iounmap(root_qec_dev->qes[i]->qcregs, CREG_REG_SIZE);
			sbus_iounmap(root_qec_dev->qes[i]->mregs, MREGS_REG_SIZE);
			sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
					     PAGE_SIZE,
					     root_qec_dev->qes[i]->qe_block,
					     root_qec_dev->qes[i]->qblock_dvma);
			sbus_free_consistent(root_qec_dev->qes[i]->qe_sdev,
					     sizeof(struct sunqe_buffers),
					     root_qec_dev->qes[i]->buffers,
					     root_qec_dev->qes[i]->buffers_dvma);
			kfree(root_qec_dev->qes[i]->dev);
		}
		free_irq(root_qec_dev->qec_sdev->irqs[0], (void *) root_qec_dev);
		sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);
		kfree(root_qec_dev);
		root_qec_dev = next_qec;
	}
}

module_init(qec_probe);
module_exit(qec_cleanup);
MODULE_LICENSE("GPL");