/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006 David S. Miller (davem@davemloft.net)
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include "sunqe.h"
#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.0"
#define DRV_RELDATE	"June 23, 2006"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"
static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");
static struct sunqec *root_qec_dev;
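/* Probed QEC parent controllers are kept on a simple singly linked list
 * headed here: get_qec() walks it via ->next_module to find an already
 * initialized parent, and qec_exit() tears the whole list down.
 */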
static void qe_set_multicast(struct net_device *dev);
#define QEC_RESET_TRIES 200
static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}
#define MACE_RESET_RETRIES	200
#define QE_RESET_RETRIES	200
static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}
static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}
static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;
	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);
	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
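	/* To make the arithmetic above concrete: qec_init_once() programs
	 * GLOB_MSIZE with (local ram / 4), one equal slice per channel, so
	 * this channel's RX FIFO begins at channel * MSIZE and its TX FIFO
	 * starts GLOB_RSIZE bytes into the slice.  E.g. with 64K of QEC
	 * local ram (an illustrative figure, the real size comes from the
	 * SBUS register property): MSIZE = 16K, RSIZE = TSIZE = 8K, so
	 * channel 1 gets RX at 16K and TX at 24K.
	 */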
	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter frame space nor throttle seems to be necessary. */
	sbus_writel(0, cregs + CREG_PIPG);
	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);
	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);
	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);
	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);
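	/* The logical address filter is 64 bits wide but sits behind the
	 * single MREGS_FILTER port: the MACE latches eight successive byte
	 * writes, which is why the loop above stores exactly 8 bytes (all
	 * zeroes here, rejecting every multicast group until
	 * qe_set_multicast() programs a real filter).
	 */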
	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (tries--) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries < 0)	/* tries-- leaves -1 once the loop runs out */
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);
	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}
/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;
	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		qep->net_stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		qep->net_stats.tx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}
	if (qe_status & CREG_STAT_RCCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_dropped++;
		qep->net_stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		qep->net_stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}
	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}
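/* Note the contract with qec_interrupt(): a non-zero return means the
 * channel was fully re-initialized via qe_init(), so the caller must
 * treat its latched status word as stale and skip any further rx/tx
 * servicing of this channel for this interrupt.
 */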
/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = qep->buffers_dvma;
	int elem = qep->rx_new, drops = 0;
	u32 flags;
	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */
		/* Check for errors. */
		if (len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;
			qep->net_stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if (skb == NULL) {
				drops++;
				qep->net_stats.rx_dropped++;
			} else {
				skb->dev = qep->dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);
				eth_copy_and_sum(skb, (unsigned char *) this_qbuf,
						 len, 0);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				qep->dev->last_rx = jiffies;
				qep->net_stats.rx_packets++;
				qep->net_stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if (drops)
		printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}
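/* A note on the ring geometry above: buffer slots are indexed with
 * elem & (RX_RING_SIZE - 1) while descriptors walk the full
 * RX_RING_MAXSIZE-entry table, so "end_rxd", the descriptor refreshed
 * for reuse, sits RX_RING_SIZE slots ahead of the one just consumed
 * and points back at the very buffer that was just copied out of.
 */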
static void qe_tx_reclaim(struct sunqe *qep);
/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static irqreturn_t qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	u32 qec_status, qe_status;
	int channel = 0;
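	/* GLOB_STAT packs four status bits per channel, channel 0 in the
	 * low nibble; the >>= 4 walk below relies on that layout.
	 */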
	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
		next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}
static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}
static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qe_stop(qep);
	return 0;
}
/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}
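/* The OWN bit is the handshake with the chip: the scan above only
 * advances tx_old past descriptors whose TXD_OWN bit the QE has already
 * cleared, and stops at the first descriptor still owned by hardware.
 */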
static void qe_tx_timeout(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}
/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;
	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	memcpy(txbuf, skb->data, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);
	/* Get it going. */
	dev->trans_start = jiffies;
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	qep->net_stats.tx_packets++;
	qep->net_stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return 0;
}
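/* The "lazy tx reclaim scheme" referred to above: during normal
 * operation TX completion interrupts stay masked (CREG_TIMASK = 1) and
 * finished descriptors are reclaimed opportunistically at the top of
 * qe_start_xmit().  Only when the ring fills does the driver stop the
 * queue and unmask (CREG_TIMASK = 0), letting qec_interrupt() reclaim,
 * re-wake the queue, and mask again.
 */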
static struct net_device_stats *qe_get_stats(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	return &qep->net_stats;
}
static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct dev_mc_list *dmi = dev->mc_list;
	u8 new_mconfig = qep->mconfig;
	char *addrs;
	int i;
	u32 crc;
	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		for (i = 0; i < 4; i++)
			hash_table[i] = 0;

		for (i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if (!(*addrs & 1))
				continue;
			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
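		/* To unpack the hashing above: ether_crc_le() yields a
		 * 32-bit CRC and crc >>= 26 keeps its top 6 bits, selecting
		 * one of the filter's 64 bits.  hash_table[] is four 16-bit
		 * words, so crc >> 4 picks the word and crc & 0xf the bit
		 * within it.
		 */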
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}
	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}
/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct sunqe *qep = dev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	sprintf(info->bus_info, "SBUS:%d",
		qep->qe_sdev->slot);
}
static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = dev->priv;
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return (phyconfig & MREGS_PHYCONFIG_LSTAT);
}
static struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo		= qe_get_drvinfo,
	.get_link		= qe_get_link,
};
/* This is only called once at boot time for each card probed. */
static inline void qec_init_once(struct sunqec *qecp, struct sbus_dev *qsdev)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64(qsdev) && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}
	/* Packetsize only used in 100baseT BigMAC configurations,
	 * set it to the 2048 default just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((qsdev->reg_addrs[1].reg_size >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}
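/* So the local-ram budget works out as: MSIZE = total / 4 (one slice
 * per channel) and TSIZE = RSIZE = MSIZE / 2 (each slice split evenly
 * between the receive and transmit FIFOs), which is exactly the
 * "(total / 2 / num_channels)" noted above.
 */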
static u8 __init qec_get_burst(struct device_node *dp)
{
	u8 bsizes, bsizes_more;

	/* Find and set the burst sizes for the QEC, since it
	 * does the actual dma for all 4 channels.
	 */
	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	return bsizes;
}
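/* The (DMA_BURST32 - 1) fallback relies on the DMA_BURSTxx constants
 * being one-hot size flags, so subtracting one from DMA_BURST32 yields
 * a mask of every burst size below 32 -- a conservative default when
 * the prom gives us nothing usable.  That the constants are one-hot is
 * an assumption from their use as bitmasks here and in qec_init_once().
 */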
static struct sunqec * __init get_qec(struct sbus_dev *child_sdev)
{
	struct sbus_dev *qec_sdev = child_sdev->parent;
	struct sunqec *qecp;

	for (qecp = root_qec_dev; qecp; qecp = qecp->next_module) {
		if (qecp->qec_sdev == qec_sdev)
			break;
	}
	if (qecp == NULL) {
		u32 ctrl;

		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp == NULL)
			return NULL;

		qecp->qec_sdev = qec_sdev;
		qecp->gregs = sbus_ioremap(&qec_sdev->resource[0], 0,
					   GLOB_REG_SIZE,
					   "QEC Global Registers");
		if (!qecp->gregs)
			goto fail;

		/* Make sure the QEC is in MACE mode. */
		ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
		ctrl &= 0xf0000000;
		if (ctrl != GLOB_CTRL_MMODE) {
			printk(KERN_ERR "qec: Not in MACE mode!\n");
			goto fail;
		}

		if (qec_global_reset(qecp->gregs))
			goto fail;

		qecp->qec_bursts = qec_get_burst(qec_sdev->ofdev.node);

		qec_init_once(qecp, qec_sdev);

		if (request_irq(qec_sdev->irqs[0], &qec_interrupt,
				IRQF_SHARED, "qec", (void *) qecp)) {
			printk(KERN_ERR "qec: Can't register irq.\n");
			goto fail;
		}

		qecp->next_module = root_qec_dev;
		root_qec_dev = qecp;
	}

	return qecp;

fail:
	if (qecp->gregs)
		sbus_iounmap(qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}
static int __init qec_ether_init(struct sbus_dev *sdev)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqe *qe;
	struct sunqec *qecp;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);

	qe = netdev_priv(dev);
	i = of_getintprop_default(sdev->ofdev.node, "channel#", -1);
	if (i == -1) {
		struct sbus_dev *td = sdev->parent->child;

		i = 0;
		while (td != sdev) {
			td = td->next;
			i++;
		}
	}
	qe->channel = i;
	spin_lock_init(&qe->lock);
	res = -ENODEV;
	qecp = get_qec(sdev);
	if (qecp == NULL)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->qe_sdev = sdev;
	res = -ENOMEM;
	qe->qcregs = sbus_ioremap(&qe->qe_sdev->resource[0], 0,
				  CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = sbus_ioremap(&qe->qe_sdev->resource[1], 0,
				 MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}
	qe->qe_block = sbus_alloc_consistent(qe->qe_sdev,
					     PAGE_SIZE,
					     &qe->qblock_dvma);
	qe->buffers = sbus_alloc_consistent(qe->qe_sdev,
					    sizeof(struct sunqe_buffers),
					    &qe->buffers_dvma);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;
	/* Stop this QE. */
	qe_stop(qe);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &sdev->ofdev.dev);

	dev->open = qe_open;
	dev->stop = qe_close;
	dev->hard_start_xmit = qe_start_xmit;
	dev->get_stats = qe_get_stats;
	dev->set_multicast_list = qe_set_multicast;
	dev->tx_timeout = qe_tx_timeout;
	dev->watchdog_timeo = 5*HZ;
	dev->irq = sdev->irqs[0];

	dev->ethtool_ops = &qe_ethtool_ops;
	res = register_netdev(dev);
	if (res)
		goto fail;

	dev_set_drvdata(&sdev->ofdev.dev, qe);

	printk(KERN_INFO "%s: qe channel[%d] ", dev->name, qe->channel);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? ' ' : ':');
	printk("\n");

	return 0;
fail:
	if (qe->qcregs)
		sbus_iounmap(qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		sbus_iounmap(qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		sbus_free_consistent(qe->qe_sdev,
				     PAGE_SIZE,
				     qe->qe_block,
				     qe->qblock_dvma);
	if (qe->buffers)
		sbus_free_consistent(qe->qe_sdev,
				     sizeof(struct sunqe_buffers),
				     qe->buffers,
				     qe->buffers_dvma);

	free_netdev(dev);

	return res;
}
static int __devinit qec_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
	struct sbus_dev *sdev = to_sbus_device(&dev->dev);

	return qec_ether_init(sdev);
}
static int __devexit qec_sbus_remove(struct of_device *dev)
{
	struct sunqe *qp = dev_get_drvdata(&dev->dev);
	struct net_device *net_dev = qp->dev;

	unregister_netdevice(net_dev);

	sbus_iounmap(qp->qcregs, CREG_REG_SIZE);
	sbus_iounmap(qp->mregs, MREGS_REG_SIZE);
	sbus_free_consistent(qp->qe_sdev,
			     PAGE_SIZE,
			     qp->qe_block,
			     qp->qblock_dvma);
	sbus_free_consistent(qp->qe_sdev,
			     sizeof(struct sunqe_buffers),
			     qp->buffers,
			     qp->buffers_dvma);

	free_netdev(net_dev);

	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
static struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);
static struct of_platform_driver qec_sbus_driver = {
	.name		= "qec",
	.match_table	= qec_sbus_match,
	.probe		= qec_sbus_probe,
	.remove		= __devexit_p(qec_sbus_remove),
};
static int __init qec_init(void)
{
	return of_register_driver(&qec_sbus_driver, &sbus_bus_type);
}

static void __exit qec_exit(void)
{
	of_unregister_driver(&qec_sbus_driver);

	while (root_qec_dev) {
		struct sunqec *next = root_qec_dev->next_module;

		free_irq(root_qec_dev->qec_sdev->irqs[0],
			 (void *) root_qec_dev);
		sbus_iounmap(root_qec_dev->gregs, GLOB_REG_SIZE);
		kfree(root_qec_dev);

		root_qec_dev = next;
	}
}

module_init(qec_init);
module_exit(qec_exit);