/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
10 "sunqe.c:v1.1 8/Nov/96 David S. Miller (davem@caipfs.rutgers.edu)\n";
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/malloc.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <linux/errno.h>

#include <asm/byteorder.h>

#include <asm/idprom.h>
#include <asm/sbus.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include "sunqe.h"
static struct sunqec *root_qec_dev = NULL;
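/* Note: root_qec_dev above anchors the list of probed QEC cards;
 * qec_ether_init() links each new card onto it and cleanup_module()
 * walks the list to unwind them again.
 */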
#define QEC_RESET_TRIES 200
static inline int qec_global_reset(struct qe_globreg *gregs)
{
	int tries = QEC_RESET_TRIES;

	gregs->ctrl = GLOB_CTRL_RESET;
	while(--tries) {
		if(gregs->ctrl & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if(tries)
		return 0;
	printk("QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}
#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200
static inline int qe_stop(struct sunqe *qep)
{
	struct qe_creg *cregs = qep->qcregs;
	struct qe_mregs *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	mregs->bconfig = MREGS_BCONFIG_RESET;
	tries = MACE_RESET_RETRIES;
	while(--tries) {
		if(mregs->bconfig & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if(!tries) {
		printk("QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	cregs->ctrl = CREG_CTRL_RESET;
	tries = QE_RESET_RETRIES;
	while(--tries) {
		if(cregs->ctrl & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if(!tries) {
		printk("QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}
static inline void qe_clean_rings(struct sunqe *qep)
{
	int i;

	for(i = 0; i < RX_RING_SIZE; i++) {
		if(qep->rx_skbs[i] != NULL) {
			dev_kfree_skb(qep->rx_skbs[i]);
			qep->rx_skbs[i] = NULL;
		}
	}

	for(i = 0; i < TX_RING_SIZE; i++) {
		if(qep->tx_skbs[i] != NULL) {
			dev_kfree_skb(qep->tx_skbs[i]);
			qep->tx_skbs[i] = NULL;
		}
	}
}
static void qe_init_rings(struct sunqe *qep, int from_irq)
{
	struct qe_init_block *qb = qep->qe_block;
	struct device *dev = qep->dev;
	int i, gfp_flags = GFP_KERNEL;

	if(from_irq || in_interrupt())
		gfp_flags = GFP_ATOMIC;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;

	qe_clean_rings(qep);

	for(i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = qe_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags | GFP_DMA);
		if(!skb)
			continue;

		qep->rx_skbs[i] = skb;
		skb->dev = dev;

		skb_put(skb, ETH_FRAME_LEN);
		skb_reserve(skb, 34);

		qb->qe_rxd[i].rx_addr = sbus_dvma_addr(skb->data);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));
	}

	for(i = 0; i < TX_RING_SIZE; i++)
		qb->qe_txd[i].tx_flags = qb->qe_txd[i].tx_addr = 0;
}
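/* Note the ownership protocol on the descriptors: setting RXD_OWN (or
 * TXD_OWN) hands a descriptor and its buffer to the chip, and the chip
 * clears the bit when it is done with the entry.  The service routines
 * below only ever touch ring entries whose OWN bit has come back off.
 */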
static void sun4c_qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->sun4c_buffers;
	__u32 qbufs_dvma = qep->s4c_buf_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;

	memset(qbufs, 0, sizeof(struct sunqe_buffers));

	for(i = 0; i < RX_RING_SIZE; i++)
		qb->qe_rxd[i].rx_flags = qb->qe_rxd[i].rx_addr = 0;

	for(i = 0; i < SUN4C_RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | (SUN4C_RX_BUFF_SIZE & RXD_LENGTH));
	}

	for(i = 0; i < TX_RING_SIZE; i++)
		qb->qe_txd[i].tx_flags = qb->qe_txd[i].tx_addr = 0;
}
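/* On sun4c the rings do not point at skb data at all; they point at a
 * fixed block of DVMA memory (qep->sun4c_buffers) and every packet is
 * staged through it with a copy, see sun4c_qe_rx() and
 * sun4c_qe_start_xmit() below.  This keeps the chip's DMA confined to
 * that one pre-mapped region.
 */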
static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	struct qe_creg *cregs = qep->qcregs;
	struct qe_mregs *mregs = qep->mregs;
	struct qe_globreg *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	volatile unsigned char garbage;
	int i;

	/* Shut it up. */
	if(qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	cregs->rxds = qep->qblock_dvma + qib_offset(qe_rxd, 0);
	cregs->txds = qep->qblock_dvma + qib_offset(qe_txd, 0);

	/* Enable the various irq's. */
	cregs->rimask = 0;
	cregs->timask = 0;
	cregs->qmask = 0;
	cregs->mmask = CREG_MMASK_RXCOLL;

	/* Setup the FIFO pointers into QEC local memory. */
	cregs->rxwbufptr = cregs->rxrbufptr = qep->channel * gregs->msize;
	cregs->txwbufptr = cregs->txrbufptr = cregs->rxrbufptr + gregs->rsize;

	/* Clear the channel collision counter. */
	cregs->ccnt = 0;

	/* For 10baseT, neither inter frame space nor throttle seems to be necessary. */
	cregs->pipg = 0;

	/* Now dork with the AMD MACE. */
	mregs->txfcntl = MREGS_TXFCNTL_AUTOPAD; /* Save us some tx work. */
	mregs->rxfcntl = 0;

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	mregs->imask = (MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ);

	mregs->bconfig = (MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS);
	mregs->fconfig = (MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
			  MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU);

	/* Only usable interface on QuadEther is twisted pair. */
	mregs->plsconfig = (MREGS_PLSCONFIG_TP);

	/* Tell MACE we are changing the ether address. */
	mregs->iaconfig = (MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET);
	mregs->ethaddr = e[0];
	mregs->ethaddr = e[1];
	mregs->ethaddr = e[2];
	mregs->ethaddr = e[3];
	mregs->ethaddr = e[4];
	mregs->ethaddr = e[5];

	/* Clear out the address filter. */
	mregs->iaconfig = (MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET);
	for(i = 0; i < 8; i++) mregs->filter = 0;

	/* Address changes are now complete. */
	mregs->iaconfig = 0;

	if(sparc_cpu_model == sun4c)
		sun4c_qe_init_rings(qep);
	else
		qe_init_rings(qep, from_irq);

	/* Wait a little bit for the link to come up... */
	if(!(mregs->phyconfig & MREGS_PHYCONFIG_LTESTDIS)) {
		udelay(5);
		if(!(mregs->phyconfig & MREGS_PHYCONFIG_LSTAT))
			printk("%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	garbage = mregs->mpcnt;

	/* Turn on the MACE receiver and transmitter. */
	mregs->mconfig = (MREGS_MCONFIG_TXENAB | MREGS_MCONFIG_RXENAB);

	/* QEC should now start to show interrupts. */
	return 0;
}
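/* A note on the FIFO carving above: the QEC's local memory is split
 * evenly between the four channels (gregs->msize bytes each), and within
 * a channel's slice the receive FIFO sits first with the transmit FIFO
 * placed gregs->rsize bytes above it, hence the rxrbufptr/txrbufptr
 * arithmetic.  See qec_init_once() below for how msize/rsize are set.
 */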
/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
static int qe_is_bolixed(struct sunqe *qep, unsigned int qe_status)
{
	struct device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if(qe_status & CREG_STAT_EDEFER) {
		printk("%s: Excessive transmit defers.\n", dev->name);
		qep->net_stats.tx_errors++;
	}

	if(qe_status & CREG_STAT_CLOSS) {
		printk("%s: Carrier lost, link down?\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_carrier_errors++;
	}

	if(qe_status & CREG_STAT_ERETRIES) {
		printk("%s: Excessive transmit retries (more than 16).\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_LCOLL) {
		printk("%s: Late transmit collision.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_FUFLOW) {
		printk("%s: Transmit fifo underflow, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_JERROR) {
		printk("%s: Jabber error.\n", dev->name);
	}

	if(qe_status & CREG_STAT_BERROR) {
		printk("%s: Babble error.\n", dev->name);
	}

	if(qe_status & CREG_STAT_CCOFLOW) {
		qep->net_stats.tx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if(qe_status & CREG_STAT_TXDERROR) {
		printk("%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_TXLERR) {
		printk("%s: Transmit late error.\n", dev->name);
		qep->net_stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_TXPERR) {
		printk("%s: Transmit DMA parity error.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_TXSERR) {
		printk("%s: Transmit DMA sbus error ack.\n", dev->name);
		qep->net_stats.tx_errors++;
		qep->net_stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_RCCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.collisions += 256;
	}

	if(qe_status & CREG_STAT_RUOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_over_errors += 256;
	}

	if(qe_status & CREG_STAT_MCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_missed_errors += 256;
	}

	if(qe_status & CREG_STAT_RXFOFLOW) {
		printk("%s: Receive fifo overflow.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_over_errors++;
	}

	if(qe_status & CREG_STAT_RLCOLL) {
		printk("%s: Late receive collision.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.collisions++;
	}

	if(qe_status & CREG_STAT_FCOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_frame_errors += 256;
	}

	if(qe_status & CREG_STAT_CECOFLOW) {
		qep->net_stats.rx_errors += 256;
		qep->net_stats.rx_crc_errors += 256;
	}

	if(qe_status & CREG_STAT_RXDROP) {
		printk("%s: Receive packet dropped.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_dropped++;
		qep->net_stats.rx_missed_errors++;
	}

	if(qe_status & CREG_STAT_RXSMALL) {
		printk("%s: Receive buffer too small, driver bug.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_length_errors++;
	}

	if(qe_status & CREG_STAT_RXLERR) {
		printk("%s: Receive late error.\n", dev->name);
		qep->net_stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_RXPERR) {
		printk("%s: Receive DMA parity error.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if(qe_status & CREG_STAT_RXSERR) {
		printk("%s: Receive DMA sbus error ack.\n", dev->name);
		qep->net_stats.rx_errors++;
		qep->net_stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if(mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}
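/* The return value is the hwbug flag itself, so the caller knows the
 * channel was just re-initialized and should discard any rx/tx work
 * latched in the stale status word (see the interrupt handlers below).
 */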
/* Per-QE transmit complete interrupt service routine. */
static inline void qe_tx(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	struct qe_txd *this;
	int elem = qep->tx_old;

	while(elem != qep->tx_new) {
		struct sk_buff *skb;

		this = &txbase[elem];
		if(this->tx_flags & TXD_OWN)
			break;
		skb = qep->tx_skbs[elem];
		qep->tx_skbs[elem] = NULL;
		qep->net_stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);

		qep->net_stats.tx_packets++;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}
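/* The reclaim loop above walks from tx_old toward tx_new and stops at
 * the first descriptor the chip still owns; everything before it has
 * gone out on the wire and its skb can be freed.
 */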
static inline void sun4c_qe_tx(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	struct qe_txd *this;
	int elem = qep->tx_old;

	while(elem != qep->tx_new) {
		this = &txbase[elem];
		if(this->tx_flags & TXD_OWN)
			break;
		qep->net_stats.tx_packets++;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}
/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static inline void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	int elem = qep->rx_new, drops = 0;

	this = &rxbase[elem];
	while(!(this->rx_flags & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned int flags = this->rx_flags;
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if(len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;

	drop_it:
			/* Return it to the QE. */
			qep->net_stats.rx_dropped++;
			this->rx_addr = sbus_dvma_addr(qep->rx_skbs[elem]->data);
			this->rx_flags =
				(RXD_OWN | (RX_BUF_ALLOC_SIZE & RXD_LENGTH));
			goto next;
		}
		skb = qep->rx_skbs[elem];
#ifdef NEED_DMA_SYNCHRONIZATION
#ifdef __sparc_v9__
		if ((unsigned long) (skb->data + skb->len) >= MAX_DMA_ADDRESS) {
			printk("sunqe: Bogus DMA buffer address "
			       "[%016lx]\n", ((unsigned long) skb->data));
			panic("DMA address too large, tell DaveM");
		}
#endif
		mmu_sync_dma(sbus_dvma_addr(skb->data),
			     skb->len, qep->qe_sbusdev->my_bus);
#endif
		if(len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can. */
			new_skb = qe_alloc_skb(RX_BUF_ALLOC_SIZE, (GFP_DMA|GFP_ATOMIC));
			if(new_skb == NULL) {
				drops++;
				goto drop_it;
			}

			qep->rx_skbs[elem] = new_skb;
			new_skb->dev = qep->dev;
			skb_put(new_skb, ETH_FRAME_LEN);
			skb_reserve(new_skb, 34);

			rxbase[elem].rx_addr = sbus_dvma_addr(new_skb->data);
			rxbase[elem].rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if(copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			copy_skb->dev = qep->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0);

			/* Reuse original ring buffer. */
			rxbase[elem].rx_addr = sbus_dvma_addr(skb->data);
			rxbase[elem].rx_flags =
				(RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH));

			skb = copy_skb;
		}

		/* No checksums are done by this card ;-( */
		skb->protocol = eth_type_trans(skb, qep->dev);
		netif_rx(skb);
		qep->net_stats.rx_packets++;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if(drops)
		printk("%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}
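/* The copy water mark works as follows: packets larger than
 * RX_COPY_THRESHOLD hand the ring skb itself up the stack and refill the
 * ring slot with a freshly allocated skb, while small packets are
 * duplicated into a tight skb so the big ring buffer can be handed
 * straight back to the chip.
 */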
static inline void sun4c_qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->sun4c_buffers;
	__u32 qbufs_dvma = qep->s4c_buf_dvma;
	int elem = qep->rx_new, drops = 0;

	this = &rxbase[elem];
	while(!(this->rx_flags & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			qbufs->rx_buf[elem & (SUN4C_RX_RING_SIZE - 1)];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (SUN4C_RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem+SUN4C_RX_RING_SIZE)&(RX_RING_SIZE-1)];
		unsigned int flags = this->rx_flags;
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if(len < ETH_ZLEN) {
			qep->net_stats.rx_errors++;
			qep->net_stats.rx_length_errors++;
			qep->net_stats.rx_dropped++;
		} else {
			skb = dev_alloc_skb(len + 2);
			if(skb == NULL) {
				drops++;
				qep->net_stats.rx_dropped++;
			} else {
				skb->dev = qep->dev;
				skb_reserve(skb, 2);
				skb_put(skb, len);
				eth_copy_and_sum(skb, (unsigned char *)this_qbuf,
						 len, 0);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				qep->net_stats.rx_packets++;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | (SUN4C_RX_BUFF_SIZE & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
	if(drops)
		printk("%s: Memory squeeze, deferring packet.\n", qep->dev->name);
}
/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
static void qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	unsigned int qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = qecp->gregs->stat;
	while(channel < 4) {
		if(qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			struct device *dev = qep->dev;
			unsigned int qe_status;

			dev->interrupt = 1;

			qe_status = qep->qcregs->stat;
			if(qe_status & CREG_STAT_ERRORS)
				if(qe_is_bolixed(qep, qe_status))
					goto next;

			if(qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);

			if(qe_status & CREG_STAT_TXIRQ)
				qe_tx(qep);

			if(dev->tbusy && (TX_BUFFS_AVAIL(qep) >= 0)) {
				dev->tbusy = 0;
				mark_bh(NET_BH);
			}

	next:
			dev->interrupt = 0;
		}
		qec_status >>= 4;
		channel++;
	}
}
static void sun4c_qec_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct sunqec *qecp = (struct sunqec *) dev_id;
	unsigned int qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = qecp->gregs->stat;
	while(channel < 4) {
		if(qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			struct device *dev = qep->dev;
			unsigned int qe_status;

			dev->interrupt = 1;

			qe_status = qep->qcregs->stat;
			if(qe_status & CREG_STAT_ERRORS)
				if(qe_is_bolixed(qep, qe_status))
					goto next;

			if(qe_status & CREG_STAT_RXIRQ)
				sun4c_qe_rx(qep);

			if(qe_status & CREG_STAT_TXIRQ)
				sun4c_qe_tx(qep);

			if(dev->tbusy && (SUN4C_TX_BUFFS_AVAIL(qep) >= 0)) {
				dev->tbusy = 0;
				mark_bh(NET_BH);
			}

	next:
			dev->interrupt = 0;
		}
		qec_status >>= 4;
		channel++;
	}
}
static int qe_open(struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int res;

	res = qe_init(qep, 0);
	if(!res)
		MOD_INC_USE_COUNT;
	return res;
}
static int qe_close(struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	qe_stop(qep);
	qe_clean_rings(qep);
	MOD_DEC_USE_COUNT;
	return 0;
}
/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	int len, entry;

	if(test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
		printk("%s: Transmitter access conflict.\n", dev->name);
		return 1;
	}

	if(!TX_BUFFS_AVAIL(qep))
		return 1;

#ifdef NEED_DMA_SYNCHRONIZATION
#ifdef __sparc_v9__
	if ((unsigned long) (skb->data + skb->len) >= MAX_DMA_ADDRESS) {
		struct sk_buff *new_skb = skb_copy(skb, GFP_DMA | GFP_ATOMIC);

		if(!new_skb)
			return 1;
		dev_kfree_skb(skb);
		skb = new_skb;
	}
#endif
	mmu_sync_dma(sbus_dvma_addr(skb->data),
		     skb->len, qep->qe_sbusdev->my_bus);
#endif
	len = skb->len;
	entry = qep->tx_new;

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	qep->tx_skbs[entry] = skb;

	qep->qe_block->qe_txd[entry].tx_addr = sbus_dvma_addr(skb->data);
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	dev->trans_start = jiffies;
	qep->qcregs->ctrl = CREG_CTRL_TWAKEUP;

	if(TX_BUFFS_AVAIL(qep))
		dev->tbusy = 0;

	return 0;
}
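/* The TXD_UPDATE store above is the race avoidance: the descriptor is
 * first marked as being rewritten, then the new buffer address is set,
 * and only then is the final flags word containing TXD_OWN stored, so
 * the chip can never observe a half-constructed descriptor.
 */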
static int sun4c_qe_start_xmit(struct sk_buff *skb, struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct sunqe_buffers *qbufs = qep->sun4c_buffers;
	__u32 txbuf_dvma, qbufs_dvma = qep->s4c_buf_dvma;
	unsigned char *txbuf;
	int len, entry;

	if(test_and_set_bit(0, (void *) &dev->tbusy) != 0) {
		printk("%s: Transmitter access conflict.\n", dev->name);
		return 1;
	}

	if(!SUN4C_TX_BUFFS_AVAIL(qep))
		return 1;

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (SUN4C_TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (SUN4C_TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	memcpy(txbuf, skb->data, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	dev->trans_start = jiffies;
	qep->qcregs->ctrl = CREG_CTRL_TWAKEUP;

	qep->net_stats.tx_bytes += skb->len;

	dev_kfree_skb(skb);

	if(SUN4C_TX_BUFFS_AVAIL(qep))
		dev->tbusy = 0;

	return 0;
}
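/* Unlike qe_start_xmit() above, the sun4c variant copies the packet into
 * its DVMA staging buffer, so the skb can be freed right away instead of
 * being parked in tx_skbs[] until the transmit-complete interrupt.
 */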
static struct net_device_stats *qe_get_stats(struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;

	return &qep->net_stats;
}
#define CRC_POLYNOMIAL_BE 0x04c11db7UL  /* Ethernet CRC, big endian */
#define CRC_POLYNOMIAL_LE 0xedb88320UL  /* Ethernet CRC, little endian */
static void qe_set_multicast(struct device *dev)
{
	struct sunqe *qep = (struct sunqe *) dev->priv;
	struct dev_mc_list *dmi = dev->mc_list;
	unsigned char new_mconfig = (MREGS_MCONFIG_TXENAB | MREGS_MCONFIG_RXENAB);
	char *addrs;
	int i, j, bit, byte;
	u32 crc, poly = CRC_POLYNOMIAL_LE;

	/* Lock out others. */
	set_bit(0, (void *) &dev->tbusy);

	if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		qep->mregs->iaconfig = MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET;
		for(i = 0; i < 8; i++)
			qep->mregs->filter = 0xff;
		qep->mregs->iaconfig = 0;
	} else if(dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		unsigned char *hbytes = (unsigned char *) &hash_table[0];

		for(i = 0; i < 4; i++)
			hash_table[i] = 0;

		for(i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if(!(*addrs & 1))
				continue;

			crc = 0xffffffffU;
			for(byte = 0; byte < 6; byte++) {
				for(bit = *addrs++, j = 0; j < 8; j++, bit >>= 1) {
					int test;

					test = ((bit ^ crc) & 0x01);
					crc >>= 1;
					if(test)
						crc = crc ^ poly;
				}
			}
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}

		/* Program the qe with the new filter value. */
		qep->mregs->iaconfig = MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET;
		for(i = 0; i < 8; i++)
			qep->mregs->filter = *hbytes++;
		qep->mregs->iaconfig = 0;
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mregs->mconfig = new_mconfig;

	/* Let us get going again. */
	dev->tbusy = 0;
}
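/* The filter above is the usual LANCE-style logical address hash: each
 * multicast address is run through the little-endian Ethernet CRC, the
 * top six bits of the result select one of the 64 bits in hash_table,
 * and the resulting 8 bytes are then loaded into the MACE logical
 * address filter.
 */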
/* This is only called once at boot time for each card probed. */
static inline void qec_init_once(struct sunqec *qecp, struct linux_sbus_device *qsdev)
{
	unsigned char bsizes = qecp->qec_bursts;

	if(bsizes & DMA_BURST32)
		qecp->gregs->ctrl = GLOB_CTRL_B32;
	else
		qecp->gregs->ctrl = GLOB_CTRL_B16;

	/* Packetsize only used in 100baseT BigMAC configurations,
	 * set it to zero just to be on the safe side.
	 */
	qecp->gregs->psize = 0;

	/* Set the local memsize register, divided up to one piece per QE channel. */
	qecp->gregs->msize = (qsdev->reg_addrs[1].reg_size >> 2);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
	qecp->gregs->rsize = qecp->gregs->tsize =
		(qsdev->reg_addrs[1].reg_size >> 2) >> 1;
}
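/* Worked example of the sizing above, assuming (hypothetically) 64K of
 * QEC local memory: msize = 64K/4 = 16K per channel, and rsize = tsize =
 * 8K, i.e. half of each channel's slice for rx and half for tx, matching
 * the (total / 2 / num_channels) figure mentioned in the comment.
 */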
/* Four QE's per QEC card. */
static inline int qec_ether_init(struct device *dev, struct linux_sbus_device *sdev)
{
	static unsigned version_printed = 0;
	struct device *qe_devs[4];
	struct sunqe *qeps[4];
	struct linux_sbus_device *qesdevs[4];
	struct sunqec *qecp;
	struct linux_prom_ranges qranges[8];
	unsigned char bsizes, bsizes_more, num_qranges;
	int i, j, res = ENOMEM;

	dev = init_etherdev(0, sizeof(struct sunqe));
	qe_devs[0] = dev;
	qeps[0] = (struct sunqe *) dev->priv;
	qeps[0]->channel = 0;
	for(j = 0; j < 6; j++)
		qe_devs[0]->dev_addr[j] = idprom->id_ethaddr[j];

	if(version_printed++ == 0)
		printk(version);

	qe_devs[1] = qe_devs[2] = qe_devs[3] = NULL;
	for(i = 1; i < 4; i++) {
		qe_devs[i] = init_etherdev(0, sizeof(struct sunqe));
		if(qe_devs[i] == NULL || qe_devs[i]->priv == NULL)
			goto qec_free_devs;
		qeps[i] = (struct sunqe *) qe_devs[i]->priv;
		for(j = 0; j < 6; j++)
			qe_devs[i]->dev_addr[j] = idprom->id_ethaddr[j];
		qeps[i]->channel = i;
	}

	qecp = kmalloc(sizeof(struct sunqec), GFP_KERNEL);
	if(qecp == NULL)
		goto qec_free_devs;
	qecp->qec_sbus_dev = sdev;

	for(i = 0; i < 4; i++) {
		qecp->qes[i] = qeps[i];
		qeps[i]->dev = qe_devs[i];
		qeps[i]->parent = qecp;
	}

	/* Link in channel 0. */
	i = prom_getintdefault(sdev->child->prom_node, "channel#", -1);
	if(i == -1) { res = ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child;
	qe_devs[i]->base_addr = (long) qesdevs[i];

	/* Link in channel 1. */
	i = prom_getintdefault(sdev->child->next->prom_node, "channel#", -1);
	if(i == -1) { res = ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child->next;
	qe_devs[i]->base_addr = (long) qesdevs[i];

	/* Link in channel 2. */
	i = prom_getintdefault(sdev->child->next->next->prom_node, "channel#", -1);
	if(i == -1) { res = ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child->next->next;
	qe_devs[i]->base_addr = (long) qesdevs[i];

	/* Link in channel 3. */
	i = prom_getintdefault(sdev->child->next->next->next->prom_node, "channel#", -1);
	if(i == -1) { res = ENODEV; goto qec_free_devs; }
	qesdevs[i] = sdev->child->next->next->next;
	qe_devs[i]->base_addr = (long) qesdevs[i];

	for(i = 0; i < 4; i++)
		qeps[i]->qe_sbusdev = qesdevs[i];

	/* This is a bit of fun, get QEC ranges. */
	i = prom_getproperty(sdev->prom_node, "ranges",
			     (char *) &qranges[0], sizeof(qranges));
	num_qranges = (i / sizeof(struct linux_prom_ranges));

	/* Now, apply all the ranges, QEC ranges then the SBUS ones for each QE. */
	for(i = 0; i < 4; i++) {
		for(j = 0; j < 2; j++) {
			int k;

			for(k = 0; k < num_qranges; k++)
				if(qesdevs[i]->reg_addrs[j].which_io ==
				   qranges[k].ot_child_space)
					break;
			if(k >= num_qranges)
				printk("QuadEther: Aieee, bogus QEC range for "
				       "space %08x\n", qesdevs[i]->reg_addrs[j].which_io);
			qesdevs[i]->reg_addrs[j].which_io = qranges[k].ot_parent_space;
			qesdevs[i]->reg_addrs[j].phys_addr += qranges[k].ot_parent_base;
		}

		prom_apply_sbus_ranges(qesdevs[i]->my_bus, &qesdevs[i]->reg_addrs[0],
				       2, qesdevs[i]);
	}

	/* Now map in the registers, QEC globals first. */
	prom_apply_sbus_ranges(sdev->my_bus, &sdev->reg_addrs[0],
			       sdev->num_registers, sdev);
	qecp->gregs = sparc_alloc_io(sdev->reg_addrs[0].phys_addr, 0,
				     sizeof(struct qe_globreg),
				     "QEC Global Registers",
				     sdev->reg_addrs[0].which_io, 0);
	if(!qecp->gregs) {
		printk("QuadEther: Cannot map QEC global registers.\n");
		res = ENODEV;
		goto qec_free_devs;
	}

	/* Make sure the QEC is in MACE mode. */
	if((qecp->gregs->ctrl & 0xf0000000) != GLOB_CTRL_MMODE) {
		printk("QuadEther: AIEEE, QEC is not in MACE mode!\n");
		res = ENODEV;
		goto qec_free_devs;
	}

	/* Reset the QEC. */
	if(qec_global_reset(qecp->gregs)) {
		res = ENODEV;
		goto qec_free_devs;
	}

	/* Find and set the burst sizes for the QEC, since it does
	 * the actual dma for all 4 channels.
	 */
	bsizes = prom_getintdefault(sdev->prom_node, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = prom_getintdefault(sdev->my_bus->prom_node, "burst-sizes", 0xff);

	if(bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if(bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	   (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	qecp->qec_bursts = bsizes;

	/* Perform one time QEC initialization, we never touch the QEC
	 * globals again after this.
	 */
	qec_init_once(qecp, sdev);

	for(i = 0; i < 4; i++) {
		/* Map in QEC per-channel control registers. */
		qeps[i]->qcregs = sparc_alloc_io(qesdevs[i]->reg_addrs[0].phys_addr, 0,
						 sizeof(struct qe_creg),
						 "QEC Per-Channel Registers",
						 qesdevs[i]->reg_addrs[0].which_io, 0);
		if(!qeps[i]->qcregs) {
			printk("QuadEther: Cannot map QE %d's channel registers.\n", i);
			res = ENODEV;
			goto qec_free_devs;
		}

		/* Map in per-channel AMD MACE registers. */
		qeps[i]->mregs = sparc_alloc_io(qesdevs[i]->reg_addrs[1].phys_addr, 0,
						sizeof(struct qe_mregs),
						"QE MACE Registers",
						qesdevs[i]->reg_addrs[1].which_io, 0);
		if(!qeps[i]->mregs) {
			printk("QuadEther: Cannot map QE %d's MACE registers.\n", i);
			res = ENODEV;
			goto qec_free_devs;
		}

		qeps[i]->qe_block = (struct qe_init_block *)
			sparc_dvma_malloc(PAGE_SIZE, "QE Init Block",
					  &qeps[i]->qblock_dvma);

		if(sparc_cpu_model == sun4c)
			qeps[i]->sun4c_buffers = (struct sunqe_buffers *)
				sparc_dvma_malloc(sizeof(struct sunqe_buffers),
						  "QE RX/TX Buffers",
						  &qeps[i]->s4c_buf_dvma);
		else
			qeps[i]->sun4c_buffers = 0;

		/* Stop this QE. */
		qe_stop(qeps[i]);
	}

	for(i = 0; i < 4; i++) {
		qe_devs[i]->open = qe_open;
		qe_devs[i]->stop = qe_close;
		if(sparc_cpu_model == sun4c)
			qe_devs[i]->hard_start_xmit = sun4c_qe_start_xmit;
		else
			qe_devs[i]->hard_start_xmit = qe_start_xmit;
		qe_devs[i]->get_stats = qe_get_stats;
		qe_devs[i]->set_multicast_list = qe_set_multicast;
		qe_devs[i]->irq = sdev->irqs[0];
		qe_devs[i]->dma = 0;
		ether_setup(qe_devs[i]);
	}

	/* QEC receives interrupts from each QE, then it sends the actual
	 * IRQ to the cpu itself.  Since QEC is the single point of
	 * interrupt for all QE channels we register the IRQ handler
	 * for it now.
	 */
	if(sparc_cpu_model == sun4c) {
		if(request_irq(sdev->irqs[0], &sun4c_qec_interrupt,
			       SA_SHIRQ, "QuadEther", (void *) qecp)) {
			printk("QuadEther: Can't register QEC master irq handler.\n");
			res = EAGAIN;
			goto qec_free_devs;
		}
	} else {
		if(request_irq(sdev->irqs[0], &qec_interrupt,
			       SA_SHIRQ, "QuadEther", (void *) qecp)) {
			printk("QuadEther: Can't register QEC master irq handler.\n");
			res = EAGAIN;
			goto qec_free_devs;
		}
	}

	/* Report the QE channels. */
	for(i = 0; i < 4; i++) {
		printk("%s: QuadEthernet channel[%d] ", qe_devs[i]->name, i);
		for(j = 0; j < 6; j++)
			printk("%2.2x%c",
			       qe_devs[i]->dev_addr[j],
			       j == 5 ? ' ' : ':');
		printk("\n");
	}

	/* We are home free at this point, link the qe's into
	 * the master list for later module unloading.
	 */
	for(i = 0; i < 4; i++)
		qe_devs[i]->ifindex = dev_new_index();
	qecp->next_module = root_qec_dev;
	root_qec_dev = qecp;

	return 0;

qec_free_devs:
	for(i = 0; i < 4; i++) {
		if(qe_devs[i] != NULL) {
			if(qe_devs[i]->priv)
				kfree(qe_devs[i]->priv);
			kfree(qe_devs[i]);
		}
	}
	return res;
}
int __init qec_probe(struct device *dev)
{
	struct linux_sbus *bus;
	struct linux_sbus_device *sdev = 0;
	static int called = 0;
	int cards = 0, v;

	if(called)
		return ENODEV;
	called++;

	for_each_sbus(bus) {
		for_each_sbusdev(sdev, bus) {
			if(cards) dev = NULL;

			/* QEC can be parent of either QuadEthernet or BigMAC
			 * children.
			 */
			if(!strcmp(sdev->prom_name, "qec") && sdev->child &&
			   !strcmp(sdev->child->prom_name, "qe") &&
			   sdev->child->next &&
			   !strcmp(sdev->child->next->prom_name, "qe") &&
			   sdev->child->next->next &&
			   !strcmp(sdev->child->next->next->prom_name, "qe") &&
			   sdev->child->next->next->next &&
			   !strcmp(sdev->child->next->next->next->prom_name, "qe")) {
				cards++;
				if((v = qec_ether_init(dev, sdev)))
					return v;
			}
		}
	}
	if(!cards)
		return ENODEV;
	return 0;
}
#ifdef MODULE

int
init_module(void)
{
	root_qec_dev = NULL;
	return qec_probe(NULL);
}

void
cleanup_module(void)
{
	struct sunqec *next_qec;
	int i;

	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	while (root_qec_dev) {
		next_qec = root_qec_dev->next_module;

		/* Release all four QE channels, then the QEC itself. */
		for(i = 0; i < 4; i++) {
			unregister_netdev(root_qec_dev->qes[i]->dev);
			kfree(root_qec_dev->qes[i]);
		}
		free_irq(root_qec_dev->qec_sbus_dev->irqs[0], (void *) root_qec_dev);
		kfree(root_qec_dev);
		root_qec_dev = next_qec;
	}
}

#endif /* MODULE */