/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@redhat.com>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/macintosh.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"
#define N_TX_RING	1
#define N_RX_RING	8
#define N_RX_PAGES	((N_RX_RING * 0x0800 + PAGE_SIZE - 1) / PAGE_SIZE)
#define TX_TIMEOUT	HZ
/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)
struct mace_data {
	volatile struct mace *mace;
	volatile unsigned char *tx_ring;
	volatile unsigned char *tx_ring_phys;
	volatile unsigned char *rx_ring;
	volatile unsigned char *rx_ring_phys;
	int dma_intr;
	struct net_device_stats stats;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
};

/*
 * Layout of one received frame in the Rx DMA ring. Only len, status and
 * data[] are used below; the remaining header fields are assumed to follow
 * the MACE receive frame format.
 */
struct mace_frame {
	u16	len;
	u16	status;
	u16	rntpc;
	u16	rcvcc;
	u32	pad1;
	u32	pad2;
	u8	data[1];
	/* And frame continues.. */
};
#define PRIV_BYTES	sizeof(struct mace_data)

extern void psc_debug_dump(void);
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *mace_stats(struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void mace_tx_timeout(struct net_device *dev);
/* Bit-reverse one byte of an ethernet hardware address. */

static int bitrev(int b)
{
	int d = 0, i;

	for (i = 0; i < 8; ++i, b >>= 1) {
		d = (d << 1) | (b & 1);
	}

	return d;
}
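
/* Examples: bitrev(0x01) == 0x80, bitrev(0xA0) == 0x05 */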
/*
 * Load a receive DMA channel with a base address and ring length
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}
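
/*
 * Note: the PSC apparently provides two register sets per DMA channel
 * (PSC_SET0 at offset 0x00 and PSC_SET1 at offset 0x10, judging by the
 * values passed in from mace_rxdma_reset() below). The rx_slot/tx_slot
 * bookkeeping toggles between the two sets with "^= 0x10".
 */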
/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}
/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}
static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}
/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

struct net_device *mace_probe(int unit)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	static int found = 0;
	int err;

	if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
		return ERR_PTR(-ENODEV);

	found = 1;	/* prevent 'finding' one on every device probe */

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	sprintf(dev->name, "eth%d", unit);

	mp = (struct mace_data *) dev->priv;
	dev->base_addr = (u32)MACE_BASE;
	mp->mace = (volatile struct mace *) MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced out in a strange boundary and the
	 * bits are reversed.
	 */

	addr = (void *)MACE_PROM;
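
	/* Only every 16th PROM byte is meaningful, hence the addr[j<<4] indexing below. */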
	for (j = 0; j < 6; ++j) {
		u8 v = bitrev(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return ERR_PTR(-ENODEV);
	}

	memset(&mp->stats, 0, sizeof(mp->stats));

	dev->open		= mace_open;
	dev->stop		= mace_close;
	dev->hard_start_xmit	= mace_xmit_start;
	dev->tx_timeout		= mace_tx_timeout;
	dev->watchdog_timeo	= TX_TIMEOUT;
	dev->get_stats		= mace_stats;
	dev->set_multicast_list	= mace_set_multicast;
	dev->set_mac_address	= mace_set_address;

	printk(KERN_INFO "%s: 68K MACE, hardware address %.2X", dev->name, dev->dev_addr[0]);
	for (j = 1 ; j < 6 ; j++) printk(":%.2X", dev->dev_addr[j]);
	printk("\n");

	err = register_netdev(dev);
	if (!err)
		return dev;

	free_netdev(dev);
	return ERR_PTR(err);
}
/*
 * Load the address on a mace controller.
 */

static int mace_set_address(struct net_device *dev, void *addr)
{
	unsigned char *p = addr;
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	int i;
	unsigned long flags;

	local_irq_save(flags);

	/* load up the hardware address */
	mb->iac = ADDRCHG | PHYADDR;
	while ((mb->iac & ADDRCHG) != 0);

	for (i = 0; i < 6; ++i) {
		mb->padr = dev->dev_addr[i] = p[i];
	}

	local_irq_restore(flags);

	return 0;
}
/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	int i;

	/* Software-reset the chip, retrying until the SWRST bit clears */

	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "%s: software reset failed!!\n", dev->name);
		return -EAGAIN;
	}

	mb->biucc = XMTSP_64;
	mb->fifocc = XMTFW_16 | RCVFW_64 | XMTFWU | RCVFWU | XMTBRST | RCVBRST;
	mb->xmtfc = AUTO_PAD_XMIT;
	mb->plscc = PORTSEL_AUI;
	/* mb->utr = RTRD; */
	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}
	/* Allocate the DMA ring buffers */

	mp->rx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, N_RX_PAGES);
	mp->tx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, 0);

	if (mp->tx_ring == NULL || mp->rx_ring == NULL) {
		if (mp->rx_ring) free_pages((u32) mp->rx_ring, N_RX_PAGES);
		if (mp->tx_ring) free_pages((u32) mp->tx_ring, 0);
		free_irq(dev->irq, dev);
		free_irq(mp->dma_intr, dev);
		printk(KERN_ERR "%s: unable to allocate DMA buffers\n", dev->name);
		return -ENOMEM;
	}

	mp->rx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->rx_ring);
	mp->tx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->tx_ring);
	/* We want the Rx buffer to be uncached and the Tx buffer to be writethrough */

	kernel_set_cachemode((void *)mp->rx_ring, N_RX_PAGES * PAGE_SIZE, IOMAP_NOCACHE_NONSER);
	kernel_set_cachemode((void *)mp->tx_ring, PAGE_SIZE, IOMAP_WRITETHROUGH);
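
	/*
	 * Rationale: the PSC writes received frames into rx_ring behind the
	 * CPU's back, so that area must not be cached; tx_ring is only
	 * written by the CPU and read by the PSC, so writethrough caching
	 * is enough to keep the DMA engine's view of it coherent.
	 */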
	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);
	/* load up the hardware address */

	mb->iac = ADDRCHG | PHYADDR;

	while ((mb->iac & ADDRCHG) != 0);

	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i];

	/* clear the multicast filter */

	mb->iac = ADDRCHG | LOGADDR;

	while ((mb->iac & ADDRCHG) != 0);

	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	mb->plscc = PORTSEL_GPSI + ENPLSIO;

	mb->maccc = ENXMT | ENRCV;
	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	return 0;
}
/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx	 */
	mb->imr = 0xFF;		/* disable all irqs	 */
	mace_dma_off(dev);	/* disable rx and tx dma */

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	free_pages((u32) mp->rx_ring, N_RX_PAGES);
	free_pages((u32) mp->tx_ring, 0);

	return 0;
}
/*
 * Transmit a frame
 */

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;

	/* Stop the queue if the buffer is full */

	if (!mp->tx_count) {
		netif_stop_queue(dev);
		return 1;
	}
	mp->tx_count--;

	mp->stats.tx_packets++;
	mp->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */

	memcpy((void *) mp->tx_ring, skb->data, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return 0;
}
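
/*
 * Note that only a single transmit buffer is in flight at a time
 * (tx_count starts at N_TX_RING); once it is consumed the queue stopped
 * above is woken again from mace_dma_intr() when the write-DMA
 * completion arrives.
 */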
static struct net_device_stats *mace_stats(struct net_device *dev)
{
	struct mace_data *p = (struct mace_data *) dev->priv;

	return &p->stats;
}
static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	int i, j;
	u32 crc;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct dev_mc_list *dmi = dev->mc_list;

		mb->maccc &= ~PROM;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			for (i = 0; i < dev->mc_count; i++) {
				crc = ether_crc_le(6, dmi->dmi_addr);
				j = crc >> 26;	/* bit number in multicast_filter */
				multicast_filter[j >> 3] |= 1 << (j & 7);
				dmi = dmi->next;
			}
		}

		mb->iac = ADDRCHG | LOGADDR;
		while (mb->iac & ADDRCHG);

		for (i = 0; i < 8; ++i) {
			mb->ladrf = multicast_filter[i];
		}
	}
}
/*
 * Miscellaneous interrupts are handled here. We may end up
 * having to bash the chip on the head for bad errors
 */

static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
{
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO) {
		mp->stats.rx_missed_errors += 256;
	}
	mp->stats.rx_missed_errors += mb->mpc;		/* reading clears it */

	if (intr & RNTPCO) {
		mp->stats.rx_length_errors += 256;
	}
	mp->stats.rx_length_errors += mb->rntpc;	/* reading clears it */

	if (intr & CERR) {
		++mp->stats.tx_heartbeat_errors;
	}
	if (intr & BABBLE) {
		if (mace_babbles++ < 4) {
			printk(KERN_DEBUG "mace: babbling transmitter\n");
		}
	}
	if (intr & JABBER) {
		if (mace_jabbers++ < 4) {
			printk(KERN_DEBUG "mace: jabbering transceiver\n");
		}
	}
}
/*
 * A transmit error has occurred. (We kick the transmit side from
 * the DMA completion)
 */

static void mace_xmit_error(struct net_device *dev)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	u8 xmtfs;

	xmtfs = mb->xmtfs;

	if (xmtfs & UFLO) {
		printk("%s: DMA underrun.\n", dev->name);
		mp->stats.tx_errors++;
		mp->stats.tx_fifo_errors++;
		mace_txdma_reset(dev);
	}

	if (xmtfs & RTRY) {
		mp->stats.collisions++;
	}
}
/*
 * A receive interrupt occurred.
 */

static void mace_recv_interrupt(struct net_device *dev)
{
/*	struct mace_data *mp = (struct mace_data *) dev->priv; */
//	volatile struct mace *mb = mp->mace;
}
/*
 * Process the chip interrupt
 */

static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = (struct mace_data *) dev->priv;
	volatile struct mace *mb = mp->mace;
	int ir;

	ir = mb->ir;
	mace_handle_misc_intrs(mp, ir);

	if (ir & XMTINT) {
		mace_xmit_error(dev);
	}
	if (ir & RCVINT) {
		mace_recv_interrupt(dev);
	}
	return IRQ_HANDLED;
}
static void mace_tx_timeout(struct net_device *dev)
{
/*	struct mace_data *mp = (struct mace_data *) dev->priv; */
//	volatile struct mace *mb = mp->mace;
}
/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct mace_data *mp = (struct mace_data *) dev->priv;
	struct sk_buff *skb;

	if (mf->status & RS_OFLO) {
		printk("%s: fifo overflow.\n", dev->name);
		mp->stats.rx_errors++;
		mp->stats.rx_fifo_errors++;
	}
	if (mf->status & (RS_CLSN | RS_FRAMERR | RS_FCSERR))
		mp->stats.rx_errors++;

	if (mf->status & RS_CLSN) {
		mp->stats.collisions++;
	}
	if (mf->status & RS_FRAMERR) {
		mp->stats.rx_frame_errors++;
	}
	if (mf->status & RS_FCSERR) {
		mp->stats.rx_crc_errors++;
	}

	skb = dev_alloc_skb(mf->len + 2);
	if (!skb) {
		mp->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, 2);
	memcpy(skb_put(skb, mf->len), mf->data, mf->len);

	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->last_rx = jiffies;
	mp->stats.rx_packets++;
	mp->stats.rx_bytes += mf->len;
}
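
/*
 * Each receive slot in the DMA ring is 0x0800 bytes (see N_RX_PAGES);
 * mace_dma_intr() below walks the ring as rx_ring + rx_tail * 0x0800 and
 * hands each slot to mace_dma_rx_frame() as a struct mace_frame.
 */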
/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = (struct mace_data *) dev->priv;
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000)) return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packages */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring + (mp->rx_tail * 0x0800)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
		netif_wake_queue(dev);
	}
	return IRQ_HANDLED;
}
MODULE_LICENSE("GPL");