/*
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 * dynamic procfs inode.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#include <asm/irq.h>

#include "bmac.h"
#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define ENET_CRCPOLY 0x04c11db7

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64
#define N_TX_RING	32
#define MAX_TX_ACTIVE	1
#define ETHERCRC	4
#define ETHERMINPACKET	64
#define ETHERMTU	1500
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
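/*
 * RX_BUFLEN leaves room for the 14-byte Ethernet header and the 4-byte
 * FCS (the receiver is configured with RxCRCNoStrip, so the CRC stays in
 * the buffer), plus two bytes of slack; the skbs themselves are allocated
 * with two further bytes reserved so the IP header ends up aligned.
 */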
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

#define XXDEBUG(args)
struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	int is_bmac_plus;
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char tx_fullup;
	struct timer_list tx_timeout;
	int timeout_active;
	int sleeping;
	int opened;
	unsigned short hash_use_count[64];
	unsigned short hash_table_mask[4];
	spinlock_t lock;
};

static unsigned char *bmac_emergency_rxbuf;
/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))
static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(unsigned long data);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);
#define DBDMA_SET(x)	( ((x) | (x) << 16) )
#define DBDMA_CLEAR(x)	( (x) << 16)
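/*
 * DBDMA control-register writes carry a mask in the upper 16 bits and
 * values in the lower 16: only the status bits whose mask bit is set
 * are changed.  DBDMA_SET(x) therefore turns bits on and DBDMA_CLEAR(x)
 * turns them off in a single store.
 */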
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
	return swap;
}
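/*
 * stwbrx/lwbrx are byte-reversed store/load instructions: the DBDMA
 * register file is little-endian, so these give cheap endian conversion
 * on the big-endian PowerPC.
 */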
static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();
}

static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}
static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}
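/*
 * A dbdma_cmd descriptor pairs an opcode (OUTPUT_LAST, INPUT_LAST,
 * DBDMA_NOP|BR_ALWAYS, ...) with a byte count and a physical buffer
 * address; cmd_dep holds the branch target for branch commands.
 * xfer_status and res_count are cleared here because the engine fills
 * them in on completion, which is how the interrupt handlers below tell
 * which descriptors have finished.
 */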
static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}

static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}

static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}
#define MIFDELAY	udelay(10)
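/*
 * PHY management (MIF) access is bit-banged through the MIFCSR register.
 * From the way the code below drives it: bit 0 is the MDC clock, bit 1
 * the outgoing MDIO data bit, bit 2 the output enable, and bit 3 the
 * incoming data bit sampled by bmac_mif_readbits().
 */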
static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}

static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
	int b;

	while (--nb >= 0) {
		b = (val & (1 << nb))? 6: 4;
		bmwrite(dev, MIFCSR, b);
		MIFDELAY;
		bmwrite(dev, MIFCSR, b|1);
		MIFDELAY;
	}
}
static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}
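/*
 * This is a standard IEEE 802.3 clause-22 management frame: a preamble
 * of 32 ones, start + read opcode (the four bits 0110), then 10 address
 * bits (5-bit PHY address, 5-bit register).  Seventeen bits are clocked
 * back in so the turnaround bit is consumed along with the 16 data bits.
 */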
static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
}
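/*
 * The write frame differs from the read in the opcode (0101 instead of
 * 0110) and in the station driving the two turnaround bits (10) itself
 * before shifting out the 16 data bits; the trailing 11 releases the
 * bus.
 */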
static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);
}
static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);
}
static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk(KERN_DEBUG);
		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
	}
	printk(KERN_CONT "\n");

	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable ||
		    (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
		} else
			bmac_mif_write(dev, 0, 0x1000);
	}
}
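/*
 * MII register 0 is the PHY control register and register 4 the
 * autonegotiation advertisement register.  The shift above copies the
 * PHY's ability bits from status-register bits 15:11 into advertisement
 * bits 9:5 (plus the 802.3 selector bit).  Writing 0x1200 enables and
 * restarts autonegotiation; 0x1000 merely leaves it enabled when the
 * advertisement was already correct.
 */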
static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}
#ifdef CONFIG_PM
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i=0; i<N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i<N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
				dev_kfree_skb(bp->tx_bufs[i]);
				bp->tx_bufs[i] = NULL;
			}
		}
	}
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}

static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */
static int bmac_set_address(struct net_device *dev, void *addr)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned char *p = addr;
	unsigned short *pWord16;
	unsigned long flags;
	int i;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	}
	/* load up the hardware address */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}
static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	bp->tx_timeout.function = bmac_tx_timeout;
	bp->tx_timeout.data = (unsigned long) dev;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
}
static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	void *vaddr;
	unsigned long baddr;
	unsigned long len;

	len = skb->len;
	vaddr = skb->data;
	baddr = virt_to_bus(vaddr);

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}

static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);
}
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}
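/*
 * The trailing NOP/branch-always command turns the descriptor array
 * into a ring without the driver having to patch branch targets as it
 * goes.  The wait_sel write follows the same mask-high/value-low
 * convention as the control register; here it evidently selects the
 * device wait line that the WAIT_IFCLR transmit commands test.
 */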
static int
bmac_init_rx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(skb, 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/*	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;	/* can't take it at the moment */
	}

	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	dev->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}
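/*
 * Note the ordering above: a STOP command is planted in the next slot
 * before the OUTPUT_LAST command for this packet is written, so the
 * channel can never run past the last valid descriptor.  One slot is
 * always sacrificed to distinguish a full ring (fill + 1 == empty)
 * from an empty one (fill == empty).
 */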
static int rxintcount;

static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = ld_le16(&cp->xfer_status);
		residual = ld_le16(&cp->res_count);
		if ((stat & ACTIVE) == 0)
			break;
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;
			skb_put(skb, nb);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			++dev->stats.rx_packets;
			dev->stats.rx_bytes += nb;
		} else {
			++dev->stats.rx_dropped;
		}
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		st_le16(&cp->res_count, 0);
		st_le16(&cp->xfer_status, 0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}
static int txintcount;

static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = ld_le16(&cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++dev->stats.tx_packets;
			dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}
#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};
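/*
 * reverse6[i] is i with its six bits mirrored (so 1 -> 0x20, 2 -> 0x10,
 * and so on): a table lookup in place of bit-twiddling when mapping the
 * CRC slice to a filter bit index below.
 */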
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
	register unsigned int counter, cur = curval, next = nxtval;
	register int high_crc_set, low_data_set;

	/* Swap bytes */
	next = ((next & 0x00FF) << 8) | (next >> 8);

	/* Compute bit-by-bit */
	for (counter = 0; counter < 16; ++counter) {
		/* is high CRC bit set? */
		if ((cur & 0x80000000) == 0) high_crc_set = 0;
		else high_crc_set = 1;

		cur = cur << 1;

		if ((next & 0x0001) == 0) low_data_set = 0;
		else low_data_set = 1;

		next = next >> 1;

		/* do the XOR */
		if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
	}
	return cur;
}
static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int newcrc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0 */

	return(newcrc);
}
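/*
 * This is the usual Ethernet CRC-32 (polynomial 0x04c11db7, initial
 * value all-ones) computed 16 bits at a time over the three words of
 * the station address; the hash functions below keep only a six-bit
 * slice of the result to pick one of the 64 filter bits.
 */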
/*
 * Add requested mcast addr to BMac's hash table filter.
 */

static void
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	if (!(*addr)) return;
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc]++) return;	/* This bit is already set */
	mask = crc % 16;
	mask = (unsigned char)1 << mask;
	bp->hash_table_mask[crc/16] |= mask;	/* was hash_use_count[], which never reached the chip */
}
static void
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;	/* must hold all 16 bits of the inverted mask */

	/* Now, delete the address from the filter copy, as indicated */
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
	if (--bp->hash_use_count[crc]) return;	/* That bit is still in use */
	mask = crc % 16;
	mask = ((unsigned short)1 << mask) ^ 0xffff; /* To turn off bit */
	bp->hash_table_mask[crc/16] &= mask;
}
/*
 * Sync the adapter with the software copy of the multicast mask
 * (logical address filter).
 */

static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	} while (rx_cfg & RxMACEnable);
}
unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}
static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
}
/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1	Promiscuous mode, receive all packets
    num_addrs == 0	Normal mode, clear multicast list
    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
			best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct bmac_data *bp = netdev_priv(dev);
	int num_addrs = netdev_mc_count(dev);
	unsigned short rx_cfg;
	int i;

	if (bp->sleeping)
		return;

	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
		bmac_update_hash_table_mask(dev, bp);
		rx_cfg = bmac_rx_on(dev, 1, 0);
		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmac_rx_on(dev, 0, 1);
		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
	} else {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
		if (num_addrs == 0) {
			rx_cfg = bmac_rx_on(dev, 0, 0);
			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
		} else {
			netdev_for_each_mc_addr(ha, dev)
				bmac_addhash(bp, ha->addr);
			bmac_update_hash_table_mask(dev, bp);
			rx_cfg = bmac_rx_on(dev, 1, 0);
			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
		}
	}
	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}
#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */

static void bmac_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	char *addrs;
	int i;
	unsigned short rx_cfg;
	u32 crc;

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if(dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
	} else {
		u16 hash_table[4];

		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

		for(i = 0; i < 4; i++) hash_table[i] = 0;

		netdev_for_each_mc_addr(ha, dev) {
			addrs = ha->addr;

			if(!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
	}
}
#endif /* SUNHME_MULTICAST */
static int miscintcount;

static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	unsigned int status = bmread(dev, STATUS);
	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/*     bmac_txdma_intr_inner(irq, dev_id); */
	/*   if (status & FrameReceived) dev->stats.rx_dropped++; */
	if (status & RxErrorMask) dev->stats.rx_errors++;
	if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
	if (status & RxLenCntExp) dev->stats.rx_length_errors++;
	if (status & RxOverFlow) dev->stats.rx_over_errors++;
	if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
	/*   if (status & FrameSent) dev->stats.tx_dropped++; */
	if (status & TxErrorMask) dev->stats.tx_errors++;
	if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) dev->stats.collisions++;
	return IRQ_HANDLED;
}
/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength	5
#define DataInOn		0x0008
#define DataInOff		0x0000
#define Clk			0x0002
#define ChipSelect		0x0001
#define SDIShiftCount		3
#define SD0ShiftCount		2
#define DelayValue		1000	/* number of microseconds */
#define SROMStartOffset		10	/* this is in words */
#define SROMReadCount		3	/* number of words to read from SROM */
#define SROMAddressBits		6
#define EnetAddressOffset	20
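/*
 * The station-address SROM is a small serial EEPROM driven through
 * SROMCSR, 93C46-style: bit 0 is chip select, bit 1 the clock, data is
 * shifted out to the part on bit 3 (SDIShiftCount) and read back from
 * it on bit 2 (SD0ShiftCount), one bit per clock pulse.
 */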
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
	unsigned short data;
	unsigned short val;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);
	udelay(DelayValue);

	data = bmread(dev, SROMCSR);
	udelay(DelayValue);
	val = (data >> SD0ShiftCount) & 1;

	bmwrite(dev, SROMCSR, ChipSelect);
	udelay(DelayValue);

	return val;
}
static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);
}
static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}
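/*
 * "110" is the classic microwire READ instruction: a start bit followed
 * by the 10 opcode, after which read_srom() clocks in the address bits
 * and clocks the 16 data bits back out.
 */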
static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
	unsigned short data, val;
	int i;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++) {
		val = addr >> (addr_len-i-1);
		bmac_clock_in_bit(dev, val & 1);
	}

	/* Now read in the 16-bit data */
	data = 0;
	for (i = 0; i < 16; i++) {
		val = bmac_clock_out_bit(dev);
		data <<= 1;
		data |= val;
	}
	bmwrite(dev, SROMCSR, 0);

	return data;
}
/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */
static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	return 0;
}
static void
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
{
	int i;
	unsigned short data;

	for (i = 0; i < 3; i++) {	/* 3 words = 6 address bytes; the old bound of 6 overran ea[] */
		reset_and_select_srom(dev);
		data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
		ea[2*i]   = bitrev8(data & 0x0ff);	/* bytes are stored bit-reversed in the SROM */
		ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
	}
}
static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, 6);
		memcpy(data+6, dev->dev_addr, 6);
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bmac_data *bp = netdev_priv(dev);
	strcpy(info->driver, "bmac");
	strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev));
}

static const struct ethtool_ops bmac_ethtool_ops = {
	.get_drvinfo		= bmac_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};
static const struct net_device_ops bmac_netdev_ops = {
	.ndo_open		= bmac_open,
	.ndo_stop		= bmac_close,
	.ndo_start_xmit		= bmac_output,
	.ndo_set_multicast_list	= bmac_set_multicast,
	.ndo_set_mac_address	= bmac_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int j, rev, ret;
	struct bmac_data *bp;
	const unsigned char *prop_addr;
	unsigned char addr[6];
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
		return -ENODEV;
	}
	prop_addr = of_get_property(macio_get_of_node(mdev),
			"mac-address", NULL);
	if (prop_addr == NULL) {
		prop_addr = of_get_property(macio_get_of_node(mdev),
				"local-mac-address", NULL);
		if (prop_addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
			return -ENODEV;
		}
	}
	memcpy(addr, prop_addr, sizeof(addr));

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	bp->mdev = mdev;
	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto out_free;
	}

	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)
		goto out_release;

	dev->irq = macio_irq(mdev, 0);
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];

	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->netdev_ops = &bmac_netdev_ops;
	dev->ethtool_ops = &bmac_ethtool_ops;

	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	init_timer(&bp->tx_timeout);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
		goto err_out_irq0;
	}
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
		goto err_out_irq2;
	}

	printk(KERN_INFO "%s: BMAC%s at %pM",
	       dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	return 0;
 err_out_irq2:
	free_irq(bp->rx_dma_intr, dev);
 err_out_irq1:
	free_irq(bp->tx_dma_intr, dev);
 err_out_irq0:
	free_irq(dev->irq, dev);
 err_out_iounmap_rx:
	iounmap(bp->rx_dma);
 err_out_iounmap_tx:
	iounmap(bp->tx_dma);
 err_out_iounmap:
	iounmap((void __iomem *)dev->base_addr);
 out_release:
	macio_release_resources(mdev);
 out_free:
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	free_netdev(dev);

	return -ENODEV;
}
static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	return 0;
}
static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	unsigned short config;
	int i;

	bp->sleeping = 1;

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: all bufs freed\n"));

	bp->opened = 0;
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	return 0;
}
static void
bmac_start(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	if (bp->sleeping)
		return;

	spin_lock_irqsave(&bp->lock, flags);
	while (1) {
		i = bp->tx_fill + 1;
		if (i >= N_TX_RING)
			i = 0;
		if (i == bp->tx_empty)
			break;
		skb = skb_dequeue(bp->queue);
		if (skb == NULL)
			break;
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
static int
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	skb_queue_tail(bp->queue, skb);
	bmac_start(dev);
	return NETDEV_TX_OK;
}
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
	/*	bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
	/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
	/*	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
	/*	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++dev->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/*	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);
}
static int __devexit bmac_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(bp->rx_dma_intr, dev);

	iounmap((void __iomem *)dev->base_addr);
	iounmap(bp->tx_dma);
	iounmap(bp->rx_dma);

	macio_release_resources(mdev);

	free_netdev(dev);

	return 0;
}
static struct of_device_id bmac_match[] =
{
	{
	.name		= "bmac",
	.data		= (void *)0,
	},
	{
	.type		= "network",
	.compatible	= "bmac+",
	.data		= (void *)1,
	},
	{}
};
MODULE_DEVICE_TABLE (of, bmac_match);
static struct macio_driver bmac_driver =
{
	.driver = {
		.name		= "bmac",
		.owner		= THIS_MODULE,
		.of_match_table	= bmac_match,
	},
	.probe		= bmac_probe,
	.remove		= bmac_remove,
#ifdef CONFIG_PM
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,
#endif
};
static int __init bmac_init(void)
{
	if (bmac_emergency_rxbuf == NULL) {
		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
		if (bmac_emergency_rxbuf == NULL) {
			printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
			return -ENOMEM;
		}
	}

	return macio_register_driver(&bmac_driver);
}
static void __exit bmac_exit(void)
{
	macio_unregister_driver(&bmac_driver);

	kfree(bmac_emergency_rxbuf);
	bmac_emergency_rxbuf = NULL;
}

MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);