/*
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 * dynamic procfs inode.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#include <asm/irq.h>

#include "bmac.h"
#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define ENET_CRCPOLY 0x04c11db7

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64
#define N_TX_RING	32
#define MAX_TX_ACTIVE	1
#define ETHERCRC	4
#define ETHERMINPACKET	64
#define ETHERMTU	1500
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

#define XXDEBUG(args)
struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	int is_bmac_plus;
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char tx_fullup;
	struct timer_list tx_timeout;
	int timeout_active;
	int sleeping;
	int opened;
	unsigned short hash_use_count[64];
	unsigned short hash_table_mask[4];
	spinlock_t lock;
};
#if 0 /* Move that to ethtool */

typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMADD", MEMADD},
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},
	{"TXPNTR", TXPNTR},
	{"RXPNTR", RXPNTR},
	{"IPG1", IPG1},
	{"IPG2", IPG2},
	{"ALIMIT", ALIMIT},
	{"SLOT", SLOT},
	{"PALEN", PALEN},
	{"PAPAT", PAPAT},
	{"TXSFD", TXSFD},
	{"JAM", JAM},
	{"TXCFG", TXCFG},
	{"TXMAX", TXMAX},
	{"TXMIN", TXMIN},
	{"PAREG", PAREG},
	{"DCNT", DCNT},
	{"NCCNT", NCCNT},
	{"NTCNT", NTCNT},
	{"EXCNT", EXCNT},
	{"LTCNT", LTCNT},
	{"TXSM", TXSM},
	{"RXCFG", RXCFG},
	{"RXMAX", RXMAX},
	{"RXMIN", RXMIN},
	{"FRCNT", FRCNT},
	{"AECNT", AECNT},
	{"FECNT", FECNT},
	{"RXSM", RXSM},
	{"RXCV", RXCV}
};

#endif
static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))
static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(unsigned long data);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);

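/*
 * The DBDMA control register takes a 16-bit mask in its upper half and
 * 16 bits of data in its lower half; only the masked bits are written.
 * DBDMA_SET() builds a word that sets the named bits, DBDMA_CLEAR() one
 * that clears them.  The DBDMA registers are little-endian, hence the
 * byte-reversing stwbrx/lwbrx accessors below.
 */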
#define DBDMA_SET(x)	( ((x) | (x) << 16) )
#define DBDMA_CLEAR(x)	( (x) << 16)

static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
	return swap;
}

static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();
}

static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}

static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}

static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}

static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}

static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}

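/*
 * PHY management (MIF) is bit-banged through the MIFCSR register.  Going
 * by the code below, bit 0 appears to drive the MDC clock, writing 4/6
 * drives MDIO low/high, and bit 3 reads MDIO back.  bmac_mif_read/write
 * clock out a standard IEEE 802.3 clause 22 frame: a 32-bit preamble of
 * ones, a start/opcode nibble (6 = read, 5 = write), 10 address bits
 * (PHY and register), a turnaround, then 16 data bits.
 */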
#define MIFDELAY	udelay(10)

static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}

static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
	int b;

	while (--nb >= 0) {
		b = (val & (1 << nb))? 6: 4;
		bmwrite(dev, MIFCSR, b);
		MIFDELAY;
		bmwrite(dev, MIFCSR, b|1);
		MIFDELAY;
	}
}

static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}

static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
}

static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);
}

#if 0
static void
bmac_disable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}

static void
bmac_enable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif

static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short	oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);
}

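/*
 * On the BMAC+ the PHY is driven through the standard MII registers:
 * register 0 is control (0x1000 enables autonegotiation, 0x0200 restarts
 * it), register 1 reports capabilities, register 4 is the advertisement.
 * bmac_init_phy below mirrors the capability bits into the advertisement
 * and (re)starts autonegotiation if the two have drifted apart.
 */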
static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk(KERN_DEBUG);
		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
	}
	printk(KERN_CONT "\n");

	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable ||
		    (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
		} else
			bmac_mif_write(dev, 0, 0x1000);
	}
}

static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}

#ifdef CONFIG_PM
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i=0; i<N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i<N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
				dev_kfree_skb(bp->tx_bufs[i]);
				bp->tx_bufs[i] = NULL;
			}
		}
	}
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}

static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */

static int bmac_set_address(struct net_device *dev, void *addr)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned char *p = addr;
	unsigned short *pWord16;
	unsigned long flags;
	int i;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	}
	/* load up the hardware address */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}

static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	bp->tx_timeout.function = bmac_tx_timeout;
	bp->tx_timeout.data = (unsigned long) dev;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
}

static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	void *vaddr;
	unsigned long baddr;
	unsigned long len;

	len = skb->len;
	vaddr = skb->data;
	baddr = virt_to_bus(vaddr);

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}

static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);
}

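/*
 * Both rings are laid out as N ring slots plus one trailing DBDMA branch
 * command that jumps back to slot 0, so the channel loops over the ring
 * by itself; the driver only has to arm slots and kick the channel with
 * dbdma_continue().
 */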
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}

static int
bmac_init_rx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(skb, 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}

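/*
 * The tx ring is a classic producer/consumer ring: tx_fill is where the
 * next packet goes in, tx_empty is the next slot the transmit interrupt
 * will reap, and the ring counts as full when advancing tx_fill would
 * make it equal tx_empty (one slot is sacrificed to tell full from empty).
 */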
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;	/* can't take it at the moment */
	}

	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	dev->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}

static int rxintcount;

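/*
 * Receive interrupt: walk the ring from rx_empty, and for every command
 * whose xfer_status has ACTIVE set (i.e. the channel has completed it),
 * hand the skb to the stack, allocate a replacement buffer, and re-arm
 * the slot before restarting the channel.
 */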
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = ld_le16(&cp->xfer_status);
		residual = ld_le16(&cp->res_count);
		if ((stat & ACTIVE) == 0)
			break;
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;
			skb_put(skb, nb);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			++dev->stats.rx_packets;
			dev->stats.rx_bytes += nb;
		} else {
			++dev->stats.rx_dropped;
		}
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		st_le16(&cp->res_count, 0);
		st_le16(&cp->xfer_status, 0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}

static int txintcount;

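/*
 * Transmit-complete interrupt: reap finished slots from tx_empty up to
 * tx_fill, freeing each skb and waking the queue.  A command whose status
 * word is still clear may simply not have been written back yet, so it is
 * only treated as pending if the channel's command pointer still points
 * at it.
 */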
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = ld_le16(&cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++dev->stats.tx_packets;
			dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}

#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};

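/*
 * crc416() folds one 16-bit word into a CRC-32 (polynomial ENET_CRCPOLY,
 * MSB first) one bit at a time; bmac_crc() chains it over the three words
 * of the station address starting from 0xffffffff.  The low six bits of
 * the result, bit-reversed via reverse6[], then pick one of the 64
 * hash-filter bits.
 */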
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
	register unsigned int counter, cur = curval, next = nxtval;
	register int high_crc_set, low_data_set;

	/* Swap bytes */
	next = ((next & 0x00FF) << 8) | (next >> 8);

	/* Compute bit-by-bit */
	for (counter = 0; counter < 16; ++counter) {
		/* is high CRC bit set? */
		if ((cur & 0x80000000) == 0) high_crc_set = 0;
		else high_crc_set = 1;

		cur = cur << 1;

		if ((next & 0x0001) == 0) low_data_set = 0;
		else low_data_set = 1;

		next = next >> 1;

		/* do the XOR */
		if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
	}
	return cur;
}

static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int newcrc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0 */

	return(newcrc);
}

/*
 * Add requested mcast addr to BMac's hash table filter.
 *
 */

static void
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	if (!(*addr)) return;
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc]++) return;	/* This bit is already set */
	mask = crc % 16;
	mask = (unsigned char)1 << mask;
	bp->hash_table_mask[crc/16] |= mask;
}

static void
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned char mask;

	/* Now, delete the address from the filter copy, as indicated */
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
	if (--bp->hash_use_count[crc]) return;	/* That bit is still in use */
	mask = crc % 16;
	mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
	bp->hash_table_mask[crc/16] &= mask;
}

/*
 * Sync the adapter with the software copy of the multicast mask
 * (logical address filter).
 */

static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	} while (rx_cfg & RxMACEnable);
}

unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}

static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
}

#if 0
static void
bmac_add_multi(struct net_device *dev,
	       struct bmac_data *bp, unsigned char *addr)
{
	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
	bmac_addhash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}

static void
bmac_remove_multi(struct net_device *dev,
		  struct bmac_data *bp, unsigned char *addr)
{
	bmac_removehash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
}
#endif

/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1	Promiscuous mode, receive all packets
    num_addrs == 0	Normal mode, clear multicast list
    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
			best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct bmac_data *bp = netdev_priv(dev);
	int num_addrs = netdev_mc_count(dev);
	unsigned short rx_cfg;
	int i;

	if (bp->sleeping)
		return;

	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
		bmac_update_hash_table_mask(dev, bp);
		rx_cfg = bmac_rx_on(dev, 1, 0);
		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmac_rx_on(dev, 0, 1);
		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
	} else {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
		if (num_addrs == 0) {
			rx_cfg = bmac_rx_on(dev, 0, 0);
			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
		} else {
			netdev_for_each_mc_addr(ha, dev)
				bmac_addhash(bp, ha->addr);
			bmac_update_hash_table_mask(dev, bp);
			rx_cfg = bmac_rx_on(dev, 1, 0);
			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
		}
	}
	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}
#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */

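/*
 * Hash scheme used here: take the little-endian CRC-32 of the 6-byte
 * address, keep the top 6 bits, and use them to index one bit out of
 * the 64-bit filter spread across the four 16-bit BHASH registers.
 */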
static void bmac_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	char *addrs;
	int i;
	unsigned short rx_cfg;
	u32 crc;

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if(dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
	} else {
		u16 hash_table[4];

		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

		for(i = 0; i < 4; i++) hash_table[i] = 0;

		netdev_for_each_mc_addr(ha, dev) {
			addrs = ha->addr;

			if(!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
	}
}
#endif /* SUNHME_MULTICAST */

static int miscintcount;

static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	unsigned int status = bmread(dev, STATUS);
	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/*     bmac_txdma_intr_inner(irq, dev_id); */
	/*   if (status & FrameReceived) dev->stats.rx_dropped++; */
	if (status & RxErrorMask) dev->stats.rx_errors++;
	if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
	if (status & RxLenCntExp) dev->stats.rx_length_errors++;
	if (status & RxOverFlow) dev->stats.rx_over_errors++;
	if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
	/*   if (status & FrameSent) dev->stats.tx_dropped++; */
	if (status & TxErrorMask) dev->stats.tx_errors++;
	if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) dev->stats.collisions++;
	return IRQ_HANDLED;
}

/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength	5
#define DataInOn		0x0008
#define DataInOff		0x0000
#define Clk			0x0002
#define ChipSelect		0x0001
#define SDIShiftCount		3
#define SD0ShiftCount		2
#define DelayValue		1000	/* number of microseconds */
#define SROMStartOffset		10	/* this is in words */
#define SROMReadCount		3	/* number of words to read from SROM */
#define SROMAddressBits		6
#define EnetAddressOffset	20

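/*
 * The station address lives in a serial EEPROM (93C46-style, going by
 * this interface) that is bit-banged through SROMCSR: bit 0 is chip
 * select, bit 1 the clock, bit 3 carries data to the part and bit 2
 * data back from it (see the SDI/SD0 shift counts above).
 */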
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
	unsigned short data;
	unsigned short val;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);
	udelay(DelayValue);

	data = bmread(dev, SROMCSR);
	udelay(DelayValue);
	val = (data >> SD0ShiftCount) & 1;

	bmwrite(dev, SROMCSR, ChipSelect);
	udelay(DelayValue);

	return val;
}

static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect  );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);
}

static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}

static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
	unsigned short data, val;
	int i;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++) {
		val = addr >> (addr_len-i-1);
		bmac_clock_in_bit(dev, val & 1);
	}

	/* Now read in the 16-bit data */
	data = 0;
	for (i = 0; i < 16; i++) {
		val = bmac_clock_out_bit(dev);
		data <<= 1;
		data |= val;
	}
	bmwrite(dev, SROMCSR, 0);

	return data;
}

/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */

static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	/* the stored checksum is read back but never actually compared */
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	return 0;
}

static void
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
{
	int i;
	unsigned short data;

	/* each SROM word yields two bytes of the 6-byte station address */
	for (i = 0; i < 3; i++)
		{
		reset_and_select_srom(dev);
		data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
		ea[2*i]   = bitrev8(data & 0x0ff);
		ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
		}
}

static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, 6);
		memcpy(data+6, dev->dev_addr, 6);
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}

static const struct ethtool_ops bmac_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
};

static const struct net_device_ops bmac_netdev_ops = {
	.ndo_open		= bmac_open,
	.ndo_stop		= bmac_close,
	.ndo_start_xmit		= bmac_output,
	.ndo_set_multicast_list	= bmac_set_multicast,
	.ndo_set_mac_address	= bmac_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int j, rev, ret;
	struct bmac_data *bp;
	const unsigned char *prop_addr;
	unsigned char addr[6];
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
		return -ENODEV;
	}
	prop_addr = of_get_property(macio_get_of_node(mdev),
			"mac-address", NULL);
	if (prop_addr == NULL) {
		prop_addr = of_get_property(macio_get_of_node(mdev),
				"local-mac-address", NULL);
		if (prop_addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
			return -ENODEV;
		}
	}
	memcpy(addr, prop_addr, sizeof(addr));

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	bp->mdev = mdev;
	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto out_free;
	}

	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)
		goto out_release;

	dev->irq = macio_irq(mdev, 0);

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];

	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->netdev_ops = &bmac_netdev_ops;
	dev->ethtool_ops = &bmac_ethtool_ops;

	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	init_timer(&bp->tx_timeout);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
		goto err_out_irq0;
	}
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
		goto err_out_irq2;
	}

	printk(KERN_INFO "%s: BMAC%s at %pM",
	       dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	return 0;

err_out_irq2:
	free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
	free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
	free_irq(dev->irq, dev);
err_out_iounmap_rx:
	iounmap(bp->rx_dma);
err_out_iounmap_tx:
	iounmap(bp->tx_dma);
err_out_iounmap:
	iounmap((void __iomem *)dev->base_addr);
out_release:
	macio_release_resources(mdev);
out_free:
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	free_netdev(dev);

	return -ENODEV;
}

static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	return 0;
}

static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	unsigned short config;
	int i;

	bp->sleeping = 1;

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: all bufs freed\n"));

	bp->opened = 0;
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	return 0;
}

static void
bmac_start(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	if (bp->sleeping)
		return;

	spin_lock_irqsave(&bp->lock, flags);
	while (1) {
		i = bp->tx_fill + 1;
		if (i >= N_TX_RING)
			i = 0;
		if (i == bp->tx_empty)
			break;
		skb = skb_dequeue(bp->queue);
		if (skb == NULL)
			break;
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}

static int
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	skb_queue_tail(bp->queue, skb);
	bmac_start(dev);
	return NETDEV_TX_OK;
}

static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
	/*     bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
	/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
	/* 	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
	/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++dev->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/*	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);
}

#if 0
static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
{
	int i,*ip;

	for (i=0;i< count;i++) {
		ip = (int*)(cp+i);

		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       ld_le32(ip+0),
		       ld_le32(ip+1),
		       ld_le32(ip+2),
		       ld_le32(ip+3));
	}
}
#endif

#if 0
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t pos   = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL)
		return -ENOSYS;

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i<N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		if (pos < offset) {
			len = 0;
			begin = pos;
		}

		if (pos > offset+length) break;
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
#endif

static int __devexit bmac_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(bp->rx_dma_intr, dev);

	iounmap((void __iomem *)dev->base_addr);
	iounmap(bp->tx_dma);
	iounmap(bp->rx_dma);

	macio_release_resources(mdev);

	free_netdev(dev);

	return 0;
}

static struct of_device_id bmac_match[] =
{
	{
	.name		= "bmac",
	.data		= (void *)0,
	},
	{
	.type		= "network",
	.compatible	= "bmac+",
	.data		= (void *)1,
	},
	{}
};
MODULE_DEVICE_TABLE (of, bmac_match);

static struct macio_driver bmac_driver =
{
	.driver = {
		.name		= "bmac",
		.owner		= THIS_MODULE,
		.of_match_table	= bmac_match,
	},
	.probe		= bmac_probe,
	.remove		= bmac_remove,
#ifdef CONFIG_PM
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,
#endif
};

static int __init bmac_init(void)
{
	if (bmac_emergency_rxbuf == NULL) {
		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
		if (bmac_emergency_rxbuf == NULL) {
			printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
			return -ENOMEM;
		}
	}

	return macio_register_driver(&bmac_driver);
}

static void __exit bmac_exit(void)
{
	macio_unregister_driver(&bmac_driver);

	kfree(bmac_emergency_rxbuf);
	bmac_emergency_rxbuf = NULL;
}

MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);