[PATCH] volatile unsigned short f(...) doesn't make sense
drivers/net/bmac.c
/*
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 * dynamic procfs inode.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#include <asm/irq.h>

#include "bmac.h"
#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define ENET_CRCPOLY 0x04c11db7
/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64
#define N_TX_RING	32
#define MAX_TX_ACTIVE	1
#define ETHERCRC	4
#define ETHERMINPACKET	64
#define ETHERMTU	1500
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

#define XXDEBUG(args)
struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	int is_bmac_plus;
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char tx_fullup;
	struct net_device_stats stats;
	struct timer_list tx_timeout;
	int timeout_active;
	int sleeping;
	int opened;
	unsigned short hash_use_count[64];
	unsigned short hash_table_mask[4];
	spinlock_t lock;
};
#if 0 /* Move that to ethtool */

typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMADD", MEMADD},
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},
	{"TXPNTR", TXPNTR},
	{"RXPNTR", RXPNTR},
	{"IPG1", IPG1},
	{"IPG2", IPG2},
	{"ALIMIT", ALIMIT},
	{"SLOT", SLOT},
	{"PALEN", PALEN},
	{"PAPAT", PAPAT},
	{"TXSFD", TXSFD},
	{"JAM", JAM},
	{"TXCFG", TXCFG},
	{"TXMAX", TXMAX},
	{"TXMIN", TXMIN},
	{"PAREG", PAREG},
	{"DCNT", DCNT},
	{"NCCNT", NCCNT},
	{"NTCNT", NTCNT},
	{"EXCNT", EXCNT},
	{"LTCNT", LTCNT},
	{"TXSM", TXSM},
	{"RXCFG", RXCFG},
	{"RXMAX", RXMAX},
	{"RXMIN", RXMIN},
	{"FRCNT", FRCNT},
	{"AECNT", AECNT},
	{"FECNT", FECNT},
	{"RXSM", RXSM},
	{"RXCV", RXCV}
};

#endif
static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))
static unsigned char bitrev(unsigned char b);
static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *bmac_stats(struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(unsigned long data);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);
#define DBDMA_SET(x)	( ((x) | (x) << 16) )
#define DBDMA_CLEAR(x)	( (x) << 16)
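/*
 * The DBDMA control register takes a 16-bit mask in its high half and
 * the new bit values in its low half; only bits whose mask bit is set
 * are changed.  So DBDMA_SET(x) sets bits x and DBDMA_CLEAR(x) clears
 * them, each in a single register write.
 */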
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
	return;
}

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
	return swap;
}
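/*
 * stwbrx/lwbrx store and load a word with byte reversal: the DBDMA
 * registers are little-endian, so these give cheap endian conversion
 * on the big-endian PowerPC.
 */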
static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();
}

static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}

static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}
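/*
 * Each struct dbdma_cmd is a 16-byte little-endian descriptor: an
 * opcode/flags word, a byte count, the buffer's bus address, and a
 * dependent field (the branch target for branch commands).  The
 * xfer_status and res_count fields are written back by the controller
 * as the command completes, which is why dbdma_setcmd() clears them.
 */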
static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}

static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}

static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}
#define MIFDELAY	udelay(10)

static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}

static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
	int b;

	while (--nb >= 0) {
		b = (val & (1 << nb))? 6: 4;
		bmwrite(dev, MIFCSR, b);
		MIFDELAY;
		bmwrite(dev, MIFCSR, b|1);
		MIFDELAY;
	}
}

static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}

static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
}
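/*
 * The MIF routines above bit-bang a standard IEEE 802.3 clause-22 MII
 * management frame through the MIFCSR register: a preamble of 32 ones,
 * a start/opcode nibble (6 = read, 5 = write), 10 bits of PHY and
 * register address, a 2-bit turnaround, then 16 data bits.
 */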
static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);

	return;
}
#if 0
static void
bmac_disable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}

static void
bmac_enable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif

static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);
}
static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk("\n" KERN_DEBUG);
		printk(" %.4x", bmac_mif_read(dev, addr));
	}
	printk("\n");
	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable
		    || (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
		} else
			bmac_mif_write(dev, 0, 0x1000);
	}
}

static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}
#ifdef CONFIG_PM
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i=0; i<N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i<N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
				dev_kfree_skb(bp->tx_bufs[i]);
				bp->tx_bufs[i] = NULL;
			}
		}
	}
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}

static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */
static int bmac_set_address(struct net_device *dev, void *addr)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned char *p = addr;
	unsigned short *pWord16;
	unsigned long flags;
	int i;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	}
	/* load up the hardware address */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}

static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	bp->tx_timeout.function = bmac_tx_timeout;
	bp->tx_timeout.data = (unsigned long) dev;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
}
static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	void *vaddr;
	unsigned long baddr;
	unsigned long len;

	len = skb->len;
	vaddr = skb->data;
	baddr = virt_to_bus(vaddr);

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}

static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);
}

/* Bit-reverse one byte of an ethernet hardware address. */
static unsigned char
bitrev(unsigned char b)
{
	int d = 0, i;

	for (i = 0; i < 8; ++i, b >>= 1)
		d = (d << 1) | (b & 1);
	return d;
}
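/*
 * (Later kernels provide this as bitrev8() in <linux/bitrev.h>; this
 * driver predates that helper and carries its own copy.)
 */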
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}

static int
bmac_init_rx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(skb, 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}
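/*
 * Both rings are classic circular DBDMA programs: N data commands
 * followed by an always-taken branch back to the first command, so the
 * channel loops without CPU help.  The 2-byte skb_reserve() above
 * offsets the 14-byte Ethernet header so the IP header lands on a
 * 4-byte boundary, and one tx slot is always left unused so that
 * tx_fill == tx_empty can unambiguously mean "ring empty".
 */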
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */
	}

	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	bp->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}
static int rxintcount;

static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = ld_le16(&cp->xfer_status);
		residual = ld_le16(&cp->res_count);
		if ((stat & ACTIVE) == 0)
			break;
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;
			bp->stats.rx_length_errors++;
			bp->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;
			skb_put(skb, nb);
			skb->dev = dev;
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->last_rx = jiffies;
			++bp->stats.rx_packets;
			bp->stats.rx_bytes += nb;
		} else {
			++bp->stats.rx_dropped;
		}
		dev->last_rx = jiffies;
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		st_le16(&cp->res_count, 0);
		st_le16(&cp->xfer_status, 0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}
static int txintcount;

static irqreturn_t bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = ld_le16(&cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++bp->stats.tx_packets;
			dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}
static struct net_device_stats *bmac_stats(struct net_device *dev)
{
	struct bmac_data *p = netdev_priv(dev);

	return &p->stats;
}
#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};

static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
	register unsigned int counter, cur = curval, next = nxtval;
	register int high_crc_set, low_data_set;

	/* Swap bytes */
	next = ((next & 0x00FF) << 8) | (next >> 8);

	/* Compute bit-by-bit */
	for (counter = 0; counter < 16; ++counter) {
		/* is high CRC bit set? */
		if ((cur & 0x80000000) == 0) high_crc_set = 0;
		else high_crc_set = 1;

		cur = cur << 1;

		if ((next & 0x0001) == 0) low_data_set = 0;
		else low_data_set = 1;

		next = next >> 1;

		/* do the XOR */
		if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
	}
	return cur;
}

static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int newcrc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0 */

	return(newcrc);
}
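/*
 * crc416() clocks 16 bits of the address through a standard MSB-first
 * CRC-32 step: after each shift, the register is XORed with
 * ENET_CRCPOLY whenever the outgoing high CRC bit differs from the
 * incoming data bit.  Seeded with 0xffffffff over all 48 address bits,
 * the low six bits of the final register (bit-reversed via reverse6[])
 * select one of the 64 hash-filter bits.
 */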
/*
 * Add requested mcast addr to BMac's hash table filter.
 */

static void
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	if (!(*addr)) return;
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc]++) return;	/* This bit is already set */
	mask = crc % 16;
	mask = (unsigned char)1 << mask;
	bp->hash_use_count[crc/16] |= mask;
}

static void
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned char mask;

	/* Now, delete the address from the filter copy, as indicated */
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc];	/* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
	if (--bp->hash_use_count[crc]) return;	/* That bit is still in use */
	mask = crc % 16;
	mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
	bp->hash_table_mask[crc/16] &= mask;
}
/*
 * Sync the adapter with the software copy of the multicast mask
 * (logical address filter).
 */

static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	} while (rx_cfg & RxMACEnable);
}

unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}

static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */
}
#if 0
static void
bmac_add_multi(struct net_device *dev,
	       struct bmac_data *bp, unsigned char *addr)
{
	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
	bmac_addhash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}

static void
bmac_remove_multi(struct net_device *dev,
		  struct bmac_data *bp, unsigned char *addr)
{
	bmac_removehash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
}
#endif
/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1	Promiscuous mode, receive all packets
    num_addrs == 0	Normal mode, clear multicast list
    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
			best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	struct bmac_data *bp = netdev_priv(dev);
	int num_addrs = dev->mc_count;
	unsigned short rx_cfg;
	int i;

	if (bp->sleeping)
		return;

	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

	if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
		bmac_update_hash_table_mask(dev, bp);
		rx_cfg = bmac_rx_on(dev, 1, 0);
		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmac_rx_on(dev, 0, 1);
		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
	} else {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
		if (num_addrs == 0) {
			rx_cfg = bmac_rx_on(dev, 0, 0);
			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
		} else {
			for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
				bmac_addhash(bp, dmi->dmi_addr);
			bmac_update_hash_table_mask(dev, bp);
			rx_cfg = bmac_rx_on(dev, 1, 0);
			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
		}
	}
	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}
#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */

static void bmac_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi = dev->mc_list;
	char *addrs;
	int i;
	unsigned short rx_cfg;
	u32 crc;

	if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if(dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
	} else {
		u16 hash_table[4];

		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

		for(i = 0; i < 4; i++) hash_table[i] = 0;
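		/*
		 * Hash each multicast address to a 6-bit index (the top
		 * six bits of the little-endian CRC-32): crc >> 4 selects
		 * one of the four 16-bit hash registers, crc & 0xf the
		 * bit within it.
		 */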
		for(i = 0; i < dev->mc_count; i++) {
			addrs = dmi->dmi_addr;
			dmi = dmi->next;

			if(!(*addrs & 1))
				continue;

			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
	}
}
#endif /* SUNHME_MULTICAST */
static int miscintcount;

static irqreturn_t bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	unsigned int status = bmread(dev, STATUS);
	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/* bmac_txdma_intr_inner(irq, dev_id, regs); */
	/* if (status & FrameReceived) bp->stats.rx_dropped++; */
	if (status & RxErrorMask) bp->stats.rx_errors++;
	if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
	if (status & RxLenCntExp) bp->stats.rx_length_errors++;
	if (status & RxOverFlow) bp->stats.rx_over_errors++;
	if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;

	/* if (status & FrameSent) bp->stats.tx_dropped++; */
	if (status & TxErrorMask) bp->stats.tx_errors++;
	if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) bp->stats.collisions++;
	return IRQ_HANDLED;
}
/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength	5
#define DataInOn		0x0008
#define DataInOff		0x0000
#define Clk			0x0002
#define ChipSelect		0x0001
#define SDIShiftCount		3
#define SD0ShiftCount		2
#define DelayValue		1000	/* number of microseconds */
#define SROMStartOffset		10	/* this is in words */
#define SROMReadCount		3	/* number of words to read from SROM */
#define SROMAddressBits		6
#define EnetAddressOffset	20
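/*
 * The routines below bit-bang what appears to be a 93C46-style
 * microwire serial EEPROM through SROMCSR: ChipSelect and Clk are
 * driven directly, data is shifted in via bit SDIShiftCount and read
 * back from bit SD0ShiftCount, one bit per clock pulse.
 */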
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
	unsigned short data;
	unsigned short val;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);
	udelay(DelayValue);

	data = bmread(dev, SROMCSR);
	udelay(DelayValue);
	val = (data >> SD0ShiftCount) & 1;

	bmwrite(dev, SROMCSR, ChipSelect);
	udelay(DelayValue);

	return val;
}

static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);
}
static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}

static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
	unsigned short data, val;
	int i;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++) {
		val = addr >> (addr_len-i-1);
		bmac_clock_in_bit(dev, val & 1);
	}

	/* Now read in the 16-bit data */
	data = 0;
	for (i = 0; i < 16; i++) {
		val = bmac_clock_out_bit(dev);
		data <<= 1;
		data |= val;
	}
	bmwrite(dev, SROMCSR, 0);

	return data;
}
/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums.  What a pain...
 */
static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	return 0;
}
static void
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
{
	int i;
	unsigned short data;

	/* 3 SROM words (SROMReadCount) hold the 6-byte station address */
	for (i = 0; i < 3; i++) {
		reset_and_select_srom(dev);
		data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
		ea[2*i]   = bitrev(data & 0x0ff);
		ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
	}
}
static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, 6);
		memcpy(data+6, dev->dev_addr, 6);
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int j, rev, ret;
	struct bmac_data *bp;
	unsigned char *addr;
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
		return -ENODEV;
	}
	addr = get_property(macio_get_of_node(mdev), "mac-address", NULL);
	if (addr == NULL) {
		addr = get_property(macio_get_of_node(mdev), "local-mac-address", NULL);
		if (addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
			return -ENODEV;
		}
	}

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	bp->mdev = mdev;
	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto out_free;
	}

	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)
		goto out_release;

	dev->irq = macio_irq(mdev, 0);

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];

	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->open = bmac_open;
	dev->stop = bmac_close;
	dev->hard_start_xmit = bmac_output;
	dev->get_stats = bmac_stats;
	dev->set_multicast_list = bmac_set_multicast;
	dev->set_mac_address = bmac_set_address;

	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	init_timer(&bp->tx_timeout);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
		goto err_out_irq0;
	}
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
		goto err_out_irq2;
	}

	printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
	for (j = 0; j < 6; ++j)
		printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	return 0;

err_out_irq2:
	free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
	free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
	free_irq(dev->irq, dev);
err_out_iounmap_rx:
	iounmap(bp->rx_dma);
err_out_iounmap_tx:
	iounmap(bp->tx_dma);
err_out_iounmap:
	iounmap((void __iomem *)dev->base_addr);
out_release:
	macio_release_resources(mdev);
out_free:
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	free_netdev(dev);

	return -ENODEV;
}
static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	return 0;
}

static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	unsigned short config;
	int i;

	bp->sleeping = 1;

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: all bufs freed\n"));

	bp->opened = 0;
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	return 0;
}
static void
bmac_start(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	if (bp->sleeping)
		return;

	spin_lock_irqsave(&bp->lock, flags);
	while (1) {
		i = bp->tx_fill + 1;
		if (i >= N_TX_RING)
			i = 0;
		if (i == bp->tx_empty)
			break;
		skb = skb_dequeue(bp->queue);
		if (skb == NULL)
			break;
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
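/*
 * Packets handed to us by the stack are parked on a private sk_buff
 * queue; bmac_start() drains that queue into the tx DMA ring under the
 * driver lock, so both the hard_start_xmit path below and the
 * tx-complete interrupt funnel through the same routine.
 */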
static int
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	skb_queue_tail(bp->queue, skb);
	bmac_start(dev);
	return 0;
}
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
	/*     bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
	/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
	/* 	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
	/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++bp->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/*	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);
}
#if 0
static void dump_dbdma(volatile struct dbdma_cmd *cp, int count)
{
	int i, *ip;

	for (i = 0; i < count; i++) {
		ip = (int *)(cp+i);

		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       ld_le32(ip+0),
		       ld_le32(ip+1),
		       ld_le32(ip+2),
		       ld_le32(ip+3));
	}
}
#endif
#if 0
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t pos   = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL)
		return (-ENOSYS);

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i < N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		if (pos < offset) {
			len = 0;
			begin = pos;
		}

		if (pos > offset+length) break;
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
#endif
static int __devexit bmac_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(bp->rx_dma_intr, dev);

	iounmap((void __iomem *)dev->base_addr);
	iounmap(bp->tx_dma);
	iounmap(bp->rx_dma);

	macio_release_resources(mdev);

	free_netdev(dev);

	return 0;
}
static struct of_device_id bmac_match[] =
{
	{
	.name		= "bmac",
	.data		= (void *)0,
	},
	{
	.type		= "network",
	.compatible	= "bmac+",
	.data		= (void *)1,
	},
	{}
};

static struct macio_driver bmac_driver =
{
	.name		= "bmac",
	.match_table	= bmac_match,
	.probe		= bmac_probe,
	.remove		= bmac_remove,
#ifdef CONFIG_PM
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,
#endif
};
static int __init bmac_init(void)
{
	if (bmac_emergency_rxbuf == NULL) {
		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
		if (bmac_emergency_rxbuf == NULL) {
			printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
			return -ENOMEM;
		}
	}

	return macio_register_driver(&bmac_driver);
}

static void __exit bmac_exit(void)
{
	macio_unregister_driver(&bmac_driver);

	if (bmac_emergency_rxbuf != NULL) {
		kfree(bmac_emergency_rxbuf);
		bmac_emergency_rxbuf = NULL;
	}
}

MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);