/*
 * Network device driver for the BMAC ethernet controller on
 * Apple Powermacs.  Assumes it's under a DBDMA controller.
 *
 * Copyright (C) 1998 Randy Gobbel.
 *
 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
 * dynamic procfs inode.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/ethtool.h>
#include <asm/prom.h>
#include <asm/dbdma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/macio.h>
#include <asm/irq.h>

#include "bmac.h"
#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define ENET_CRCPOLY 0x04c11db7

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64
#define N_TX_RING	32
#define MAX_TX_ACTIVE	1
#define ETHERCRC	4
#define ETHERMINPACKET	64
#define ETHERMTU	1500
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

#define XXDEBUG(args)
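/* Note: XXDEBUG() as defined above compiles the tracing calls below out
 * entirely; redefining it as, say, "printk args" turns them back on
 * (hence the double parentheses at every call site). */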
struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;
	int tx_dma_intr;
	volatile struct dbdma_regs __iomem *rx_dma;
	int rx_dma_intr;
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	int is_bmac_plus;
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;
	int rx_empty;
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;
	int tx_empty;
	unsigned char tx_fullup;
	struct timer_list tx_timeout;
	int timeout_active;
	int sleeping;
	int opened;
	unsigned short hash_use_count[64];
	unsigned short hash_table_mask[4];
	spinlock_t lock;
};
#if 0 /* Move that to ethtool */

typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMADD", MEMADD},
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},
	{"TXPNTR", TXPNTR},
	{"RXPNTR", RXPNTR},
	{"IPG1", IPG1},
	{"IPG2", IPG2},
	{"ALIMIT", ALIMIT},
	{"SLOT", SLOT},
	{"PALEN", PALEN},
	{"PAPAT", PAPAT},
	{"TXSFD", TXSFD},
	{"JAM", JAM},
	{"TXCFG", TXCFG},
	{"TXMAX", TXMAX},
	{"TXMIN", TXMIN},
	{"PAREG", PAREG},
	{"DCNT", DCNT},
	{"NCCNT", NCCNT},
	{"NTCNT", NTCNT},
	{"EXCNT", EXCNT},
	{"LTCNT", LTCNT},
	{"TXSM", TXSM},
	{"RXCFG", RXCFG},
	{"RXMAX", RXMAX},
	{"RXMIN", RXMIN},
	{"FRCNT", FRCNT},
	{"AECNT", AECNT},
	{"FECNT", FECNT},
	{"RXSM", RXSM},
	{"RXCV", RXCV}
};

#endif
static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))
static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(unsigned long data);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);
#define DBDMA_SET(x)	( ((x) | (x) << 16) )
#define DBDMA_CLEAR(x)	( (x) << 16)
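/* The DBDMA control register takes, in its high 16 bits, a mask of
 * which low-order control/status bits to change, so DBDMA_SET() and
 * DBDMA_CLEAR() build the two flavours of request.  The registers
 * themselves are little-endian, hence the byte-reversing stwbrx/lwbrx
 * accessors below. */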
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
	return;
}

static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" :  "=r" (swap) : "r" (a));
	return swap;
}

static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();
}

static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}

static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}

static inline
void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
{
	out_le16((void __iomem *)dev->base_addr + reg_offset, data);
}

static inline
unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
{
	return in_le16((void __iomem *)dev->base_addr + reg_offset);
}
static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}

#define MIFDELAY	udelay(10)
static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}

static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
	int b;

	while (--nb >= 0) {
		b = (val & (1 << nb))? 6: 4;
		bmwrite(dev, MIFCSR, b);
		MIFDELAY;
		bmwrite(dev, MIFCSR, b|1);
		MIFDELAY;
	}
}

static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}

static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
}
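/* The routines above bit-bang a standard MII management frame through
 * MIFCSR: a preamble of 32 ones, a start/opcode field (0110 to read,
 * 0101 to write), ten address bits (PHY address plus register number),
 * a turnaround, then sixteen data bits.  Reads clock 17 bits back in
 * so as to swallow the turnaround bit along with the data. */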
static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	bmwrite(dev, RSEED, (unsigned short)0x1968);

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */

	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);

	return;
}
#if 0
static void
bmac_disable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}

static void
bmac_enable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif
static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);
}
static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk(KERN_DEBUG);
		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
	}
	printk(KERN_CONT "\n");

	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable ||
		    (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);
		} else
			bmac_mif_write(dev, 0, 0x1000);
	}
}
static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}
#ifdef CONFIG_PM
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i=0; i<N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i<N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
				dev_kfree_skb(bp->tx_bufs[i]);
				bp->tx_bufs[i] = NULL;
			}
		}
	}
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}

static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */
static int bmac_set_address(struct net_device *dev, void *addr)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned char *p = addr;
	unsigned short *pWord16;
	unsigned long flags;
	int i;

	XXDEBUG(("bmac: enter set_address\n"));
	spin_lock_irqsave(&bp->lock, flags);

	for (i = 0; i < 6; ++i) {
		dev->dev_addr[i] = p[i];
	}
	/* load up the hardware address */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	spin_unlock_irqrestore(&bp->lock, flags);
	XXDEBUG(("bmac: exit set_address\n"));
	return 0;
}
static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	bp->tx_timeout.function = bmac_tx_timeout;
	bp->tx_timeout.data = (unsigned long) dev;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
}
static void
bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	void *vaddr;
	unsigned long baddr;
	unsigned long len;

	len = skb->len;
	vaddr = skb->data;
	baddr = virt_to_bus(vaddr);

	dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
}

static void
bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
{
	unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;

	dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
		     virt_to_bus(addr), 0);
}
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}

static int
bmac_init_rx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(skb, 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */
	}

	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	dev->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}
static int rxintcount;

static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = ld_le16(&cp->xfer_status);
		residual = ld_le16(&cp->res_count);
		if ((stat & ACTIVE) == 0)
			break;
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;
			skb_put(skb, nb);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			++dev->stats.rx_packets;
			dev->stats.rx_bytes += nb;
		} else {
			++dev->stats.rx_dropped;
		}
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		st_le16(&cp->res_count, 0);
		st_le16(&cp->xfer_status, 0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}
static int txintcount;

static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = ld_le16(&cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++dev->stats.tx_packets;
			dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}
#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
	register unsigned int counter, cur = curval, next = nxtval;
	register int high_crc_set, low_data_set;

	/* Swap bytes */
	next = ((next & 0x00FF) << 8) | (next >> 8);

	/* Compute bit-by-bit */
	for (counter = 0; counter < 16; ++counter) {
		/* is high CRC bit set? */
		if ((cur & 0x80000000) == 0) high_crc_set = 0;
		else high_crc_set = 1;

		cur = cur << 1;

		if ((next & 0x0001) == 0) low_data_set = 0;
		else low_data_set = 1;

		next = next >> 1;

		/* do the XOR */
		if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
	}
	return cur;
}

static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int newcrc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	newcrc = crc416(0xffffffff, *address);	/* address bits 47 - 32 */
	newcrc = crc416(newcrc, address[1]);	/* address bits 31 - 16 */
	newcrc = crc416(newcrc, address[2]);	/* address bits 15 - 0  */

	return(newcrc);
}
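/* crc416() advances a CRC-32 (polynomial ENET_CRCPOLY) by sixteen data
 * bits at a time, byte-swapping each halfword first, so bmac_crc()
 * folds the 48-bit station address in three halfword steps, most
 * significant word first. */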
/*
 * Add requested mcast addr to BMac's hash table filter.
 */

static void
bmac_addhash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	if (!(*addr)) return;
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc]++) return; /* This bit is already set */
	mask = crc % 16;
	mask = (unsigned short)1 << mask;
	bp->hash_table_mask[crc/16] |= mask;
}

static void
bmac_removehash(struct bmac_data *bp, unsigned char *addr)
{
	unsigned int crc;
	unsigned short mask;

	/* Now, delete the address from the filter copy, as indicated */
	crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
	crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
	if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
	if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
	mask = crc % 16;
	mask = ((unsigned short)1 << mask) ^ 0xffff; /* To turn off bit */
	bp->hash_table_mask[crc/16] &= mask;
}
/*
 * Sync the adapter with the software copy of the multicast mask
 * (logical address filter).
 */

static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	} while (rx_cfg & RxMACEnable);
}

unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}

static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]);	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]);	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]);	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]);	/* bits 63 - 48 */
}
#if 0
static void
bmac_add_multi(struct net_device *dev,
	       struct bmac_data *bp, unsigned char *addr)
{
	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
	bmac_addhash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}

static void
bmac_remove_multi(struct net_device *dev,
		  struct bmac_data *bp, unsigned char *addr)
{
	bmac_removehash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
}
#endif
/* Set or clear the multicast filter for this adaptor.
    num_addrs == -1	Promiscuous mode, receive all packets
    num_addrs == 0	Normal mode, clear multicast list
    num_addrs > 0	Multicast mode, receive normal and MC packets, and do
			best-effort filtering.
 */
static void bmac_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	struct bmac_data *bp = netdev_priv(dev);
	int num_addrs = netdev_mc_count(dev);
	unsigned short rx_cfg;
	int i;

	if (bp->sleeping)
		return;

	XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
		bmac_update_hash_table_mask(dev, bp);
		rx_cfg = bmac_rx_on(dev, 1, 0);
		XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n", rx_cfg));
	} else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
		rx_cfg = bmac_rx_on(dev, 0, 1);
		XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
	} else {
		for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
		for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
		if (num_addrs == 0) {
			rx_cfg = bmac_rx_on(dev, 0, 0);
			XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
		} else {
			netdev_for_each_mc_addr(dmi, dev)
				bmac_addhash(bp, dmi->dmi_addr);
			bmac_update_hash_table_mask(dev, bp);
			rx_cfg = bmac_rx_on(dev, 1, 0);
			XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
		}
	}
	/* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
}
#else /* ifdef SUNHME_MULTICAST */

/* The version of set_multicast below was lifted from sunhme.c */

static void bmac_set_multicast(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	char *addrs;
	int i;
	unsigned short rx_cfg;
	u32 crc;

	if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		bmwrite(dev, BHASH0, 0xffff);
		bmwrite(dev, BHASH1, 0xffff);
		bmwrite(dev, BHASH2, 0xffff);
		bmwrite(dev, BHASH3, 0xffff);
	} else if(dev->flags & IFF_PROMISC) {
		rx_cfg = bmread(dev, RXCFG);
		rx_cfg |= RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);
	} else {
		u16 hash_table[4];

		rx_cfg = bmread(dev, RXCFG);
		rx_cfg &= ~RxPromiscEnable;
		bmwrite(dev, RXCFG, rx_cfg);

		for(i = 0; i < 4; i++) hash_table[i] = 0;

		netdev_for_each_mc_addr(dmi, dev) {
			addrs = dmi->dmi_addr;

			if(!(*addrs & 1))
				continue;
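			/* The top six bits of the little-endian CRC of
			 * the address pick one of the 64 hash-filter
			 * bits, sixteen per BHASH register. */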
			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		bmwrite(dev, BHASH0, hash_table[0]);
		bmwrite(dev, BHASH1, hash_table[1]);
		bmwrite(dev, BHASH2, hash_table[2]);
		bmwrite(dev, BHASH3, hash_table[3]);
	}
}
#endif /* SUNHME_MULTICAST */
static int miscintcount;

static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	unsigned int status = bmread(dev, STATUS);
	if (miscintcount++ < 10) {
		XXDEBUG(("bmac_misc_intr\n"));
	}
	/* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
	/*     bmac_txdma_intr_inner(irq, dev_id); */
	/*   if (status & FrameReceived) dev->stats.rx_dropped++; */
	if (status & RxErrorMask) dev->stats.rx_errors++;
	if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
	if (status & RxLenCntExp) dev->stats.rx_length_errors++;
	if (status & RxOverFlow) dev->stats.rx_over_errors++;
	if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
	/*   if (status & FrameSent) dev->stats.tx_dropped++; */
	if (status & TxErrorMask) dev->stats.tx_errors++;
	if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
	if (status & TxNormalCollExp) dev->stats.collisions++;
	return IRQ_HANDLED;
}
/*
 * Procedure for reading EEPROM
 */
#define SROMAddressLength	5
#define DataInOn		0x0008
#define DataInOff		0x0000
#define Clk			0x0002
#define ChipSelect		0x0001
#define SDIShiftCount		3
#define SD0ShiftCount		2
#define DelayValue		1000	/* number of microseconds */
#define SROMStartOffset		10	/* this is in words */
#define SROMReadCount		3	/* number of words to read from SROM */
#define SROMAddressBits		6
#define EnetAddressOffset	20
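/* The SROM is a serial EEPROM clocked one bit at a time through the
 * SROMCSR register: the host drives ChipSelect and Clk, shifts address
 * bits in on the SDIShiftCount bit position, and reads data bits back
 * on the SD0ShiftCount position, as the routines below do. */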
static unsigned char
bmac_clock_out_bit(struct net_device *dev)
{
	unsigned short data;
	unsigned short val;

	bmwrite(dev, SROMCSR, ChipSelect | Clk);
	udelay(DelayValue);

	data = bmread(dev, SROMCSR);
	udelay(DelayValue);
	val = (data >> SD0ShiftCount) & 1;

	bmwrite(dev, SROMCSR, ChipSelect);
	udelay(DelayValue);

	return val;
}

static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect | Clk);
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);
}

static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}
static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
	unsigned short data, val;
	int i;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++) {
		val = addr >> (addr_len-i-1);
		bmac_clock_in_bit(dev, val & 1);
	}

	/* Now read in the 16-bit data */
	data = 0;
	for (i = 0; i < 16; i++) {
		val = bmac_clock_out_bit(dev);
		data <<= 1;
		data |= val;
	}
	bmwrite(dev, SROMCSR, 0);

	return data;
}
/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums.  What a pain..
 */

static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	return 0;
}
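/* Note: storedCS is byte-swapped out of the SROM but, given the
 * Cogent/SMC mismatch mentioned above, never compared against
 * anything; verification always reports success. */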
static void
bmac_get_station_address(struct net_device *dev, unsigned char *ea)
{
	int i;
	unsigned short data;

	for (i = 0; i < 6; i++) {
		reset_and_select_srom(dev);
		data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
		ea[2*i]   = bitrev8(data & 0x0ff);
		ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
	}
}
static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, 6);
		memcpy(data+6, dev->dev_addr, 6);
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bmac_data *bp = netdev_priv(dev);
	strcpy(info->driver, "bmac");
	strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev));
}

static const struct ethtool_ops bmac_ethtool_ops = {
	.get_drvinfo		= bmac_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};

static const struct net_device_ops bmac_netdev_ops = {
	.ndo_open		= bmac_open,
	.ndo_stop		= bmac_close,
	.ndo_start_xmit		= bmac_output,
	.ndo_set_multicast_list	= bmac_set_multicast,
	.ndo_set_mac_address	= bmac_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int j, rev, ret;
	struct bmac_data *bp;
	const unsigned char *prop_addr;
	unsigned char addr[6];
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;

	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
		return -ENODEV;
	}
	prop_addr = of_get_property(macio_get_of_node(mdev),
			"mac-address", NULL);
	if (prop_addr == NULL) {
		prop_addr = of_get_property(macio_get_of_node(mdev),
				"local-mac-address", NULL);
		if (prop_addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
			return -ENODEV;
		}
	}
	memcpy(addr, prop_addr, sizeof(addr));

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	bp->mdev = mdev;
	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto out_free;
	}

	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)
		goto out_release;

	dev->irq = macio_irq(mdev, 0);

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
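	/* (Addresses whose first two bytes are 00:a0 appear to be
	 * stored bit-reversed in the ROM, hence the bitrev8() above.) */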
	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->netdev_ops = &bmac_netdev_ops;
	dev->ethtool_ops = &bmac_ethtool_ops;

	bmac_get_station_address(dev, addr);
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	init_timer(&bp->tx_timeout);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
		goto err_out_irq0;
	}
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
		goto err_out_irq2;
	}

	printk(KERN_INFO "%s: BMAC%s at %pM",
	       dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	return 0;

err_out_irq2:
	free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
	free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
	free_irq(dev->irq, dev);
err_out_iounmap_rx:
	iounmap(bp->rx_dma);
err_out_iounmap_tx:
	iounmap(bp->tx_dma);
err_out_iounmap:
	iounmap((void __iomem *)dev->base_addr);
out_release:
	macio_release_resources(mdev);
out_free:
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	free_netdev(dev);

	return -ENODEV;
}
static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	return 0;
}
static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	unsigned short config;
	int i;

	bp->sleeping = 1;

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: all bufs freed\n"));

	bp->opened = 0;
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	return 0;
}
static void
bmac_start(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	if (bp->sleeping)
		return;

	spin_lock_irqsave(&bp->lock, flags);
	while (1) {
		i = bp->tx_fill + 1;
		if (i >= N_TX_RING)
			i = 0;
		if (i == bp->tx_empty)
			break;
		skb = skb_dequeue(bp->queue);
		if (skb == NULL)
			break;
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
static int
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	skb_queue_tail(bp->queue, skb);
	bmac_start(dev);
	return NETDEV_TX_OK;
}
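/* Transmission is thus two-stage: ndo_start_xmit only queues the skb,
 * and bmac_start() (also called from the tx-complete interrupt) drains
 * the queue into free tx ring slots under bp->lock, so the xmit path
 * itself never waits for ring space. */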
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
	/*     bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
	/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
	/* 	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
	/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++dev->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/*	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);
}
#if 0
static void dump_dbdma(volatile struct dbdma_cmd *cp, int count)
{
	int i,*ip;

	for (i = 0; i < count; i++) {
		ip = (int*)(cp+i);

		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       ld_le32(ip+0),
		       ld_le32(ip+1),
		       ld_le32(ip+2),
		       ld_le32(ip+3));
	}
}
#endif
#if 0
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t pos = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL)
		return (-ENOSYS);

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i<N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		if (pos < offset) {
			len = 0;
			begin = pos;
		}

		if (pos > offset+length) break;
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
#endif
static int __devexit bmac_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(bp->rx_dma_intr, dev);

	iounmap((void __iomem *)dev->base_addr);
	iounmap(bp->tx_dma);
	iounmap(bp->rx_dma);

	macio_release_resources(mdev);

	free_netdev(dev);

	return 0;
}
static struct of_device_id bmac_match[] =
{
	{
	.name		= "bmac",
	.data		= (void *)0,
	},
	{
	.type		= "network",
	.compatible	= "bmac+",
	.data		= (void *)1,
	},
	{}
};
MODULE_DEVICE_TABLE (of, bmac_match);
static struct macio_driver bmac_driver =
{
	.name		= "bmac",
	.match_table	= bmac_match,
	.probe		= bmac_probe,
	.remove		= bmac_remove,
#ifdef CONFIG_PM
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,
#endif
};
static int __init bmac_init(void)
{
	if (bmac_emergency_rxbuf == NULL) {
		bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
		if (bmac_emergency_rxbuf == NULL) {
			printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
			return -ENOMEM;
		}
	}

	return macio_register_driver(&bmac_driver);
}

static void __exit bmac_exit(void)
{
	macio_unregister_driver(&bmac_driver);

	kfree(bmac_emergency_rxbuf);
	bmac_emergency_rxbuf = NULL;
}

MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
MODULE_LICENSE("GPL");

module_init(bmac_init);
module_exit(bmac_exit);