More meth updates.
[linux-2.6/linux-mips.git] / drivers / net / bmac.c
blobcb280081b6da40b67f48a73317f20d76cc29ad3d
1 /*
2 * Network device driver for the BMAC ethernet controller on
3 * Apple Powermacs. Assumes it's under a DBDMA controller.
5 * Copyright (C) 1998 Randy Gobbel.
7 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
8 * dynamic procfs inode.
9 */
10 #include <linux/config.h>
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/netdevice.h>
14 #include <linux/etherdevice.h>
15 #include <linux/delay.h>
16 #include <linux/string.h>
17 #include <linux/timer.h>
18 #include <linux/proc_fs.h>
19 #include <linux/init.h>
20 #include <linux/spinlock.h>
21 #include <linux/crc32.h>
22 #include <asm/prom.h>
23 #include <asm/dbdma.h>
24 #include <asm/io.h>
25 #include <asm/page.h>
26 #include <asm/pgtable.h>
27 #include <asm/machdep.h>
28 #include <asm/pmac_feature.h>
29 #include <asm/irq.h>
30 #ifdef CONFIG_PMAC_PBOOK
31 #include <linux/adb.h>
32 #include <linux/pmu.h>
33 #endif
34 #include "bmac.h"
36 #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
37 #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))
40 * CRC polynomial - used in working out multicast filter bits.
42 #define ENET_CRCPOLY 0x04c11db7
44 /* switch to use multicast code lifted from sunhme driver */
45 #define SUNHME_MULTICAST
47 #define N_RX_RING 64
48 #define N_TX_RING 32
49 #define MAX_TX_ACTIVE 1
50 #define ETHERCRC 4
51 #define ETHERMINPACKET 64
52 #define ETHERMTU 1500
53 #define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2)
54 #define TX_TIMEOUT HZ /* 1 second */
56 /* Bits in transmit DMA status */
57 #define TX_DMA_ERR 0x80
59 #define XXDEBUG(args)
61 struct bmac_data {
62 /* volatile struct bmac *bmac; */
63 struct sk_buff_head *queue;
64 volatile struct dbdma_regs *tx_dma;
65 int tx_dma_intr;
66 volatile struct dbdma_regs *rx_dma;
67 int rx_dma_intr;
68 volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */
69 volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */
70 struct device_node *node;
71 int is_bmac_plus;
72 struct sk_buff *rx_bufs[N_RX_RING];
73 int rx_fill;
74 int rx_empty;
75 struct sk_buff *tx_bufs[N_TX_RING];
76 int tx_fill;
77 int tx_empty;
78 unsigned char tx_fullup;
79 struct net_device_stats stats;
80 struct timer_list tx_timeout;
81 int timeout_active;
82 int sleeping;
83 int opened;
84 unsigned short hash_use_count[64];
85 unsigned short hash_table_mask[4];
86 spinlock_t lock;
87 struct net_device *next_bmac;
90 typedef struct bmac_reg_entry {
91 char *name;
92 unsigned short reg_offset;
93 } bmac_reg_entry_t;
95 #define N_REG_ENTRIES 31
97 static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
98 {"MEMADD", MEMADD},
99 {"MEMDATAHI", MEMDATAHI},
100 {"MEMDATALO", MEMDATALO},
101 {"TXPNTR", TXPNTR},
102 {"RXPNTR", RXPNTR},
103 {"IPG1", IPG1},
104 {"IPG2", IPG2},
105 {"ALIMIT", ALIMIT},
106 {"SLOT", SLOT},
107 {"PALEN", PALEN},
108 {"PAPAT", PAPAT},
109 {"TXSFD", TXSFD},
110 {"JAM", JAM},
111 {"TXCFG", TXCFG},
112 {"TXMAX", TXMAX},
113 {"TXMIN", TXMIN},
114 {"PAREG", PAREG},
115 {"DCNT", DCNT},
116 {"NCCNT", NCCNT},
117 {"NTCNT", NTCNT},
118 {"EXCNT", EXCNT},
119 {"LTCNT", LTCNT},
120 {"TXSM", TXSM},
121 {"RXCFG", RXCFG},
122 {"RXMAX", RXMAX},
123 {"RXMIN", RXMIN},
124 {"FRCNT", FRCNT},
125 {"AECNT", AECNT},
126 {"FECNT", FECNT},
127 {"RXSM", RXSM},
128 {"RXCV", RXCV}
131 static struct net_device *bmac_devs;
132 static unsigned char *bmac_emergency_rxbuf;
134 #ifdef CONFIG_PMAC_PBOOK
135 static int bmac_sleep_notify(struct pmu_sleep_notifier *self, int when);
136 static struct pmu_sleep_notifier bmac_sleep_notifier = {
137 bmac_sleep_notify, SLEEP_LEVEL_NET,
139 #endif
142 * Number of bytes of private data per BMAC: allow enough for
143 * the rx and tx dma commands plus a branch dma command each,
144 * and another 16 bytes to allow us to align the dma command
145 * buffers on a 16 byte boundary.
147 #define PRIV_BYTES (sizeof(struct bmac_data) \
148 + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
149 + sizeof(struct sk_buff_head))
151 static unsigned char bitrev(unsigned char b);
152 static void bmac_probe1(struct device_node *bmac, int is_bmac_plus);
153 static int bmac_open(struct net_device *dev);
154 static int bmac_close(struct net_device *dev);
155 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
156 static struct net_device_stats *bmac_stats(struct net_device *dev);
157 static void bmac_set_multicast(struct net_device *dev);
158 static void bmac_reset_and_enable(struct net_device *dev);
159 static void bmac_start_chip(struct net_device *dev);
160 static void bmac_init_chip(struct net_device *dev);
161 static void bmac_init_registers(struct net_device *dev);
162 static void bmac_enable_and_reset_chip(struct net_device *dev);
163 static int bmac_set_address(struct net_device *dev, void *addr);
164 static irqreturn_t bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs);
165 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs);
166 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs);
167 static void bmac_set_timeout(struct net_device *dev);
168 static void bmac_tx_timeout(unsigned long data);
169 static int bmac_proc_info ( char *buffer, char **start, off_t offset, int length);
170 static int bmac_output(struct sk_buff *skb, struct net_device *dev);
171 static void bmac_start(struct net_device *dev);
173 #define DBDMA_SET(x) ( ((x) | (x) << 16) )
174 #define DBDMA_CLEAR(x) ( (x) << 16)
/* Byte-swapping 32-bit store for little-endian DBDMA registers. */
static inline void
dbdma_st32(volatile unsigned long *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}
/* Byte-swapping 32-bit load for little-endian DBDMA registers. */
static inline unsigned long
dbdma_ld32(volatile unsigned long *a)
{
	unsigned long val;

	__asm__ volatile ("lwbrx %0,0,%1" : "=r" (val) : "r" (a));
	return val;
}
191 static void
192 dbdma_continue(volatile struct dbdma_regs *dmap)
194 dbdma_st32((volatile unsigned long *)&dmap->control,
195 DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
196 eieio();
199 static void
200 dbdma_reset(volatile struct dbdma_regs *dmap)
202 dbdma_st32((volatile unsigned long *)&dmap->control,
203 DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
204 eieio();
205 while (dbdma_ld32((volatile unsigned long *)&dmap->status) & RUN)
206 eieio();
209 static void
210 dbdma_setcmd(volatile struct dbdma_cmd *cp,
211 unsigned short cmd, unsigned count, unsigned long addr,
212 unsigned long cmd_dep)
214 out_le16(&cp->command, cmd);
215 out_le16(&cp->req_count, count);
216 out_le32(&cp->phy_addr, addr);
217 out_le32(&cp->cmd_dep, cmd_dep);
218 out_le16(&cp->xfer_status, 0);
219 out_le16(&cp->res_count, 0);
222 static inline
223 void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
225 out_le16((void *)dev->base_addr + reg_offset, data);
229 static inline
230 volatile unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
232 return in_le16((void *)dev->base_addr + reg_offset);
235 static void
236 bmac_enable_and_reset_chip(struct net_device *dev)
238 struct bmac_data *bp = (struct bmac_data *) dev->priv;
239 volatile struct dbdma_regs *rd = bp->rx_dma;
240 volatile struct dbdma_regs *td = bp->tx_dma;
242 if (rd)
243 dbdma_reset(rd);
244 if (td)
245 dbdma_reset(td);
247 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 1);
250 #define MIFDELAY udelay(10)
252 static unsigned int
253 bmac_mif_readbits(struct net_device *dev, int nb)
255 unsigned int val = 0;
257 while (--nb >= 0) {
258 bmwrite(dev, MIFCSR, 0);
259 MIFDELAY;
260 if (bmread(dev, MIFCSR) & 8)
261 val |= 1 << nb;
262 bmwrite(dev, MIFCSR, 1);
263 MIFDELAY;
265 bmwrite(dev, MIFCSR, 0);
266 MIFDELAY;
267 bmwrite(dev, MIFCSR, 1);
268 MIFDELAY;
269 return val;
272 static void
273 bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
275 int b;
277 while (--nb >= 0) {
278 b = (val & (1 << nb))? 6: 4;
279 bmwrite(dev, MIFCSR, b);
280 MIFDELAY;
281 bmwrite(dev, MIFCSR, b|1);
282 MIFDELAY;
286 static unsigned int
287 bmac_mif_read(struct net_device *dev, unsigned int addr)
289 unsigned int val;
291 bmwrite(dev, MIFCSR, 4);
292 MIFDELAY;
293 bmac_mif_writebits(dev, ~0U, 32);
294 bmac_mif_writebits(dev, 6, 4);
295 bmac_mif_writebits(dev, addr, 10);
296 bmwrite(dev, MIFCSR, 2);
297 MIFDELAY;
298 bmwrite(dev, MIFCSR, 1);
299 MIFDELAY;
300 val = bmac_mif_readbits(dev, 17);
301 bmwrite(dev, MIFCSR, 4);
302 MIFDELAY;
303 return val;
306 static void
307 bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
309 bmwrite(dev, MIFCSR, 4);
310 MIFDELAY;
311 bmac_mif_writebits(dev, ~0U, 32);
312 bmac_mif_writebits(dev, 5, 4);
313 bmac_mif_writebits(dev, addr, 10);
314 bmac_mif_writebits(dev, 2, 2);
315 bmac_mif_writebits(dev, val, 16);
316 bmac_mif_writebits(dev, 3, 2);
319 static void
320 bmac_init_registers(struct net_device *dev)
322 struct bmac_data *bp = (struct bmac_data *) dev->priv;
323 volatile unsigned short regValue;
324 unsigned short *pWord16;
325 int i;
327 /* XXDEBUG(("bmac: enter init_registers\n")); */
329 bmwrite(dev, RXRST, RxResetValue);
330 bmwrite(dev, TXRST, TxResetBit);
332 i = 100;
333 do {
334 --i;
335 udelay(10000);
336 regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
337 } while ((regValue & TxResetBit) && i > 0);
339 if (!bp->is_bmac_plus) {
340 regValue = bmread(dev, XCVRIF);
341 regValue |= ClkBit | SerialMode | COLActiveLow;
342 bmwrite(dev, XCVRIF, regValue);
343 udelay(10000);
346 bmwrite(dev, RSEED, (unsigned short)0x1968);
348 regValue = bmread(dev, XIFC);
349 regValue |= TxOutputEnable;
350 bmwrite(dev, XIFC, regValue);
352 bmread(dev, PAREG);
354 /* set collision counters to 0 */
355 bmwrite(dev, NCCNT, 0);
356 bmwrite(dev, NTCNT, 0);
357 bmwrite(dev, EXCNT, 0);
358 bmwrite(dev, LTCNT, 0);
360 /* set rx counters to 0 */
361 bmwrite(dev, FRCNT, 0);
362 bmwrite(dev, LECNT, 0);
363 bmwrite(dev, AECNT, 0);
364 bmwrite(dev, FECNT, 0);
365 bmwrite(dev, RXCV, 0);
367 /* set tx fifo information */
368 bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */
370 bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */
371 bmwrite(dev, TXFIFOCSR, TxFIFOEnable );
373 /* set rx fifo information */
374 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
375 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
377 //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */
378 bmread(dev, STATUS); /* read it just to clear it */
380 /* zero out the chip Hash Filter registers */
381 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
382 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
383 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
384 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
385 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
387 pWord16 = (unsigned short *)dev->dev_addr;
388 bmwrite(dev, MADD0, *pWord16++);
389 bmwrite(dev, MADD1, *pWord16++);
390 bmwrite(dev, MADD2, *pWord16);
392 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
394 bmwrite(dev, INTDISABLE, EnableNormal);
396 return;
#if 0
/* Mask every chip interrupt source. Currently unused. */
static void
bmac_disable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}

/* Restore the normal interrupt mask. Currently unused. */
static void
bmac_enable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif
414 static void
415 bmac_start_chip(struct net_device *dev)
417 struct bmac_data *bp = (struct bmac_data *) dev->priv;
418 volatile struct dbdma_regs *rd = bp->rx_dma;
419 unsigned short oldConfig;
421 /* enable rx dma channel */
422 dbdma_continue(rd);
424 oldConfig = bmread(dev, TXCFG);
425 bmwrite(dev, TXCFG, oldConfig | TxMACEnable );
427 /* turn on rx plus any other bits already on (promiscuous possibly) */
428 oldConfig = bmread(dev, RXCFG);
429 bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
430 udelay(20000);
433 static void
434 bmac_init_phy(struct net_device *dev)
436 unsigned int addr;
437 struct bmac_data *bp = (struct bmac_data *) dev->priv;
439 printk(KERN_DEBUG "phy registers:");
440 for (addr = 0; addr < 32; ++addr) {
441 if ((addr & 7) == 0)
442 printk("\n" KERN_DEBUG);
443 printk(" %.4x", bmac_mif_read(dev, addr));
445 printk("\n");
446 if (bp->is_bmac_plus) {
447 unsigned int capable, ctrl;
449 ctrl = bmac_mif_read(dev, 0);
450 capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
451 if (bmac_mif_read(dev, 4) != capable
452 || (ctrl & 0x1000) == 0) {
453 bmac_mif_write(dev, 4, capable);
454 bmac_mif_write(dev, 0, 0x1200);
455 } else
456 bmac_mif_write(dev, 0, 0x1000);
/* Full chip bring-up: PHY first, then the MAC registers. */
static void
bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}
467 #ifdef CONFIG_PMAC_PBOOK
468 static int
469 bmac_sleep_notify(struct pmu_sleep_notifier *self, int when)
471 struct bmac_data *bp;
472 unsigned long flags;
473 unsigned short config;
474 struct net_device* dev = bmac_devs;
475 int i;
477 if (bmac_devs == 0)
478 return PBOOK_SLEEP_OK;
480 bp = (struct bmac_data *) dev->priv;
482 switch (when) {
483 case PBOOK_SLEEP_REQUEST:
484 break;
485 case PBOOK_SLEEP_REJECT:
486 break;
487 case PBOOK_SLEEP_NOW:
488 netif_device_detach(dev);
489 /* prolly should wait for dma to finish & turn off the chip */
490 spin_lock_irqsave(&bp->lock, flags);
491 if (bp->timeout_active) {
492 del_timer(&bp->tx_timeout);
493 bp->timeout_active = 0;
495 disable_irq(dev->irq);
496 disable_irq(bp->tx_dma_intr);
497 disable_irq(bp->rx_dma_intr);
498 bp->sleeping = 1;
499 spin_unlock_irqrestore(&bp->lock, flags);
500 if (bp->opened) {
501 volatile struct dbdma_regs *rd = bp->rx_dma;
502 volatile struct dbdma_regs *td = bp->tx_dma;
504 config = bmread(dev, RXCFG);
505 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
506 config = bmread(dev, TXCFG);
507 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
508 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
509 /* disable rx and tx dma */
510 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
511 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
512 /* free some skb's */
513 for (i=0; i<N_RX_RING; i++) {
514 if (bp->rx_bufs[i] != NULL) {
515 dev_kfree_skb(bp->rx_bufs[i]);
516 bp->rx_bufs[i] = NULL;
519 for (i = 0; i<N_TX_RING; i++) {
520 if (bp->tx_bufs[i] != NULL) {
521 dev_kfree_skb(bp->tx_bufs[i]);
522 bp->tx_bufs[i] = NULL;
526 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
527 break;
528 case PBOOK_WAKE:
529 /* see if this is enough */
530 if (bp->opened)
531 bmac_reset_and_enable(dev);
532 enable_irq(dev->irq);
533 enable_irq(bp->tx_dma_intr);
534 enable_irq(bp->rx_dma_intr);
535 netif_device_attach(dev);
536 break;
538 return PBOOK_SLEEP_OK;
540 #endif
542 static int bmac_set_address(struct net_device *dev, void *addr)
544 struct bmac_data *bp = (struct bmac_data *) dev->priv;
545 unsigned char *p = addr;
546 unsigned short *pWord16;
547 unsigned long flags;
548 int i;
550 XXDEBUG(("bmac: enter set_address\n"));
551 spin_lock_irqsave(&bp->lock, flags);
553 for (i = 0; i < 6; ++i) {
554 dev->dev_addr[i] = p[i];
556 /* load up the hardware address */
557 pWord16 = (unsigned short *)dev->dev_addr;
558 bmwrite(dev, MADD0, *pWord16++);
559 bmwrite(dev, MADD1, *pWord16++);
560 bmwrite(dev, MADD2, *pWord16);
562 spin_unlock_irqrestore(&bp->lock, flags);
563 XXDEBUG(("bmac: exit set_address\n"));
564 return 0;
567 static inline void bmac_set_timeout(struct net_device *dev)
569 struct bmac_data *bp = (struct bmac_data *) dev->priv;
570 unsigned long flags;
572 spin_lock_irqsave(&bp->lock, flags);
573 if (bp->timeout_active)
574 del_timer(&bp->tx_timeout);
575 bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
576 bp->tx_timeout.function = bmac_tx_timeout;
577 bp->tx_timeout.data = (unsigned long) dev;
578 add_timer(&bp->tx_timeout);
579 bp->timeout_active = 1;
580 spin_unlock_irqrestore(&bp->lock, flags);
583 static void
584 bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
586 void *vaddr;
587 unsigned long baddr;
588 unsigned long len;
590 len = skb->len;
591 vaddr = skb->data;
592 baddr = virt_to_bus(vaddr);
594 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
597 static void
598 bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
600 unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
602 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
603 virt_to_bus(addr), 0);
/* Bit-reverse one byte of an ethernet hardware address. */
static unsigned char
bitrev(unsigned char b)
{
	unsigned char out = 0;
	int bit;

	for (bit = 0; bit < 8; ++bit) {
		out = (out << 1) | (b & 1);
		b >>= 1;
	}
	return out;
}
618 static void
619 bmac_init_tx_ring(struct bmac_data *bp)
621 volatile struct dbdma_regs *td = bp->tx_dma;
623 memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));
625 bp->tx_empty = 0;
626 bp->tx_fill = 0;
627 bp->tx_fullup = 0;
629 /* put a branch at the end of the tx command list */
630 dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
631 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));
633 /* reset tx dma */
634 dbdma_reset(td);
635 out_le32(&td->wait_sel, 0x00200020);
636 out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
639 static int
640 bmac_init_rx_ring(struct bmac_data *bp)
642 volatile struct dbdma_regs *rd = bp->rx_dma;
643 int i;
644 struct sk_buff *skb;
646 /* initialize list of sk_buffs for receiving and set up recv dma */
647 memset((char *)bp->rx_cmds, 0,
648 (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
649 for (i = 0; i < N_RX_RING; i++) {
650 if ((skb = bp->rx_bufs[i]) == NULL) {
651 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
652 if (skb != NULL)
653 skb_reserve(skb, 2);
655 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
658 bp->rx_empty = 0;
659 bp->rx_fill = i;
661 /* Put a branch back to the beginning of the receive command list */
662 dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
663 (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));
665 /* start rx dma */
666 dbdma_reset(rd);
667 out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));
669 return 1;
673 static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
675 struct bmac_data *bp = (struct bmac_data *) dev->priv;
676 volatile struct dbdma_regs *td = bp->tx_dma;
677 int i;
679 /* see if there's a free slot in the tx ring */
680 /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
681 /* bp->tx_empty, bp->tx_fill)); */
682 i = bp->tx_fill + 1;
683 if (i >= N_TX_RING)
684 i = 0;
685 if (i == bp->tx_empty) {
686 netif_stop_queue(dev);
687 bp->tx_fullup = 1;
688 XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
689 return -1; /* can't take it at the moment */
692 dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);
694 bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);
696 bp->tx_bufs[bp->tx_fill] = skb;
697 bp->tx_fill = i;
699 bp->stats.tx_bytes += skb->len;
701 dbdma_continue(td);
703 return 0;
706 static int rxintcount;
708 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id, struct pt_regs *regs)
710 struct net_device *dev = (struct net_device *) dev_id;
711 struct bmac_data *bp = (struct bmac_data *) dev->priv;
712 volatile struct dbdma_regs *rd = bp->rx_dma;
713 volatile struct dbdma_cmd *cp;
714 int i, nb, stat;
715 struct sk_buff *skb;
716 unsigned int residual;
717 int last;
718 unsigned long flags;
720 spin_lock_irqsave(&bp->lock, flags);
722 if (++rxintcount < 10) {
723 XXDEBUG(("bmac_rxdma_intr\n"));
726 last = -1;
727 i = bp->rx_empty;
729 while (1) {
730 cp = &bp->rx_cmds[i];
731 stat = ld_le16(&cp->xfer_status);
732 residual = ld_le16(&cp->res_count);
733 if ((stat & ACTIVE) == 0)
734 break;
735 nb = RX_BUFLEN - residual - 2;
736 if (nb < (ETHERMINPACKET - ETHERCRC)) {
737 skb = NULL;
738 bp->stats.rx_length_errors++;
739 bp->stats.rx_errors++;
740 } else {
741 skb = bp->rx_bufs[i];
742 bp->rx_bufs[i] = NULL;
744 if (skb != NULL) {
745 nb -= ETHERCRC;
746 skb_put(skb, nb);
747 skb->dev = dev;
748 skb->protocol = eth_type_trans(skb, dev);
749 netif_rx(skb);
750 dev->last_rx = jiffies;
751 ++bp->stats.rx_packets;
752 bp->stats.rx_bytes += nb;
753 } else {
754 ++bp->stats.rx_dropped;
756 dev->last_rx = jiffies;
757 if ((skb = bp->rx_bufs[i]) == NULL) {
758 bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
759 if (skb != NULL)
760 skb_reserve(bp->rx_bufs[i], 2);
762 bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
763 st_le16(&cp->res_count, 0);
764 st_le16(&cp->xfer_status, 0);
765 last = i;
766 if (++i >= N_RX_RING) i = 0;
769 if (last != -1) {
770 bp->rx_fill = last;
771 bp->rx_empty = i;
774 dbdma_continue(rd);
775 spin_unlock_irqrestore(&bp->lock, flags);
777 if (rxintcount < 10) {
778 XXDEBUG(("bmac_rxdma_intr done\n"));
780 return IRQ_HANDLED;
783 static int txintcount;
785 static irqreturn_t bmac_txdma_intr(int irq, void *dev_id, struct pt_regs *regs)
787 struct net_device *dev = (struct net_device *) dev_id;
788 struct bmac_data *bp = (struct bmac_data *) dev->priv;
789 volatile struct dbdma_cmd *cp;
790 int stat;
791 unsigned long flags;
793 spin_lock_irqsave(&bp->lock, flags);
795 if (txintcount++ < 10) {
796 XXDEBUG(("bmac_txdma_intr\n"));
799 /* del_timer(&bp->tx_timeout); */
800 /* bp->timeout_active = 0; */
802 while (1) {
803 cp = &bp->tx_cmds[bp->tx_empty];
804 stat = ld_le16(&cp->xfer_status);
805 if (txintcount < 10) {
806 XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
808 if (!(stat & ACTIVE)) {
810 * status field might not have been filled by DBDMA
812 if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
813 break;
816 if (bp->tx_bufs[bp->tx_empty]) {
817 ++bp->stats.tx_packets;
818 dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
820 bp->tx_bufs[bp->tx_empty] = NULL;
821 bp->tx_fullup = 0;
822 netif_wake_queue(dev);
823 if (++bp->tx_empty >= N_TX_RING)
824 bp->tx_empty = 0;
825 if (bp->tx_empty == bp->tx_fill)
826 break;
829 spin_unlock_irqrestore(&bp->lock, flags);
831 if (txintcount < 10) {
832 XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
835 bmac_start(dev);
836 return IRQ_HANDLED;
839 static struct net_device_stats *bmac_stats(struct net_device *dev)
841 struct bmac_data *p = (struct bmac_data *) dev->priv;
843 return &p->stats;
846 #ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
static int reverse6[64] = {
	0x0, 0x20, 0x10, 0x30, 0x8, 0x28, 0x18, 0x38,
	0x4, 0x24, 0x14, 0x34, 0xc, 0x2c, 0x1c, 0x3c,
	0x2, 0x22, 0x12, 0x32, 0xa, 0x2a, 0x1a, 0x3a,
	0x6, 0x26, 0x16, 0x36, 0xe, 0x2e, 0x1e, 0x3e,
	0x1, 0x21, 0x11, 0x31, 0x9, 0x29, 0x19, 0x39,
	0x5, 0x25, 0x15, 0x35, 0xd, 0x2d, 0x1d, 0x3d,
	0x3, 0x23, 0x13, 0x33, 0xb, 0x2b, 0x1b, 0x3b,
	0x7, 0x27, 0x17, 0x37, 0xf, 0x2f, 0x1f, 0x3f
};
/* Guard-define so this block also stands alone; a no-op within this file. */
#ifndef ENET_CRCPOLY
#define ENET_CRCPOLY 0x04c11db7
#endif

/*
 * Fold one 16-bit word (byte-swapped, LSB-first) into a running
 * CRC-32 using the standard ethernet polynomial.
 */
static unsigned int
crc416(unsigned int curval, unsigned short nxtval)
{
	unsigned int crc = curval;
	unsigned int data = nxtval;
	int bit;

	/* Swap bytes so bits are consumed in wire order. */
	data = ((data & 0x00FF) << 8) | (data >> 8);

	for (bit = 0; bit < 16; ++bit) {
		int feedback = ((crc >> 31) & 1) ^ (data & 1);

		crc <<= 1;
		data >>= 1;
		if (feedback)
			crc ^= ENET_CRCPOLY;
	}
	return crc;
}
/* CRC an ethernet address, fed as three 16-bit words, high word first. */
static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int crc;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	crc = crc416(0xffffffff, address[0]);	/* address bits 47 - 32 */
	crc = crc416(crc, address[1]);		/* address bits 31 - 16 */
	crc = crc416(crc, address[2]);		/* address bits 15 - 0 */
	return crc;
}
901 * Add requested mcast addr to BMac's hash table filter.
905 static void
906 bmac_addhash(struct bmac_data *bp, unsigned char *addr)
908 unsigned int crc;
909 unsigned short mask;
911 if (!(*addr)) return;
912 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
913 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
914 if (bp->hash_use_count[crc]++) return; /* This bit is already set */
915 mask = crc % 16;
916 mask = (unsigned char)1 << mask;
917 bp->hash_use_count[crc/16] |= mask;
920 static void
921 bmac_removehash(struct bmac_data *bp, unsigned char *addr)
923 unsigned int crc;
924 unsigned char mask;
926 /* Now, delete the address from the filter copy, as indicated */
927 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
928 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
929 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
930 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
931 mask = crc % 16;
932 mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
933 bp->hash_table_mask[crc/16] &= mask;
937 * Sync the adapter with the software copy of the multicast mask
938 * (logical address filter).
941 static void
942 bmac_rx_off(struct net_device *dev)
944 unsigned short rx_cfg;
946 rx_cfg = bmread(dev, RXCFG);
947 rx_cfg &= ~RxMACEnable;
948 bmwrite(dev, RXCFG, rx_cfg);
949 do {
950 rx_cfg = bmread(dev, RXCFG);
951 } while (rx_cfg & RxMACEnable);
954 unsigned short
955 bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
957 unsigned short rx_cfg;
959 rx_cfg = bmread(dev, RXCFG);
960 rx_cfg |= RxMACEnable;
961 if (hash_enable) rx_cfg |= RxHashFilterEnable;
962 else rx_cfg &= ~RxHashFilterEnable;
963 if (promisc_enable) rx_cfg |= RxPromiscEnable;
964 else rx_cfg &= ~RxPromiscEnable;
965 bmwrite(dev, RXRST, RxResetValue);
966 bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */
967 bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
968 bmwrite(dev, RXCFG, rx_cfg );
969 return rx_cfg;
972 static void
973 bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
975 bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
976 bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
977 bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
978 bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
#if 0
/* Add one multicast address: update the filter and reprogram the chip. */
static void
bmac_add_multi(struct net_device *dev,
	       struct bmac_data *bp, unsigned char *addr)
{
	bmac_addhash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC) ? 1 : 0);
}

/* Remove one multicast address: update the filter and reprogram the chip. */
static void
bmac_remove_multi(struct net_device *dev,
		  struct bmac_data *bp, unsigned char *addr)
{
	bmac_removehash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC) ? 1 : 0);
}
#endif
1005 /* Set or clear the multicast filter for this adaptor.
1006 num_addrs == -1 Promiscuous mode, receive all packets
1007 num_addrs == 0 Normal mode, clear multicast list
1008 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
1009 best-effort filtering.
1011 static void bmac_set_multicast(struct net_device *dev)
1013 struct dev_mc_list *dmi;
1014 struct bmac_data *bp = (struct bmac_data *) dev->priv;
1015 int num_addrs = dev->mc_count;
1016 unsigned short rx_cfg;
1017 int i;
1019 if (bp->sleeping)
1020 return;
1022 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
1024 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
1025 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
1026 bmac_update_hash_table_mask(dev, bp);
1027 rx_cfg = bmac_rx_on(dev, 1, 0);
1028 XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
1029 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
1030 rx_cfg = bmread(dev, RXCFG);
1031 rx_cfg |= RxPromiscEnable;
1032 bmwrite(dev, RXCFG, rx_cfg);
1033 rx_cfg = bmac_rx_on(dev, 0, 1);
1034 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
1035 } else {
1036 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
1037 for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
1038 if (num_addrs == 0) {
1039 rx_cfg = bmac_rx_on(dev, 0, 0);
1040 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
1041 } else {
1042 for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
1043 bmac_addhash(bp, dmi->dmi_addr);
1044 bmac_update_hash_table_mask(dev, bp);
1045 rx_cfg = bmac_rx_on(dev, 1, 0);
1046 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
1049 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
1051 #else /* ifdef SUNHME_MULTICAST */
1053 /* The version of set_multicast below was lifted from sunhme.c */
1055 static void bmac_set_multicast(struct net_device *dev)
1057 struct dev_mc_list *dmi = dev->mc_list;
1058 char *addrs;
1059 int i;
1060 unsigned short rx_cfg;
1061 u32 crc;
1063 if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
1064 bmwrite(dev, BHASH0, 0xffff);
1065 bmwrite(dev, BHASH1, 0xffff);
1066 bmwrite(dev, BHASH2, 0xffff);
1067 bmwrite(dev, BHASH3, 0xffff);
1068 } else if(dev->flags & IFF_PROMISC) {
1069 rx_cfg = bmread(dev, RXCFG);
1070 rx_cfg |= RxPromiscEnable;
1071 bmwrite(dev, RXCFG, rx_cfg);
1072 } else {
1073 u16 hash_table[4];
1075 rx_cfg = bmread(dev, RXCFG);
1076 rx_cfg &= ~RxPromiscEnable;
1077 bmwrite(dev, RXCFG, rx_cfg);
1079 for(i = 0; i < 4; i++) hash_table[i] = 0;
1081 for(i = 0; i < dev->mc_count; i++) {
1082 addrs = dmi->dmi_addr;
1083 dmi = dmi->next;
1085 if(!(*addrs & 1))
1086 continue;
1088 crc = ether_crc_le(6, addrs);
1089 crc >>= 26;
1090 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1092 bmwrite(dev, BHASH0, hash_table[0]);
1093 bmwrite(dev, BHASH1, hash_table[1]);
1094 bmwrite(dev, BHASH2, hash_table[2]);
1095 bmwrite(dev, BHASH3, hash_table[3]);
1098 #endif /* SUNHME_MULTICAST */
1100 static int miscintcount;
1102 static irqreturn_t bmac_misc_intr(int irq, void *dev_id, struct pt_regs *regs)
1104 struct net_device *dev = (struct net_device *) dev_id;
1105 struct bmac_data *bp = (struct bmac_data *)dev->priv;
1106 unsigned int status = bmread(dev, STATUS);
1107 if (miscintcount++ < 10) {
1108 XXDEBUG(("bmac_misc_intr\n"));
1110 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
1111 /* bmac_txdma_intr_inner(irq, dev_id, regs); */
1112 /* if (status & FrameReceived) bp->stats.rx_dropped++; */
1113 if (status & RxErrorMask) bp->stats.rx_errors++;
1114 if (status & RxCRCCntExp) bp->stats.rx_crc_errors++;
1115 if (status & RxLenCntExp) bp->stats.rx_length_errors++;
1116 if (status & RxOverFlow) bp->stats.rx_over_errors++;
1117 if (status & RxAlignCntExp) bp->stats.rx_frame_errors++;
1119 /* if (status & FrameSent) bp->stats.tx_dropped++; */
1120 if (status & TxErrorMask) bp->stats.tx_errors++;
1121 if (status & TxUnderrun) bp->stats.tx_fifo_errors++;
1122 if (status & TxNormalCollExp) bp->stats.collisions++;
1123 return IRQ_HANDLED;
1127 * Procedure for reading EEPROM
1129 #define SROMAddressLength 5
1130 #define DataInOn 0x0008
1131 #define DataInOff 0x0000
1132 #define Clk 0x0002
1133 #define ChipSelect 0x0001
1134 #define SDIShiftCount 3
1135 #define SD0ShiftCount 2
1136 #define DelayValue 1000 /* number of microseconds */
1137 #define SROMStartOffset 10 /* this is in words */
1138 #define SROMReadCount 3 /* number of words to read from SROM */
1139 #define SROMAddressBits 6
1140 #define EnetAddressOffset 20
1142 static unsigned char
1143 bmac_clock_out_bit(struct net_device *dev)
1145 unsigned short data;
1146 unsigned short val;
1148 bmwrite(dev, SROMCSR, ChipSelect | Clk);
1149 udelay(DelayValue);
1151 data = bmread(dev, SROMCSR);
1152 udelay(DelayValue);
1153 val = (data >> SD0ShiftCount) & 1;
1155 bmwrite(dev, SROMCSR, ChipSelect);
1156 udelay(DelayValue);
1158 return val;
/*
 * Clock one data bit into the SROM: present the bit on the data-in pin,
 * pulse the clock high then low, with chip select held throughout.
 * Values other than 0/1 are silently ignored.
 */
static void
bmac_clock_in_bit(struct net_device *dev, unsigned int val)
{
	unsigned short data;

	if (val != 0 && val != 1) return;

	data = (val << SDIShiftCount);
	bmwrite(dev, SROMCSR, data | ChipSelect );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
	udelay(DelayValue);

	bmwrite(dev, SROMCSR, data | ChipSelect);
	udelay(DelayValue);
}
/*
 * Reset the SROM interface (all control bits low), then clock in the
 * read opcode (binary 110) so a subsequent read_srom() can send an address.
 */
static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}
/*
 * Read one 16-bit word from the SROM at the given word address.
 * Caller must have issued the read opcode via reset_and_select_srom()
 * first.  Address bits are clocked in MSB-first, then 16 data bits are
 * clocked out MSB-first; chip select is dropped afterwards.
 */
static unsigned short
read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
{
	unsigned short data, val;
	int i;

	/* send out the address we want to read from */
	for (i = 0; i < addr_len; i++)	{
		val = addr >> (addr_len-i-1);	/* MSB first */
		bmac_clock_in_bit(dev, val & 1);
	}

	/* Now read in the 16-bit data */
	data = 0;
	for (i = 0; i < 16; i++)	{
		val = bmac_clock_out_bit(dev);
		data <<= 1;
		data |= val;
	}
	bmwrite(dev, SROMCSR, 0);	/* deselect the SROM */

	return data;
}
/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */
static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	/* byte-swap the stored checksum word */
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	/* NOTE(review): storedCS is computed but never compared against a
	 * calculated checksum, so this always reports success - presumably
	 * because of the vendor differences noted above.  Left as-is. */
	return 0;
}
1234 static void
1235 bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1237 int i;
1238 unsigned short data;
1240 for (i = 0; i < 6; i++)
1242 reset_and_select_srom(dev);
1243 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1244 ea[2*i] = bitrev(data & 0x0ff);
1245 ea[2*i+1] = bitrev((data >> 8) & 0x0ff);
/*
 * Full reset/re-initialisation of the chip and both DMA rings, then
 * restart it with normal interrupts enabled.  Runs under bp->lock with
 * interrupts disabled.  Used at open time and after sleep/resume.
 */
static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = dev->priv;
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet.  So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		/* minimum-size frame addressed to ourselves, zero payload */
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, 6);
		memcpy(data+6, dev->dev_addr, 6);
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
/*
 * Module entry point: probe all "bmac" nodes and all "bmac+"-compatible
 * network nodes in the OF device tree.  If at least one device was found,
 * create the /proc/net/bmac entry (and the PMU sleep notifier on
 * powerbooks).  The MOD_INC/MOD_DEC pair pins the module while probing
 * (legacy module refcount style).
 */
static int __init bmac_probe(void)
{
	struct device_node *bmac;

	MOD_INC_USE_COUNT;

	for (bmac = find_devices("bmac"); bmac != 0; bmac = bmac->next)
		bmac_probe1(bmac, 0);
	for (bmac = find_compatible_devices("network", "bmac+"); bmac != 0;
	     bmac = bmac->next)
		bmac_probe1(bmac, 1);

	if (bmac_devs != 0) {
		proc_net_create ("bmac", 0, bmac_proc_info);
#ifdef CONFIG_PMAC_PBOOK
		pmu_register_sleep_notifier(&bmac_sleep_notifier);
#endif
	}

	MOD_DEC_USE_COUNT;

	return bmac_devs? 0: -ENODEV;
}
1304 static void __init bmac_probe1(struct device_node *bmac, int is_bmac_plus)
1306 int j, rev, ret;
1307 struct bmac_data *bp;
1308 unsigned char *addr;
1309 struct net_device *dev;
1311 if (bmac->n_addrs != 3 || bmac->n_intrs != 3) {
1312 printk(KERN_ERR "can't use BMAC %s: need 3 addrs and 3 intrs\n",
1313 bmac->full_name);
1314 return;
1316 addr = get_property(bmac, "mac-address", NULL);
1317 if (addr == NULL) {
1318 addr = get_property(bmac, "local-mac-address", NULL);
1319 if (addr == NULL) {
1320 printk(KERN_ERR "Can't get mac-address for BMAC %s\n",
1321 bmac->full_name);
1322 return;
1326 if (bmac_emergency_rxbuf == NULL) {
1327 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
1328 if (bmac_emergency_rxbuf == NULL) {
1329 printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
1330 return;
1334 dev = alloc_etherdev(PRIV_BYTES);
1335 if (!dev) {
1336 printk(KERN_ERR "alloc_etherdev failed, out of memory for BMAC %s\n",
1337 bmac->full_name);
1338 return;
1341 bp = (struct bmac_data *) dev->priv;
1342 SET_MODULE_OWNER(dev);
1343 bp->node = bmac;
1344 spin_lock_init(&bp->lock);
1346 if (!request_OF_resource(bmac, 0, " (bmac)")) {
1347 printk(KERN_ERR "BMAC: can't request IO resource !\n");
1348 goto out1;
1350 if (!request_OF_resource(bmac, 1, " (bmac tx dma)")) {
1351 printk(KERN_ERR "BMAC: can't request TX DMA resource !\n");
1352 goto out2;
1354 if (!request_OF_resource(bmac, 2, " (bmac rx dma)")) {
1355 printk(KERN_ERR "BMAC: can't request RX DMA resource !\n");
1356 goto out3;
1359 dev->base_addr = (unsigned long)
1360 ioremap(bmac->addrs[0].address, bmac->addrs[0].size);
1361 if (!dev->base_addr)
1362 goto out4;
1364 dev->irq = bmac->intrs[0].line;
1366 bmac_enable_and_reset_chip(dev);
1367 bmwrite(dev, INTDISABLE, DisableAll);
1369 printk(KERN_INFO "%s: BMAC%s at", dev->name, (is_bmac_plus? "+": ""));
1370 rev = addr[0] == 0 && addr[1] == 0xA0;
1371 for (j = 0; j < 6; ++j) {
1372 dev->dev_addr[j] = rev? bitrev(addr[j]): addr[j];
1373 printk("%c%.2x", (j? ':': ' '), dev->dev_addr[j]);
1375 XXDEBUG((", base_addr=%#0lx", dev->base_addr));
1376 printk("\n");
1378 /* Enable chip without interrupts for now */
1379 bmac_enable_and_reset_chip(dev);
1380 bmwrite(dev, INTDISABLE, DisableAll);
1382 dev->open = bmac_open;
1383 dev->stop = bmac_close;
1384 dev->hard_start_xmit = bmac_output;
1385 dev->get_stats = bmac_stats;
1386 dev->set_multicast_list = bmac_set_multicast;
1387 dev->set_mac_address = bmac_set_address;
1389 bmac_get_station_address(dev, addr);
1390 if (bmac_verify_checksum(dev) != 0)
1391 goto err_out_iounmap;
1393 bp->is_bmac_plus = is_bmac_plus;
1394 bp->tx_dma = (volatile struct dbdma_regs *)
1395 ioremap(bmac->addrs[1].address, bmac->addrs[1].size);
1396 if (!bp->tx_dma)
1397 goto err_out_iounmap;
1398 bp->tx_dma_intr = bmac->intrs[1].line;
1399 bp->rx_dma = (volatile struct dbdma_regs *)
1400 ioremap(bmac->addrs[2].address, bmac->addrs[2].size);
1401 if (!bp->rx_dma)
1402 goto err_out_iounmap_tx;
1403 bp->rx_dma_intr = bmac->intrs[2].line;
1405 bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
1406 bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;
1408 bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
1409 skb_queue_head_init(bp->queue);
1411 init_timer(&bp->tx_timeout);
1413 ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
1414 if (ret) {
1415 printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
1416 goto err_out_iounmap_rx;
1418 ret = request_irq(bmac->intrs[1].line, bmac_txdma_intr, 0, "BMAC-txdma", dev);
1419 if (ret) {
1420 printk(KERN_ERR "BMAC: can't get irq %d\n", bmac->intrs[1].line);
1421 goto err_out_irq0;
1423 ret = request_irq(bmac->intrs[2].line, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
1424 if (ret) {
1425 printk(KERN_ERR "BMAC: can't get irq %d\n", bmac->intrs[2].line);
1426 goto err_out_irq1;
1429 /* Mask chip interrupts and disable chip, will be
1430 * re-enabled on open()
1432 disable_irq(dev->irq);
1433 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
1435 if (register_netdev(dev) != 0) {
1436 printk(KERN_ERR "registration failed for BMAC %s\n",
1437 bmac->full_name);
1438 goto err_out_irq2;
1441 bp->next_bmac = bmac_devs;
1442 bmac_devs = dev;
1443 return;
1445 err_out_irq2:
1446 free_irq(bmac->intrs[2].line, dev);
1447 err_out_irq1:
1448 free_irq(bmac->intrs[1].line, dev);
1449 err_out_irq0:
1450 free_irq(dev->irq, dev);
1451 err_out_iounmap_rx:
1452 iounmap((void *)bp->rx_dma);
1453 err_out_iounmap_tx:
1454 iounmap((void *)bp->tx_dma);
1455 err_out_iounmap:
1456 iounmap((void *)dev->base_addr);
1457 out4:
1458 release_OF_resource(bp->node, 2);
1459 out3:
1460 release_OF_resource(bp->node, 1);
1461 out2:
1462 release_OF_resource(bp->node, 0);
1463 out1:
1464 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);
1465 kfree(dev);
/*
 * net_device open callback: reset/re-enable the chip and unmask its
 * interrupt line.  bp->opened is set first so interrupt handlers know
 * the device is live.
 */
static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	dev->flags |= IFF_RUNNING;
	return 0;
}
/*
 * net_device stop callback: disable rx/tx in the MAC, mask all chip
 * interrupts, stop both DBDMA channels, free every queued skb in both
 * rings, then power the cell down via pmac_call_feature().
 */
static int bmac_close(struct net_device *dev)
{
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *rd = bp->rx_dma;
	volatile struct dbdma_regs *td = bp->tx_dma;
	unsigned short config;
	int i;

	bp->sleeping = 1;	/* stops bmac_start() from queueing more tx */
	dev->flags &= ~(IFF_UP | IFF_RUNNING);

	/* disable rx and tx */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));

	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));

	bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
	st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */

	/* free some skb's */
	XXDEBUG(("bmac: free rx bufs\n"));
	for (i=0; i<N_RX_RING; i++) {
		if (bp->rx_bufs[i] != NULL) {
			dev_kfree_skb(bp->rx_bufs[i]);
			bp->rx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: free tx bufs\n"));
	for (i = 0; i<N_TX_RING; i++) {
		if (bp->tx_bufs[i] != NULL) {
			dev_kfree_skb(bp->tx_bufs[i]);
			bp->tx_bufs[i] = NULL;
		}
	}
	XXDEBUG(("bmac: all bufs freed\n"));

	bp->opened = 0;
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, bp->node, 0, 0);

	return 0;
}
/*
 * Drain bp->queue into the tx DMA ring: keep transmitting queued skbs
 * until the ring is full (next fill slot would hit tx_empty) or the
 * queue is empty.  No-op while the chip is sleeping.  Runs under
 * bp->lock with interrupts disabled.
 */
static void
bmac_start(struct net_device *dev)
{
	struct bmac_data *bp = dev->priv;
	int i;
	struct sk_buff *skb;
	unsigned long flags;

	if (bp->sleeping)
		return;

	spin_lock_irqsave(&bp->lock, flags);
	while (1) {
		i = bp->tx_fill + 1;
		if (i >= N_TX_RING)
			i = 0;
		if (i == bp->tx_empty)
			break;	/* ring full */
		skb = skb_dequeue(bp->queue);
		if (skb == NULL)
			break;	/* nothing left to send */
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
/*
 * hard_start_xmit callback: queue the skb on the driver's software
 * queue and kick bmac_start() to push it into the DMA ring.  Always
 * accepts the packet (returns 0).
 */
static int
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = dev->priv;
	skb_queue_tail(bp->queue, skb);
	bmac_start(dev);
	return 0;
}
/*
 * Transmit watchdog (timer callback, data is the net_device pointer).
 * Recovery sequence: quiesce the MAC and tx DMA, reset the chip,
 * restart rx DMA where it left off, drop the stuck tx frame, restart
 * the tx ring from the next command, then re-enable rx/tx.  Statement
 * order mirrors the hardware's reset requirements - do not reorder.
 */
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = (struct bmac_data *) dev->priv;
	volatile struct dbdma_regs *td = bp->tx_dma;
	volatile struct dbdma_regs *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
	/* bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
	/* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
	/*	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
	/*	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma from the command it was on when we stopped it */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++bp->stats.tx_errors;
	if (i != bp->tx_fill) {
		/* drop the frame that timed out and advance past it */
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		/* ring not empty: restart tx DMA at the next pending command */
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/* bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);
}
#if 0
/* Debug helper (compiled out): print `count` DBDMA commands as four
 * little-endian words each. */
static void dump_dbdma(volatile struct dbdma_cmd *cp, int count)
{
	int n;

	for (n = 0; n < count; ++n) {
		int *words = (int *)(cp + n);

		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       ld_le32(words + 0),
		       ld_le32(words + 1),
		       ld_le32(words + 2),
		       ld_le32(words + 3));
	}
}
#endif
/*
 * /proc/net/bmac read handler (legacy get_info-style procfs interface).
 * Dumps every register in reg_entries[] for the first probed device.
 * The pos/begin/start bookkeeping implements the classic windowed-read
 * protocol: only the [offset, offset+length) slice of the virtual
 * output is returned to the caller.
 */
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t pos   = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL)
		return (-ENOSYS);

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i<N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		if (pos < offset) {
			len = 0;	/* everything so far is before the window */
			begin = pos;
		}

		if (pos > offset+length) break;	/* window filled */
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
1688 MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
1689 MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
1690 MODULE_LICENSE("GPL");
1692 static void __exit bmac_cleanup (void)
1694 struct bmac_data *bp;
1695 struct net_device *dev;
1697 if (bmac_emergency_rxbuf != NULL) {
1698 kfree(bmac_emergency_rxbuf);
1699 bmac_emergency_rxbuf = NULL;
1702 if (bmac_devs == 0)
1703 return;
1704 #ifdef CONFIG_PMAC_PBOOK
1705 pmu_unregister_sleep_notifier(&bmac_sleep_notifier);
1706 #endif
1707 proc_net_remove("bmac");
1709 do {
1710 dev = bmac_devs;
1711 bp = (struct bmac_data *) dev->priv;
1712 bmac_devs = bp->next_bmac;
1714 unregister_netdev(dev);
1716 release_OF_resource(bp->node, 0);
1717 release_OF_resource(bp->node, 1);
1718 release_OF_resource(bp->node, 2);
1719 free_irq(dev->irq, dev);
1720 free_irq(bp->tx_dma_intr, dev);
1721 free_irq(bp->rx_dma_intr, dev);
1723 kfree(dev);
1724 } while (bmac_devs != NULL);
1727 module_init(bmac_probe);
1728 module_exit(bmac_cleanup);