1 /* b44.c: Broadcom 4400 device driver.
3 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
4 * Fixed by Pekka Pietikainen (pp@ee.oulu.fi)
5 * Copyright (C) 2006 Broadcom Corporation.
7 * Distribute under GPL.
8 */
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/types.h>
14 #include <linux/netdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/mii.h>
17 #include <linux/if_ether.h>
18 #include <linux/etherdevice.h>
19 #include <linux/pci.h>
20 #include <linux/delay.h>
21 #include <linux/init.h>
22 #include <linux/dma-mapping.h>
24 #include <asm/uaccess.h>
25 #include <asm/io.h>
26 #include <asm/irq.h>
28 #include "b44.h"
30 #define DRV_MODULE_NAME "b44"
31 #define PFX DRV_MODULE_NAME ": "
32 #define DRV_MODULE_VERSION "1.01"
33 #define DRV_MODULE_RELDATE "Jun 16, 2006"
35 #define B44_DEF_MSG_ENABLE \
36 (NETIF_MSG_DRV | \
37 NETIF_MSG_PROBE | \
38 NETIF_MSG_LINK | \
39 NETIF_MSG_TIMER | \
40 NETIF_MSG_IFDOWN | \
41 NETIF_MSG_IFUP | \
42 NETIF_MSG_RX_ERR | \
43 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
48 #define B44_TX_TIMEOUT (5 * HZ)
50 /* hardware minimum and maximum for a single frame's data payload */
51 #define B44_MIN_MTU 60
52 #define B44_MAX_MTU 1500
54 #define B44_RX_RING_SIZE 512
55 #define B44_DEF_RX_RING_PENDING 200
56 #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
57 B44_RX_RING_SIZE)
58 #define B44_TX_RING_SIZE 512
59 #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
60 #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
61 B44_TX_RING_SIZE)
63 #define TX_RING_GAP(BP) \
64 (B44_TX_RING_SIZE - (BP)->tx_pending)
65 #define TX_BUFFS_AVAIL(BP) \
66 (((BP)->tx_cons <= (BP)->tx_prod) ? \
67 (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
68 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
69 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
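/* Worked example of the ring arithmetic above: with tx_pending == 511 the
 * ring keeps a one-slot gap (TX_RING_GAP == 1).  If tx_cons == 10 and
 * tx_prod == 14, TX_BUFFS_AVAIL == 10 + 511 - 14 == 507 free descriptors;
 * after tx_prod wraps (tx_cons == 20, tx_prod == 5) the second branch
 * gives 20 - 5 - 1 == 14.
 */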
71 #define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64)
72 #define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8)
74 /* minimum number of free TX descriptors required to wake up TX process */
75 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
77 /* b44 internal pattern match filter info */
78 #define B44_PATTERN_BASE 0x400
79 #define B44_PATTERN_SIZE 0x80
80 #define B44_PMASK_BASE 0x600
81 #define B44_PMASK_SIZE 0x10
82 #define B44_MAX_PATTERNS 16
83 #define B44_ETHIPV6UDP_HLEN 62
84 #define B44_ETHIPV4UDP_HLEN 42
86 static char version[] __devinitdata =
87 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
89 MODULE_AUTHOR("Florian Schirmer, Pekka Pietikainen, David S. Miller");
90 MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
91 MODULE_LICENSE("GPL");
92 MODULE_VERSION(DRV_MODULE_VERSION);
94 static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
95 module_param(b44_debug, int, 0);
96 MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
98 static struct pci_device_id b44_pci_tbl[] = {
99 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
100 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
101 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0,
102 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
103 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1,
104 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
105 { } /* terminate list with empty entry */
108 MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
110 static void b44_halt(struct b44 *);
111 static void b44_init_rings(struct b44 *);
113 #define B44_FULL_RESET 1
114 #define B44_FULL_RESET_SKIP_PHY 2
115 #define B44_PARTIAL_RESET 3
117 static void b44_init_hw(struct b44 *, int);
119 static int dma_desc_align_mask;
120 static int dma_desc_sync_size;
122 static const char b44_gstrings[][ETH_GSTRING_LEN] = {
123 #define _B44(x...) # x,
124 B44_STAT_REG_DECLARE
125 #undef _B44
128 static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
129 dma_addr_t dma_base,
130 unsigned long offset,
131 enum dma_data_direction dir)
133 dma_sync_single_range_for_device(&pdev->dev, dma_base,
134 offset & dma_desc_align_mask,
135 dma_desc_sync_size, dir);
138 static inline void b44_sync_dma_desc_for_cpu(struct pci_dev *pdev,
139 dma_addr_t dma_base,
140 unsigned long offset,
141 enum dma_data_direction dir)
143 dma_sync_single_range_for_cpu(&pdev->dev, dma_base,
144 offset & dma_desc_align_mask,
145 dma_desc_sync_size, dir);
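/* The two helpers above are only needed when a descriptor ring lives in
 * kmalloc()ed memory with a streaming DMA mapping (the *_RING_HACK flags);
 * they sync just the cache-line-aligned window holding one descriptor,
 * using dma_desc_align_mask and dma_desc_sync_size set up in b44_init().
 */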
148 static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
150 return readl(bp->regs + reg);
153 static inline void bw32(const struct b44 *bp,
154 unsigned long reg, unsigned long val)
156 writel(val, bp->regs + reg);
159 static int b44_wait_bit(struct b44 *bp, unsigned long reg,
160 u32 bit, unsigned long timeout, const int clear)
162 unsigned long i;
164 for (i = 0; i < timeout; i++) {
165 u32 val = br32(bp, reg);
167 if (clear && !(val & bit))
168 break;
169 if (!clear && (val & bit))
170 break;
171 udelay(10);
173 if (i == timeout) {
174 printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
175 "%lx to %s.\n",
176 bp->dev->name,
177 bit, reg,
178 (clear ? "clear" : "set"));
179 return -ENODEV;
181 return 0;
/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */
191 #define SB_PCI_DMA 0x40000000 /* Client Mode PCI memory access space (1 GB) */
192 #define BCM4400_PCI_CORE_ADDR 0x18002000 /* Address of PCI core on BCM4400 cards */
194 static u32 ssb_get_core_rev(struct b44 *bp)
196 return (br32(bp, B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
199 static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
201 u32 bar_orig, pci_rev, val;
203 pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
204 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
205 pci_rev = ssb_get_core_rev(bp);
207 val = br32(bp, B44_SBINTVEC);
208 val |= cores;
209 bw32(bp, B44_SBINTVEC, val);
211 val = br32(bp, SSB_PCI_TRANS_2);
212 val |= SSB_PCI_PREF | SSB_PCI_BURST;
213 bw32(bp, SSB_PCI_TRANS_2, val);
215 pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);
217 return pci_rev;
220 static void ssb_core_disable(struct b44 *bp)
222 if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
223 return;
225 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
226 b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
227 b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
228 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
229 SBTMSLOW_REJECT | SBTMSLOW_RESET));
230 br32(bp, B44_SBTMSLOW);
231 udelay(1);
232 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
233 br32(bp, B44_SBTMSLOW);
234 udelay(1);
237 static void ssb_core_reset(struct b44 *bp)
239 u32 val;
241 ssb_core_disable(bp);
242 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
243 br32(bp, B44_SBTMSLOW);
244 udelay(1);
246 /* Clear SERR if set, this is a hw bug workaround. */
247 if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
248 bw32(bp, B44_SBTMSHIGH, 0);
250 val = br32(bp, B44_SBIMSTATE);
251 if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
252 bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));
254 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
255 br32(bp, B44_SBTMSLOW);
256 udelay(1);
258 bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
259 br32(bp, B44_SBTMSLOW);
260 udelay(1);
263 static int ssb_core_unit(struct b44 *bp)
265 #if 0
266 u32 val = br32(bp, B44_SBADMATCH0);
267 u32 base;
269 type = val & SBADMATCH0_TYPE_MASK;
270 switch (type) {
271 case 0:
272 base = val & SBADMATCH0_BS0_MASK;
273 break;
275 case 1:
276 base = val & SBADMATCH0_BS1_MASK;
277 break;
279 case 2:
280 default:
281 base = val & SBADMATCH0_BS2_MASK;
282 break;
284 #endif
285 return 0;
288 static int ssb_is_core_up(struct b44 *bp)
290 return ((br32(bp, B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
291 == SBTMSLOW_CLOCK);
294 static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
296 u32 val;
298 val = ((u32) data[2]) << 24;
299 val |= ((u32) data[3]) << 16;
300 val |= ((u32) data[4]) << 8;
301 val |= ((u32) data[5]) << 0;
302 bw32(bp, B44_CAM_DATA_LO, val);
303 val = (CAM_DATA_HI_VALID |
304 (((u32) data[0]) << 8) |
305 (((u32) data[1]) << 0));
306 bw32(bp, B44_CAM_DATA_HI, val);
307 bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
308 (index << CAM_CTRL_INDEX_SHIFT)));
309 b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
312 static inline void __b44_disable_ints(struct b44 *bp)
314 bw32(bp, B44_IMASK, 0);
317 static void b44_disable_ints(struct b44 *bp)
319 __b44_disable_ints(bp);
321 /* Flush posted writes. */
322 br32(bp, B44_IMASK);
325 static void b44_enable_ints(struct b44 *bp)
327 bw32(bp, B44_IMASK, bp->imask);
330 static int b44_readphy(struct b44 *bp, int reg, u32 *val)
332 int err;
334 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
335 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
336 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
337 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
338 (reg << MDIO_DATA_RA_SHIFT) |
339 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
340 err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
341 *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
343 return err;
346 static int b44_writephy(struct b44 *bp, int reg, u32 val)
348 bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
349 bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
350 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
351 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
352 (reg << MDIO_DATA_RA_SHIFT) |
353 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
354 (val & MDIO_DATA_DATA)));
355 return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
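/* Both MDIO accessors above use the same handshake: acknowledge any pending
 * EMAC_INT_MII event, write one complete MDIO frame (start bits, opcode,
 * PHY address, register, turnaround and data) to B44_MDIO_DATA, then poll
 * EMAC_INT_MII to learn when the transaction has completed.
 */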
358 /* miilib interface */
/* FIXME FIXME: phy_id is ignored, bp->phy_addr use is unconditional
 * due to code existing before miilib use was added to this driver.
 * Someone should remove this artificial driver limitation in
 * b44_{read,write}phy.  bp->phy_addr itself is fine (and needed).
 */
364 static int b44_mii_read(struct net_device *dev, int phy_id, int location)
366 u32 val;
367 struct b44 *bp = netdev_priv(dev);
368 int rc = b44_readphy(bp, location, &val);
369 if (rc)
370 return 0xffffffff;
371 return val;
374 static void b44_mii_write(struct net_device *dev, int phy_id, int location,
375 int val)
377 struct b44 *bp = netdev_priv(dev);
378 b44_writephy(bp, location, val);
381 static int b44_phy_reset(struct b44 *bp)
383 u32 val;
384 int err;
386 err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
387 if (err)
388 return err;
389 udelay(100);
390 err = b44_readphy(bp, MII_BMCR, &val);
391 if (!err) {
392 if (val & BMCR_RESET) {
393 printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
394 bp->dev->name);
395 err = -ENODEV;
399 return 0;
402 static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
404 u32 val;
406 bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
407 bp->flags |= pause_flags;
409 val = br32(bp, B44_RXCONFIG);
410 if (pause_flags & B44_FLAG_RX_PAUSE)
411 val |= RXCONFIG_FLOW;
412 else
413 val &= ~RXCONFIG_FLOW;
414 bw32(bp, B44_RXCONFIG, val);
416 val = br32(bp, B44_MAC_FLOW);
417 if (pause_flags & B44_FLAG_TX_PAUSE)
418 val |= (MAC_FLOW_PAUSE_ENAB |
419 (0xc0 & MAC_FLOW_RX_HI_WATER));
420 else
421 val &= ~MAC_FLOW_PAUSE_ENAB;
422 bw32(bp, B44_MAC_FLOW, val);
425 static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
427 u32 pause_enab = 0;
/* The driver supports only rx pause by default because
   the b44 mac tx pause mechanism generates excessive
   pause frames.
   Use ethtool to turn on b44 tx pause if necessary.
 */
434 if ((local & ADVERTISE_PAUSE_CAP) &&
435 (local & ADVERTISE_PAUSE_ASYM)){
436 if ((remote & LPA_PAUSE_ASYM) &&
437 !(remote & LPA_PAUSE_CAP))
438 pause_enab |= B44_FLAG_RX_PAUSE;
441 __b44_set_flow_ctrl(bp, pause_enab);
444 static int b44_setup_phy(struct b44 *bp)
446 u32 val;
447 int err;
449 if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
450 goto out;
451 if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
452 val & MII_ALEDCTRL_ALLMSK)) != 0)
453 goto out;
454 if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
455 goto out;
456 if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
457 val | MII_TLEDCTRL_ENABLE)) != 0)
458 goto out;
460 if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
461 u32 adv = ADVERTISE_CSMA;
463 if (bp->flags & B44_FLAG_ADV_10HALF)
464 adv |= ADVERTISE_10HALF;
465 if (bp->flags & B44_FLAG_ADV_10FULL)
466 adv |= ADVERTISE_10FULL;
467 if (bp->flags & B44_FLAG_ADV_100HALF)
468 adv |= ADVERTISE_100HALF;
469 if (bp->flags & B44_FLAG_ADV_100FULL)
470 adv |= ADVERTISE_100FULL;
472 if (bp->flags & B44_FLAG_PAUSE_AUTO)
473 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
475 if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
476 goto out;
477 if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
478 BMCR_ANRESTART))) != 0)
479 goto out;
480 } else {
481 u32 bmcr;
483 if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
484 goto out;
485 bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
486 if (bp->flags & B44_FLAG_100_BASE_T)
487 bmcr |= BMCR_SPEED100;
488 if (bp->flags & B44_FLAG_FULL_DUPLEX)
489 bmcr |= BMCR_FULLDPLX;
490 if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
491 goto out;
/* Since we will not be negotiating there is no safe way
 * to determine if the link partner supports flow control
 * or not.  So just disable it completely in this case.
 */
497 b44_set_flow_ctrl(bp, 0, 0);
500 out:
501 return err;
504 static void b44_stats_update(struct b44 *bp)
506 unsigned long reg;
507 u32 *val;
509 val = &bp->hw_stats.tx_good_octets;
510 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
511 *val++ += br32(bp, reg);
514 /* Pad */
515 reg += 8*4UL;
517 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
518 *val++ += br32(bp, reg);
522 static void b44_link_report(struct b44 *bp)
524 if (!netif_carrier_ok(bp->dev)) {
525 printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
526 } else {
527 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
528 bp->dev->name,
529 (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
530 (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
532 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
533 "%s for RX.\n",
534 bp->dev->name,
535 (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
536 (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
540 static void b44_check_phy(struct b44 *bp)
542 u32 bmsr, aux;
544 if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
545 !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
546 (bmsr != 0xffff)) {
547 if (aux & MII_AUXCTRL_SPEED)
548 bp->flags |= B44_FLAG_100_BASE_T;
549 else
550 bp->flags &= ~B44_FLAG_100_BASE_T;
551 if (aux & MII_AUXCTRL_DUPLEX)
552 bp->flags |= B44_FLAG_FULL_DUPLEX;
553 else
554 bp->flags &= ~B44_FLAG_FULL_DUPLEX;
556 if (!netif_carrier_ok(bp->dev) &&
557 (bmsr & BMSR_LSTATUS)) {
558 u32 val = br32(bp, B44_TX_CTRL);
559 u32 local_adv, remote_adv;
561 if (bp->flags & B44_FLAG_FULL_DUPLEX)
562 val |= TX_CTRL_DUPLEX;
563 else
564 val &= ~TX_CTRL_DUPLEX;
565 bw32(bp, B44_TX_CTRL, val);
567 if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
568 !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
569 !b44_readphy(bp, MII_LPA, &remote_adv))
570 b44_set_flow_ctrl(bp, local_adv, remote_adv);
572 /* Link now up */
573 netif_carrier_on(bp->dev);
574 b44_link_report(bp);
575 } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
576 /* Link now down */
577 netif_carrier_off(bp->dev);
578 b44_link_report(bp);
581 if (bmsr & BMSR_RFAULT)
582 printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
583 bp->dev->name);
584 if (bmsr & BMSR_JCD)
585 printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
586 bp->dev->name);
590 static void b44_timer(unsigned long __opaque)
592 struct b44 *bp = (struct b44 *) __opaque;
594 spin_lock_irq(&bp->lock);
596 b44_check_phy(bp);
598 b44_stats_update(bp);
600 spin_unlock_irq(&bp->lock);
602 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
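/* Reclaim completed TX descriptors: walk from the driver's consumer index
 * up to the hardware consumer index read from B44_DMATX_STAT, unmapping and
 * freeing each skb, then wake the queue once enough descriptors are free.
 */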
605 static void b44_tx(struct b44 *bp)
607 u32 cur, cons;
609 cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
610 cur /= sizeof(struct dma_desc);
612 /* XXX needs updating when NETIF_F_SG is supported */
613 for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
614 struct ring_info *rp = &bp->tx_buffers[cons];
615 struct sk_buff *skb = rp->skb;
617 BUG_ON(skb == NULL);
619 pci_unmap_single(bp->pdev,
620 pci_unmap_addr(rp, mapping),
621 skb->len,
622 PCI_DMA_TODEVICE);
623 rp->skb = NULL;
624 dev_kfree_skb_irq(skb);
627 bp->tx_cons = cons;
628 if (netif_queue_stopped(bp->dev) &&
629 TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
630 netif_wake_queue(bp->dev);
632 bw32(bp, B44_GPTIMER, 0);
/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
640 static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
642 struct dma_desc *dp;
643 struct ring_info *src_map, *map;
644 struct rx_header *rh;
645 struct sk_buff *skb;
646 dma_addr_t mapping;
647 int dest_idx;
648 u32 ctrl;
650 src_map = NULL;
651 if (src_idx >= 0)
652 src_map = &bp->rx_buffers[src_idx];
653 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
654 map = &bp->rx_buffers[dest_idx];
655 skb = dev_alloc_skb(RX_PKT_BUF_SZ);
656 if (skb == NULL)
657 return -ENOMEM;
659 mapping = pci_map_single(bp->pdev, skb->data,
660 RX_PKT_BUF_SZ,
661 PCI_DMA_FROMDEVICE);
663 /* Hardware bug work-around, the chip is unable to do PCI DMA
664 to/from anything above 1GB :-( */
665 if (dma_mapping_error(mapping) ||
666 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
667 /* Sigh... */
668 if (!dma_mapping_error(mapping))
669 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
670 dev_kfree_skb_any(skb);
671 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA);
672 if (skb == NULL)
673 return -ENOMEM;
674 mapping = pci_map_single(bp->pdev, skb->data,
675 RX_PKT_BUF_SZ,
676 PCI_DMA_FROMDEVICE);
677 if (dma_mapping_error(mapping) ||
678 mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
679 if (!dma_mapping_error(mapping))
680 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
681 dev_kfree_skb_any(skb);
682 return -ENOMEM;
686 skb->dev = bp->dev;
687 skb_reserve(skb, bp->rx_offset);
689 rh = (struct rx_header *)
690 (skb->data - bp->rx_offset);
691 rh->len = 0;
692 rh->flags = 0;
694 map->skb = skb;
695 pci_unmap_addr_set(map, mapping, mapping);
697 if (src_map != NULL)
698 src_map->skb = NULL;
700 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
701 if (dest_idx == (B44_RX_RING_SIZE - 1))
702 ctrl |= DESC_CTRL_EOT;
704 dp = &bp->rx_ring[dest_idx];
705 dp->ctrl = cpu_to_le32(ctrl);
706 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);
708 if (bp->flags & B44_FLAG_RX_RING_HACK)
709 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
710 dest_idx * sizeof(dp),
711 DMA_BIDIRECTIONAL);
713 return RX_PKT_BUF_SZ;
716 static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
718 struct dma_desc *src_desc, *dest_desc;
719 struct ring_info *src_map, *dest_map;
720 struct rx_header *rh;
721 int dest_idx;
722 __le32 ctrl;
724 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
725 dest_desc = &bp->rx_ring[dest_idx];
726 dest_map = &bp->rx_buffers[dest_idx];
727 src_desc = &bp->rx_ring[src_idx];
728 src_map = &bp->rx_buffers[src_idx];
730 dest_map->skb = src_map->skb;
731 rh = (struct rx_header *) src_map->skb->data;
732 rh->len = 0;
733 rh->flags = 0;
734 pci_unmap_addr_set(dest_map, mapping,
735 pci_unmap_addr(src_map, mapping));
737 if (bp->flags & B44_FLAG_RX_RING_HACK)
738 b44_sync_dma_desc_for_cpu(bp->pdev, bp->rx_ring_dma,
739 src_idx * sizeof(src_desc),
740 DMA_BIDIRECTIONAL);
742 ctrl = src_desc->ctrl;
743 if (dest_idx == (B44_RX_RING_SIZE - 1))
744 ctrl |= cpu_to_le32(DESC_CTRL_EOT);
745 else
746 ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
748 dest_desc->ctrl = ctrl;
749 dest_desc->addr = src_desc->addr;
751 src_map->skb = NULL;
753 if (bp->flags & B44_FLAG_RX_RING_HACK)
754 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
755 dest_idx * sizeof(dest_desc),
756 DMA_BIDIRECTIONAL);
758 pci_dma_sync_single_for_device(bp->pdev, le32_to_cpu(src_desc->addr),
759 RX_PKT_BUF_SZ,
760 PCI_DMA_FROMDEVICE);
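/* Receive path, called from b44_poll(): consume completed RX descriptors up
 * to the hardware producer index in B44_DMARX_STAT.  Large frames are passed
 * up directly and their ring slot refilled via b44_alloc_rx_skb(); frames at
 * or below RX_COPY_THRESHOLD are copied into a fresh skb so the original
 * buffer can be recycled in place.
 */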
763 static int b44_rx(struct b44 *bp, int budget)
765 int received;
766 u32 cons, prod;
768 received = 0;
769 prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
770 prod /= sizeof(struct dma_desc);
771 cons = bp->rx_cons;
773 while (cons != prod && budget > 0) {
774 struct ring_info *rp = &bp->rx_buffers[cons];
775 struct sk_buff *skb = rp->skb;
776 dma_addr_t map = pci_unmap_addr(rp, mapping);
777 struct rx_header *rh;
778 u16 len;
780 pci_dma_sync_single_for_cpu(bp->pdev, map,
781 RX_PKT_BUF_SZ,
782 PCI_DMA_FROMDEVICE);
783 rh = (struct rx_header *) skb->data;
784 len = le16_to_cpu(rh->len);
785 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
786 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
787 drop_it:
788 b44_recycle_rx(bp, cons, bp->rx_prod);
789 drop_it_no_recycle:
790 bp->stats.rx_dropped++;
791 goto next_pkt;
794 if (len == 0) {
795 int i = 0;
797 do {
798 udelay(2);
799 barrier();
800 len = le16_to_cpu(rh->len);
801 } while (len == 0 && i++ < 5);
802 if (len == 0)
803 goto drop_it;
806 /* Omit CRC. */
807 len -= 4;
809 if (len > RX_COPY_THRESHOLD) {
810 int skb_size;
811 skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
812 if (skb_size < 0)
813 goto drop_it;
814 pci_unmap_single(bp->pdev, map,
815 skb_size, PCI_DMA_FROMDEVICE);
816 /* Leave out rx_header */
817 skb_put(skb, len+bp->rx_offset);
818 skb_pull(skb,bp->rx_offset);
819 } else {
820 struct sk_buff *copy_skb;
822 b44_recycle_rx(bp, cons, bp->rx_prod);
823 copy_skb = dev_alloc_skb(len + 2);
824 if (copy_skb == NULL)
825 goto drop_it_no_recycle;
827 skb_reserve(copy_skb, 2);
828 skb_put(copy_skb, len);
829 /* DMA sync done above, copy just the actual packet */
830 skb_copy_from_linear_data_offset(skb, bp->rx_offset,
831 copy_skb->data, len);
832 skb = copy_skb;
834 skb->ip_summed = CHECKSUM_NONE;
835 skb->protocol = eth_type_trans(skb, bp->dev);
836 netif_receive_skb(skb);
837 bp->dev->last_rx = jiffies;
838 received++;
839 budget--;
840 next_pkt:
841 bp->rx_prod = (bp->rx_prod + 1) &
842 (B44_RX_RING_SIZE - 1);
843 cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
846 bp->rx_cons = cons;
847 bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
849 return received;
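/* Old-style NAPI poll handler: reclaim TX work, receive up to the quota,
 * recover from error interrupts with a halt/re-init, and re-enable
 * interrupts only once all work recorded in bp->istat has been handled.
 */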
852 static int b44_poll(struct net_device *netdev, int *budget)
854 struct b44 *bp = netdev_priv(netdev);
855 int done;
857 spin_lock_irq(&bp->lock);
859 if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
860 /* spin_lock(&bp->tx_lock); */
861 b44_tx(bp);
862 /* spin_unlock(&bp->tx_lock); */
864 spin_unlock_irq(&bp->lock);
866 done = 1;
867 if (bp->istat & ISTAT_RX) {
868 int orig_budget = *budget;
869 int work_done;
871 if (orig_budget > netdev->quota)
872 orig_budget = netdev->quota;
874 work_done = b44_rx(bp, orig_budget);
876 *budget -= work_done;
877 netdev->quota -= work_done;
879 if (work_done >= orig_budget)
880 done = 0;
883 if (bp->istat & ISTAT_ERRORS) {
884 unsigned long flags;
886 spin_lock_irqsave(&bp->lock, flags);
887 b44_halt(bp);
888 b44_init_rings(bp);
889 b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
890 netif_wake_queue(bp->dev);
891 spin_unlock_irqrestore(&bp->lock, flags);
892 done = 1;
895 if (done) {
896 netif_rx_complete(netdev);
897 b44_enable_ints(bp);
900 return (done ? 0 : 1);
903 static irqreturn_t b44_interrupt(int irq, void *dev_id)
905 struct net_device *dev = dev_id;
906 struct b44 *bp = netdev_priv(dev);
907 u32 istat, imask;
908 int handled = 0;
910 spin_lock(&bp->lock);
912 istat = br32(bp, B44_ISTAT);
913 imask = br32(bp, B44_IMASK);
/* The interrupt mask register controls which interrupt bits
 * will actually raise an interrupt to the CPU when set by hw/firmware,
 * but doesn't mask off the bits.
 */
919 istat &= imask;
920 if (istat) {
921 handled = 1;
923 if (unlikely(!netif_running(dev))) {
924 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
925 goto irq_ack;
928 if (netif_rx_schedule_prep(dev)) {
/* NOTE: These writes are posted by the readback of
 * the ISTAT register below.
 */
932 bp->istat = istat;
933 __b44_disable_ints(bp);
934 __netif_rx_schedule(dev);
935 } else {
936 printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
937 dev->name);
940 irq_ack:
941 bw32(bp, B44_ISTAT, istat);
942 br32(bp, B44_ISTAT);
944 spin_unlock(&bp->lock);
945 return IRQ_RETVAL(handled);
948 static void b44_tx_timeout(struct net_device *dev)
950 struct b44 *bp = netdev_priv(dev);
952 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
953 dev->name);
955 spin_lock_irq(&bp->lock);
957 b44_halt(bp);
958 b44_init_rings(bp);
959 b44_init_hw(bp, B44_FULL_RESET);
961 spin_unlock_irq(&bp->lock);
963 b44_enable_ints(bp);
965 netif_wake_queue(dev);
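/* Queue a single frame for transmission.  If the streaming mapping lands
 * above the 30-bit (1GB) DMA limit, the data is copied into a GFP_DMA
 * bounce buffer and remapped before the descriptor is written and
 * B44_DMATX_PTR is advanced.
 */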
968 static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
970 struct b44 *bp = netdev_priv(dev);
971 struct sk_buff *bounce_skb;
972 int rc = NETDEV_TX_OK;
973 dma_addr_t mapping;
974 u32 len, entry, ctrl;
976 len = skb->len;
977 spin_lock_irq(&bp->lock);
979 /* This is a hard error, log it. */
980 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
981 netif_stop_queue(dev);
982 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
983 dev->name);
984 goto err_out;
987 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
988 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
989 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
990 if (!dma_mapping_error(mapping))
991 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
993 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
994 GFP_ATOMIC|GFP_DMA);
995 if (!bounce_skb)
996 goto err_out;
998 mapping = pci_map_single(bp->pdev, bounce_skb->data,
999 len, PCI_DMA_TODEVICE);
1000 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
1001 if (!dma_mapping_error(mapping))
1002 pci_unmap_single(bp->pdev, mapping,
1003 len, PCI_DMA_TODEVICE);
1004 dev_kfree_skb_any(bounce_skb);
1005 goto err_out;
1008 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len),
1009 skb->len);
1010 dev_kfree_skb_any(skb);
1011 skb = bounce_skb;
1014 entry = bp->tx_prod;
1015 bp->tx_buffers[entry].skb = skb;
1016 pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
1018 ctrl = (len & DESC_CTRL_LEN);
1019 ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
1020 if (entry == (B44_TX_RING_SIZE - 1))
1021 ctrl |= DESC_CTRL_EOT;
1023 bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
1024 bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
1026 if (bp->flags & B44_FLAG_TX_RING_HACK)
1027 b44_sync_dma_desc_for_device(bp->pdev, bp->tx_ring_dma,
1028 entry * sizeof(bp->tx_ring[0]),
1029 DMA_TO_DEVICE);
1031 entry = NEXT_TX(entry);
1033 bp->tx_prod = entry;
1035 wmb();
1037 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1038 if (bp->flags & B44_FLAG_BUGGY_TXPTR)
1039 bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
1040 if (bp->flags & B44_FLAG_REORDER_BUG)
1041 br32(bp, B44_DMATX_PTR);
1043 if (TX_BUFFS_AVAIL(bp) < 1)
1044 netif_stop_queue(dev);
1046 dev->trans_start = jiffies;
1048 out_unlock:
1049 spin_unlock_irq(&bp->lock);
1051 return rc;
1053 err_out:
1054 rc = NETDEV_TX_BUSY;
1055 goto out_unlock;
1058 static int b44_change_mtu(struct net_device *dev, int new_mtu)
1060 struct b44 *bp = netdev_priv(dev);
1062 if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
1063 return -EINVAL;
1065 if (!netif_running(dev)) {
/* We'll just catch it later when the
 * device is up'd.
 */
1069 dev->mtu = new_mtu;
1070 return 0;
1073 spin_lock_irq(&bp->lock);
1074 b44_halt(bp);
1075 dev->mtu = new_mtu;
1076 b44_init_rings(bp);
1077 b44_init_hw(bp, B44_FULL_RESET);
1078 spin_unlock_irq(&bp->lock);
1080 b44_enable_ints(bp);
1082 return 0;
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
1092 static void b44_free_rings(struct b44 *bp)
1094 struct ring_info *rp;
1095 int i;
1097 for (i = 0; i < B44_RX_RING_SIZE; i++) {
1098 rp = &bp->rx_buffers[i];
1100 if (rp->skb == NULL)
1101 continue;
1102 pci_unmap_single(bp->pdev,
1103 pci_unmap_addr(rp, mapping),
1104 RX_PKT_BUF_SZ,
1105 PCI_DMA_FROMDEVICE);
1106 dev_kfree_skb_any(rp->skb);
1107 rp->skb = NULL;
1110 /* XXX needs changes once NETIF_F_SG is set... */
1111 for (i = 0; i < B44_TX_RING_SIZE; i++) {
1112 rp = &bp->tx_buffers[i];
1114 if (rp->skb == NULL)
1115 continue;
1116 pci_unmap_single(bp->pdev,
1117 pci_unmap_addr(rp, mapping),
1118 rp->skb->len,
1119 PCI_DMA_TODEVICE);
1120 dev_kfree_skb_any(rp->skb);
1121 rp->skb = NULL;
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
1131 static void b44_init_rings(struct b44 *bp)
1133 int i;
1135 b44_free_rings(bp);
1137 memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
1138 memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
1140 if (bp->flags & B44_FLAG_RX_RING_HACK)
1141 dma_sync_single_for_device(&bp->pdev->dev, bp->rx_ring_dma,
1142 DMA_TABLE_BYTES,
1143 PCI_DMA_BIDIRECTIONAL);
1145 if (bp->flags & B44_FLAG_TX_RING_HACK)
1146 dma_sync_single_for_device(&bp->pdev->dev, bp->tx_ring_dma,
1147 DMA_TABLE_BYTES,
1148 PCI_DMA_TODEVICE);
1150 for (i = 0; i < bp->rx_pending; i++) {
1151 if (b44_alloc_rx_skb(bp, -1, i) < 0)
1152 break;
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
1160 static void b44_free_consistent(struct b44 *bp)
1162 kfree(bp->rx_buffers);
1163 bp->rx_buffers = NULL;
1164 kfree(bp->tx_buffers);
1165 bp->tx_buffers = NULL;
1166 if (bp->rx_ring) {
1167 if (bp->flags & B44_FLAG_RX_RING_HACK) {
1168 dma_unmap_single(&bp->pdev->dev, bp->rx_ring_dma,
1169 DMA_TABLE_BYTES,
1170 DMA_BIDIRECTIONAL);
1171 kfree(bp->rx_ring);
1172 } else
1173 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1174 bp->rx_ring, bp->rx_ring_dma);
1175 bp->rx_ring = NULL;
1176 bp->flags &= ~B44_FLAG_RX_RING_HACK;
1178 if (bp->tx_ring) {
1179 if (bp->flags & B44_FLAG_TX_RING_HACK) {
1180 dma_unmap_single(&bp->pdev->dev, bp->tx_ring_dma,
1181 DMA_TABLE_BYTES,
1182 DMA_TO_DEVICE);
1183 kfree(bp->tx_ring);
1184 } else
1185 pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
1186 bp->tx_ring, bp->tx_ring_dma);
1187 bp->tx_ring = NULL;
1188 bp->flags &= ~B44_FLAG_TX_RING_HACK;
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
1196 static int b44_alloc_consistent(struct b44 *bp)
1198 int size;
1200 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1201 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1202 if (!bp->rx_buffers)
1203 goto out_err;
1205 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1206 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1207 if (!bp->tx_buffers)
1208 goto out_err;
1210 size = DMA_TABLE_BYTES;
1211 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
1212 if (!bp->rx_ring) {
1213 /* Allocation may have failed due to pci_alloc_consistent
1214 insisting on use of GFP_DMA, which is more restrictive
1215 than necessary... */
1216 struct dma_desc *rx_ring;
1217 dma_addr_t rx_ring_dma;
1219 rx_ring = kzalloc(size, GFP_KERNEL);
1220 if (!rx_ring)
1221 goto out_err;
1223 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1224 DMA_TABLE_BYTES,
1225 DMA_BIDIRECTIONAL);
1227 if (dma_mapping_error(rx_ring_dma) ||
1228 rx_ring_dma + size > DMA_30BIT_MASK) {
1229 kfree(rx_ring);
1230 goto out_err;
1233 bp->rx_ring = rx_ring;
1234 bp->rx_ring_dma = rx_ring_dma;
1235 bp->flags |= B44_FLAG_RX_RING_HACK;
1238 bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
1239 if (!bp->tx_ring) {
1240 /* Allocation may have failed due to pci_alloc_consistent
1241 insisting on use of GFP_DMA, which is more restrictive
1242 than necessary... */
1243 struct dma_desc *tx_ring;
1244 dma_addr_t tx_ring_dma;
1246 tx_ring = kzalloc(size, GFP_KERNEL);
1247 if (!tx_ring)
1248 goto out_err;
1250 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1251 DMA_TABLE_BYTES,
1252 DMA_TO_DEVICE);
1254 if (dma_mapping_error(tx_ring_dma) ||
1255 tx_ring_dma + size > DMA_30BIT_MASK) {
1256 kfree(tx_ring);
1257 goto out_err;
1260 bp->tx_ring = tx_ring;
1261 bp->tx_ring_dma = tx_ring_dma;
1262 bp->flags |= B44_FLAG_TX_RING_HACK;
1265 return 0;
1267 out_err:
1268 b44_free_consistent(bp);
1269 return -ENOMEM;
1272 /* bp->lock is held. */
1273 static void b44_clear_stats(struct b44 *bp)
1275 unsigned long reg;
1277 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1278 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
1279 br32(bp, reg);
1280 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
1281 br32(bp, reg);
1284 /* bp->lock is held. */
1285 static void b44_chip_reset(struct b44 *bp)
1287 if (ssb_is_core_up(bp)) {
1288 bw32(bp, B44_RCV_LAZY, 0);
1289 bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
1290 b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
1291 bw32(bp, B44_DMATX_CTRL, 0);
1292 bp->tx_prod = bp->tx_cons = 0;
1293 if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
1294 b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
1295 100, 0);
1297 bw32(bp, B44_DMARX_CTRL, 0);
1298 bp->rx_prod = bp->rx_cons = 0;
1299 } else {
1300 ssb_pci_setup(bp, (bp->core_unit == 0 ?
1301 SBINTVEC_ENET0 :
1302 SBINTVEC_ENET1));
1305 ssb_core_reset(bp);
1307 b44_clear_stats(bp);
1309 /* Make PHY accessible. */
1310 bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
1311 (0x0d & MDIO_CTRL_MAXF_MASK)));
1312 br32(bp, B44_MDIO_CTRL);
1314 if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
1315 bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
1316 br32(bp, B44_ENET_CTRL);
1317 bp->flags &= ~B44_FLAG_INTERNAL_PHY;
1318 } else {
1319 u32 val = br32(bp, B44_DEVCTRL);
1321 if (val & DEVCTRL_EPR) {
1322 bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
1323 br32(bp, B44_DEVCTRL);
1324 udelay(100);
1326 bp->flags |= B44_FLAG_INTERNAL_PHY;
1330 /* bp->lock is held. */
1331 static void b44_halt(struct b44 *bp)
1333 b44_disable_ints(bp);
1334 b44_chip_reset(bp);
1337 /* bp->lock is held. */
1338 static void __b44_set_mac_addr(struct b44 *bp)
1340 bw32(bp, B44_CAM_CTRL, 0);
1341 if (!(bp->dev->flags & IFF_PROMISC)) {
1342 u32 val;
1344 __b44_cam_write(bp, bp->dev->dev_addr, 0);
1345 val = br32(bp, B44_CAM_CTRL);
1346 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1350 static int b44_set_mac_addr(struct net_device *dev, void *p)
1352 struct b44 *bp = netdev_priv(dev);
1353 struct sockaddr *addr = p;
1355 if (netif_running(dev))
1356 return -EBUSY;
1358 if (!is_valid_ether_addr(addr->sa_data))
1359 return -EINVAL;
1361 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1363 spin_lock_irq(&bp->lock);
1364 __b44_set_mac_addr(bp);
1365 spin_unlock_irq(&bp->lock);
1367 return 0;
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
1373 static void __b44_set_rx_mode(struct net_device *);
1374 static void b44_init_hw(struct b44 *bp, int reset_kind)
1376 u32 val;
1378 b44_chip_reset(bp);
1379 if (reset_kind == B44_FULL_RESET) {
1380 b44_phy_reset(bp);
1381 b44_setup_phy(bp);
1384 /* Enable CRC32, set proper LED modes and power on PHY */
1385 bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
1386 bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
1388 /* This sets the MAC address too. */
1389 __b44_set_rx_mode(bp->dev);
1391 /* MTU + eth header + possible VLAN tag + struct rx_header */
1392 bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1393 bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
1395 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1396 if (reset_kind == B44_PARTIAL_RESET) {
1397 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1398 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1399 } else {
1400 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1401 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1402 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1403 (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
1404 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1406 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
1407 bp->rx_prod = bp->rx_pending;
1409 bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
1412 val = br32(bp, B44_ENET_CTRL);
1413 bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
1416 static int b44_open(struct net_device *dev)
1418 struct b44 *bp = netdev_priv(dev);
1419 int err;
1421 err = b44_alloc_consistent(bp);
1422 if (err)
1423 goto out;
1425 b44_init_rings(bp);
1426 b44_init_hw(bp, B44_FULL_RESET);
1428 b44_check_phy(bp);
1430 err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
1431 if (unlikely(err < 0)) {
1432 b44_chip_reset(bp);
1433 b44_free_rings(bp);
1434 b44_free_consistent(bp);
1435 goto out;
1438 init_timer(&bp->timer);
1439 bp->timer.expires = jiffies + HZ;
1440 bp->timer.data = (unsigned long) bp;
1441 bp->timer.function = b44_timer;
1442 add_timer(&bp->timer);
1444 b44_enable_ints(bp);
1445 netif_start_queue(dev);
1446 out:
1447 return err;
1450 #if 0
1451 /*static*/ void b44_dump_state(struct b44 *bp)
1453 u32 val32, val32_2, val32_3, val32_4, val32_5;
1454 u16 val16;
1456 pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
1457 printk("DEBUG: PCI status [%04x] \n", val16);
1460 #endif
1462 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
1467 static void b44_poll_controller(struct net_device *dev)
1469 disable_irq(dev->irq);
1470 b44_interrupt(dev->irq, dev);
1471 enable_irq(dev->irq);
1473 #endif
1475 static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
1477 u32 i;
1478 u32 *pattern = (u32 *) pp;
1480 for (i = 0; i < bytes; i += sizeof(u32)) {
1481 bw32(bp, B44_FILT_ADDR, table_offset + i);
1482 bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
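/* Build one Wake-on-LAN "magic packet" match at the given offset: six 0xff
 * synchronisation bytes followed by repeated copies of the MAC address, as
 * many as fit in the 128-byte pattern, with the matching bits set in the
 * byte mask.  Returns the pattern length minus one, as the chip expects.
 */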
1486 static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
1488 int magicsync = 6;
1489 int k, j, len = offset;
1490 int ethaddr_bytes = ETH_ALEN;
1492 memset(ppattern + offset, 0xff, magicsync);
1493 for (j = 0; j < magicsync; j++)
1494 set_bit(len++, (unsigned long *) pmask);
1496 for (j = 0; j < B44_MAX_PATTERNS; j++) {
1497 if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
1498 ethaddr_bytes = ETH_ALEN;
1499 else
1500 ethaddr_bytes = B44_PATTERN_SIZE - len;
1501 if (ethaddr_bytes <=0)
1502 break;
1503 for (k = 0; k< ethaddr_bytes; k++) {
1504 ppattern[offset + magicsync +
1505 (j * ETH_ALEN) + k] = macaddr[k];
1506 len++;
1507 set_bit(len, (unsigned long *) pmask);
1510 return len - 1;
/* Set up magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
1516 static void b44_setup_pseudo_magicp(struct b44 *bp)
1519 u32 val;
1520 int plen0, plen1, plen2;
1521 u8 *pwol_pattern;
1522 u8 pwol_mask[B44_PMASK_SIZE];
1524 pwol_pattern = kmalloc(B44_PATTERN_SIZE, GFP_KERNEL);
1525 if (!pwol_pattern) {
1526 printk(KERN_ERR PFX "Memory not available for WOL\n");
1527 return;
1530 /* Ipv4 magic packet pattern - pattern 0.*/
1531 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1532 memset(pwol_mask, 0, B44_PMASK_SIZE);
1533 plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1534 B44_ETHIPV4UDP_HLEN);
1536 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
1537 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
1539 /* Raw ethernet II magic packet pattern - pattern 1 */
1540 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1541 memset(pwol_mask, 0, B44_PMASK_SIZE);
1542 plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1543 ETH_HLEN);
1545 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1546 B44_PATTERN_BASE + B44_PATTERN_SIZE);
1547 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1548 B44_PMASK_BASE + B44_PMASK_SIZE);
1550 /* Ipv6 magic packet pattern - pattern 2 */
1551 memset(pwol_pattern, 0, B44_PATTERN_SIZE);
1552 memset(pwol_mask, 0, B44_PMASK_SIZE);
1553 plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
1554 B44_ETHIPV6UDP_HLEN);
1556 bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
1557 B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
1558 bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
1559 B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
1561 kfree(pwol_pattern);
/* set these patterns' lengths: one less than each real length */
1564 val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
1565 bw32(bp, B44_WKUP_LEN, val);
1567 /* enable wakeup pattern matching */
1568 val = br32(bp, B44_DEVCTRL);
1569 bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
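/* Arm Wake-on-LAN before suspend/close: B0 and later chips use the simple
 * magic-packet mode keyed on the station address, older revisions fall back
 * to the pseudo magic pattern filter above; the core's SBTMSLOW_PE bit and
 * the SSB_PE bit in the SSB_PMCSR config word are then set.
 */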
1573 static void b44_setup_wol(struct b44 *bp)
1575 u32 val;
1576 u16 pmval;
1578 bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
1580 if (bp->flags & B44_FLAG_B0_ANDLATER) {
1582 bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
1584 val = bp->dev->dev_addr[2] << 24 |
1585 bp->dev->dev_addr[3] << 16 |
1586 bp->dev->dev_addr[4] << 8 |
1587 bp->dev->dev_addr[5];
1588 bw32(bp, B44_ADDR_LO, val);
1590 val = bp->dev->dev_addr[0] << 8 |
1591 bp->dev->dev_addr[1];
1592 bw32(bp, B44_ADDR_HI, val);
1594 val = br32(bp, B44_DEVCTRL);
1595 bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
1597 } else {
1598 b44_setup_pseudo_magicp(bp);
1601 val = br32(bp, B44_SBTMSLOW);
1602 bw32(bp, B44_SBTMSLOW, val | SBTMSLOW_PE);
1604 pci_read_config_word(bp->pdev, SSB_PMCSR, &pmval);
1605 pci_write_config_word(bp->pdev, SSB_PMCSR, pmval | SSB_PE);
1609 static int b44_close(struct net_device *dev)
1611 struct b44 *bp = netdev_priv(dev);
1613 netif_stop_queue(dev);
1615 netif_poll_disable(dev);
1617 del_timer_sync(&bp->timer);
1619 spin_lock_irq(&bp->lock);
1621 #if 0
1622 b44_dump_state(bp);
1623 #endif
1624 b44_halt(bp);
1625 b44_free_rings(bp);
1626 netif_carrier_off(dev);
1628 spin_unlock_irq(&bp->lock);
1630 free_irq(dev->irq, dev);
1632 netif_poll_enable(dev);
1634 if (bp->flags & B44_FLAG_WOL_ENABLE) {
1635 b44_init_hw(bp, B44_PARTIAL_RESET);
1636 b44_setup_wol(bp);
1639 b44_free_consistent(bp);
1641 return 0;
1644 static struct net_device_stats *b44_get_stats(struct net_device *dev)
1646 struct b44 *bp = netdev_priv(dev);
1647 struct net_device_stats *nstat = &bp->stats;
1648 struct b44_hw_stats *hwstat = &bp->hw_stats;
1650 /* Convert HW stats into netdevice stats. */
1651 nstat->rx_packets = hwstat->rx_pkts;
1652 nstat->tx_packets = hwstat->tx_pkts;
1653 nstat->rx_bytes = hwstat->rx_octets;
1654 nstat->tx_bytes = hwstat->tx_octets;
1655 nstat->tx_errors = (hwstat->tx_jabber_pkts +
1656 hwstat->tx_oversize_pkts +
1657 hwstat->tx_underruns +
1658 hwstat->tx_excessive_cols +
1659 hwstat->tx_late_cols);
1660 nstat->multicast = hwstat->tx_multicast_pkts;
1661 nstat->collisions = hwstat->tx_total_cols;
1663 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
1664 hwstat->rx_undersize);
1665 nstat->rx_over_errors = hwstat->rx_missed_pkts;
1666 nstat->rx_frame_errors = hwstat->rx_align_errs;
1667 nstat->rx_crc_errors = hwstat->rx_crc_errs;
1668 nstat->rx_errors = (hwstat->rx_jabber_pkts +
1669 hwstat->rx_oversize_pkts +
1670 hwstat->rx_missed_pkts +
1671 hwstat->rx_crc_align_errs +
1672 hwstat->rx_undersize +
1673 hwstat->rx_crc_errs +
1674 hwstat->rx_align_errs +
1675 hwstat->rx_symbol_errs);
1677 nstat->tx_aborted_errors = hwstat->tx_underruns;
1678 #if 0
1679 /* Carrier lost counter seems to be broken for some devices */
1680 nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
1681 #endif
1683 return nstat;
1686 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1688 struct dev_mc_list *mclist;
1689 int i, num_ents;
1691 num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
1692 mclist = dev->mc_list;
1693 for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
1694 __b44_cam_write(bp, mclist->dmi_addr, i + 1);
1696 return i+1;
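/* Program the receive filter: promiscuous mode bypasses the CAM entirely;
 * otherwise CAM slot 0 holds the station address, slots 1..N the multicast
 * list (or RXCONFIG_ALLMULTI when the list is too long), and the remaining
 * entries of the 64-slot CAM are cleared before the CAM is enabled.
 */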
1699 static void __b44_set_rx_mode(struct net_device *dev)
1701 struct b44 *bp = netdev_priv(dev);
1702 u32 val;
1704 val = br32(bp, B44_RXCONFIG);
1705 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
1706 if (dev->flags & IFF_PROMISC) {
1707 val |= RXCONFIG_PROMISC;
1708 bw32(bp, B44_RXCONFIG, val);
1709 } else {
1710 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1711 int i = 1;
1713 __b44_set_mac_addr(bp);
1715 if ((dev->flags & IFF_ALLMULTI) ||
1716 (dev->mc_count > B44_MCAST_TABLE_SIZE))
1717 val |= RXCONFIG_ALLMULTI;
1718 else
1719 i = __b44_load_mcast(bp, dev);
1721 for (; i < 64; i++)
1722 __b44_cam_write(bp, zero, i);
1724 bw32(bp, B44_RXCONFIG, val);
1725 val = br32(bp, B44_CAM_CTRL);
1726 bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
1730 static void b44_set_rx_mode(struct net_device *dev)
1732 struct b44 *bp = netdev_priv(dev);
1734 spin_lock_irq(&bp->lock);
1735 __b44_set_rx_mode(dev);
1736 spin_unlock_irq(&bp->lock);
1739 static u32 b44_get_msglevel(struct net_device *dev)
1741 struct b44 *bp = netdev_priv(dev);
1742 return bp->msg_enable;
1745 static void b44_set_msglevel(struct net_device *dev, u32 value)
1747 struct b44 *bp = netdev_priv(dev);
1748 bp->msg_enable = value;
1751 static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1753 struct b44 *bp = netdev_priv(dev);
1754 struct pci_dev *pci_dev = bp->pdev;
1756 strcpy (info->driver, DRV_MODULE_NAME);
1757 strcpy (info->version, DRV_MODULE_VERSION);
1758 strcpy (info->bus_info, pci_name(pci_dev));
1761 static int b44_nway_reset(struct net_device *dev)
1763 struct b44 *bp = netdev_priv(dev);
1764 u32 bmcr;
1765 int r;
1767 spin_lock_irq(&bp->lock);
1768 b44_readphy(bp, MII_BMCR, &bmcr);
1769 b44_readphy(bp, MII_BMCR, &bmcr);
1770 r = -EINVAL;
1771 if (bmcr & BMCR_ANENABLE) {
1772 b44_writephy(bp, MII_BMCR,
1773 bmcr | BMCR_ANRESTART);
1774 r = 0;
1776 spin_unlock_irq(&bp->lock);
1778 return r;
1781 static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1783 struct b44 *bp = netdev_priv(dev);
1785 cmd->supported = (SUPPORTED_Autoneg);
1786 cmd->supported |= (SUPPORTED_100baseT_Half |
1787 SUPPORTED_100baseT_Full |
1788 SUPPORTED_10baseT_Half |
1789 SUPPORTED_10baseT_Full |
1790 SUPPORTED_MII);
1792 cmd->advertising = 0;
1793 if (bp->flags & B44_FLAG_ADV_10HALF)
1794 cmd->advertising |= ADVERTISED_10baseT_Half;
1795 if (bp->flags & B44_FLAG_ADV_10FULL)
1796 cmd->advertising |= ADVERTISED_10baseT_Full;
1797 if (bp->flags & B44_FLAG_ADV_100HALF)
1798 cmd->advertising |= ADVERTISED_100baseT_Half;
1799 if (bp->flags & B44_FLAG_ADV_100FULL)
1800 cmd->advertising |= ADVERTISED_100baseT_Full;
1801 cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
1802 cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
1803 SPEED_100 : SPEED_10;
1804 cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
1805 DUPLEX_FULL : DUPLEX_HALF;
1806 cmd->port = 0;
1807 cmd->phy_address = bp->phy_addr;
1808 cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
1809 XCVR_INTERNAL : XCVR_EXTERNAL;
1810 cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
1811 AUTONEG_DISABLE : AUTONEG_ENABLE;
1812 if (cmd->autoneg == AUTONEG_ENABLE)
1813 cmd->advertising |= ADVERTISED_Autoneg;
1814 if (!netif_running(dev)){
1815 cmd->speed = 0;
1816 cmd->duplex = 0xff;
1818 cmd->maxtxpkt = 0;
1819 cmd->maxrxpkt = 0;
1820 return 0;
1823 static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1825 struct b44 *bp = netdev_priv(dev);
1827 /* We do not support gigabit. */
1828 if (cmd->autoneg == AUTONEG_ENABLE) {
1829 if (cmd->advertising &
1830 (ADVERTISED_1000baseT_Half |
1831 ADVERTISED_1000baseT_Full))
1832 return -EINVAL;
1833 } else if ((cmd->speed != SPEED_100 &&
1834 cmd->speed != SPEED_10) ||
1835 (cmd->duplex != DUPLEX_HALF &&
1836 cmd->duplex != DUPLEX_FULL)) {
1837 return -EINVAL;
1840 spin_lock_irq(&bp->lock);
1842 if (cmd->autoneg == AUTONEG_ENABLE) {
1843 bp->flags &= ~(B44_FLAG_FORCE_LINK |
1844 B44_FLAG_100_BASE_T |
1845 B44_FLAG_FULL_DUPLEX |
1846 B44_FLAG_ADV_10HALF |
1847 B44_FLAG_ADV_10FULL |
1848 B44_FLAG_ADV_100HALF |
1849 B44_FLAG_ADV_100FULL);
1850 if (cmd->advertising == 0) {
1851 bp->flags |= (B44_FLAG_ADV_10HALF |
1852 B44_FLAG_ADV_10FULL |
1853 B44_FLAG_ADV_100HALF |
1854 B44_FLAG_ADV_100FULL);
1855 } else {
1856 if (cmd->advertising & ADVERTISED_10baseT_Half)
1857 bp->flags |= B44_FLAG_ADV_10HALF;
1858 if (cmd->advertising & ADVERTISED_10baseT_Full)
1859 bp->flags |= B44_FLAG_ADV_10FULL;
1860 if (cmd->advertising & ADVERTISED_100baseT_Half)
1861 bp->flags |= B44_FLAG_ADV_100HALF;
1862 if (cmd->advertising & ADVERTISED_100baseT_Full)
1863 bp->flags |= B44_FLAG_ADV_100FULL;
1865 } else {
1866 bp->flags |= B44_FLAG_FORCE_LINK;
1867 bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
1868 if (cmd->speed == SPEED_100)
1869 bp->flags |= B44_FLAG_100_BASE_T;
1870 if (cmd->duplex == DUPLEX_FULL)
1871 bp->flags |= B44_FLAG_FULL_DUPLEX;
1874 if (netif_running(dev))
1875 b44_setup_phy(bp);
1877 spin_unlock_irq(&bp->lock);
1879 return 0;
1882 static void b44_get_ringparam(struct net_device *dev,
1883 struct ethtool_ringparam *ering)
1885 struct b44 *bp = netdev_priv(dev);
1887 ering->rx_max_pending = B44_RX_RING_SIZE - 1;
1888 ering->rx_pending = bp->rx_pending;
1890 /* XXX ethtool lacks a tx_max_pending, oops... */
1893 static int b44_set_ringparam(struct net_device *dev,
1894 struct ethtool_ringparam *ering)
1896 struct b44 *bp = netdev_priv(dev);
1898 if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
1899 (ering->rx_mini_pending != 0) ||
1900 (ering->rx_jumbo_pending != 0) ||
1901 (ering->tx_pending > B44_TX_RING_SIZE - 1))
1902 return -EINVAL;
1904 spin_lock_irq(&bp->lock);
1906 bp->rx_pending = ering->rx_pending;
1907 bp->tx_pending = ering->tx_pending;
1909 b44_halt(bp);
1910 b44_init_rings(bp);
1911 b44_init_hw(bp, B44_FULL_RESET);
1912 netif_wake_queue(bp->dev);
1913 spin_unlock_irq(&bp->lock);
1915 b44_enable_ints(bp);
1917 return 0;
1920 static void b44_get_pauseparam(struct net_device *dev,
1921 struct ethtool_pauseparam *epause)
1923 struct b44 *bp = netdev_priv(dev);
1925 epause->autoneg =
1926 (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
1927 epause->rx_pause =
1928 (bp->flags & B44_FLAG_RX_PAUSE) != 0;
1929 epause->tx_pause =
1930 (bp->flags & B44_FLAG_TX_PAUSE) != 0;
1933 static int b44_set_pauseparam(struct net_device *dev,
1934 struct ethtool_pauseparam *epause)
1936 struct b44 *bp = netdev_priv(dev);
1938 spin_lock_irq(&bp->lock);
1939 if (epause->autoneg)
1940 bp->flags |= B44_FLAG_PAUSE_AUTO;
1941 else
1942 bp->flags &= ~B44_FLAG_PAUSE_AUTO;
1943 if (epause->rx_pause)
1944 bp->flags |= B44_FLAG_RX_PAUSE;
1945 else
1946 bp->flags &= ~B44_FLAG_RX_PAUSE;
1947 if (epause->tx_pause)
1948 bp->flags |= B44_FLAG_TX_PAUSE;
1949 else
1950 bp->flags &= ~B44_FLAG_TX_PAUSE;
1951 if (bp->flags & B44_FLAG_PAUSE_AUTO) {
1952 b44_halt(bp);
1953 b44_init_rings(bp);
1954 b44_init_hw(bp, B44_FULL_RESET);
1955 } else {
1956 __b44_set_flow_ctrl(bp, bp->flags);
1958 spin_unlock_irq(&bp->lock);
1960 b44_enable_ints(bp);
1962 return 0;
1965 static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1967 switch(stringset) {
1968 case ETH_SS_STATS:
1969 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1970 break;
1974 static int b44_get_stats_count(struct net_device *dev)
1976 return ARRAY_SIZE(b44_gstrings);
1979 static void b44_get_ethtool_stats(struct net_device *dev,
1980 struct ethtool_stats *stats, u64 *data)
1982 struct b44 *bp = netdev_priv(dev);
1983 u32 *val = &bp->hw_stats.tx_good_octets;
1984 u32 i;
1986 spin_lock_irq(&bp->lock);
1988 b44_stats_update(bp);
1990 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1991 *data++ = *val++;
1993 spin_unlock_irq(&bp->lock);
1996 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1998 struct b44 *bp = netdev_priv(dev);
2000 wol->supported = WAKE_MAGIC;
2001 if (bp->flags & B44_FLAG_WOL_ENABLE)
2002 wol->wolopts = WAKE_MAGIC;
2003 else
2004 wol->wolopts = 0;
2005 memset(&wol->sopass, 0, sizeof(wol->sopass));
2008 static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2010 struct b44 *bp = netdev_priv(dev);
2012 spin_lock_irq(&bp->lock);
2013 if (wol->wolopts & WAKE_MAGIC)
2014 bp->flags |= B44_FLAG_WOL_ENABLE;
2015 else
2016 bp->flags &= ~B44_FLAG_WOL_ENABLE;
2017 spin_unlock_irq(&bp->lock);
2019 return 0;
2022 static const struct ethtool_ops b44_ethtool_ops = {
2023 .get_drvinfo = b44_get_drvinfo,
2024 .get_settings = b44_get_settings,
2025 .set_settings = b44_set_settings,
2026 .nway_reset = b44_nway_reset,
2027 .get_link = ethtool_op_get_link,
2028 .get_wol = b44_get_wol,
2029 .set_wol = b44_set_wol,
2030 .get_ringparam = b44_get_ringparam,
2031 .set_ringparam = b44_set_ringparam,
2032 .get_pauseparam = b44_get_pauseparam,
2033 .set_pauseparam = b44_set_pauseparam,
2034 .get_msglevel = b44_get_msglevel,
2035 .set_msglevel = b44_set_msglevel,
2036 .get_strings = b44_get_strings,
2037 .get_stats_count = b44_get_stats_count,
2038 .get_ethtool_stats = b44_get_ethtool_stats,
2039 .get_perm_addr = ethtool_op_get_perm_addr,
2042 static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2044 struct mii_ioctl_data *data = if_mii(ifr);
2045 struct b44 *bp = netdev_priv(dev);
2046 int err = -EINVAL;
2048 if (!netif_running(dev))
2049 goto out;
2051 spin_lock_irq(&bp->lock);
2052 err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
2053 spin_unlock_irq(&bp->lock);
2054 out:
2055 return err;
2058 /* Read 128-bytes of EEPROM. */
2059 static int b44_read_eeprom(struct b44 *bp, u8 *data)
2061 long i;
2062 __le16 *ptr = (__le16 *) data;
2064 for (i = 0; i < 128; i += 2)
2065 ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
2067 return 0;
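/* Read the fixed per-device parameters: the MAC address is stored in the
 * EEPROM at byte offsets 78-83 with each 16-bit word byte-swapped, the PHY
 * address in byte 90, and the 30-byte receive offset and SB_PCI_DMA window
 * are constants of the design.
 */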
2070 static int __devinit b44_get_invariants(struct b44 *bp)
2072 u8 eeprom[128];
2073 int err;
2075 err = b44_read_eeprom(bp, &eeprom[0]);
2076 if (err)
2077 goto out;
2079 bp->dev->dev_addr[0] = eeprom[79];
2080 bp->dev->dev_addr[1] = eeprom[78];
2081 bp->dev->dev_addr[2] = eeprom[81];
2082 bp->dev->dev_addr[3] = eeprom[80];
2083 bp->dev->dev_addr[4] = eeprom[83];
2084 bp->dev->dev_addr[5] = eeprom[82];
2086 if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
2087 printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
2088 return -EINVAL;
2091 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
2093 bp->phy_addr = eeprom[90] & 0x1f;
/* With this, plus the rx_header prepended to the data by the
 * hardware, we'll land the ethernet header on a 2-byte boundary.
 */
2098 bp->rx_offset = 30;
2100 bp->imask = IMASK_DEF;
2102 bp->core_unit = ssb_core_unit(bp);
2103 bp->dma_offset = SB_PCI_DMA;
/* XXX - really required?
   bp->flags |= B44_FLAG_BUGGY_TXPTR;
 */
2109 if (ssb_get_core_rev(bp) >= 7)
2110 bp->flags |= B44_FLAG_B0_ANDLATER;
2112 out:
2113 return err;
2116 static int __devinit b44_init_one(struct pci_dev *pdev,
2117 const struct pci_device_id *ent)
2119 static int b44_version_printed = 0;
2120 unsigned long b44reg_base, b44reg_len;
2121 struct net_device *dev;
2122 struct b44 *bp;
2123 int err, i;
2125 if (b44_version_printed++ == 0)
2126 printk(KERN_INFO "%s", version);
2128 err = pci_enable_device(pdev);
2129 if (err) {
2130 dev_err(&pdev->dev, "Cannot enable PCI device, "
2131 "aborting.\n");
2132 return err;
2135 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2136 dev_err(&pdev->dev,
2137 "Cannot find proper PCI device "
2138 "base address, aborting.\n");
2139 err = -ENODEV;
2140 goto err_out_disable_pdev;
2143 err = pci_request_regions(pdev, DRV_MODULE_NAME);
2144 if (err) {
2145 dev_err(&pdev->dev,
2146 "Cannot obtain PCI resources, aborting.\n");
2147 goto err_out_disable_pdev;
2150 pci_set_master(pdev);
2152 err = pci_set_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2153 if (err) {
2154 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2155 goto err_out_free_res;
2158 err = pci_set_consistent_dma_mask(pdev, (u64) DMA_30BIT_MASK);
2159 if (err) {
2160 dev_err(&pdev->dev, "No usable DMA configuration, aborting.\n");
2161 goto err_out_free_res;
2164 b44reg_base = pci_resource_start(pdev, 0);
2165 b44reg_len = pci_resource_len(pdev, 0);
2167 dev = alloc_etherdev(sizeof(*bp));
2168 if (!dev) {
2169 dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
2170 err = -ENOMEM;
2171 goto err_out_free_res;
2174 SET_MODULE_OWNER(dev);
2175 SET_NETDEV_DEV(dev,&pdev->dev);
2177 /* No interesting netdevice features in this card... */
2178 dev->features |= 0;
2180 bp = netdev_priv(dev);
2181 bp->pdev = pdev;
2182 bp->dev = dev;
2184 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
2186 spin_lock_init(&bp->lock);
2188 bp->regs = ioremap(b44reg_base, b44reg_len);
2189 if (bp->regs == 0UL) {
2190 dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
2191 err = -ENOMEM;
2192 goto err_out_free_dev;
2195 bp->rx_pending = B44_DEF_RX_RING_PENDING;
2196 bp->tx_pending = B44_DEF_TX_RING_PENDING;
2198 dev->open = b44_open;
2199 dev->stop = b44_close;
2200 dev->hard_start_xmit = b44_start_xmit;
2201 dev->get_stats = b44_get_stats;
2202 dev->set_multicast_list = b44_set_rx_mode;
2203 dev->set_mac_address = b44_set_mac_addr;
2204 dev->do_ioctl = b44_ioctl;
2205 dev->tx_timeout = b44_tx_timeout;
2206 dev->poll = b44_poll;
2207 dev->weight = 64;
2208 dev->watchdog_timeo = B44_TX_TIMEOUT;
2209 #ifdef CONFIG_NET_POLL_CONTROLLER
2210 dev->poll_controller = b44_poll_controller;
2211 #endif
2212 dev->change_mtu = b44_change_mtu;
2213 dev->irq = pdev->irq;
2214 SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
2216 netif_carrier_off(dev);
2218 err = b44_get_invariants(bp);
2219 if (err) {
2220 dev_err(&pdev->dev,
2221 "Problem fetching invariants of chip, aborting.\n");
2222 goto err_out_iounmap;
2225 bp->mii_if.dev = dev;
2226 bp->mii_if.mdio_read = b44_mii_read;
2227 bp->mii_if.mdio_write = b44_mii_write;
2228 bp->mii_if.phy_id = bp->phy_addr;
2229 bp->mii_if.phy_id_mask = 0x1f;
2230 bp->mii_if.reg_num_mask = 0x1f;
2232 /* By default, advertise all speed/duplex settings. */
2233 bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
2234 B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
2236 /* By default, auto-negotiate PAUSE. */
2237 bp->flags |= B44_FLAG_PAUSE_AUTO;
2239 err = register_netdev(dev);
2240 if (err) {
2241 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
2242 goto err_out_iounmap;
2245 pci_set_drvdata(pdev, dev);
2247 pci_save_state(bp->pdev);
/* Chip reset provides power to the b44 MAC & PCI cores, which
 * is necessary for MAC register access.
 */
2252 b44_chip_reset(bp);
2254 printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
2255 for (i = 0; i < 6; i++)
2256 printk("%2.2x%c", dev->dev_addr[i],
2257 i == 5 ? '\n' : ':');
2259 return 0;
2261 err_out_iounmap:
2262 iounmap(bp->regs);
2264 err_out_free_dev:
2265 free_netdev(dev);
2267 err_out_free_res:
2268 pci_release_regions(pdev);
2270 err_out_disable_pdev:
2271 pci_disable_device(pdev);
2272 pci_set_drvdata(pdev, NULL);
2273 return err;
2276 static void __devexit b44_remove_one(struct pci_dev *pdev)
2278 struct net_device *dev = pci_get_drvdata(pdev);
2279 struct b44 *bp = netdev_priv(dev);
2281 unregister_netdev(dev);
2282 iounmap(bp->regs);
2283 free_netdev(dev);
2284 pci_release_regions(pdev);
2285 pci_disable_device(pdev);
2286 pci_set_drvdata(pdev, NULL);
2289 static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
2291 struct net_device *dev = pci_get_drvdata(pdev);
2292 struct b44 *bp = netdev_priv(dev);
2294 if (!netif_running(dev))
2295 return 0;
2297 del_timer_sync(&bp->timer);
2299 spin_lock_irq(&bp->lock);
2301 b44_halt(bp);
2302 netif_carrier_off(bp->dev);
2303 netif_device_detach(bp->dev);
2304 b44_free_rings(bp);
2306 spin_unlock_irq(&bp->lock);
2308 free_irq(dev->irq, dev);
2309 if (bp->flags & B44_FLAG_WOL_ENABLE) {
2310 b44_init_hw(bp, B44_PARTIAL_RESET);
2311 b44_setup_wol(bp);
2313 pci_disable_device(pdev);
2314 return 0;
2317 static int b44_resume(struct pci_dev *pdev)
2319 struct net_device *dev = pci_get_drvdata(pdev);
2320 struct b44 *bp = netdev_priv(dev);
2321 int rc = 0;
2323 pci_restore_state(pdev);
2324 rc = pci_enable_device(pdev);
2325 if (rc) {
2326 printk(KERN_ERR PFX "%s: pci_enable_device failed\n",
2327 dev->name);
2328 return rc;
2331 pci_set_master(pdev);
2333 if (!netif_running(dev))
2334 return 0;
2336 rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
2337 if (rc) {
2338 printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
2339 pci_disable_device(pdev);
2340 return rc;
2343 spin_lock_irq(&bp->lock);
2345 b44_init_rings(bp);
2346 b44_init_hw(bp, B44_FULL_RESET);
2347 netif_device_attach(bp->dev);
2348 spin_unlock_irq(&bp->lock);
2350 b44_enable_ints(bp);
2351 netif_wake_queue(dev);
2353 mod_timer(&bp->timer, jiffies + 1);
2355 return 0;
2358 static struct pci_driver b44_driver = {
2359 .name = DRV_MODULE_NAME,
2360 .id_table = b44_pci_tbl,
2361 .probe = b44_init_one,
2362 .remove = __devexit_p(b44_remove_one),
2363 .suspend = b44_suspend,
2364 .resume = b44_resume,
2367 static int __init b44_init(void)
2369 unsigned int dma_desc_align_size = dma_get_cache_alignment();
/* Set up parameters for syncing RX/TX DMA descriptors */
2372 dma_desc_align_mask = ~(dma_desc_align_size - 1);
2373 dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
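/* For example, with a 32-byte cache line this yields dma_desc_align_mask ==
 * ~31 and dma_desc_sync_size == 32, so each descriptor sync in the helpers
 * at the top of the file covers exactly the cache line holding that
 * descriptor.
 */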
2375 return pci_register_driver(&b44_driver);
2378 static void __exit b44_cleanup(void)
2380 pci_unregister_driver(&b44_driver);
2383 module_init(b44_init);
2384 module_exit(b44_cleanup);