/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
 *
 * Distribute under GPL.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME         "b44"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_MODULE_VERSION      "2.0"

#define B44_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU                     60
#define B44_MAX_MTU                     1500

#define B44_RX_RING_SIZE                512
#define B44_DEF_RX_RING_PENDING         200
#define B44_RX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE                512
#define B44_DEF_TX_RING_PENDING         (B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES       (sizeof(struct dma_desc) * \
                                 B44_TX_RING_SIZE)
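
/* TX ring accounting: only tx_pending of the B44_TX_RING_SIZE slots are
 * usable; TX_RING_GAP is the reserve that keeps the producer from
 * wrapping onto the consumer, and NEXT_TX relies on the ring size being
 * a power of two.  Worked example: with tx_cons == 5, tx_prod == 510
 * and tx_pending == 511, TX_BUFFS_AVAIL yields 5 + 511 - 510 = 6 free
 * slots.
 */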
#define TX_RING_GAP(BP) \
        (B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)                                              \
        (((BP)->tx_cons <= (BP)->tx_prod) ?                             \
          (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :            \
          (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)              (((N) + 1) & (B44_TX_RING_SIZE - 1))

#define RX_PKT_OFFSET           (RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ           (1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH    (B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
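/* Each of the 16 filters is a 128-byte pattern plus a 16-byte mask;
 * every mask bit selects whether the corresponding pattern byte must
 * match the incoming frame.
 */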
#define B44_PATTERN_BASE        0x400
#define B44_PATTERN_SIZE        0x80
#define B44_PMASK_BASE          0x600
#define B44_PMASK_SIZE          0x10
#define B44_MAX_PATTERNS        16
#define B44_ETHIPV6UDP_HLEN     62
#define B44_ETHIPV4UDP_HLEN     42

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;      /* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

#ifdef CONFIG_B44_PCI
static const struct pci_device_id b44_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
        { 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
        SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
        SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);

static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET          1
#define B44_FULL_RESET_SKIP_PHY 2
#define B44_PARTIAL_RESET       3
#define B44_CHIP_RESET_FULL     4
#define B44_CHIP_RESET_PARTIAL  5

static void b44_init_hw(struct b44 *, int);

static int dma_desc_align_mask;
static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)      # x,
B44_STAT_REG_DECLARE
#undef _B44
};

static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
                                                dma_addr_t dma_base,
                                                unsigned long offset,
                                                enum dma_data_direction dir)
{
        ssb_dma_sync_single_range_for_device(sdev, dma_base,
                                             offset & dma_desc_align_mask,
                                             dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
                                             dma_addr_t dma_base,
                                             unsigned long offset,
                                             enum dma_data_direction dir)
{
        ssb_dma_sync_single_range_for_cpu(sdev, dma_base,
                                          offset & dma_desc_align_mask,
                                          dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
        return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
                        unsigned long reg, unsigned long val)
{
        ssb_write32(bp->sdev, reg, val);
}

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG!  Timeout waiting for bit %08x of register "
                       "%lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}
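
/* The CAM holds the station address and multicast filter entries.  A
 * 6-byte MAC is split across the DATA_HI (bytes 0-1) and DATA_LO
 * (bytes 2-5) registers, most significant byte first.
 */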
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
                                (index << CAM_CTRL_INDEX_SHIFT)));

        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

        val = br32(bp, B44_CAM_DATA_LO);

        data[2] = (val >> 24) & 0xFF;
        data[3] = (val >> 16) & 0xFF;
        data[4] = (val >> 8) & 0xFF;
        data[5] = (val >> 0) & 0xFF;

        val = br32(bp, B44_CAM_DATA_HI);

        data[0] = (val >> 8) & 0xFF;
        data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
        u32 val;

        val  = ((u32) data[2]) << 24;
        val |= ((u32) data[3]) << 16;
        val |= ((u32) data[4]) << 8;
        val |= ((u32) data[5]) << 0;
        bw32(bp, B44_CAM_DATA_LO, val);
        val = (CAM_DATA_HI_VALID |
               (((u32) data[0]) << 8) |
               (((u32) data[1]) << 0));
        bw32(bp, B44_CAM_DATA_HI, val);
        bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
                                (index << CAM_CTRL_INDEX_SHIFT)));
        b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}

static inline void __b44_disable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
        bw32(bp, B44_IMASK, bp->imask);
}

static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                                 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                                 (phy_addr << MDIO_DATA_PMD_SHIFT) |
                                 (reg << MDIO_DATA_RA_SHIFT) |
                                 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                                 (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
                                 (phy_addr << MDIO_DATA_PMD_SHIFT) |
                                 (reg << MDIO_DATA_RA_SHIFT) |
                                 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
                                 (val & MDIO_DATA_DATA)));
        return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}

static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;

        return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;

        return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
        u32 val;
        struct b44 *bp = netdev_priv(dev);
        int rc = __b44_readphy(bp, phy_id, location, &val);
        if (rc)
                return 0xffffffff;
        return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
                          int val)
{
        struct b44 *bp = netdev_priv(dev);
        __b44_writephy(bp, phy_id, location, val);
}

static int b44_phy_reset(struct b44 *bp)
{
        u32 val;
        int err;

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;
        err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
        if (err)
                return err;
        udelay(100);
        err = b44_readphy(bp, MII_BMCR, &val);
        if (!err) {
                if (val & BMCR_RESET) {
                        printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
                               bp->dev->name);
                        err = -ENODEV;
                }
        }

        return err;
}

static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
        u32 pause_enab = 0;

        /* The driver supports only rx pause by default because
         * the b44 mac tx pause mechanism generates excessive
         * pause frames.
         * Use ethtool to turn on b44 tx pause if necessary.
         */
        if ((local & ADVERTISE_PAUSE_CAP) &&
            (local & ADVERTISE_PAUSE_ASYM)) {
                if ((remote & LPA_PAUSE_ASYM) &&
                    !(remote & LPA_PAUSE_CAP))
                        pause_enab |= B44_FLAG_RX_PAUSE;
        }

        __b44_set_flow_ctrl(bp, pause_enab);
}

#ifdef SSB_DRIVER_MIPS
extern char *nvram_get(char *name);
static void b44_wap54g10_workaround(struct b44 *bp)
{
        const char *str;
        u32 val;
        int err;

        /*
         * workaround for bad hardware design in Linksys WAP54G v1.0
         * see https://dev.openwrt.org/ticket/146
         * check and reset bit "isolate"
         */
        str = nvram_get("boardnum");
        if (!str)
                return;
        if (simple_strtoul(str, NULL, 0) == 2) {
                err = __b44_readphy(bp, 0, MII_BMCR, &val);
                if (err)
                        goto error;
                if (!(val & BMCR_ISOLATE))
                        return;
                val &= ~BMCR_ISOLATE;
                err = __b44_writephy(bp, 0, MII_BMCR, val);
                if (err)
                        goto error;
        }
        return;
error:
        printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif

static int b44_setup_phy(struct b44 *bp)
{
        u32 val;
        int err;

        b44_wap54g10_workaround(bp);

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
                return 0;
        if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
                                val & MII_ALEDCTRL_ALLMSK)) != 0)
                goto out;
        if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
                goto out;
        if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
                                val | MII_TLEDCTRL_ENABLE)) != 0)
                goto out;

        if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
                u32 adv = ADVERTISE_CSMA;

                if (bp->flags & B44_FLAG_ADV_10HALF)
                        adv |= ADVERTISE_10HALF;
                if (bp->flags & B44_FLAG_ADV_10FULL)
                        adv |= ADVERTISE_10FULL;
                if (bp->flags & B44_FLAG_ADV_100HALF)
                        adv |= ADVERTISE_100HALF;
                if (bp->flags & B44_FLAG_ADV_100FULL)
                        adv |= ADVERTISE_100FULL;

                if (bp->flags & B44_FLAG_PAUSE_AUTO)
                        adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

                if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
                        goto out;
                if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
                                                       BMCR_ANRESTART))) != 0)
                        goto out;
        } else {
                u32 bmcr;

                if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
                        goto out;
                bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
                if (bp->flags & B44_FLAG_100_BASE_T)
                        bmcr |= BMCR_SPEED100;
                if (bp->flags & B44_FLAG_FULL_DUPLEX)
                        bmcr |= BMCR_FULLDPLX;
                if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
                        goto out;

                /* Since we will not be negotiating there is no safe way
                 * to determine if the link partner supports flow control
                 * or not.  So just disable it completely in this case.
                 */
                b44_set_flow_ctrl(bp, 0, 0);
        }

out:
        return err;
}
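
/* Accumulate the hardware MIB counters.  The pointer walk below relies
 * on the u32 fields of struct b44_hw_stats being declared in the same
 * order as the B44_TX_GOOD_O..B44_TX_PAUSE and
 * B44_RX_GOOD_O..B44_RX_NPAUSE register ranges.
 */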
static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }

        /* Pad */
        reg += 8*4UL;

        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
                *val++ += br32(bp, reg);
        }
}

static void b44_link_report(struct b44 *bp)
{
        if (!netif_carrier_ok(bp->dev)) {
                printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
        } else {
                printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
                       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

                printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
                       "%s for RX.\n",
                       bp->dev->name,
                       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
                       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
        }
}

static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
                bp->flags |= B44_FLAG_100_BASE_T;
                bp->flags |= B44_FLAG_FULL_DUPLEX;
                if (!netif_carrier_ok(bp->dev)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        val |= TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                }
                return;
        }

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}

static void b44_timer(unsigned long __opaque)
{
        struct b44 *bp = (struct b44 *) __opaque;

        spin_lock_irq(&bp->lock);

        b44_check_phy(bp);

        b44_stats_update(bp);

        spin_unlock_irq(&bp->lock);

        mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}
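
/* Reclaim TX descriptors the DMA engine has finished with: unmap and
 * free each completed skb, then wake the queue once enough slots are
 * free again.
 */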
static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                BUG_ON(skb == NULL);

                ssb_dma_unmap_single(bp->sdev,
                                     rp->mapping,
                                     skb->len,
                                     DMA_TO_DEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}

/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *dp;
        struct ring_info *src_map, *map;
        struct rx_header *rh;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int dest_idx;
        u32 ctrl;

        src_map = NULL;
        if (src_idx >= 0)
                src_map = &bp->rx_buffers[src_idx];
        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        map = &bp->rx_buffers[dest_idx];
        skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
        if (skb == NULL)
                return -ENOMEM;

        mapping = ssb_dma_map_single(bp->sdev, skb->data,
                                     RX_PKT_BUF_SZ,
                                     DMA_FROM_DEVICE);

        /* Hardware bug work-around, the chip is unable to do PCI DMA
           to/from anything above 1GB :-( */
        if (ssb_dma_mapping_error(bp->sdev, mapping) ||
            mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                /* Sigh... */
                if (!ssb_dma_mapping_error(bp->sdev, mapping))
                        ssb_dma_unmap_single(bp->sdev, mapping,
                                             RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
                skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
                if (skb == NULL)
                        return -ENOMEM;
                mapping = ssb_dma_map_single(bp->sdev, skb->data,
                                             RX_PKT_BUF_SZ,
                                             DMA_FROM_DEVICE);
                if (ssb_dma_mapping_error(bp->sdev, mapping) ||
                    mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
                        if (!ssb_dma_mapping_error(bp->sdev, mapping))
                                ssb_dma_unmap_single(bp->sdev, mapping,
                                                     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        return -ENOMEM;
                }
        }

        rh = (struct rx_header *) skb->data;

        rh->len = 0;
        rh->flags = 0;

        map->skb = skb;
        map->mapping = mapping;

        if (src_map != NULL)
                src_map->skb = NULL;

        ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        dp = &bp->rx_ring[dest_idx];
        dp->ctrl = cpu_to_le32(ctrl);
        dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dp),
                                             DMA_BIDIRECTIONAL);

        return RX_PKT_BUF_SZ;
}

static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
        struct dma_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        struct rx_header *rh;
        int dest_idx;
        __le32 ctrl;

        dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
        dest_desc = &bp->rx_ring[dest_idx];
        dest_map = &bp->rx_buffers[dest_idx];
        src_desc = &bp->rx_ring[src_idx];
        src_map = &bp->rx_buffers[src_idx];

        dest_map->skb = src_map->skb;
        rh = (struct rx_header *) src_map->skb->data;
        rh->len = 0;
        rh->flags = 0;
        dest_map->mapping = src_map->mapping;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
                                          src_idx * sizeof(*src_desc),
                                          DMA_BIDIRECTIONAL);

        ctrl = src_desc->ctrl;
        if (dest_idx == (B44_RX_RING_SIZE - 1))
                ctrl |= cpu_to_le32(DESC_CTRL_EOT);
        else
                ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

        dest_desc->ctrl = ctrl;
        dest_desc->addr = src_desc->addr;

        src_map->skb = NULL;

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
                                             dest_idx * sizeof(*dest_desc),
                                             DMA_BIDIRECTIONAL);

        ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr),
                                       RX_PKT_BUF_SZ,
                                       DMA_FROM_DEVICE);
}
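
/* Receive up to `budget' packets.  Frames larger than RX_COPY_THRESHOLD
 * keep their ring buffer and a fresh one is allocated in its place;
 * smaller frames are copied into a new skb and the original buffer is
 * recycled back onto the ring.
 */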
static int b44_rx(struct b44 *bp, int budget)
{
        int received;
        u32 cons, prod;

        received = 0;
        prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
        prod /= sizeof(struct dma_desc);
        cons = bp->rx_cons;

        while (cons != prod && budget > 0) {
                struct ring_info *rp = &bp->rx_buffers[cons];
                struct sk_buff *skb = rp->skb;
                dma_addr_t map = rp->mapping;
                struct rx_header *rh;
                u16 len;

                ssb_dma_sync_single_for_cpu(bp->sdev, map,
                                            RX_PKT_BUF_SZ,
                                            DMA_FROM_DEVICE);
                rh = (struct rx_header *) skb->data;
                len = le16_to_cpu(rh->len);
                if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
                    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
                drop_it:
                        b44_recycle_rx(bp, cons, bp->rx_prod);
                drop_it_no_recycle:
                        bp->stats.rx_dropped++;
                        goto next_pkt;
                }

                if (len == 0) {
                        int i = 0;

                        do {
                                udelay(2);
                                barrier();
                                len = le16_to_cpu(rh->len);
                        } while (len == 0 && i++ < 5);
                        if (len == 0)
                                goto drop_it;
                }

                /* Omit CRC. */
                len -= 4;

                if (len > RX_COPY_THRESHOLD) {
                        int skb_size;
                        skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
                        if (skb_size < 0)
                                goto drop_it;
                        ssb_dma_unmap_single(bp->sdev, map,
                                             skb_size, DMA_FROM_DEVICE);
                        /* Leave out rx_header */
                        skb_put(skb, len + RX_PKT_OFFSET);
                        skb_pull(skb, RX_PKT_OFFSET);
                } else {
                        struct sk_buff *copy_skb;

                        b44_recycle_rx(bp, cons, bp->rx_prod);
                        copy_skb = dev_alloc_skb(len + 2);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
                        /* DMA sync done above, copy just the actual packet */
                        skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
                                                         copy_skb->data, len);
                        skb = copy_skb;
                }
                skb->ip_summed = CHECKSUM_NONE;
                skb->protocol = eth_type_trans(skb, bp->dev);
                netif_receive_skb(skb);
                received++;
                budget--;
        next_pkt:
                bp->rx_prod = (bp->rx_prod + 1) &
                        (B44_RX_RING_SIZE - 1);
                cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
        }

        bp->rx_cons = cons;
        bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

        return received;
}
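
/* NAPI poll handler: TX reclaim runs under bp->lock, RX processing runs
 * without it, and any error status forces a full restart of the rings
 * and hardware (skipping the PHY).
 */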
static int b44_poll(struct napi_struct *napi, int budget)
{
        struct b44 *bp = container_of(napi, struct b44, napi);
        int work_done;

        spin_lock_irq(&bp->lock);

        if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
                /* spin_lock(&bp->tx_lock); */
                b44_tx(bp);
                /* spin_unlock(&bp->tx_lock); */
        }
        spin_unlock_irq(&bp->lock);

        work_done = 0;
        if (bp->istat & ISTAT_RX)
                work_done += b44_rx(bp, budget);

        if (bp->istat & ISTAT_ERRORS) {
                unsigned long flags;

                spin_lock_irqsave(&bp->lock, flags);
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
                netif_wake_queue(bp->dev);
                spin_unlock_irqrestore(&bp->lock, flags);
                work_done = 0;
        }

        if (work_done < budget) {
                netif_rx_complete(napi);
                b44_enable_ints(bp);
        }

        return work_done;
}

static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        u32 istat, imask;
        int handled = 0;

        spin_lock(&bp->lock);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* The interrupt mask register controls which interrupt bits
         * will actually raise an interrupt to the CPU when set by hw/firmware,
         * but doesn't mask off the bits.
         */
        istat &= imask;
        if (istat) {
                handled = 1;

                if (unlikely(!netif_running(dev))) {
                        printk(KERN_INFO "%s: late interrupt.\n", dev->name);
                        goto irq_ack;
                }

                if (netif_rx_schedule_prep(&bp->napi)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(&bp->napi);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

irq_ack:
                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock(&bp->lock);
        return IRQ_RETVAL(handled);
}

static void b44_tx_timeout(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
               dev->name);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        netif_wake_queue(dev);
}

static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int rc = NETDEV_TX_OK;
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                goto err_out;
        }

        mapping = ssb_dma_map_single(bp->sdev, skb->data, len, DMA_TO_DEVICE);
        if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
                struct sk_buff *bounce_skb;

                /* Chip can't handle DMA to/from >1GB, use bounce buffer */
                if (!ssb_dma_mapping_error(bp->sdev, mapping))
                        ssb_dma_unmap_single(bp->sdev, mapping, len,
                                             DMA_TO_DEVICE);

                bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
                if (!bounce_skb)
                        goto err_out;

                mapping = ssb_dma_map_single(bp->sdev, bounce_skb->data,
                                             len, DMA_TO_DEVICE);
                if (ssb_dma_mapping_error(bp->sdev, mapping) || mapping + len > DMA_30BIT_MASK) {
                        if (!ssb_dma_mapping_error(bp->sdev, mapping))
                                ssb_dma_unmap_single(bp->sdev, mapping,
                                                     len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(bounce_skb);
                        goto err_out;
                }

                skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
                dev_kfree_skb_any(skb);
                skb = bounce_skb;
        }

        entry = bp->tx_prod;
        bp->tx_buffers[entry].skb = skb;
        bp->tx_buffers[entry].mapping = mapping;

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
                                             entry * sizeof(bp->tx_ring[0]),
                                             DMA_TO_DEVICE);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        dev->trans_start = jiffies;

out_unlock:
        spin_unlock_irq(&bp->lock);

        return rc;

err_out:
        rc = NETDEV_TX_BUSY;
        goto out_unlock;
}

static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
        struct b44 *bp = netdev_priv(dev);

        if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                dev->mtu = new_mtu;
                return 0;
        }

        spin_lock_irq(&bp->lock);
        b44_halt(bp);
        dev->mtu = new_mtu;
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
        struct ring_info *rp;
        int i;

        for (i = 0; i < B44_RX_RING_SIZE; i++) {
                rp = &bp->rx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                ssb_dma_unmap_single(bp->sdev, rp->mapping, RX_PKT_BUF_SZ,
                                     DMA_FROM_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }

        /* XXX needs changes once NETIF_F_SG is set... */
        for (i = 0; i < B44_TX_RING_SIZE; i++) {
                rp = &bp->tx_buffers[i];

                if (rp->skb == NULL)
                        continue;
                ssb_dma_unmap_single(bp->sdev, rp->mapping, rp->skb->len,
                                     DMA_TO_DEVICE);
                dev_kfree_skb_any(rp->skb);
                rp->skb = NULL;
        }
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
        int i;

        b44_free_rings(bp);

        memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
        memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

        if (bp->flags & B44_FLAG_RX_RING_HACK)
                ssb_dma_sync_single_for_device(bp->sdev, bp->rx_ring_dma,
                                               DMA_TABLE_BYTES,
                                               DMA_BIDIRECTIONAL);

        if (bp->flags & B44_FLAG_TX_RING_HACK)
                ssb_dma_sync_single_for_device(bp->sdev, bp->tx_ring_dma,
                                               DMA_TABLE_BYTES,
                                               DMA_TO_DEVICE);

        for (i = 0; i < bp->rx_pending; i++) {
                if (b44_alloc_rx_skb(bp, -1, i) < 0)
                        break;
        }
}

/*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
        kfree(bp->rx_buffers);
        bp->rx_buffers = NULL;
        kfree(bp->tx_buffers);
        bp->tx_buffers = NULL;
        if (bp->rx_ring) {
                if (bp->flags & B44_FLAG_RX_RING_HACK) {
                        ssb_dma_unmap_single(bp->sdev, bp->rx_ring_dma,
                                             DMA_TABLE_BYTES,
                                             DMA_BIDIRECTIONAL);
                        kfree(bp->rx_ring);
                } else
                        ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
                                                bp->rx_ring, bp->rx_ring_dma,
                                                GFP_KERNEL);
                bp->rx_ring = NULL;
                bp->flags &= ~B44_FLAG_RX_RING_HACK;
        }
        if (bp->tx_ring) {
                if (bp->flags & B44_FLAG_TX_RING_HACK) {
                        ssb_dma_unmap_single(bp->sdev, bp->tx_ring_dma,
                                             DMA_TABLE_BYTES,
                                             DMA_TO_DEVICE);
                        kfree(bp->tx_ring);
                } else
                        ssb_dma_free_consistent(bp->sdev, DMA_TABLE_BYTES,
                                                bp->tx_ring, bp->tx_ring_dma,
                                                GFP_KERNEL);
                bp->tx_ring = NULL;
                bp->flags &= ~B44_FLAG_TX_RING_HACK;
        }
}

/*
 * Must only be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
        int size;

        size = B44_RX_RING_SIZE * sizeof(struct ring_info);
        bp->rx_buffers = kzalloc(size, gfp);
        if (!bp->rx_buffers)
                goto out_err;

        size = B44_TX_RING_SIZE * sizeof(struct ring_info);
        bp->tx_buffers = kzalloc(size, gfp);
        if (!bp->tx_buffers)
                goto out_err;

        size = DMA_TABLE_BYTES;
        bp->rx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->rx_ring_dma, gfp);
        if (!bp->rx_ring) {
                /* Allocation may have failed due to ssb_dma_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *rx_ring;
                dma_addr_t rx_ring_dma;

                rx_ring = kzalloc(size, gfp);
                if (!rx_ring)
                        goto out_err;

                rx_ring_dma = ssb_dma_map_single(bp->sdev, rx_ring,
                                                 DMA_TABLE_BYTES,
                                                 DMA_BIDIRECTIONAL);

                if (ssb_dma_mapping_error(bp->sdev, rx_ring_dma) ||
                    rx_ring_dma + size > DMA_30BIT_MASK) {
                        kfree(rx_ring);
                        goto out_err;
                }

                bp->rx_ring = rx_ring;
                bp->rx_ring_dma = rx_ring_dma;
                bp->flags |= B44_FLAG_RX_RING_HACK;
        }

        bp->tx_ring = ssb_dma_alloc_consistent(bp->sdev, size, &bp->tx_ring_dma, gfp);
        if (!bp->tx_ring) {
                /* Allocation may have failed due to ssb_dma_alloc_consistent
                   insisting on use of GFP_DMA, which is more restrictive
                   than necessary...  */
                struct dma_desc *tx_ring;
                dma_addr_t tx_ring_dma;

                tx_ring = kzalloc(size, gfp);
                if (!tx_ring)
                        goto out_err;

                tx_ring_dma = ssb_dma_map_single(bp->sdev, tx_ring,
                                                 DMA_TABLE_BYTES,
                                                 DMA_TO_DEVICE);

                if (ssb_dma_mapping_error(bp->sdev, tx_ring_dma) ||
                    tx_ring_dma + size > DMA_30BIT_MASK) {
                        kfree(tx_ring);
                        goto out_err;
                }

                bp->tx_ring = tx_ring;
                bp->tx_ring_dma = tx_ring_dma;
                bp->flags |= B44_FLAG_TX_RING_HACK;
        }

        return 0;

out_err:
        b44_free_consistent(bp);
        return -ENOMEM;
}

/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}

/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
        struct ssb_device *sdev = bp->sdev;

        if (ssb_device_is_enabled(bp->sdev)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else
                ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

        ssb_device_enable(bp->sdev, 0);
        b44_clear_stats(bp);

        /*
         * Don't enable the PHY if we are doing a partial reset; we are
         * probably going to power down.
         */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

        switch (sdev->bus->bustype) {
        case SSB_BUSTYPE_SSB:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (((ssb_clockspeed(sdev->bus) + (B44_MDC_RATIO / 2)) / B44_MDC_RATIO)
                     & MDIO_CTRL_MAXF_MASK)));
                break;
        case SSB_BUSTYPE_PCI:
        case SSB_BUSTYPE_PCMCIA:
                bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                     (0x0d & MDIO_CTRL_MAXF_MASK)));
                break;
        }

        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);

                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}

/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
        b44_disable_ints(bp);
        /* reset PHY */
        b44_phy_reset(bp);
        /* power down PHY */
        printk(KERN_INFO PFX "%s: powering down PHY\n", bp->dev->name);
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
        /* now reset the chip, but without enabling the MAC&PHY
         * part of it. This has to be done _after_ we shut down the PHY */
        b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
        struct b44 *bp = netdev_priv(dev);
        struct sockaddr *addr = p;
        u32 val;

        if (netif_running(dev))
                return -EBUSY;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        spin_lock_irq(&bp->lock);

        val = br32(bp, B44_RXCONFIG);
        if (!(val & RXCONFIG_CAM_ABSENT))
                __b44_set_mac_addr(bp);

        spin_unlock_irq(&bp->lock);

        return 0;
}

/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
        u32 val;

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET) {
                b44_phy_reset(bp);
                b44_setup_phy(bp);
        }

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too.  */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                          (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
                bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                          (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
                bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

                bw32(bp, B44_DMARX_PTR, bp->rx_pending);
                bp->rx_prod = bp->rx_pending;

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}

static int b44_open(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        int err;

        err = b44_alloc_consistent(bp, GFP_KERNEL);
        if (err)
                goto out;

        napi_enable(&bp->napi);

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);

        b44_check_phy(bp);

        err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (unlikely(err < 0)) {
                napi_disable(&bp->napi);
                b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
                b44_free_rings(bp);
                b44_free_consistent(bp);
                goto out;
        }

        init_timer(&bp->timer);
        bp->timer.expires = jiffies + HZ;
        bp->timer.data = (unsigned long) bp;
        bp->timer.function = b44_timer;
        add_timer(&bp->timer);

        b44_enable_ints(bp);
        netif_start_queue(dev);
out:
        return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
        disable_irq(dev->irq);
        b44_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
        u32 i;
        u32 *pattern = (u32 *) pp;

        for (i = 0; i < bytes; i += sizeof(u32)) {
                bw32(bp, B44_FILT_ADDR, table_offset + i);
                bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
        }
}
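
/* Build a Wake-on-LAN magic packet template at `offset': six 0xff sync
 * bytes followed by repetitions of the MAC address, setting the
 * corresponding match bits in the mask along the way.  Returns the
 * pattern length minus one, which is what the WKUP_LEN register expects.
 */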
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
        int magicsync = 6;
        int k, j, len = offset;
        int ethaddr_bytes = ETH_ALEN;

        memset(ppattern + offset, 0xff, magicsync);
        for (j = 0; j < magicsync; j++)
                set_bit(len++, (unsigned long *) pmask);

        for (j = 0; j < B44_MAX_PATTERNS; j++) {
                if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
                        ethaddr_bytes = ETH_ALEN;
                else
                        ethaddr_bytes = B44_PATTERN_SIZE - len;
                if (ethaddr_bytes <= 0)
                        break;
                for (k = 0; k < ethaddr_bytes; k++) {
                        ppattern[offset + magicsync +
                                 (j * ETH_ALEN) + k] = macaddr[k];
                        set_bit(len++, (unsigned long *) pmask);
                }
        }
        return len - 1;
}

/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
        u32 val;
        int plen0, plen1, plen2;
        u8 *pwol_pattern;
        u8 pwol_mask[B44_PMASK_SIZE];

        pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
        if (!pwol_pattern) {
                printk(KERN_ERR PFX "Memory not available for WOL\n");
                return;
        }

        /* Ipv4 magic packet pattern - pattern 0. */
        memset(pwol_mask, 0, B44_PMASK_SIZE);
        plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                                  B44_ETHIPV4UDP_HLEN);

        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

        /* Raw ethernet II magic packet pattern - pattern 1 */
        memset(pwol_pattern, 0, B44_PATTERN_SIZE);
        memset(pwol_mask, 0, B44_PMASK_SIZE);
        plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                                  ETH_HLEN);

        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
                       B44_PATTERN_BASE + B44_PATTERN_SIZE);
        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
                       B44_PMASK_BASE + B44_PMASK_SIZE);

        /* Ipv6 magic packet pattern - pattern 2 */
        memset(pwol_pattern, 0, B44_PATTERN_SIZE);
        memset(pwol_mask, 0, B44_PMASK_SIZE);
        plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
                                  B44_ETHIPV6UDP_HLEN);

        bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
                       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
        bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
                       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

        kfree(pwol_pattern);

        /* set these pattern's lengths: one less than each real length */
        val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
        bw32(bp, B44_WKUP_LEN, val);

        /* enable wakeup pattern matching */
        val = br32(bp, B44_DEVCTRL);
        bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}

#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
        u16 val;

        if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
                bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
                pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
                pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
        }
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */
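
/* Two WOL flavours: B0-and-later chips can match magic packets directly
 * in hardware (DEVCTRL_MPM) given the station address, while older
 * revisions fall back to the pseudo-magic pattern filters built above.
 */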
static void b44_setup_wol(struct b44 *bp)
{
        u32 val;

        bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

        if (bp->flags & B44_FLAG_B0_ANDLATER) {
                bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

                val = bp->dev->dev_addr[2] << 24 |
                      bp->dev->dev_addr[3] << 16 |
                      bp->dev->dev_addr[4] << 8 |
                      bp->dev->dev_addr[5];
                bw32(bp, B44_ADDR_LO, val);

                val = bp->dev->dev_addr[0] << 8 |
                      bp->dev->dev_addr[1];
                bw32(bp, B44_ADDR_HI, val);

                val = br32(bp, B44_DEVCTRL);
                bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
        } else {
                b44_setup_pseudo_magicp(bp);
        }
        b44_setup_wol_pci(bp);
}

static int b44_close(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        netif_stop_queue(dev);

        napi_disable(&bp->napi);

        del_timer_sync(&bp->timer);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        b44_free_rings(bp);
        netif_carrier_off(dev);

        spin_unlock_irq(&bp->lock);

        free_irq(dev->irq, dev);

        if (bp->flags & B44_FLAG_WOL_ENABLE) {
                b44_init_hw(bp, B44_PARTIAL_RESET);
                b44_setup_wol(bp);
        }

        b44_free_consistent(bp);

        return 0;
}

static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        struct net_device_stats *nstat = &bp->stats;
        struct b44_hw_stats *hwstat = &bp->hw_stats;

        /* Convert HW stats into netdevice stats. */
        nstat->rx_packets = hwstat->rx_pkts;
        nstat->tx_packets = hwstat->tx_pkts;
        nstat->rx_bytes   = hwstat->rx_octets;
        nstat->tx_bytes   = hwstat->tx_octets;
        nstat->tx_errors  = (hwstat->tx_jabber_pkts +
                             hwstat->tx_oversize_pkts +
                             hwstat->tx_underruns +
                             hwstat->tx_excessive_cols +
                             hwstat->tx_late_cols);
        nstat->multicast  = hwstat->tx_multicast_pkts;
        nstat->collisions = hwstat->tx_total_cols;

        nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
                                   hwstat->rx_undersize);
        nstat->rx_over_errors   = hwstat->rx_missed_pkts;
        nstat->rx_frame_errors  = hwstat->rx_align_errs;
        nstat->rx_crc_errors    = hwstat->rx_crc_errs;
        nstat->rx_errors        = (hwstat->rx_jabber_pkts +
                                   hwstat->rx_oversize_pkts +
                                   hwstat->rx_missed_pkts +
                                   hwstat->rx_crc_align_errs +
                                   hwstat->rx_undersize +
                                   hwstat->rx_crc_errs +
                                   hwstat->rx_align_errs +
                                   hwstat->rx_symbol_errs);

        nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
        /* Carrier lost counter seems to be broken for some devices */
        nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

        return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
        struct dev_mc_list *mclist;
        int i, num_ents;

        num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
        mclist = dev->mc_list;
        for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
                __b44_cam_write(bp, mclist->dmi_addr, i + 1);
        }
        return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        u32 val;

        val = br32(bp, B44_RXCONFIG);
        val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
        if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
                val |= RXCONFIG_PROMISC;
                bw32(bp, B44_RXCONFIG, val);
        } else {
                unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
                int i = 1;

                __b44_set_mac_addr(bp);

                if ((dev->flags & IFF_ALLMULTI) ||
                    (dev->mc_count > B44_MCAST_TABLE_SIZE))
                        val |= RXCONFIG_ALLMULTI;
                else
                        i = __b44_load_mcast(bp, dev);

                for (; i < 64; i++)
                        __b44_cam_write(bp, zero, i);

                bw32(bp, B44_RXCONFIG, val);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}

static void b44_set_rx_mode(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);

        spin_lock_irq(&bp->lock);
        __b44_set_rx_mode(dev);
        spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
        struct b44 *bp = netdev_priv(dev);
        bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct b44 *bp = netdev_priv(dev);
        struct ssb_bus *bus = bp->sdev->bus;

        strncpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
        strncpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
        switch (bus->bustype) {
        case SSB_BUSTYPE_PCI:
                strncpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
                break;
        case SSB_BUSTYPE_PCMCIA:
        case SSB_BUSTYPE_SSB:
                strncpy(info->bus_info, "SSB", sizeof(info->bus_info));
                break;
        }
}

static int b44_nway_reset(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        u32 bmcr;
        int r;

        spin_lock_irq(&bp->lock);
        b44_readphy(bp, MII_BMCR, &bmcr);
        b44_readphy(bp, MII_BMCR, &bmcr);
        r = -EINVAL;
        if (bmcr & BMCR_ANENABLE) {
                b44_writephy(bp, MII_BMCR,
                             bmcr | BMCR_ANRESTART);
                r = 0;
        }
        spin_unlock_irq(&bp->lock);

        return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct b44 *bp = netdev_priv(dev);

        cmd->supported = (SUPPORTED_Autoneg);
        cmd->supported |= (SUPPORTED_100baseT_Half |
                           SUPPORTED_100baseT_Full |
                           SUPPORTED_10baseT_Half |
                           SUPPORTED_10baseT_Full |
                           SUPPORTED_MII);

        cmd->advertising = 0;
        if (bp->flags & B44_FLAG_ADV_10HALF)
                cmd->advertising |= ADVERTISED_10baseT_Half;
        if (bp->flags & B44_FLAG_ADV_10FULL)
                cmd->advertising |= ADVERTISED_10baseT_Full;
        if (bp->flags & B44_FLAG_ADV_100HALF)
                cmd->advertising |= ADVERTISED_100baseT_Half;
        if (bp->flags & B44_FLAG_ADV_100FULL)
                cmd->advertising |= ADVERTISED_100baseT_Full;
        cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
        cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
                SPEED_100 : SPEED_10;
        cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
                DUPLEX_FULL : DUPLEX_HALF;
        cmd->port = 0;
        cmd->phy_address = bp->phy_addr;
        cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
                XCVR_INTERNAL : XCVR_EXTERNAL;
        cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
                AUTONEG_DISABLE : AUTONEG_ENABLE;
        if (cmd->autoneg == AUTONEG_ENABLE)
                cmd->advertising |= ADVERTISED_Autoneg;
        if (!netif_running(dev)) {
                cmd->speed = 0;
                cmd->duplex = 0xff;
        }
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct b44 *bp = netdev_priv(dev);

        /* We do not support gigabit. */
        if (cmd->autoneg == AUTONEG_ENABLE) {
                if (cmd->advertising &
                    (ADVERTISED_1000baseT_Half |
                     ADVERTISED_1000baseT_Full))
                        return -EINVAL;
        } else if ((cmd->speed != SPEED_100 &&
                    cmd->speed != SPEED_10) ||
                   (cmd->duplex != DUPLEX_HALF &&
                    cmd->duplex != DUPLEX_FULL)) {
                return -EINVAL;
        }

        spin_lock_irq(&bp->lock);

        if (cmd->autoneg == AUTONEG_ENABLE) {
                bp->flags &= ~(B44_FLAG_FORCE_LINK |
                               B44_FLAG_100_BASE_T |
                               B44_FLAG_FULL_DUPLEX |
                               B44_FLAG_ADV_10HALF |
                               B44_FLAG_ADV_10FULL |
                               B44_FLAG_ADV_100HALF |
                               B44_FLAG_ADV_100FULL);
                if (cmd->advertising == 0) {
                        bp->flags |= (B44_FLAG_ADV_10HALF |
                                      B44_FLAG_ADV_10FULL |
                                      B44_FLAG_ADV_100HALF |
                                      B44_FLAG_ADV_100FULL);
                } else {
                        if (cmd->advertising & ADVERTISED_10baseT_Half)
                                bp->flags |= B44_FLAG_ADV_10HALF;
                        if (cmd->advertising & ADVERTISED_10baseT_Full)
                                bp->flags |= B44_FLAG_ADV_10FULL;
                        if (cmd->advertising & ADVERTISED_100baseT_Half)
                                bp->flags |= B44_FLAG_ADV_100HALF;
                        if (cmd->advertising & ADVERTISED_100baseT_Full)
                                bp->flags |= B44_FLAG_ADV_100FULL;
                }
        } else {
                bp->flags |= B44_FLAG_FORCE_LINK;
                bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
                if (cmd->speed == SPEED_100)
                        bp->flags |= B44_FLAG_100_BASE_T;
                if (cmd->duplex == DUPLEX_FULL)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
        }

        if (netif_running(dev))
                b44_setup_phy(bp);

        spin_unlock_irq(&bp->lock);

        return 0;
}

static void b44_get_ringparam(struct net_device *dev,
                              struct ethtool_ringparam *ering)
{
        struct b44 *bp = netdev_priv(dev);

        ering->rx_max_pending = B44_RX_RING_SIZE - 1;
        ering->rx_pending = bp->rx_pending;

        /* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
                             struct ethtool_ringparam *ering)
{
        struct b44 *bp = netdev_priv(dev);

        if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
            (ering->rx_mini_pending != 0) ||
            (ering->rx_jumbo_pending != 0) ||
            (ering->tx_pending > B44_TX_RING_SIZE - 1))
                return -EINVAL;

        spin_lock_irq(&bp->lock);

        bp->rx_pending = ering->rx_pending;
        bp->tx_pending = ering->tx_pending;

        b44_halt(bp);
        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        netif_wake_queue(bp->dev);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
                               struct ethtool_pauseparam *epause)
{
        struct b44 *bp = netdev_priv(dev);

        epause->autoneg =
                (bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
        epause->rx_pause =
                (bp->flags & B44_FLAG_RX_PAUSE) != 0;
        epause->tx_pause =
                (bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
                              struct ethtool_pauseparam *epause)
{
        struct b44 *bp = netdev_priv(dev);

        spin_lock_irq(&bp->lock);
        if (epause->autoneg)
                bp->flags |= B44_FLAG_PAUSE_AUTO;
        else
                bp->flags &= ~B44_FLAG_PAUSE_AUTO;
        if (epause->rx_pause)
                bp->flags |= B44_FLAG_RX_PAUSE;
        else
                bp->flags &= ~B44_FLAG_RX_PAUSE;
        if (epause->tx_pause)
                bp->flags |= B44_FLAG_TX_PAUSE;
        else
                bp->flags &= ~B44_FLAG_TX_PAUSE;
        if (bp->flags & B44_FLAG_PAUSE_AUTO) {
                b44_halt(bp);
                b44_init_rings(bp);
                b44_init_hw(bp, B44_FULL_RESET);
        } else {
                __b44_set_flow_ctrl(bp, bp->flags);
        }
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);

        return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        switch (stringset) {
        case ETH_SS_STATS:
                memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
                break;
        }
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(b44_gstrings);
        default:
                return -EOPNOTSUPP;
        }
}

static void b44_get_ethtool_stats(struct net_device *dev,
                                  struct ethtool_stats *stats, u64 *data)
{
        struct b44 *bp = netdev_priv(dev);
        u32 *val = &bp->hw_stats.tx_good_octets;
        u32 i;

        spin_lock_irq(&bp->lock);

        b44_stats_update(bp);

        for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
                *data++ = *val++;

        spin_unlock_irq(&bp->lock);
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct b44 *bp = netdev_priv(dev);

        wol->supported = WAKE_MAGIC;
        if (bp->flags & B44_FLAG_WOL_ENABLE)
                wol->wolopts = WAKE_MAGIC;
        else
                wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct b44 *bp = netdev_priv(dev);

        spin_lock_irq(&bp->lock);
        if (wol->wolopts & WAKE_MAGIC)
                bp->flags |= B44_FLAG_WOL_ENABLE;
        else
                bp->flags &= ~B44_FLAG_WOL_ENABLE;
        spin_unlock_irq(&bp->lock);

        return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
        .get_drvinfo            = b44_get_drvinfo,
        .get_settings           = b44_get_settings,
        .set_settings           = b44_set_settings,
        .nway_reset             = b44_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_wol                = b44_get_wol,
        .set_wol                = b44_set_wol,
        .get_ringparam          = b44_get_ringparam,
        .set_ringparam          = b44_set_ringparam,
        .get_pauseparam         = b44_get_pauseparam,
        .set_pauseparam         = b44_set_pauseparam,
        .get_msglevel           = b44_get_msglevel,
        .set_msglevel           = b44_set_msglevel,
        .get_strings            = b44_get_strings,
        .get_sset_count         = b44_get_sset_count,
        .get_ethtool_stats      = b44_get_ethtool_stats,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct b44 *bp = netdev_priv(dev);
        int err = -EINVAL;

        if (!netif_running(dev))
                goto out;

        spin_lock_irq(&bp->lock);
        err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
        spin_unlock_irq(&bp->lock);
out:
        return err;
}

static int __devinit b44_get_invariants(struct b44 *bp)
{
        struct ssb_device *sdev = bp->sdev;
        int err = 0;
        u8 *addr;

        bp->dma_offset = ssb_dma_translation(sdev);

        if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
            instance > 1) {
                addr = sdev->bus->sprom.et1mac;
                bp->phy_addr = sdev->bus->sprom.et1phyaddr;
        } else {
                addr = sdev->bus->sprom.et0mac;
                bp->phy_addr = sdev->bus->sprom.et0phyaddr;
        }
        /* Some ROMs have buggy PHY addresses with the high
         * bits set (sign extension?). Truncate them to a
         * valid PHY address. */
        bp->phy_addr &= 0x1F;

        memcpy(bp->dev->dev_addr, addr, 6);

        if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
                printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
                return -EINVAL;
        }

        memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

        bp->imask = IMASK_DEF;

        /* XXX - really required?
           bp->flags |= B44_FLAG_BUGGY_TXPTR;
        */

        if (bp->sdev->id.revision >= 7)
                bp->flags |= B44_FLAG_B0_ANDLATER;

        return err;
}

static const struct net_device_ops b44_netdev_ops = {
        .ndo_open               = b44_open,
        .ndo_stop               = b44_close,
        .ndo_start_xmit         = b44_start_xmit,
        .ndo_get_stats          = b44_get_stats,
        .ndo_set_multicast_list = b44_set_rx_mode,
        .ndo_set_mac_address    = b44_set_mac_addr,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = b44_ioctl,
        .ndo_tx_timeout         = b44_tx_timeout,
        .ndo_change_mtu         = b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = b44_poll_controller,
#endif
};

static int __devinit b44_init_one(struct ssb_device *sdev,
                                  const struct ssb_device_id *ent)
{
        static int b44_version_printed = 0;
        struct net_device *dev;
        struct b44 *bp;
        int err;

        instance++;

        if (b44_version_printed++ == 0)
                printk(KERN_INFO "%s", version);

        dev = alloc_etherdev(sizeof(*bp));
        if (!dev) {
                dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
                err = -ENOMEM;
                goto out;
        }

        SET_NETDEV_DEV(dev, sdev->dev);

        /* No interesting netdevice features in this card... */
        dev->features |= 0;

        bp = netdev_priv(dev);
        bp->sdev = sdev;
        bp->dev = dev;

        bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

        spin_lock_init(&bp->lock);

        bp->rx_pending = B44_DEF_RX_RING_PENDING;
        bp->tx_pending = B44_DEF_TX_RING_PENDING;

        dev->netdev_ops = &b44_netdev_ops;
        netif_napi_add(dev, &bp->napi, b44_poll, 64);
        dev->watchdog_timeo = B44_TX_TIMEOUT;
        dev->irq = sdev->irq;
        SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

        netif_carrier_off(dev);

        err = ssb_bus_powerup(sdev->bus, 0);
        if (err) {
                dev_err(sdev->dev,
                        "Failed to powerup the bus\n");
                goto err_out_free_dev;
        }
        err = ssb_dma_set_mask(sdev, DMA_30BIT_MASK);
        if (err) {
                dev_err(sdev->dev,
                        "Required 30BIT DMA mask unsupported by the system.\n");
                goto err_out_powerdown;
        }
        err = b44_get_invariants(bp);
        if (err) {
                dev_err(sdev->dev,
                        "Problem fetching invariants of chip, aborting.\n");
                goto err_out_powerdown;
        }

        bp->mii_if.dev = dev;
        bp->mii_if.mdio_read = b44_mii_read;
        bp->mii_if.mdio_write = b44_mii_write;
        bp->mii_if.phy_id = bp->phy_addr;
        bp->mii_if.phy_id_mask = 0x1f;
        bp->mii_if.reg_num_mask = 0x1f;

        /* By default, advertise all speed/duplex settings. */
        bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
                      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

        /* By default, auto-negotiate PAUSE. */
        bp->flags |= B44_FLAG_PAUSE_AUTO;

        err = register_netdev(dev);
        if (err) {
                dev_err(sdev->dev, "Cannot register net device, aborting.\n");
                goto err_out_powerdown;
        }

        ssb_set_drvdata(sdev, dev);

        /* Chip reset provides power to the b44 MAC & PCI cores, which
         * is necessary for MAC register access.
         */
        b44_chip_reset(bp, B44_CHIP_RESET_FULL);

        printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
               dev->name, dev->dev_addr);

        return 0;

err_out_powerdown:
        ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
        free_netdev(dev);

out:
        return err;
}

static void __devexit b44_remove_one(struct ssb_device *sdev)
{
        struct net_device *dev = ssb_get_drvdata(sdev);

        unregister_netdev(dev);
        ssb_bus_may_powerdown(sdev->bus);
        free_netdev(dev);
        ssb_pcihost_set_power_state(sdev, PCI_D3hot);
        ssb_set_drvdata(sdev, NULL);
}

static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
        struct net_device *dev = ssb_get_drvdata(sdev);
        struct b44 *bp = netdev_priv(dev);

        if (!netif_running(dev))
                return 0;

        del_timer_sync(&bp->timer);

        spin_lock_irq(&bp->lock);

        b44_halt(bp);
        netif_carrier_off(bp->dev);
        netif_device_detach(bp->dev);
        b44_free_rings(bp);

        spin_unlock_irq(&bp->lock);

        free_irq(dev->irq, dev);
        if (bp->flags & B44_FLAG_WOL_ENABLE) {
                b44_init_hw(bp, B44_PARTIAL_RESET);
                b44_setup_wol(bp);
        }

        ssb_pcihost_set_power_state(sdev, PCI_D3hot);
        return 0;
}

static int b44_resume(struct ssb_device *sdev)
{
        struct net_device *dev = ssb_get_drvdata(sdev);
        struct b44 *bp = netdev_priv(dev);
        int rc = 0;

        rc = ssb_bus_powerup(sdev->bus, 0);
        if (rc) {
                dev_err(sdev->dev,
                        "Failed to powerup the bus\n");
                return rc;
        }

        if (!netif_running(dev))
                return 0;

        rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
        if (rc) {
                printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
                return rc;
        }

        spin_lock_irq(&bp->lock);

        b44_init_rings(bp);
        b44_init_hw(bp, B44_FULL_RESET);
        netif_device_attach(bp->dev);
        spin_unlock_irq(&bp->lock);

        b44_enable_ints(bp);
        netif_wake_queue(dev);

        mod_timer(&bp->timer, jiffies + 1);

        return 0;
}

static struct ssb_driver b44_ssb_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = b44_ssb_tbl,
        .probe          = b44_init_one,
        .remove         = __devexit_p(b44_remove_one),
        .suspend        = b44_suspend,
        .resume         = b44_resume,
};

static inline int b44_pci_init(void)
{
        int err = 0;
#ifdef CONFIG_B44_PCI
        err = ssb_pcihost_register(&b44_pci_driver);
#endif
        return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
        ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

static int __init b44_init(void)
{
        unsigned int dma_desc_align_size = dma_get_cache_alignment();
        int err;

        /* Setup parameters for syncing RX/TX DMA descriptors */
        dma_desc_align_mask = ~(dma_desc_align_size - 1);
        dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

        err = b44_pci_init();
        if (err)
                return err;
        err = ssb_driver_register(&b44_ssb_driver);
        if (err)
                b44_pci_exit();
        return err;
}

static void __exit b44_cleanup(void)
{
        ssb_driver_unregister(&b44_ssb_driver);
        b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);