/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
 * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
 * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
 * Copyright (C) 2006 Broadcom Corporation.
 * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
 *
 * Distribute under GPL.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/ssb/ssb.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/irq.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define DRV_MODULE_VERSION	"2.0"

#define B44_DEF_MSG_ENABLE	  \
    (NETIF_MSG_DRV		| \
     NETIF_MSG_PROBE	| \
     NETIF_MSG_LINK		| \
     NETIF_MSG_TIMER	| \
     NETIF_MSG_IFDOWN	| \
     NETIF_MSG_IFUP		| \
     NETIF_MSG_RX_ERR	| \
     NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
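/*
 * The TX ring is a circular buffer: tx_prod is where the driver queues
 * new descriptors and tx_cons is where the hardware has finished with
 * them.  TX_BUFFS_AVAIL() counts free slots in both the wrapped and
 * unwrapped cases, and TX_RING_GAP() is the slack between the
 * configured tx_pending limit and the physical ring size.
 */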
#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)

/* b44 internal pattern match filter info */
#define B44_PATTERN_BASE	0x400
#define B44_PATTERN_SIZE	0x80
#define B44_PMASK_BASE		0x600
#define B44_PMASK_SIZE		0x10
#define B44_MAX_PATTERNS	16
#define B44_ETHIPV6UDP_HLEN	62
#define B44_ETHIPV4UDP_HLEN	42

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";

MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
module_param(b44_debug, int, 0);
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
#ifdef CONFIG_B44_PCI
static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
	{ 0 } /* terminate list with empty entry */
};
MODULE_DEVICE_TABLE(pci, b44_pci_tbl);

static struct pci_driver b44_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
};
#endif /* CONFIG_B44_PCI */

static const struct ssb_device_id b44_ssb_tbl[] = {
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
	SSB_DEVTABLE_END
};
MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);

#define B44_FULL_RESET		1
#define B44_FULL_RESET_SKIP_PHY	2
#define B44_PARTIAL_RESET	3
#define B44_CHIP_RESET_FULL	4
#define B44_CHIP_RESET_PARTIAL	5
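/*
 * Reset kinds: FULL reinitializes the PHY as well, FULL_RESET_SKIP_PHY
 * leaves PHY state alone (used by the error-recovery paths), and
 * PARTIAL is used on the way into a powered-down or wake-on-LAN state.
 * The CHIP_RESET_* values select the same split for b44_chip_reset().
 */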
static void b44_init_hw(struct b44 *, int);

static int dma_desc_sync_size;
static int instance;

static const char b44_gstrings[][ETH_GSTRING_LEN] = {
#define _B44(x...)	# x,
B44_STAT_REG_DECLARE
#undef _B44
};
static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
						dma_addr_t dma_base,
						unsigned long offset,
						enum dma_data_direction dir)
{
	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
				   dma_desc_sync_size, dir);
}

static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
					     dma_addr_t dma_base,
					     unsigned long offset,
					     enum dma_data_direction dir)
{
	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
				dma_desc_sync_size, dir);
}

static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
{
	return ssb_read32(bp->sdev, reg);
}

static inline void bw32(const struct b44 *bp,
			unsigned long reg, unsigned long val)
{
	ssb_write32(bp->sdev, reg, val);
}
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		if (net_ratelimit())
			netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
				   bit, reg, clear ? "clear" : "set");
		return -ENODEV;
	}
	return 0;
}
static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
			    (index << CAM_CTRL_INDEX_SHIFT)));

	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);

	val = br32(bp, B44_CAM_DATA_LO);

	data[2] = (val >> 24) & 0xFF;
	data[3] = (val >> 16) & 0xFF;
	data[4] = (val >> 8) & 0xFF;
	data[5] = (val >> 0) & 0xFF;

	val = br32(bp, B44_CAM_DATA_HI);

	data[0] = (val >> 8) & 0xFF;
	data[1] = (val >> 0) & 0xFF;
}

static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) << 8;
	val |= ((u32) data[5]) << 0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(bp, B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(bp, B44_IMASK, bp->imask);
}
static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}

static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
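/*
 * The wrappers below turn PHY access into a no-op that reports success
 * when phy_addr is B44_PHY_ADDR_NO_PHY, so callers never have to
 * special-case cores that have no MII-addressable PHY attached.
 */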
static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_readphy(bp, bp->phy_addr, reg, val);
}

static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;

	return __b44_writephy(bp, bp->phy_addr, reg, val);
}

/* miilib interface */
static int b44_mii_read(struct net_device *dev, int phy_id, int location)
{
	u32 val;
	struct b44 *bp = netdev_priv(dev);
	int rc = __b44_readphy(bp, phy_id, location, &val);
	if (rc)
		return 0xffffffff;
	return val;
}

static void b44_mii_write(struct net_device *dev, int phy_id, int location,
			  int val)
{
	struct b44 *bp = netdev_priv(dev);
	__b44_writephy(bp, phy_id, location, val);
}
static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (!err) {
		if (val & BMCR_RESET) {
			netdev_err(bp->dev, "PHY Reset would not complete\n");
			err = -ENODEV;
		}
	}

	return err;
}
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}

static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = 0;

	/* The driver supports only rx pause by default because
	   the b44 mac tx pause mechanism generates excessive
	   pause frames.
	   Use ethtool to turn on b44 tx pause if necessary.
	 */
	if ((local & ADVERTISE_PAUSE_CAP) &&
	    (local & ADVERTISE_PAUSE_ASYM)) {
		if ((remote & LPA_PAUSE_ASYM) &&
		    !(remote & LPA_PAUSE_CAP))
			pause_enab |= B44_FLAG_RX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
#ifdef SSB_DRIVER_MIPS
extern char *nvram_get(char *name);
static void b44_wap54g10_workaround(struct b44 *bp)
{
	const char *str;
	u32 val;
	int err;

	/*
	 * workaround for bad hardware design in Linksys WAP54G v1.0
	 * see https://dev.openwrt.org/ticket/146
	 * check and reset bit "isolate"
	 */
	str = nvram_get("boardnum");
	if (!str)
		return;
	if (simple_strtoul(str, NULL, 0) == 2) {
		err = __b44_readphy(bp, 0, MII_BMCR, &val);
		if (err)
			goto error;
		if (!(val & BMCR_ISOLATE))
			return;
		val &= ~BMCR_ISOLATE;
		err = __b44_writephy(bp, 0, MII_BMCR, val);
		if (err)
			goto error;
	}
	return;
error:
	pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
{
}
#endif
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	b44_wap54g10_workaround(bp);

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
		return 0;
	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}

	/* Pad */
	reg += 8*4UL;

	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(bp, reg);
	}
}

static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		netdev_info(bp->dev, "Link is down\n");
	} else {
		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
		bp->flags |= B44_FLAG_100_BASE_T;
		bp->flags |= B44_FLAG_FULL_DUPLEX;
		if (!netif_carrier_ok(bp->dev)) {
			u32 val = br32(bp, B44_TX_CTRL);
			val |= TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		}
		return;
	}

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
		if (bmsr & BMSR_JCD)
			netdev_warn(bp->dev, "Jabber detected in PHY\n");
	}
}
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
}

static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		BUG_ON(skb == NULL);

		dma_unmap_single(bp->sdev->dma_dev,
				 rp->mapping,
				 skb->len,
				 DMA_TO_DEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}
/* Works like this.  This chip writes a "struct rx_header" 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
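/* Resulting buffer layout (RX_PKT_OFFSET = RX_HEADER_LEN + 2):
 *
 *   skb->data: [ struct rx_header | 2 byte pad | packet data ... ]
 *                                                ^ chip DMAs the frame
 *                                                  at +RX_PKT_OFFSET
 */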
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
				 RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);

	/* Hardware bug work-around, the chip is unable to do PCI DMA
	   to/from anything above 1GB :-( */
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
	    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
		/* Sigh... */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping,
					 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
		if (skb == NULL)
			return -ENOMEM;
		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
					 RX_PKT_BUF_SZ,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			return -ENOMEM;
		}
		bp->force_copybreak = 1;
	}

	rh = (struct rx_header *) skb->data;

	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	map->mapping = mapping;

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dp),
					     DMA_BIDIRECTIONAL);

	return RX_PKT_BUF_SZ;
}
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	__le32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *) src_map->skb->data;
	rh->len = 0;
	rh->flags = 0;
	dest_map->mapping = src_map->mapping;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
					  src_idx * sizeof(*src_desc),
					  DMA_BIDIRECTIONAL);

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;

	src_map->skb = NULL;

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
					     dest_idx * sizeof(*dest_desc),
					     DMA_BIDIRECTIONAL);

	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
				   RX_PKT_BUF_SZ,
				   DMA_FROM_DEVICE);
}
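/*
 * NAPI receive processing.  Frames longer than RX_COPY_THRESHOLD hand
 * the whole ring buffer up the stack and a fresh buffer is allocated
 * into the slot; shorter frames (or every frame once force_copybreak
 * is set by the GFP_DMA fallback above) are copied into a small skb
 * and the original buffer is recycled in place.
 */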
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = rp->mapping;
		struct rx_header *rh;
		u16 len;

		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
					RX_PKT_BUF_SZ,
					DMA_FROM_DEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->dev->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			dma_unmap_single(bp->sdev->dma_dev, map,
					 skb_size, DMA_FROM_DEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + RX_PKT_OFFSET);
			skb_pull(skb, RX_PKT_OFFSET);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
							 copy_skb->data, len);
			skb = copy_skb;
		}
		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
static int b44_poll(struct napi_struct *napi, int budget)
{
	struct b44 *bp = container_of(napi, struct b44, napi);
	int work_done;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}
	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
		bp->istat &= ~ISTAT_RFO;
		b44_disable_ints(bp);
		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	work_done = 0;
	if (bp->istat & ISTAT_RX)
		work_done += b44_rx(bp, budget);

	if (bp->istat & ISTAT_ERRORS) {
		spin_lock_irqsave(&bp->lock, flags);
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
		netif_wake_queue(bp->dev);
		spin_unlock_irqrestore(&bp->lock, flags);
		work_done = 0;
	}

	if (work_done < budget) {
		napi_complete(napi);
		b44_enable_ints(bp);
	}

	return work_done;
}
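/*
 * Top-half interrupt handler: latch the interrupt status into bp->istat,
 * mask further interrupts, and defer the real work to NAPI (b44_poll).
 */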
static irqreturn_t b44_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	u32 istat, imask;
	int handled = 0;

	spin_lock(&bp->lock);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* The interrupt mask register controls which interrupt bits
	 * will actually raise an interrupt to the CPU when set by hw/firmware,
	 * but doesn't mask off the bits.
	 */
	istat &= imask;
	if (istat) {
		handled = 1;

		if (unlikely(!netif_running(dev))) {
			netdev_info(dev, "late interrupt\n");
			goto irq_ack;
		}

		if (napi_schedule_prep(&bp->napi)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__napi_schedule(&bp->napi);
		}

irq_ack:
		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock(&bp->lock);
	return IRQ_RETVAL(handled);
}
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netdev_err(dev, "transmit timed out, resetting\n");

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}
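/*
 * Hard transmit path.  Because the chip can only address the low 1GB
 * (see the 30-bit mask checks below), any skb that maps above that
 * window is copied into a GFP_DMA bounce skb before being posted.
 */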
static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int rc = NETDEV_TX_OK;
	dma_addr_t mapping;
	u32 len, entry, ctrl;
	unsigned long flags;

	len = skb->len;
	spin_lock_irqsave(&bp->lock, flags);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		goto err_out;
	}

	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
		struct sk_buff *bounce_skb;

		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
					 DMA_TO_DEVICE);

		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb)
			goto err_out;

		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
					 len, DMA_TO_DEVICE);
		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
				dma_unmap_single(bp->sdev->dma_dev, mapping,
						 len, DMA_TO_DEVICE);
			dev_kfree_skb_any(bounce_skb);
			goto err_out;
		}

		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
	}

	entry = bp->tx_prod;
	bp->tx_buffers[entry].skb = skb;
	bp->tx_buffers[entry].mapping = mapping;

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
					     entry * sizeof(bp->tx_ring[0]),
					     DMA_TO_DEVICE);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

out_unlock:
	spin_unlock_irqrestore(&bp->lock, flags);

	return rc;

err_out:
	rc = NETDEV_TX_BUSY;
	goto out_unlock;
}
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = netdev_priv(dev);

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}

/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	if (bp->flags & B44_FLAG_RX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);

	if (bp->flags & B44_FLAG_TX_RING_HACK)
		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
					   DMA_TABLE_BYTES, DMA_TO_DEVICE);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	kfree(bp->rx_buffers);
	bp->rx_buffers = NULL;
	kfree(bp->tx_buffers);
	bp->tx_buffers = NULL;
	if (bp->rx_ring) {
		if (bp->flags & B44_FLAG_RX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
			kfree(bp->rx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
		bp->flags &= ~B44_FLAG_RX_RING_HACK;
	}
	if (bp->tx_ring) {
		if (bp->flags & B44_FLAG_TX_RING_HACK) {
			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
			kfree(bp->tx_ring);
		} else
			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
					  bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
		bp->flags &= ~B44_FLAG_TX_RING_HACK;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down. Can sleep.
 */
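/*
 * Descriptor ring allocation.  If dma_alloc_coherent() fails (possibly
 * because it insists on GFP_DMA), fall back to a kmalloc'd ring that is
 * streaming-mapped by hand; the *_RING_HACK flags mark rings allocated
 * this way, which then need explicit dma_sync_* calls around every
 * descriptor access.
 */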
static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
{
	int size;

	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kzalloc(size, gfp);
	if (!bp->rx_buffers)
		goto out_err;

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kzalloc(size, gfp);
	if (!bp->tx_buffers)
		goto out_err;

	size = DMA_TABLE_BYTES;
	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->rx_ring_dma, gfp);
	if (!bp->rx_ring) {
		/* Allocation may have failed due to pci_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *rx_ring;
		dma_addr_t rx_ring_dma;

		rx_ring = kzalloc(size, gfp);
		if (!rx_ring)
			goto out_err;

		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
					     DMA_TABLE_BYTES,
					     DMA_BIDIRECTIONAL);

		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
		    rx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(rx_ring);
			goto out_err;
		}

		bp->rx_ring = rx_ring;
		bp->rx_ring_dma = rx_ring_dma;
		bp->flags |= B44_FLAG_RX_RING_HACK;
	}

	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
					 &bp->tx_ring_dma, gfp);
	if (!bp->tx_ring) {
		/* Allocation may have failed due to ssb_dma_alloc_consistent
		   insisting on use of GFP_DMA, which is more restrictive
		   than necessary...  */
		struct dma_desc *tx_ring;
		dma_addr_t tx_ring_dma;

		tx_ring = kzalloc(size, gfp);
		if (!tx_ring)
			goto out_err;

		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
					     DMA_TABLE_BYTES,
					     DMA_TO_DEVICE);

		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
		    tx_ring_dma + size > DMA_BIT_MASK(30)) {
			kfree(tx_ring);
			goto out_err;
		}

		bp->tx_ring = tx_ring;
		bp->tx_ring_dma = tx_ring_dma;
		bp->flags |= B44_FLAG_TX_RING_HACK;
	}

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}
/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}
/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp, int reset_kind)
{
	struct ssb_device *sdev = bp->sdev;
	bool was_enabled;

	was_enabled = ssb_device_is_enabled(bp->sdev);

	ssb_device_enable(bp->sdev, 0);
	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);

	if (was_enabled) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	}

	b44_clear_stats(bp);

	/*
	 * Don't enable the PHY if we are doing a partial reset;
	 * we are probably going to power down.
	 */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	switch (sdev->bus->bustype) {
	case SSB_BUSTYPE_SSB:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
					B44_MDC_RATIO)
		     & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCI:
		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
		     (0x0d & MDIO_CTRL_MAXF_MASK)));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}

	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	/* reset PHY */
	b44_phy_reset(bp);
	/* power down PHY */
	netdev_info(bp->dev, "powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
	/* now reset the chip, but without enabling the MAC&PHY
	 * part of it. This has to be done _after_ we shut down the PHY */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}

/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 val;

	if (netif_running(dev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);

	val = br32(bp, B44_RXCONFIG);
	if (!(val & RXCONFIG_CAM_ABSENT))
		__b44_set_mac_addr(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static void b44_init_hw(struct b44 *bp, int reset_kind)
{
	u32 val;

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
		b44_setup_phy(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
					  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
					  (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
		bp->rx_prod = bp->rx_pending;

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	int err;

	err = b44_alloc_consistent(bp, GFP_KERNEL);
	if (err)
		goto out;

	napi_enable(&bp->napi);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);

	b44_check_phy(bp);

	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (unlikely(err < 0)) {
		napi_disable(&bp->napi);
		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
		b44_free_rings(bp);
		b44_free_consistent(bp);
		goto out;
	}

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);
	netif_start_queue(dev);
out:
	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 */
static void b44_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	b44_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
{
	u32 i;
	u32 *pattern = (u32 *) pp;

	for (i = 0; i < bytes; i += sizeof(u32)) {
		bw32(bp, B44_FILT_ADDR, table_offset + i);
		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
	}
}
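/*
 * Build one Wake-on-LAN magic-packet match starting at the given
 * payload offset: six 0xff synchronization bytes followed by as many
 * back-to-back copies of the station MAC as fit in B44_PATTERN_SIZE.
 * Every byte that must match is flagged in the corresponding bit of
 * pmask; the return value is the index of the last byte matched.
 */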
static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
{
	int magicsync = 6;
	int k, j, len = offset;
	int ethaddr_bytes = ETH_ALEN;

	memset(ppattern + offset, 0xff, magicsync);
	for (j = 0; j < magicsync; j++)
		set_bit(len++, (unsigned long *) pmask);

	for (j = 0; j < B44_MAX_PATTERNS; j++) {
		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
			ethaddr_bytes = ETH_ALEN;
		else
			ethaddr_bytes = B44_PATTERN_SIZE - len;
		if (ethaddr_bytes <= 0)
			break;
		for (k = 0; k < ethaddr_bytes; k++) {
			ppattern[offset + magicsync +
				(j * ETH_ALEN) + k] = macaddr[k];
			set_bit(len++, (unsigned long *) pmask);
		}
	}
	return len - 1;
}
/* Setup magic packet patterns in the b44 WOL
 * pattern matching filter.
 */
static void b44_setup_pseudo_magicp(struct b44 *bp)
{
	u32 val;
	int plen0, plen1, plen2;
	u8 *pwol_pattern;
	u8 pwol_mask[B44_PMASK_SIZE];

	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
	if (!pwol_pattern) {
		pr_err("Memory not available for WOL\n");
		return;
	}

	/* Ipv4 magic packet pattern - pattern 0. */
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV4UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);

	/* Raw ethernet II magic packet pattern - pattern 1 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  ETH_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE);

	/* Ipv6 magic packet pattern - pattern 2 */
	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
	memset(pwol_mask, 0, B44_PMASK_SIZE);
	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
				  B44_ETHIPV6UDP_HLEN);

	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);

	kfree(pwol_pattern);

	/* set these pattern's lengths: one less than each real length */
	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
	bw32(bp, B44_WKUP_LEN, val);

	/* enable wakeup pattern matching */
	val = br32(bp, B44_DEVCTRL);
	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
}
#ifdef CONFIG_B44_PCI
static void b44_setup_wol_pci(struct b44 *bp)
{
	u16 val;

	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
	}
}
#else
static inline void b44_setup_wol_pci(struct b44 *bp) { }
#endif /* CONFIG_B44_PCI */

static void b44_setup_wol(struct b44 *bp)
{
	u32 val;

	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);

	if (bp->flags & B44_FLAG_B0_ANDLATER) {
		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);

		val = bp->dev->dev_addr[2] << 24 |
			bp->dev->dev_addr[3] << 16 |
			bp->dev->dev_addr[4] << 8 |
			bp->dev->dev_addr[5];
		bw32(bp, B44_ADDR_LO, val);

		val = bp->dev->dev_addr[0] << 8 |
			bp->dev->dev_addr[1];
		bw32(bp, B44_ADDR_HI, val);

		val = br32(bp, B44_DEVCTRL);
		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
	} else {
		b44_setup_pseudo_magicp(bp);
	}
	b44_setup_wol_pci(bp);
}
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	netif_stop_queue(dev);

	napi_disable(&bp->napi);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	netif_carrier_off(dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	b44_free_consistent(bp);

	return 0;
}
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}
static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i, num_ents;

	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		if (i == num_ents)
			break;
		__b44_cam_write(bp, ha->addr, i++ + 1);
	}
	return i + 1;
}

static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}
static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}
}
static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}
static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
		SPEED_100 : SPEED_10;
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)) {
		cmd->speed = 0;
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((cmd->speed != SPEED_100 &&
		    cmd->speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (cmd->speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}
static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
static void b44_get_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}
static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}
static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, 6);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}
static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats		= b44_get_stats,
	.ndo_set_multicast_list	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};
static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	static int b44_version_printed = 0;
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	if (b44_version_printed++ == 0)
		pr_info("%s", version);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	netif_carrier_off(dev);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}

	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	if (b44_phy_reset(bp) < 0)
		bp->phy_addr = B44_PHY_ADDR_NO_PHY;

	netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
		    dev->dev_addr);

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}
static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}
static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		return rc;
	}

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_device_attach(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}
static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};
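/*
 * PCI cards are still driven through the SSB layer: ssb_pcihost_register()
 * wraps the PCI device in an SSB bus, and the SSB driver registered
 * above then binds to the embedded ethernet core.
 */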
static inline int b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}
static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);