/* b44.c: Broadcom 4400 device driver.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/uaccess.h>

#include "b44.h"

#define DRV_MODULE_NAME		"b44"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"0.6"
#define DRV_MODULE_RELDATE	"Nov 11, 2002"
#define B44_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define B44_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define B44_MIN_MTU			60
#define B44_MAX_MTU			1500

#define B44_RX_RING_SIZE		512
#define B44_DEF_RX_RING_PENDING		200
#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_RX_RING_SIZE)
#define B44_TX_RING_SIZE		512
#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
				 B44_TX_RING_SIZE)

#define TX_RING_GAP(BP)	\
	(B44_TX_RING_SIZE - (BP)->tx_pending)
#define TX_BUFFS_AVAIL(BP)						\
	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
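/* Worked example of the macro above (illustrative numbers, not taken from
 * the hardware): with tx_pending == 511, tx_prod == 500 and tx_cons == 10,
 * the producer has wrapped, so TX_BUFFS_AVAIL = 10 + 511 - 500 = 21 free
 * slots.  Once tx_cons passes tx_prod (say tx_cons == 505, tx_prod == 500),
 * the second arm applies: 505 - 500 - TX_RING_GAP = 5 - 1 = 4.  This works
 * because B44_TX_RING_SIZE is a power of two, which is also what lets
 * NEXT_TX() wrap with a cheap AND instead of a modulo.
 */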
#define RX_PKT_BUF_SZ		(1536 + bp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define B44_TX_WAKEUP_THRESH	(B44_TX_RING_SIZE / 4)
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
MODULE_DESCRIPTION("Broadcom 4400 10/100 PCI ethernet driver");
MODULE_LICENSE("GPL");
MODULE_PARM(b44_debug, "i");
MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");

static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
static struct pci_device_id b44_pci_tbl[] __devinitdata = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ }	/* terminate list with empty entry */
};

MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
static void b44_halt(struct b44 *);
static void b44_init_rings(struct b44 *);
static int b44_init_hw(struct b44 *);
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);
	}
	if (i == timeout) {
		printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
		       "%lx to %s.\n",
		       bp->dev->name,
		       bit, reg,
		       (clear ? "clear" : "set"));
		return -ENODEV;
	}
	return 0;
}
/* Sonics SiliconBackplane support routines.  ROFL, you should see all the
 * buzz words used on this company's website :-)
 *
 * All of these routines must be invoked with bp->lock held and
 * interrupts disabled.
 */

#define SBID_SDRAM		0
#define SBID_PCI_MEM		1
#define SBID_PCI_CFG		2
#define SBID_PCI_DMA		3
#define SBID_SDRAM_SWAPPED	4
#define SBID_ENUM		5
#define SBID_REG_SDRAM		6
#define SBID_REG_ILINE20	7
#define SBID_REG_EMAC		8
#define SBID_REG_CODEC		9
#define SBID_REG_USB		10
#define SBID_REG_PCI		11
#define SBID_REG_MIPS		12
#define SBID_REG_EXTIF		13
#define SBID_EXTIF		14
#define SBID_EJTAG		15
#define SBID_MAX		16
static u32 ssb_get_addr(struct b44 *bp, u32 id, u32 instance)
{
	switch (id) {
	case SBID_PCI_DMA:	return 0x40000000;
	case SBID_ENUM:		return 0x18000000;
	case SBID_REG_EMAC:	return 0x18000000;
	case SBID_REG_CODEC:	return 0x18001000;
	case SBID_REG_PCI:	return 0x18002000;
	default:		return 0;
	}
}
static u32 ssb_get_core_rev(struct b44 *bp)
{
	return (br32(B44_SBIDHIGH) & SBIDHIGH_RC_MASK);
}
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN,
			       ssb_get_addr(bp, SBID_REG_PCI, 0));
	pci_rev = ssb_get_core_rev(bp);

	val = br32(B44_SBINTVEC);
	val |= cores;
	bw32(B44_SBINTVEC, val);

	val = br32(SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}
static void ssb_core_disable(struct b44 *bp)
{
	if (br32(B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(B44_SBTMSLOW);
	udelay(1);
	bw32(B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(B44_SBTMSLOW);
	udelay(1);
}
static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
	bw32(B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set, this is a hw bug workaround. */
	if (br32(B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(B44_SBTMSHIGH, 0);

	val = br32(B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(B44_SBTMSLOW);
	udelay(1);

	bw32(B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(B44_SBTMSLOW);
	udelay(1);
}
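/* A rough map of the disable/reset dance above: ssb_core_disable() parks
 * the core behind REJECT, waits for it to go idle, then pulses RESET with
 * the clock force-gated (FGC) so the core actually latches it.
 * ssb_core_reset() re-runs that, clears any SERR/IBE/TO error state the
 * hardware accumulated, and finally releases everything but CLOCK.  The
 * dummy br32() readbacks flush PCI posted writes before each udelay().
 */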
static int ssb_core_unit(struct b44 *bp)
{
	u32 val = br32(B44_SBADMATCH0);
	u32 type, base;

	type = val & SBADMATCH0_TYPE_MASK;
	switch (type) {
	case 0:
		base = val & SBADMATCH0_BS0_MASK;
		break;
	case 1:
		base = val & SBADMATCH0_BS1_MASK;
		break;
	case 2:
	default:
		base = val & SBADMATCH0_BS2_MASK;
		break;
	}

	return 0;
}
static int ssb_is_core_up(struct b44 *bp)
{
	return ((br32(B44_SBTMSLOW) & (SBTMSLOW_RESET | SBTMSLOW_REJECT | SBTMSLOW_CLOCK))
		== SBTMSLOW_CLOCK);
}
static void __b44_cam_write(struct b44 *bp, char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID |
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(B44_CAM_DATA_HI, val);
	bw32(B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
}
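/* Illustrative packing (example address, not from the source): for
 * 00:10:18:aa:bb:cc, the LO register gets bytes 2..5 packed MSB-first
 * (0x18aabbcc) and the HI register gets CAM_DATA_HI_VALID plus bytes
 * 0..1 (0x0010), so the CAM sees the address in network byte order.
 */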
static inline void __b44_disable_ints(struct b44 *bp)
{
	bw32(B44_IMASK, 0);
}

static void b44_disable_ints(struct b44 *bp)
{
	__b44_disable_ints(bp);

	/* Flush posted writes. */
	br32(B44_IMASK);
}

static void b44_enable_ints(struct b44 *bp)
{
	bw32(B44_IMASK, bp->imask);
}
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	int err;

	bw32(B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}
static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	bw32(B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
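/* The two helpers above assemble a standard IEEE 802.3 clause-22 MDIO
 * frame in a single register write: start bits, a read/write opcode, the
 * 5-bit PHY address, the 5-bit register address, the turnaround pattern,
 * and (for writes) 16 bits of data.  The EMAC_INT_MII status bit is
 * cleared first and then polled, since frame completion is signalled
 * through the interrupt status register rather than a busy flag.
 */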
static int b44_phy_reset(struct b44 *bp)
{
	u32 val;
	int err;

	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
	if (err)
		return err;
	udelay(100);
	err = b44_readphy(bp, MII_BMCR, &val);
	if (err == 0) {
		if (val & BMCR_RESET) {
			printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
			       bp->dev->name);
			err = -ENODEV;
		}
	}

	return err;
}
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(B44_RXCONFIG, val);

	val = br32(B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(B44_MAC_FLOW, val);
}
static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
{
	u32 pause_enab = bp->flags & (B44_FLAG_TX_PAUSE |
				      B44_FLAG_RX_PAUSE);

	if (local & ADVERTISE_PAUSE_CAP) {
		if (local & ADVERTISE_PAUSE_ASYM) {
			if (remote & LPA_PAUSE_CAP)
				pause_enab |= (B44_FLAG_TX_PAUSE |
					       B44_FLAG_RX_PAUSE);
			else if (remote & LPA_PAUSE_ASYM)
				pause_enab |= B44_FLAG_RX_PAUSE;
		} else {
			if (remote & LPA_PAUSE_CAP)
				pause_enab |= (B44_FLAG_TX_PAUSE |
					       B44_FLAG_RX_PAUSE);
		}
	} else if (local & ADVERTISE_PAUSE_ASYM) {
		if ((remote & LPA_PAUSE_CAP) &&
		    (remote & LPA_PAUSE_ASYM))
			pause_enab |= B44_FLAG_TX_PAUSE;
	}

	__b44_set_flow_ctrl(bp, pause_enab);
}
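/* The resolution implemented above, in truth-table form (following the
 * standard 802.3x pause negotiation rules the code encodes):
 *
 *   local CAP  local ASYM  remote CAP  remote ASYM  =>  result
 *       1          1           1           x            TX + RX pause
 *       1          1           0           1            RX pause only
 *       1          0           1           x            TX + RX pause
 *       0          1           1           1            TX pause only
 *   anything else                                       no pause
 */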
static int b44_setup_phy(struct b44 *bp)
{
	u32 val;
	int err;

	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
				val & MII_ALEDCTRL_ALLMSK)) != 0)
		goto out;
	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
		goto out;
	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
				val | MII_TLEDCTRL_ENABLE)) != 0)
		goto out;

	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
		u32 adv = ADVERTISE_CSMA;

		if (bp->flags & B44_FLAG_ADV_10HALF)
			adv |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			adv |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			adv |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			adv |= ADVERTISE_100FULL;

		if (bp->flags & B44_FLAG_PAUSE_AUTO)
			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
			goto out;
		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
						       BMCR_ANRESTART))) != 0)
			goto out;
	} else {
		u32 bmcr;

		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
			goto out;
		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
		if (bp->flags & B44_FLAG_100_BASE_T)
			bmcr |= BMCR_SPEED100;
		if (bp->flags & B44_FLAG_FULL_DUPLEX)
			bmcr |= BMCR_FULLDPLX;
		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
			goto out;

		/* Since we will not be negotiating there is no safe way
		 * to determine if the link partner supports flow control
		 * or not.  So just disable it completely in this case.
		 */
		b44_set_flow_ctrl(bp, 0, 0);
	}

out:
	return err;
}
static void b44_stats_update(struct b44 *bp)
{
	unsigned long reg;
	u32 *val;

	val = &bp->hw_stats.tx_good_octets;
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
		*val++ += br32(reg);
	}
	val = &bp->hw_stats.rx_good_octets;
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
		*val++ += br32(reg);
	}
}
static void b44_link_report(struct b44 *bp)
{
	if (!netif_carrier_ok(bp->dev)) {
		printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
	} else {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
		       (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");

		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
		       "%s for RX.\n",
		       bp->dev->name,
		       (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
		       (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
	}
}
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}
static void b44_timer(unsigned long __opaque)
{
	struct b44 *bp = (struct b44 *) __opaque;

	spin_lock_irq(&bp->lock);

	b44_check_phy(bp);

	b44_stats_update(bp);

	spin_unlock_irq(&bp->lock);

	bp->timer.expires = jiffies + HZ;
	add_timer(&bp->timer);
}
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

	cur  = br32(B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(B44_GPTIMER, 0);
}
/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
 * before the DMA address you give it.  So we allocate 30 more bytes
 * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
 * point the chip at 30 bytes past where the rx_header will go.
 */
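/* Sketch of the resulting buffer layout (rx_offset is 30 in this driver,
 * per the comment above):
 *
 *   buffer start                        skb->data
 *        |<---- rx_offset (30 bytes) ---->|
 *        [ struct rx_header written here  |  packet data DMA'd by chip ]
 *
 * The descriptor address handed to the chip is mapping + rx_offset, so
 * the hardware steps back 30 bytes to deposit the header.
 */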
static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *dp;
	struct ring_info *src_map, *map;
	struct rx_header *rh;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int dest_idx;
	u32 ctrl;

	src_map = NULL;
	if (src_idx >= 0)
		src_map = &bp->rx_buffers[src_idx];
	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	map = &bp->rx_buffers[dest_idx];
	skb = dev_alloc_skb(RX_PKT_BUF_SZ);
	if (skb == NULL)
		return -ENOMEM;

	skb->dev = bp->dev;
	mapping = pci_map_single(bp->pdev, skb->data,
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
	skb_reserve(skb, bp->rx_offset);

	rh = (struct rx_header *)
		(skb->data - bp->rx_offset);
	rh->len = 0;
	rh->flags = 0;

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	ctrl  = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset));
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	dp = &bp->rx_ring[dest_idx];
	dp->ctrl = cpu_to_le32(ctrl);
	dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset);

	return RX_PKT_BUF_SZ;
}
static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
{
	struct dma_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	struct rx_header *rh;
	int dest_idx;
	u32 ctrl;

	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
	dest_desc = &bp->rx_ring[dest_idx];
	dest_map = &bp->rx_buffers[dest_idx];
	src_desc = &bp->rx_ring[src_idx];
	src_map = &bp->rx_buffers[src_idx];

	dest_map->skb = src_map->skb;
	rh = (struct rx_header *)
		(src_map->skb->data - bp->rx_offset);
	rh->len = 0;
	rh->flags = 0;
	pci_unmap_addr_set(dest_map, mapping,
			   pci_unmap_addr(src_map, mapping));

	ctrl = src_desc->ctrl;
	if (dest_idx == (B44_RX_RING_SIZE - 1))
		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
	else
		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);

	dest_desc->ctrl = ctrl;
	dest_desc->addr = src_desc->addr;
	src_map->skb = NULL;
}
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single(bp->pdev, map,
				    RX_PKT_BUF_SZ,
				    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) (skb->data - bp->rx_offset);
		len = cpu_to_le16(rh->len);
		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = cpu_to_le16(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + bp->rx_offset);
			skb_pull(skb, bp->rx_offset);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = bp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above */
			memcpy(copy_skb->data, skb->data, len);
			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;

	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
static int b44_poll(struct net_device *netdev, int *budget)
{
	struct b44 *bp = netdev->priv;
	int done;

	spin_lock_irq(&bp->lock);

	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
		/* spin_lock(&bp->tx_lock); */
		b44_tx(bp);
		/* spin_unlock(&bp->tx_lock); */
	}

	done = 1;
	if (bp->istat & ISTAT_RX) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = b44_rx(bp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		if (work_done >= orig_budget)
			done = 0;
	}

	if (bp->istat & ISTAT_ERRORS) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
		netif_wake_queue(bp->dev);
		done = 1;
	}

	if (done) {
		netif_rx_complete(netdev);
		b44_enable_ints(bp);
	}
	spin_unlock_irq(&bp->lock);

	return (done ? 0 : 1);
}
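/* A note on the polling contract used above (2.5/2.6-era NAPI): *budget
 * is the global quota for this softirq pass and netdev->quota the
 * per-device one; both are decremented by the work actually done.
 * Returning 0 means "all caught up, interrupts re-enabled"; returning 1
 * asks the core to poll again, which is why done is cleared only when
 * b44_rx() consumed its entire allotment.
 */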
static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = dev->priv;
	unsigned long flags;
	u32 istat, imask;
	int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	istat = br32(B44_ISTAT);
	imask = br32(B44_IMASK);

	/* ??? What is the purpose of the interrupt mask
	 * ??? register if we have to mask it out by hand anyway?
	 */
	istat &= imask;
	if (istat) {
		handled = 1;
		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

		bw32(B44_ISTAT, istat);
		br32(B44_ISTAT);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
	return IRQ_RETVAL(handled);
}
static void b44_tx_timeout(struct net_device *dev)
{
	struct b44 *bp = dev->priv;

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp);

	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	netif_wake_queue(dev);
}
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&bp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

	entry = bp->tx_prod;
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

	wmb();

	bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irq(&bp->lock);

	dev->trans_start = jiffies;

	return 0;
}
static int b44_change_mtu(struct net_device *dev, int new_mtu)
{
	struct b44 *bp = dev->priv;

	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		dev->mtu = new_mtu;
		return 0;
	}

	spin_lock_irq(&bp->lock);
	b44_halt(bp);
	dev->mtu = new_mtu;
	b44_init_rings(bp);
	b44_init_hw(bp);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_free_rings(struct b44 *bp)
{
	struct ring_info *rp;
	int i;

	for (i = 0; i < B44_RX_RING_SIZE; i++) {
		rp = &bp->rx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 RX_PKT_BUF_SZ,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}

	/* XXX needs changes once NETIF_F_SG is set... */
	for (i = 0; i < B44_TX_RING_SIZE; i++) {
		rp = &bp->tx_buffers[i];

		if (rp->skb == NULL)
			continue;
		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 rp->skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_any(rp->skb);
		rp->skb = NULL;
	}
}
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  bp->lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void b44_init_rings(struct b44 *bp)
{
	int i;

	b44_free_rings(bp);

	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);

	for (i = 0; i < bp->rx_pending; i++) {
		if (b44_alloc_rx_skb(bp, -1, i) < 0)
			break;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
 */
static void b44_free_consistent(struct b44 *bp)
{
	if (bp->rx_buffers) {
		kfree(bp->rx_buffers);
		bp->rx_buffers = NULL;
	}
	if (bp->tx_buffers) {
		kfree(bp->tx_buffers);
		bp->tx_buffers = NULL;
	}
	if (bp->rx_ring) {
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}
	if (bp->tx_ring) {
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->tx_ring, bp->tx_ring_dma);
		bp->tx_ring = NULL;
	}
}
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
 */
static int b44_alloc_consistent(struct b44 *bp)
{
	int size;

	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
	bp->rx_buffers = kmalloc(size, GFP_KERNEL);
	if (!bp->rx_buffers)
		goto out_err;
	memset(bp->rx_buffers, 0, size);

	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
	bp->tx_buffers = kmalloc(size, GFP_KERNEL);
	if (!bp->tx_buffers)
		goto out_err;
	memset(bp->tx_buffers, 0, size);

	size = DMA_TABLE_BYTES;
	bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
	if (!bp->rx_ring)
		goto out_err;

	bp->tx_ring = pci_alloc_consistent(bp->pdev, size, &bp->tx_ring_dma);
	if (!bp->tx_ring)
		goto out_err;

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;
}
/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(reg);
}
/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(B44_RCV_LAZY, 0);
		bw32(B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(B44_MDIO_CTRL);

	if (!(br32(B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
/* bp->lock is held. */
static void b44_halt(struct b44 *bp)
{
	b44_disable_ints(bp);
	b44_chip_reset(bp);
}
/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(B44_CAM_CTRL);
		bw32(B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
static int b44_set_mac_addr(struct net_device *dev, void *p)
{
	struct b44 *bp = dev->priv;
	struct sockaddr *addr = p;

	if (netif_running(dev))
		return -EBUSY;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	spin_lock_irq(&bp->lock);
	__b44_set_mac_addr(bp);
	spin_unlock_irq(&bp->lock);

	return 0;
}
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with bp->lock held.
 */
static void __b44_set_rx_mode(struct net_device *);
static int b44_init_hw(struct b44 *bp)
{
	u32 val;

	b44_disable_ints(bp);
	b44_chip_reset(bp);
	b44_phy_reset(bp);
	b44_setup_phy(bp);

	val = br32(B44_MAC_CTRL);
	bw32(B44_MAC_CTRL, val | MAC_CTRL_CRC32_ENAB);
	bw32(B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too. */
	__b44_set_rx_mode(bp->dev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + 24);
	bw32(B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + 24);

	bw32(B44_TX_WMARK, 56); /* XXX magic */
	bw32(B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
	bw32(B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
	bw32(B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
			      (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
	bw32(B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

	bw32(B44_DMARX_PTR, bp->rx_pending);

	bw32(B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

	val = br32(B44_ENET_CTRL);
	bw32(B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));

	return 0;
}
static int b44_open(struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	int err;

	err = b44_alloc_consistent(bp);
	if (err)
		return err;

	err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
	if (err)
		goto err_out_free;

	spin_lock_irq(&bp->lock);

	b44_init_rings(bp);
	err = b44_init_hw(bp);
	if (err)
		goto err_out_noinit;
	bp->flags |= B44_FLAG_INIT_COMPLETE;

	spin_unlock_irq(&bp->lock);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + HZ;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = b44_timer;
	add_timer(&bp->timer);

	b44_enable_ints(bp);

	return 0;

err_out_noinit:
	b44_halt(bp);
	b44_free_rings(bp);
	spin_unlock_irq(&bp->lock);
	free_irq(dev->irq, dev);
err_out_free:
	b44_free_consistent(bp);
	return err;
}
/*static*/ void b44_dump_state(struct b44 *bp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;

	pci_read_config_word(bp->pdev, PCI_STATUS, &val16);
	printk("DEBUG: PCI status [%04x] \n", val16);
}
static int b44_close(struct net_device *dev)
{
	struct b44 *bp = dev->priv;

	netif_stop_queue(dev);

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	b44_free_rings(bp);
	bp->flags &= ~B44_FLAG_INIT_COMPLETE;
	netif_carrier_off(bp->dev);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);

	b44_free_consistent(bp);

	return 0;
}
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	struct net_device_stats *nstat = &bp->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;

	return nstat;
}
static void __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct dev_mc_list *mclist;
	int i, num_ents;

	num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
	mclist = dev->mc_list;
	for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
		__b44_cam_write(bp, mclist->dmi_addr, i + 1);
	}
}
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = dev->priv;
	u32 val;

	val = br32(B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(B44_RXCONFIG, val);
	} else {
		__b44_set_mac_addr(bp);

		if (dev->flags & IFF_ALLMULTI)
			val |= RXCONFIG_ALLMULTI;
		else
			__b44_load_mcast(bp, dev);

		bw32(B44_RXCONFIG, val);
	}
}
static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = dev->priv;

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}
static int b44_ethtool_ioctl (struct net_device *dev, void *useraddr)
{
	struct b44 *bp = dev->priv;
	struct pci_dev *pci_dev = bp->pdev;
	u32 ethcmd;

	if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO:{
		struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
		strcpy (info.driver, DRV_MODULE_NAME);
		strcpy (info.version, DRV_MODULE_VERSION);
		memset(&info.fw_version, 0, sizeof(info.fw_version));
		strcpy (info.bus_info, pci_dev->slot_name);
		info.eedump_len = 0;
		info.regdump_len = 0;
		if (copy_to_user (useraddr, &info, sizeof (info)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_GSET: {
		struct ethtool_cmd cmd = { ETHTOOL_GSET };

		if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
			return -EAGAIN;
		cmd.supported = (SUPPORTED_Autoneg);
		cmd.supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);

		cmd.advertising = 0;
		if (bp->flags & B44_FLAG_ADV_10HALF)
			cmd.advertising |= ADVERTISE_10HALF;
		if (bp->flags & B44_FLAG_ADV_10FULL)
			cmd.advertising |= ADVERTISE_10FULL;
		if (bp->flags & B44_FLAG_ADV_100HALF)
			cmd.advertising |= ADVERTISE_100HALF;
		if (bp->flags & B44_FLAG_ADV_100FULL)
			cmd.advertising |= ADVERTISE_100FULL;
		cmd.advertising |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		cmd.speed = (bp->flags & B44_FLAG_100_BASE_T) ?
			SPEED_100 : SPEED_10;
		cmd.duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;
		cmd.port = 0;
		cmd.phy_address = bp->phy_addr;
		cmd.transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
			XCVR_INTERNAL : XCVR_EXTERNAL;
		cmd.autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
			AUTONEG_DISABLE : AUTONEG_ENABLE;
		if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SSET: {
		struct ethtool_cmd cmd;

		if (!(bp->flags & B44_FLAG_INIT_COMPLETE))
			return -EAGAIN;

		if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
			return -EFAULT;

		/* We do not support gigabit. */
		if (cmd.autoneg == AUTONEG_ENABLE) {
			if (cmd.advertising &
			    (ADVERTISED_1000baseT_Half |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		} else if ((cmd.speed != SPEED_100 &&
			    cmd.speed != SPEED_10) ||
			   (cmd.duplex != DUPLEX_HALF &&
			    cmd.duplex != DUPLEX_FULL)) {
			return -EINVAL;
		}

		spin_lock_irq(&bp->lock);

		if (cmd.autoneg == AUTONEG_ENABLE) {
			bp->flags &= ~B44_FLAG_FORCE_LINK;
			bp->flags &= ~(B44_FLAG_ADV_10HALF |
				       B44_FLAG_ADV_10FULL |
				       B44_FLAG_ADV_100HALF |
				       B44_FLAG_ADV_100FULL);
			if (cmd.advertising & ADVERTISE_10HALF)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd.advertising & ADVERTISE_10FULL)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd.advertising & ADVERTISE_100HALF)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd.advertising & ADVERTISE_100FULL)
				bp->flags |= B44_FLAG_ADV_100FULL;
		} else {
			bp->flags |= B44_FLAG_FORCE_LINK;
			if (cmd.speed == SPEED_100)
				bp->flags |= B44_FLAG_100_BASE_T;
			if (cmd.duplex == DUPLEX_FULL)
				bp->flags |= B44_FLAG_FULL_DUPLEX;
		}

		b44_setup_phy(bp);

		spin_unlock_irq(&bp->lock);

		return 0;
	}
	case ETHTOOL_GMSGLVL: {
		struct ethtool_value edata = { ETHTOOL_GMSGLVL };
		edata.data = bp->msg_enable;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SMSGLVL: {
		struct ethtool_value edata;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		bp->msg_enable = edata.data;
		return 0;
	}
	case ETHTOOL_NWAY_RST: {
		u32 bmcr;
		int r;

		spin_lock_irq(&bp->lock);
		b44_readphy(bp, MII_BMCR, &bmcr);
		b44_readphy(bp, MII_BMCR, &bmcr);
		r = -EINVAL;
		if (bmcr & BMCR_ANENABLE) {
			b44_writephy(bp, MII_BMCR,
				     bmcr | BMCR_ANRESTART);
			r = 0;
		}
		spin_unlock_irq(&bp->lock);

		return r;
	}
	case ETHTOOL_GLINK: {
		struct ethtool_value edata = { ETHTOOL_GLINK };
		edata.data = netif_carrier_ok(bp->dev) ? 1 : 0;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_GRINGPARAM: {
		struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };

		ering.rx_max_pending = B44_RX_RING_SIZE - 1;
		ering.rx_pending = bp->rx_pending;

		/* XXX ethtool lacks a tx_max_pending, oops... */
		ering.tx_pending = bp->tx_pending;

		if (copy_to_user(useraddr, &ering, sizeof(ering)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SRINGPARAM: {
		struct ethtool_ringparam ering;

		if (copy_from_user(&ering, useraddr, sizeof(ering)))
			return -EFAULT;

		if ((ering.rx_pending > B44_RX_RING_SIZE - 1) ||
		    (ering.rx_mini_pending != 0) ||
		    (ering.rx_jumbo_pending != 0) ||
		    (ering.tx_pending > B44_TX_RING_SIZE - 1))
			return -EINVAL;

		spin_lock_irq(&bp->lock);

		bp->rx_pending = ering.rx_pending;
		bp->tx_pending = ering.tx_pending;

		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp);
		netif_wake_queue(bp->dev);
		spin_unlock_irq(&bp->lock);

		b44_enable_ints(bp);

		return 0;
	}
	case ETHTOOL_GPAUSEPARAM: {
		struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };

		epause.autoneg =
			(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
		epause.rx_pause =
			(bp->flags & B44_FLAG_RX_PAUSE) != 0;
		epause.tx_pause =
			(bp->flags & B44_FLAG_TX_PAUSE) != 0;
		if (copy_to_user(useraddr, &epause, sizeof(epause)))
			return -EFAULT;
		return 0;
	}
	case ETHTOOL_SPAUSEPARAM: {
		struct ethtool_pauseparam epause;

		if (copy_from_user(&epause, useraddr, sizeof(epause)))
			return -EFAULT;

		spin_lock_irq(&bp->lock);
		if (epause.autoneg)
			bp->flags |= B44_FLAG_PAUSE_AUTO;
		else
			bp->flags &= ~B44_FLAG_PAUSE_AUTO;
		if (epause.rx_pause)
			bp->flags |= B44_FLAG_RX_PAUSE;
		else
			bp->flags &= ~B44_FLAG_RX_PAUSE;
		if (epause.tx_pause)
			bp->flags |= B44_FLAG_TX_PAUSE;
		else
			bp->flags &= ~B44_FLAG_TX_PAUSE;
		if (bp->flags & B44_FLAG_PAUSE_AUTO) {
			b44_halt(bp);
			b44_init_rings(bp);
			b44_init_hw(bp);
		} else {
			__b44_set_flow_ctrl(bp, bp->flags);
		}
		spin_unlock_irq(&bp->lock);

		b44_enable_ints(bp);

		return 0;
	}
	};

	return -EOPNOTSUPP;
}
static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
	struct b44 *bp = dev->priv;
	int err;

	switch (cmd) {
	case SIOCETHTOOL:
		return b44_ethtool_ioctl(dev, (void *) ifr->ifr_data);

	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_irq(&bp->lock);
		err = b44_readphy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_irq(&bp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_irq(&bp->lock);
		err = b44_writephy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&bp->lock);

		return err;

	default:
		break;
	};
	return -EOPNOTSUPP;
}
/* Read 128 bytes of EEPROM. */
static int b44_read_eeprom(struct b44 *bp, u8 *data)
{
	long i;
	u16 *ptr = (u16 *) data;

	for (i = 0; i < 128; i += 2)
		ptr[i / 2] = readw(bp->regs + 4096 + i);

	return 0;
}
static int __devinit b44_get_invariants(struct b44 *bp)
{
	u8 eeprom[128];
	int err;

	err = b44_read_eeprom(bp, &eeprom[0]);
	if (err)
		goto out;

	bp->dev->dev_addr[0] = eeprom[79];
	bp->dev->dev_addr[1] = eeprom[78];
	bp->dev->dev_addr[2] = eeprom[81];
	bp->dev->dev_addr[3] = eeprom[80];
	bp->dev->dev_addr[4] = eeprom[83];
	bp->dev->dev_addr[5] = eeprom[82];

	bp->phy_addr = eeprom[90] & 0x1f;
	bp->mdc_port = (eeprom[90] >> 14) & 0x1;
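	/* The EEPROM stores the MAC as three little-endian 16-bit words
	 * (read via readw() above), which is why the bytes come out swapped
	 * in pairs: offsets 78/79 hold the first word, 80/81 the second,
	 * 82/83 the third.  For example (made-up contents), if the word at
	 * offset 78 reads back as 0x0010, then dev_addr[0] = eeprom[79] =
	 * 0x10 and dev_addr[1] = eeprom[78] = 0x00.
	 */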
	/* With this, plus the rx_header prepended to the data by the
	 * hardware, we'll land the ethernet header on a 2-byte boundary.
	 */
	bp->rx_offset = 30;

	bp->imask = IMASK_DEF;

	bp->core_unit = ssb_core_unit(bp);
	bp->dma_offset = ssb_get_addr(bp, SBID_PCI_DMA, 0);

	bp->flags |= B44_FLAG_BUGGY_TXPTR;

out:
	return err;
}
static int __devinit b44_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int b44_version_printed = 0;
	unsigned long b44reg_base, b44reg_len;
	struct net_device *dev;
	struct b44 *bp;
	int err, i;

	if (b44_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find proper PCI device "
		       "base address, aborting.\n");
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, (u64) 0xffffffff);
	if (err) {
		printk(KERN_ERR PFX "No usable DMA configuration, "
		       "aborting.\n");
		goto err_out_free_res;
	}

	b44reg_base = pci_resource_start(pdev, 0);
	b44reg_len = pci_resource_len(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_MODULE_OWNER(dev);
	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = dev->priv;
	bp->pdev = pdev;
	bp->dev = dev;
	if (b44_debug >= 0)
		bp->msg_enable = (1 << b44_debug) - 1;
	else
		bp->msg_enable = B44_DEF_MSG_ENABLE;

	spin_lock_init(&bp->lock);

	bp->regs = (unsigned long) ioremap(b44reg_base, b44reg_len);
	if (bp->regs == 0UL) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->open = b44_open;
	dev->stop = b44_close;
	dev->hard_start_xmit = b44_start_xmit;
	dev->get_stats = b44_get_stats;
	dev->set_multicast_list = b44_set_rx_mode;
	dev->set_mac_address = b44_set_mac_addr;
	dev->do_ioctl = b44_ioctl;
	dev->tx_timeout = b44_tx_timeout;
	dev->poll = b44_poll;
	dev->weight = 64;
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->change_mtu = b44_change_mtu;
	dev->irq = pdev->irq;
	err = b44_get_invariants(bp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	pci_set_drvdata(pdev, dev);

	pci_save_state(bp->pdev, bp->pci_cfg_state);

	printk(KERN_INFO "%s: Broadcom 4400 10/100BaseT Ethernet ", dev->name);
	for (i = 0; i < 6; i++)
		printk("%2.2x%c", dev->dev_addr[i],
		       i == 5 ? '\n' : ':');

	return 0;
err_out_iounmap:
	iounmap((void *) bp->regs);

err_out_free_dev:
	kfree(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit b44_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		unregister_netdev(dev);
		iounmap((void *) ((struct b44 *)(dev->priv))->regs);
		kfree(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver b44_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_pci_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
};
static int __init b44_init(void)
{
	return pci_module_init(&b44_driver);
}

static void __exit b44_cleanup(void)
{
	pci_unregister_driver(&b44_driver);
}

module_init(b44_init);
module_exit(b44_cleanup);