1 /******************************************************************************/
3 /* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2005 Broadcom */
5 /* All rights reserved. */
7 /* This program is free software; you can redistribute it and/or modify */
8 /* it under the terms of the GNU General Public License as published by */
9 /* the Free Software Foundation, located in the file LICENSE. */
11 /******************************************************************************/
13 /* $Id: b57um.c,v 1.29.2.6 2010/02/21 20:06:36 Exp $ */
/* Driver identification strings (referenced in log messages and by the
 * module init code). */
char bcm5700_driver[] = "bcm5700";   /* driver name */
char bcm5700_version[] = "8.3.14";   /* driver version */
char bcm5700_date[] = "(11/2/05)";   /* release date */
/* this is needed to get good and stable performance */
35 #define EXTRA_HDR BCMEXTRAHDROOM
37 /* A few user-configurable values. */
40 /* Used to pass the full-duplex flag, etc. */
/* Per-adapter link configuration, exposed as module parameters.
 * One slot per supported unit; the 16 initializers match MAX_UNITS. */
static int line_speed[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};        /* default 0 (presumably "not forced"; see auto_speed) -- confirm */
static int auto_speed[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};        /* default 1 */
static int full_duplex[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};       /* default 1 */
static int rx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};   /* default 1 */
static int tx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};   /* default 1 */
static int auto_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}; /* default 1 */
47 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
/* Per-unit MTU; only compiled in when jumbo receive rings are enabled. */
static int mtu[MAX_UNITS] = {1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500}; /* Jumbo MTU for interfaces. */
/* Per-unit offload enables (module parameters); 1 = enabled by default. */
static int tx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};    /* TX checksum offload */
static int rx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};    /* RX checksum offload */
static int scatter_gather[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}; /* scatter-gather DMA */
54 #define TX_DESC_CNT DEFAULT_TX_PACKET_DESC_COUNT
55 static unsigned int tx_pkt_desc_cnt
[MAX_UNITS
] =
56 {TX_DESC_CNT
,TX_DESC_CNT
,TX_DESC_CNT
,TX_DESC_CNT
,TX_DESC_CNT
,
57 TX_DESC_CNT
,TX_DESC_CNT
,TX_DESC_CNT
,TX_DESC_CNT
,TX_DESC_CNT
,
58 TX_DESC_CNT
,TX_DESC_CNT
,TX_DESC_CNT
,TX_DESC_CNT
,TX_DESC_CNT
,
61 #define RX_DESC_CNT DEFAULT_STD_RCV_DESC_COUNT
62 static unsigned int rx_std_desc_cnt
[MAX_UNITS
] =
63 {RX_DESC_CNT
,RX_DESC_CNT
,RX_DESC_CNT
,RX_DESC_CNT
,RX_DESC_CNT
,
64 RX_DESC_CNT
,RX_DESC_CNT
,RX_DESC_CNT
,RX_DESC_CNT
,RX_DESC_CNT
,
65 RX_DESC_CNT
,RX_DESC_CNT
,RX_DESC_CNT
,RX_DESC_CNT
,RX_DESC_CNT
,
68 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
69 #define JBO_DESC_CNT DEFAULT_JUMBO_RCV_DESC_COUNT
70 static unsigned int rx_jumbo_desc_cnt
[MAX_UNITS
] =
71 {JBO_DESC_CNT
,JBO_DESC_CNT
,JBO_DESC_CNT
,JBO_DESC_CNT
,JBO_DESC_CNT
,
72 JBO_DESC_CNT
,JBO_DESC_CNT
,JBO_DESC_CNT
,JBO_DESC_CNT
,JBO_DESC_CNT
,
73 JBO_DESC_CNT
,JBO_DESC_CNT
,JBO_DESC_CNT
,JBO_DESC_CNT
,JBO_DESC_CNT
,
78 #ifdef BCM_NAPI_RXPOLL
79 static unsigned int adaptive_coalesce
[MAX_UNITS
] =
80 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
82 static unsigned int adaptive_coalesce
[MAX_UNITS
] =
83 {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
86 #define RX_COAL_TK DEFAULT_RX_COALESCING_TICKS
87 static unsigned int rx_coalesce_ticks
[MAX_UNITS
] =
88 {RX_COAL_TK
,RX_COAL_TK
,RX_COAL_TK
,RX_COAL_TK
,RX_COAL_TK
,
89 RX_COAL_TK
, RX_COAL_TK
,RX_COAL_TK
,RX_COAL_TK
,RX_COAL_TK
,
90 RX_COAL_TK
,RX_COAL_TK
, RX_COAL_TK
,RX_COAL_TK
,RX_COAL_TK
,
93 #define RX_COAL_FM DEFAULT_RX_MAX_COALESCED_FRAMES
94 static unsigned int rx_max_coalesce_frames
[MAX_UNITS
] =
95 {RX_COAL_FM
,RX_COAL_FM
,RX_COAL_FM
,RX_COAL_FM
,RX_COAL_FM
,
96 RX_COAL_FM
,RX_COAL_FM
,RX_COAL_FM
,RX_COAL_FM
,RX_COAL_FM
,
97 RX_COAL_FM
,RX_COAL_FM
,RX_COAL_FM
,RX_COAL_FM
,RX_COAL_FM
,
100 #define TX_COAL_TK DEFAULT_TX_COALESCING_TICKS
101 static unsigned int tx_coalesce_ticks
[MAX_UNITS
] =
102 {TX_COAL_TK
,TX_COAL_TK
,TX_COAL_TK
,TX_COAL_TK
,TX_COAL_TK
,
103 TX_COAL_TK
, TX_COAL_TK
,TX_COAL_TK
,TX_COAL_TK
,TX_COAL_TK
,
104 TX_COAL_TK
,TX_COAL_TK
, TX_COAL_TK
,TX_COAL_TK
,TX_COAL_TK
,
107 #define TX_COAL_FM DEFAULT_TX_MAX_COALESCED_FRAMES
108 static unsigned int tx_max_coalesce_frames
[MAX_UNITS
] =
109 {TX_COAL_FM
,TX_COAL_FM
,TX_COAL_FM
,TX_COAL_FM
,TX_COAL_FM
,
110 TX_COAL_FM
,TX_COAL_FM
,TX_COAL_FM
,TX_COAL_FM
,TX_COAL_FM
,
111 TX_COAL_FM
,TX_COAL_FM
,TX_COAL_FM
,TX_COAL_FM
,TX_COAL_FM
,
114 #define ST_COAL_TK DEFAULT_STATS_COALESCING_TICKS
115 static unsigned int stats_coalesce_ticks
[MAX_UNITS
] =
116 {ST_COAL_TK
,ST_COAL_TK
,ST_COAL_TK
,ST_COAL_TK
,ST_COAL_TK
,
117 ST_COAL_TK
,ST_COAL_TK
,ST_COAL_TK
,ST_COAL_TK
,ST_COAL_TK
,
118 ST_COAL_TK
,ST_COAL_TK
,ST_COAL_TK
,ST_COAL_TK
,ST_COAL_TK
,
/* Per-unit feature switches (module parameters). */
static int enable_wol[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; /* Wake-on-LAN, off by default */
static int enable_tso[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}; /* TCP segmentation offload, on by default */
128 #ifdef BCM_NIC_SEND_BD
/* Per-unit switch for placing TX buffer descriptors in NIC memory
 * (only compiled in under BCM_NIC_SEND_BD); off by default. */
static int nic_tx_bd[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* Miscellaneous per-unit behavior switches (module parameters), all
 * defaulting to 0 (disabled). */
static int vlan_tag_mode[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; /* VLAN tag handling mode */
static int delay_link[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};    /* delay link-up reporting */
static int disable_d3hot[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; /* skip the D3hot PCI power state */
137 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
/* MSI controls (compiled in only when the kernel supports MSI). */
static int disable_msi[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; /* per-unit: force legacy INTx */
static int bcm_msi_chipset_bug = 0; /* set once when a chipset known to break MSI is detected -- TODO confirm against usage */
142 #define BCM_TIMER_GRANULARITY (1000000 / HZ)
/* Hack to hook the data path to the BCM WL driver */
145 #ifdef BCM_WL_EMULATOR
146 #include "bcmnvram.h"
147 #include "wl_bcm57emu.h"
149 int skb_old_alloc
= 0;
151 #endif /* BCM_WL_EMULATOR */
153 /* Operational parameters that usually are not changed. */
154 /* Time in jiffies before concluding the transmitter is hung. */
155 #define TX_TIMEOUT (2*HZ)
157 #if (LINUX_VERSION_CODE < 0x02030d)
158 #define pci_resource_start(dev, bar) (dev->base_address[bar] & PCI_BASE_ADDRESS_MEM_MASK)
159 #elif (LINUX_VERSION_CODE < 0x02032b)
160 #define pci_resource_start(dev, bar) (dev->resource[bar] & PCI_BASE_ADDRESS_MEM_MASK)
163 #if (LINUX_VERSION_CODE < 0x02032b)
164 #define dev_kfree_skb_irq(skb) dev_kfree_skb(skb)
165 #define netif_wake_queue(dev) clear_bit(0, &dev->tbusy); mark_bh(NET_BH)
166 #define netif_stop_queue(dev) set_bit(0, &dev->tbusy)
168 static inline void netif_start_queue(struct net_device
*dev
)
175 #define netif_queue_stopped(dev) dev->tbusy
176 #define netif_running(dev) dev->start
178 static inline void tasklet_schedule(struct tasklet_struct
*tasklet
)
180 queue_task(tasklet
, &tq_immediate
);
181 mark_bh(IMMEDIATE_BH
);
184 static inline void tasklet_init(struct tasklet_struct
*tasklet
,
185 void (*func
)(unsigned long),
188 tasklet
->next
= NULL
;
190 tasklet
->routine
= (void (*)(void *))func
;
191 tasklet
->data
= (void *)data
;
194 #define tasklet_kill(tasklet)
198 #if (LINUX_VERSION_CODE < 0x020300)
199 struct pci_device_id
{
200 unsigned int vendor
, device
; /* Vendor and device ID or PCI_ANY_ID */
201 unsigned int subvendor
, subdevice
; /* Subsystem ID's or PCI_ANY_ID */
202 unsigned int class, class_mask
; /* (class,subclass,prog-if) triplet */
203 unsigned long driver_data
; /* Data private to the driver */
208 #define pci_set_drvdata(pdev, dev)
209 #define pci_get_drvdata(pdev) 0
211 #define pci_enable_device(pdev) 0
213 #define __devinit __init
214 #define __devinitdata __initdata
217 #define SET_MODULE_OWNER(dev)
218 #define MODULE_DEVICE_TABLE(pci, pci_tbl)
222 #if (LINUX_VERSION_CODE < 0x020411)
224 #define __devexit_p(x) x
228 #ifndef MODULE_LICENSE
229 #define MODULE_LICENSE(license)
233 typedef void irqreturn_t
;
234 #define IRQ_RETVAL(x)
237 #if (LINUX_VERSION_CODE < 0x02032a)
238 static inline void *pci_alloc_consistent(struct pci_dev
*pdev
, size_t size
,
239 dma_addr_t
*dma_handle
)
243 /* Maximum in slab.c */
247 virt_ptr
= kmalloc(size
, GFP_KERNEL
);
248 *dma_handle
= virt_to_bus(virt_ptr
);
251 #define pci_free_consistent(dev, size, ptr, dma_ptr) kfree(ptr)
253 #endif /*#if (LINUX_VERSION_CODE < 0x02032a) */
256 #if (LINUX_VERSION_CODE < 0x02040d)
258 #if (LINUX_VERSION_CODE >= 0x020409) && defined(RED_HAT_LINUX_KERNEL)
260 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
261 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
264 /* pci_set_dma_mask is using dma_addr_t */
266 #define BCM_32BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
267 #define BCM_64BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
271 #else /* (LINUX_VERSION_CODE < 0x02040d) */
273 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
274 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
277 #if (LINUX_VERSION_CODE < 0x020329)
278 #define pci_set_dma_mask(pdev, mask) (0)
280 #if (LINUX_VERSION_CODE < 0x020403)
282 pci_set_dma_mask(struct pci_dev
*dev
, dma_addr_t mask
)
284 if(! pci_dma_supported(dev
, mask
))
287 dev
->dma_mask
= mask
;
294 #if (LINUX_VERSION_CODE < 0x020547)
295 #define pci_set_consistent_dma_mask(pdev, mask) (0)
298 #if (LINUX_VERSION_CODE < 0x020402)
299 #define pci_request_regions(pdev, name) (0)
300 #define pci_release_regions(pdev)
303 #if !defined(spin_is_locked)
304 #define spin_is_locked(lock) (test_bit(0,(lock)))
307 #define BCM5700_LOCK(pUmDevice, flags) \
308 if ((pUmDevice)->do_global_lock) { \
309 spin_lock_irqsave(&(pUmDevice)->global_lock, flags); \
312 #define BCM5700_UNLOCK(pUmDevice, flags) \
313 if ((pUmDevice)->do_global_lock) { \
314 spin_unlock_irqrestore(&(pUmDevice)->global_lock, flags);\
318 bcm5700_intr_lock(PUM_DEVICE_BLOCK pUmDevice
)
320 if (pUmDevice
->do_global_lock
) {
321 spin_lock(&pUmDevice
->global_lock
);
326 bcm5700_intr_unlock(PUM_DEVICE_BLOCK pUmDevice
)
328 if (pUmDevice
->do_global_lock
) {
329 spin_unlock(&pUmDevice
->global_lock
);
334 bcm5700_intr_off(PUM_DEVICE_BLOCK pUmDevice
)
336 atomic_inc(&pUmDevice
->intr_sem
);
337 LM_DisableInterrupt(&pUmDevice
->lm_dev
);
338 #if (LINUX_VERSION_CODE >= 0x2051c)
339 synchronize_irq(pUmDevice
->dev
->irq
);
343 LM_DisableInterrupt(&pUmDevice
->lm_dev
);
347 bcm5700_intr_on(PUM_DEVICE_BLOCK pUmDevice
)
349 if (atomic_dec_and_test(&pUmDevice
->intr_sem
)) {
350 LM_EnableInterrupt(&pUmDevice
->lm_dev
);
355 * Broadcom NIC Extension support
366 #endif /* NICE_SUPPORT */
/* Size of the OS-specific packet descriptor wrapper; the MM_ prefix
 * suggests it is read by the shared (OS-independent) LM layer when
 * sizing packet descriptors -- confirm against the LM allocator. */
int MM_Packet_Desc_Size = sizeof(UM_PACKET);
/* Kernel module metadata. */
MODULE_AUTHOR("Michael Chan <mchan at broadcom dot com> and Gary Zambrano <zambrano at broadcom dot com>");
MODULE_DESCRIPTION("BCM5700 Driver");
MODULE_LICENSE("GPL");
375 #if (LINUX_VERSION_CODE < 0x020605)
/* Legacy (pre-2.6.5) module parameter declarations. "i" is a single
 * int; "1-Ni" declares an array accepting 1..MAX_UNITS ints. */
MODULE_PARM(debug, "i");
MODULE_PARM(msglevel, "i");
MODULE_PARM(line_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(auto_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(rx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(tx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(auto_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
385 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
386 MODULE_PARM(mtu
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
388 MODULE_PARM(tx_checksum
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
389 MODULE_PARM(rx_checksum
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
390 MODULE_PARM(scatter_gather
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
391 MODULE_PARM(tx_pkt_desc_cnt
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
392 MODULE_PARM(rx_std_desc_cnt
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
393 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
394 MODULE_PARM(rx_jumbo_desc_cnt
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
397 MODULE_PARM(adaptive_coalesce
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
398 MODULE_PARM(rx_coalesce_ticks
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
399 MODULE_PARM(rx_max_coalesce_frames
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
400 MODULE_PARM(tx_coalesce_ticks
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
401 MODULE_PARM(tx_max_coalesce_frames
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
402 MODULE_PARM(stats_coalesce_ticks
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
405 MODULE_PARM(enable_wol
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
408 MODULE_PARM(enable_tso
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
410 #ifdef BCM_NIC_SEND_BD
411 MODULE_PARM(nic_tx_bd
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
414 MODULE_PARM(vlan_tag_mode
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
416 MODULE_PARM(delay_link
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
417 MODULE_PARM(disable_d3hot
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
419 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
420 MODULE_PARM(disable_msi
, "1-" __MODULE_STRING(MAX_UNITS
) "i");
425 #if (LINUX_VERSION_CODE >= 0x020605) && (LINUX_VERSION_CODE < 0x02060a)
433 #if (LINUX_VERSION_CODE >= 0x2060a)
/* 2.6.10+ module parameter declarations; numvar receives the number of
 * array entries actually supplied on the command line.  Permissions 0
 * keep the parameters out of sysfs. */
module_param_array(line_speed, int, numvar, 0);
module_param_array(auto_speed, int, numvar, 0);
module_param_array(full_duplex, int, numvar, 0);
module_param_array(rx_flow_control, int, numvar, 0);
module_param_array(tx_flow_control, int, numvar, 0);
module_param_array(auto_flow_control, int, numvar, 0);
445 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
446 module_param_array(mtu
, int, numvar
, 0);
448 module_param_array(tx_checksum
, int, numvar
, 0);
449 module_param_array(rx_checksum
, int, numvar
, 0);
450 module_param_array(scatter_gather
, int, numvar
, 0);
451 module_param_array(tx_pkt_desc_cnt
, int, numvar
, 0);
452 module_param_array(rx_std_desc_cnt
, int, numvar
, 0);
453 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
454 module_param_array(rx_jumbo_desc_cnt
, int, numvar
, 0);
457 module_param_array(adaptive_coalesce
, int, numvar
, 0);
458 module_param_array(rx_coalesce_ticks
, int, numvar
, 0);
459 module_param_array(rx_max_coalesce_frames
, int, numvar
, 0);
460 module_param_array(tx_coalesce_ticks
, int, numvar
, 0);
461 module_param_array(tx_max_coalesce_frames
, int, numvar
, 0);
462 module_param_array(stats_coalesce_ticks
, int, numvar
, 0);
465 module_param_array(enable_wol
, int, numvar
, 0);
468 module_param_array(enable_tso
, int, numvar
, 0);
470 #ifdef BCM_NIC_SEND_BD
471 module_param_array(nic_tx_bd
, int, numvar
, 0);
474 module_param_array(vlan_tag_mode
, int, numvar
, 0);
476 module_param_array(delay_link
, int, numvar
, 0);
477 module_param_array(disable_d3hot
, int, numvar
, 0);
479 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
480 module_param_array(disable_msi
, int, numvar
, 0);
489 #define RUN_AT(x) (jiffies + (x))
/* Release string of the kernel this module was built against. */
char kernel_version[] = UTS_RELEASE;
493 #define PCI_SUPPORT_VER2
495 #if !defined(CAP_NET_ADMIN)
496 #define capable(CAP_XXX) (suser())
499 #define tigon3_debug debug
501 static int tigon3_debug
= TIGON3_DEBUG
;
503 static int tigon3_debug
= 0;
505 static int msglevel
= 0xdeadbeef;
/* Forward declarations for the driver's net_device entry points and
 * internal timers (STATIC expands to `static` in normal builds). */
int bcm5700_open(struct net_device *dev);
STATIC void bcm5700_timer(unsigned long data);       /* general housekeeping timer */
STATIC void bcm5700_stats_timer(unsigned long data); /* statistics collection timer */
STATIC void bcm5700_reset(struct net_device *dev);
STATIC int bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev);
513 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
514 STATIC irqreturn_t
bcm5700_interrupt(int irq
, void *dev_instance
);
516 STATIC irqreturn_t
bcm5700_interrupt(int irq
, void *dev_instance
, struct pt_regs
*regs
);
519 STATIC
void bcm5700_tasklet(unsigned long data
);
521 STATIC
int bcm5700_close(struct net_device
*dev
);
522 STATIC
struct net_device_stats
*bcm5700_get_stats(struct net_device
*dev
);
523 STATIC
int bcm5700_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
);
524 STATIC
void bcm5700_do_rx_mode(struct net_device
*dev
);
525 STATIC
void bcm5700_set_rx_mode(struct net_device
*dev
);
526 STATIC
int bcm5700_set_mac_addr(struct net_device
*dev
, void *p
);
527 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
528 STATIC
int bcm5700_change_mtu(struct net_device
*dev
, int new_mtu
);
530 #ifdef BCM_NAPI_RXPOLL
531 STATIC
int bcm5700_poll(struct net_device
*dev
, int *budget
);
533 STATIC
int replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice
, int max
);
534 STATIC
int bcm5700_freemem(struct net_device
*dev
);
536 STATIC
int bcm5700_freemem2(UM_DEVICE_BLOCK
*pUmDevice
, int index
);
539 #ifndef BCM_NAPI_RXPOLL
540 STATIC
int bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice
);
543 STATIC
void bcm5700_set_vlan_mode(UM_DEVICE_BLOCK
*pUmDevice
);
544 STATIC
int bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice
);
546 STATIC
void bcm5700_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*vlgrp
);
547 STATIC
void bcm5700_vlan_rx_kill_vid(struct net_device
*dev
, uint16_t vid
);
549 void bcm5700_shutdown(UM_DEVICE_BLOCK
*pUmDevice
);
550 void bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK
*pUmDevice
);
551 void bcm5700_validate_param_range(UM_DEVICE_BLOCK
*pUmDevice
, int *param
,
552 char *param_name
, int min
, int max
, int deflt
);
554 static int bcm5700_notify_reboot(struct notifier_block
*this, unsigned long event
, void *unused
);
555 static struct notifier_block bcm5700_reboot_notifier
= {
556 bcm5700_notify_reboot
,
561 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
562 STATIC
void poll_bcm5700(struct net_device
*dev
);
565 /* A list of all installed bcm5700 devices. */
566 static struct net_device
*root_tigon3_dev
= NULL
;
568 #if defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) ||defined(CONFIG_PPC64)
571 #if (LINUX_VERSION_CODE < 0x20500)
572 extern int register_ioctl32_conversion(unsigned int cmd
,
573 int (*handler
)(unsigned int, unsigned int, unsigned long,
575 int unregister_ioctl32_conversion(unsigned int cmd
);
577 #include <linux/ioctl32.h>
580 #define BCM_IOCTL32 1
582 atomic_t bcm5700_load_count
= ATOMIC_INIT(0);
585 bcm5700_ioctl32(unsigned int fd
, unsigned int cmd
, unsigned long arg
,
589 struct net_device
*tmp_dev
= root_tigon3_dev
;
591 struct nice_req
* nrq
;
592 struct ifreq_nice32
{
600 if (!capable(CAP_NET_ADMIN
))
603 if (mm_copy_from_user(&nrq32
, (char *) arg
, 32))
606 memcpy(rq
.ifr_name
, nrq32
.ifnr_name
, 16);
608 nrq
= (struct nice_req
*) &rq
.ifr_ifru
;
609 nrq
->cmd
= nrq32
.cmd
;
610 if (nrq
->cmd
== NICE_CMD_GET_STATS_BLOCK
) {
611 nrq
->nrq_stats_useraddr
= (void *) ((__u64
) nrq32
.nrq1
);
612 nrq
->nrq_stats_size
= nrq32
.nrq2
;
615 memcpy(&nrq
->nrq_speed
, &nrq32
.nrq1
, 12);
618 if (strcmp(rq
.ifr_name
, tmp_dev
->name
) == 0) {
619 ret
= bcm5700_ioctl(tmp_dev
, &rq
, cmd
);
621 if (nrq
->cmd
== NICE_CMD_GET_STATS_BLOCK
)
624 memcpy(&nrq32
.nrq1
, &nrq
->nrq_speed
, 12);
625 if (mm_copy_to_user((char *) arg
, &nrq32
, 32))
630 tmp_dev
= ((UM_DEVICE_BLOCK
*)(tmp_dev
->priv
))->next_module
;
634 #endif /* NICE_SUPPORT */
718 /* indexed by board_t, above */
721 } board_info
[] __devinitdata
= {
722 { "Broadcom BCM5700 1000Base-T" },
723 { "Broadcom BCM5700 1000Base-SX" },
724 { "Broadcom BCM5700 1000Base-SX" },
725 { "Broadcom BCM5700 1000Base-T" },
726 { "Broadcom BCM5700" },
727 { "Broadcom BCM5701 1000Base-T" },
728 { "Broadcom BCM5701 1000Base-T" },
729 { "Broadcom BCM5701 1000Base-T" },
730 { "Broadcom BCM5701 1000Base-SX" },
731 { "Broadcom BCM5701 1000Base-T" },
732 { "Broadcom BCM5701 1000Base-T" },
733 { "Broadcom BCM5701" },
734 { "Broadcom BCM5702 1000Base-T" },
735 { "Broadcom BCM5703 1000Base-T" },
736 { "Broadcom BCM5703 1000Base-SX" },
737 { "Broadcom B5703 1000Base-SX" },
738 { "3Com 3C996 10/100/1000 Server NIC" },
739 { "3Com 3C996 10/100/1000 Server NIC" },
740 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
741 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
742 { "3Com 3C996B Gigabit Server NIC" },
743 { "3Com 3C997 Gigabit Server NIC" },
744 { "3Com 3C997 Gigabit Fiber-SX Server NIC" },
745 { "3Com 3C1000 Gigabit NIC" },
746 { "3Com 3C1000B-T 10/100/1000 PCI" },
747 { "3Com 3C940 Gigabit LOM (21X21)" },
748 { "3Com 3C942 Gigabit LOM (31X31)" },
749 { "3Com 3C998-T Dual Port 10/100/1000 PCI-X Server NIC" },
750 { "3Com 3C998-SX Dual Port 1000-SX PCI-X Server NIC" },
751 { "3Com 3C999-T Quad Port 10/100/1000 PCI-X Server NIC" },
752 { "HP NC6770 Gigabit Server Adapter" },
753 { "NC1020 HP ProLiant Gigabit Server Adapter 32 PCI" },
754 { "HP ProLiant NC 150T PCI 4-port Gigabit Combo Switch Adapter" },
755 { "HP NC7760 Gigabit Server Adapter" },
756 { "HP NC7761 Gigabit Server Adapter" },
757 { "HP NC7770 Gigabit Server Adapter" },
758 { "HP NC7771 Gigabit Server Adapter" },
759 { "HP NC7780 Gigabit Server Adapter" },
760 { "HP NC7781 Gigabit Server Adapter" },
761 { "HP NC7772 Gigabit Server Adapter" },
762 { "HP NC7782 Gigabit Server Adapter" },
763 { "HP NC7783 Gigabit Server Adapter" },
764 { "HP ProLiant NC 320T PCI Express Gigabit Server Adapter" },
765 { "HP ProLiant NC 320i PCI Express Gigabit Server Adapter" },
766 { "HP NC325i Integrated Dual Port PCI Express Gigabit Server Adapter" },
767 { "HP NC324i Integrated Dual Port PCI Express Gigabit Server Adapter" },
768 { "HP NC326i Integrated Dual Port PCI Express Gigabit Server Adapter" },
769 { "Broadcom BCM5704 CIOB-E 1000Base-T" },
770 { "Broadcom BCM5704 1000Base-T" },
771 { "Broadcom BCM5704 1000Base-SX" },
772 { "Broadcom BCM5705 1000Base-T" },
773 { "Broadcom BCM5705M 1000Base-T" },
774 { "Broadcom 570x 10/100 Integrated Controller" },
775 { "Broadcom BCM5901 100Base-TX" },
776 { "Broadcom NetXtreme Gigabit Ethernet for hp" },
777 { "Broadcom BCM5788 NetLink 1000Base-T" },
778 { "Broadcom BCM5789 NetLink 1000Base-T PCI Express" },
779 { "Broadcom BCM5750 1000Base-T PCI" },
780 { "Broadcom BCM5750M 1000Base-T PCI" },
781 { "Broadcom BCM5720 1000Base-T PCI" },
782 { "Broadcom BCM5751 1000Base-T PCI Express" },
783 { "Broadcom BCM5751M 1000Base-T PCI Express" },
784 { "Broadcom BCM5751F 100Base-TX PCI Express" },
785 { "Broadcom BCM5721 1000Base-T PCI Express" },
786 { "Broadcom BCM5753 1000Base-T PCI Express" },
787 { "Broadcom BCM5753M 1000Base-T PCI Express" },
788 { "Broadcom BCM5753F 100Base-TX PCI Express" },
789 { "Broadcom BCM5781 NetLink 1000Base-T PCI Express" },
790 { "Broadcom BCM5752 1000Base-T PCI Express" },
791 { "Broadcom BCM5752M 1000Base-T PCI Express" },
792 { "Broadcom BCM5714 1000Base-T " },
793 { "Broadcom BCM5780 1000Base-T" },
794 { "Broadcom BCM5780S 1000Base-SX" },
795 { "Broadcom BCM5715 1000Base-T " },
796 { "Broadcom BCM4785 10/100/1000 Integrated Controller" },
797 { "Broadcom BCM5903M Gigabit Ethernet " },
798 { "Unknown BCM5788 Gigabit Ethernet " },
802 static struct pci_device_id bcm5700_pci_tbl
[] __devinitdata
= {
803 {0x14e4, 0x1644, 0x14e4, 0x1644, 0, 0, BCM5700A6
},
804 {0x14e4, 0x1644, 0x14e4, 0x2, 0, 0, BCM5700T6
},
805 {0x14e4, 0x1644, 0x14e4, 0x3, 0, 0, BCM5700A9
},
806 {0x14e4, 0x1644, 0x14e4, 0x4, 0, 0, BCM5700T9
},
807 {0x14e4, 0x1644, 0x1028, 0xd1, 0, 0, BCM5700
},
808 {0x14e4, 0x1644, 0x1028, 0x0106, 0, 0, BCM5700
},
809 {0x14e4, 0x1644, 0x1028, 0x0109, 0, 0, BCM5700
},
810 {0x14e4, 0x1644, 0x1028, 0x010a, 0, 0, BCM5700
},
811 {0x14e4, 0x1644, 0x10b7, 0x1000, 0, 0, TC996T
},
812 {0x14e4, 0x1644, 0x10b7, 0x1001, 0, 0, TC996ST
},
813 {0x14e4, 0x1644, 0x10b7, 0x1002, 0, 0, TC996SSX
},
814 {0x14e4, 0x1644, 0x10b7, 0x1003, 0, 0, TC997T
},
815 {0x14e4, 0x1644, 0x10b7, 0x1005, 0, 0, TC997SX
},
816 {0x14e4, 0x1644, 0x10b7, 0x1008, 0, 0, TC942BR01
},
817 {0x14e4, 0x1644, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5700
},
818 {0x14e4, 0x1645, 0x14e4, 1, 0, 0, BCM5701A5
},
819 {0x14e4, 0x1645, 0x14e4, 5, 0, 0, BCM5701T1
},
820 {0x14e4, 0x1645, 0x14e4, 6, 0, 0, BCM5701T8
},
821 {0x14e4, 0x1645, 0x14e4, 7, 0, 0, BCM5701A7
},
822 {0x14e4, 0x1645, 0x14e4, 8, 0, 0, BCM5701A10
},
823 {0x14e4, 0x1645, 0x14e4, 0x8008, 0, 0, BCM5701A12
},
824 {0x14e4, 0x1645, 0x0e11, 0xc1, 0, 0, NC6770
},
825 {0x14e4, 0x1645, 0x0e11, 0x7c, 0, 0, NC7770
},
826 {0x14e4, 0x1645, 0x0e11, 0x85, 0, 0, NC7780
},
827 {0x14e4, 0x1645, 0x1028, 0x0121, 0, 0, BCM5701
},
828 {0x14e4, 0x1645, 0x10b7, 0x1004, 0, 0, TC996SX
},
829 {0x14e4, 0x1645, 0x10b7, 0x1006, 0, 0, TC996BT
},
830 {0x14e4, 0x1645, 0x10b7, 0x1007, 0, 0, TC1000T
},
831 {0x14e4, 0x1645, 0x10b7, 0x1008, 0, 0, TC940BR01
},
832 {0x14e4, 0x1645, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5701
},
833 {0x14e4, 0x1646, 0x14e4, 0x8009, 0, 0, BCM5702
},
834 {0x14e4, 0x1646, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5702
},
835 {0x14e4, 0x16a6, 0x14e4, 0x8009, 0, 0, BCM5702
},
836 {0x14e4, 0x16a6, 0x14e4, 0x000c, 0, 0, BCM5702
},
837 {0x14e4, 0x16a6, 0x0e11, 0xbb, 0, 0, NC7760
},
838 {0x14e4, 0x16a6, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5702
},
839 {0x14e4, 0x16c6, 0x10b7, 0x1100, 0, 0, TC1000BT
},
840 {0x14e4, 0x16c6, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5702
},
841 {0x14e4, 0x1647, 0x14e4, 0x0009, 0, 0, BCM5703
},
842 {0x14e4, 0x1647, 0x14e4, 0x000a, 0, 0, BCM5703A31
},
843 {0x14e4, 0x1647, 0x14e4, 0x000b, 0, 0, BCM5703
},
844 {0x14e4, 0x1647, 0x14e4, 0x800a, 0, 0, BCM5703
},
845 {0x14e4, 0x1647, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5703
},
846 {0x14e4, 0x16a7, 0x14e4, 0x0009, 0, 0, BCM5703
},
847 {0x14e4, 0x16a7, 0x14e4, 0x000a, 0, 0, BCM5703A31
},
848 {0x14e4, 0x16a7, 0x14e4, 0x000b, 0, 0, BCM5703
},
849 {0x14e4, 0x16a7, 0x14e4, 0x800a, 0, 0, BCM5703
},
850 {0x14e4, 0x16a7, 0x0e11, 0xca, 0, 0, NC7771
},
851 {0x14e4, 0x16a7, 0x0e11, 0xcb, 0, 0, NC7781
},
852 {0x14e4, 0x16a7, 0x1014, 0x0281, 0, 0, BCM5703ARBUCKLE
},
853 {0x14e4, 0x16a7, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5703
},
854 {0x14e4, 0x16c7, 0x14e4, 0x000a, 0, 0, BCM5703A31
},
855 {0x14e4, 0x16c7, 0x0e11, 0xca, 0, 0, NC7771
},
856 {0x14e4, 0x16c7, 0x0e11, 0xcb, 0, 0, NC7781
},
857 {0x14e4, 0x16c7, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5703
},
858 {0x14e4, 0x1648, 0x0e11, 0xcf, 0, 0, NC7772
},
859 {0x14e4, 0x1648, 0x0e11, 0xd0, 0, 0, NC7782
},
860 {0x14e4, 0x1648, 0x0e11, 0xd1, 0, 0, NC7783
},
861 {0x14e4, 0x1648, 0x10b7, 0x2000, 0, 0, TC998T
},
862 {0x14e4, 0x1648, 0x10b7, 0x3000, 0, 0, TC999T
},
863 {0x14e4, 0x1648, 0x1166, 0x1648, 0, 0, BCM5704CIOBE
},
864 {0x14e4, 0x1648, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5704
},
865 {0x14e4, 0x1649, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5704S
},
866 {0x14e4, 0x16a8, 0x14e4, 0x16a8, 0, 0, BCM5704S
},
867 {0x14e4, 0x16a8, 0x10b7, 0x2001, 0, 0, TC998SX
},
868 {0x14e4, 0x16a8, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5704S
},
869 {0x14e4, 0x1653, 0x0e11, 0x00e3, 0, 0, NC7761
},
870 {0x14e4, 0x1653, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5705
},
871 {0x14e4, 0x1654, 0x0e11, 0x00e3, 0, 0, NC7761
},
872 {0x14e4, 0x1654, 0x103c, 0x3100, 0, 0, NC1020
},
873 {0x14e4, 0x1654, 0x103c, 0x3226, 0, 0, NC150T
},
874 {0x14e4, 0x1654, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5705
},
875 {0x14e4, 0x165d, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5705M
},
876 {0x14e4, 0x165e, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5705M
},
877 {0x14e4, 0x166e, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5705F
},
878 {0x14e4, 0x1696, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5782
},
879 {0x14e4, 0x169c, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5788
},
880 {0x14e4, 0x169d, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5789
},
881 {0x14e4, 0x170d, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5901
},
882 {0x14e4, 0x170e, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5901
},
883 {0x14e4, 0x1676, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5750
},
884 {0x14e4, 0x167c, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5750M
},
885 {0x14e4, 0x1677, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5751
},
886 {0x14e4, 0x167d, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5751M
},
887 {0x14e4, 0x167e, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5751F
},
888 {0x14e4, 0x1658, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5720
},
889 {0x14e4, 0x1659, 0x103c, 0x7031, 0, 0, NC320T
},
890 {0x14e4, 0x1659, 0x103c, 0x7032, 0, 0, NC320T
},
891 {0x14e4, 0x166a, 0x103c, 0x7035, 0, 0, NC325I
},
892 {0x14e4, 0x166b, 0x103c, 0x7036, 0, 0, NC325I
},
893 {0x14e4, 0x1668, 0x103c, 0x7039, 0, 0, NC324I
},
894 {0x14e4, 0x1669, 0x103c, 0x703a, 0, 0, NC324I
},
895 {0x14e4, 0x1678, 0x103c, 0x703e, 0, 0, NC326I
},
896 {0x14e4, 0x1679, 0x103c, 0x703c, 0, 0, NC326I
},
897 {0x14e4, 0x1659, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5721
},
898 {0x14e4, 0x16f7, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5753
},
899 {0x14e4, 0x16fd, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5753M
},
900 {0x14e4, 0x16fe, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5753F
},
901 {0x14e4, 0x16dd, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5781
},
902 {0x14e4, 0x1600, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5752
},
903 {0x14e4, 0x1601, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5752M
},
904 {0x14e4, 0x1668, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5714
},
905 {0x14e4, 0x166a, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5780
},
906 {0x14e4, 0x166b, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5780S
},
907 {0x14e4, 0x1678, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5715
},
908 {0x14e4, 0x471f, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM4785
},
909 {0x14e4, 0x16ff, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5903M
},
910 {0x173b, 0x03ed, PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, UNK5788
},
914 MODULE_DEVICE_TABLE(pci
, bcm5700_pci_tbl
);
917 extern int bcm5700_proc_create(void);
918 extern int bcm5700_proc_create_dev(struct net_device
*dev
);
919 extern int bcm5700_proc_remove_dev(struct net_device
*dev
);
920 extern int bcm5700_proc_remove_notifier(void);
923 #if (LINUX_VERSION_CODE >= 0x2060a)
924 static struct pci_device_id pci_AMD762id
[]={
925 { PCI_DEVICE(PCI_VENDOR_ID_AMD
,
926 PCI_DEVICE_ID_AMD_FE_GATE_700C
) },
931 static int sbgige
= -1;
933 /*******************************************************************************
934 *******************************************************************************
937 int get_csum_flag(LM_UINT32 ChipRevId
)
939 return NETIF_F_IP_CSUM
;
942 /*******************************************************************************
943 *******************************************************************************
945 This function returns true if the device passed to it is attached to an
946 ICH-ICH4. If the chip is not attached to an ICH, or is attached to an ICH5
947 or newer, it returns false.
This function determines which bridge it is attached to by scanning the pci
950 bus looking for bridge chips (hdr_type=1). When a bridge chip is detected,
951 the bridge's subordinate's secondary bus number is compared with this
952 devices bus number. If they match, then the device is attached to this
953 bridge. The bridge's device id is compared to a list of known device ids for
954 ICH-ICH4. Since many older ICH's (ICH2-ICH7) share the same device id, the
955 chip revision must also be checked to determine if the chip is older than an
958 To scan the bus, one of two functions is used depending on the kernel
959 version. For 2.4 kernels, the pci_find_device function is used. This
function has been deprecated in the 2.6 kernel and replaced with the
function pci_get_device. The macro walk_pci_bus determines which function to
962 use when the driver is built.
965 #if (LINUX_VERSION_CODE >= 0x2060a)
966 #define walk_pci_bus(d) while ((d = pci_get_device( \
967 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
969 #define unwalk_pci_bus(d) pci_dev_put(d)
972 #define walk_pci_bus(d) while ((d = pci_find_device( \
973 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
974 #define unwalk_pci_bus(d)
978 #define ICH5_CHIP_VERSION 0xc0
980 static struct pci_device_id pci_ICHtable
[] = {
981 {0x8086, 0x2418}, /* PCI_DEVICE_ID_INTEL_82801AA_8 */
982 {0x8086, 0x2428}, /* PCI_DEVICE_ID_INTEL_82801AB_8 */
983 {0x8086, 0x244e}, /* PCI_DEVICE_ID_INTEL_82801BA_6 */
984 {0x8086, 0x2448}, /* PCI_DEVICE_ID_INTEL_82801BA_11 */
988 int attached_to_ICH4_or_older( struct pci_dev
*pdev
)
990 struct pci_dev
*tmp_pdev
= NULL
;
991 struct pci_device_id
*ich_table
;
994 walk_pci_bus (tmp_pdev
) {
995 if ((tmp_pdev
->hdr_type
== 1) &&
996 (tmp_pdev
->subordinate
!= NULL
) &&
997 (tmp_pdev
->subordinate
->secondary
== pdev
->bus
->number
)) {
999 ich_table
= pci_ICHtable
;
1001 while (ich_table
->vendor
) {
1002 if ((ich_table
->vendor
== tmp_pdev
->vendor
) &&
1003 (ich_table
->device
== tmp_pdev
->device
)) {
1005 pci_read_config_byte( tmp_pdev
,
1006 PCI_REVISION_ID
, &chip_rev
);
1008 if (chip_rev
< ICH5_CHIP_VERSION
) {
1009 unwalk_pci_bus( tmp_pdev
);
1021 __devinit
bcm5700_init_board(struct pci_dev
*pdev
, struct net_device
**dev_out
, int board_idx
)
1023 struct net_device
*dev
;
1024 PUM_DEVICE_BLOCK pUmDevice
;
1025 PLM_DEVICE_BLOCK pDevice
;
1032 /* dev zeroed in init_etherdev */
1033 #if (LINUX_VERSION_CODE >= 0x20600)
1034 dev
= alloc_etherdev(sizeof(*pUmDevice
));
1036 dev
= init_etherdev(NULL
, sizeof(*pUmDevice
));
1039 printk(KERN_ERR
"%s: unable to alloc new ethernet\n", bcm5700_driver
);
1042 SET_MODULE_OWNER(dev
);
1043 #if (LINUX_VERSION_CODE >= 0x20600)
1044 SET_NETDEV_DEV(dev
, &pdev
->dev
);
1046 pUmDevice
= (PUM_DEVICE_BLOCK
) dev
->priv
;
1048 /* enable device (incl. PCI PM wakeup), and bus-mastering */
1049 rc
= pci_enable_device(pdev
);
1053 /* init core specific stuff */
1054 if (pdev
->device
== T3_PCI_DEVICE_ID(T3_PCI_ID_BCM471F
)) {
1055 sih
= si_kattach(SI_OSH
);
1056 hndgige_init(sih
, ++sbgige
, &rgmii
);
1059 rc
= pci_request_regions(pdev
, bcm5700_driver
);
1063 printk(KERN_INFO
"bcm5700_init_board: pci_request_regions returned error %d\n"
1064 "This may be because the region is already requested by"
1065 " the SMBus driver. Ignore the PCI error messages.\n", rc
);
1068 pci_set_master(pdev
);
1070 if (pci_set_dma_mask(pdev
, BCM_64BIT_DMA_MASK
) == 0) {
1071 pUmDevice
->using_dac
= 1;
1072 if (pci_set_consistent_dma_mask(pdev
, BCM_64BIT_DMA_MASK
) != 0) {
1073 printk(KERN_ERR
"pci_set_consistent_dma_mask failed\n");
1074 pci_release_regions(pdev
);
1077 } else if (pci_set_dma_mask(pdev
, BCM_32BIT_DMA_MASK
) == 0) {
1078 pUmDevice
->using_dac
= 0;
1080 printk(KERN_ERR
"System does not support DMA\n");
1081 pci_release_regions(pdev
);
1085 pUmDevice
->dev
= dev
;
1086 pUmDevice
->pdev
= pdev
;
1087 pUmDevice
->mem_list_num
= 0;
1088 pUmDevice
->next_module
= root_tigon3_dev
;
1089 pUmDevice
->index
= board_idx
;
1090 pUmDevice
->sih
= (void *)sih
;
1091 root_tigon3_dev
= dev
;
1093 spin_lock_init(&pUmDevice
->global_lock
);
1095 spin_lock_init(&pUmDevice
->undi_lock
);
1097 spin_lock_init(&pUmDevice
->phy_lock
);
1099 pDevice
= &pUmDevice
->lm_dev
;
1101 pDevice
->FunctNum
= PCI_FUNC(pUmDevice
->pdev
->devfn
);
1102 pUmDevice
->boardflags
= getintvar(NULL
, "boardflags");
1104 if (pUmDevice
->boardflags
& BFL_ENETROBO
)
1105 pDevice
->Flags
|= ROBO_SWITCH_FLAG
;
1106 pDevice
->Flags
|= rgmii
? RGMII_MODE_FLAG
: 0;
1107 if ((sih
->chip
== BCM4785_CHIP_ID
) && (sih
->chiprev
< 2))
1108 pDevice
->Flags
|= ONE_DMA_AT_ONCE_FLAG
;
1109 pDevice
->Flags
|= SB_CORE_FLAG
;
1110 if (sih
->chip
== BCM4785_CHIP_ID
)
1111 pDevice
->Flags
|= FLUSH_POSTED_WRITE_FLAG
;
1114 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1115 if (board_idx
< MAX_UNITS
) {
1116 bcm5700_validate_param_range(pUmDevice
, &mtu
[board_idx
], "mtu", 1500, 9000, 1500);
1117 dev
->mtu
= mtu
[board_idx
];
1121 if (attached_to_ICH4_or_older(pdev
)) {
1122 pDevice
->Flags
|= UNDI_FIX_FLAG
;
1125 #if (LINUX_VERSION_CODE >= 0x2060a)
1126 if (pci_dev_present(pci_AMD762id
)) {
1127 pDevice
->Flags
|= FLUSH_POSTED_WRITE_FLAG
;
1128 pDevice
->Flags
&= ~NIC_SEND_BD_FLAG
;
1131 if (pci_find_device(0x1022, 0x700c, NULL
)) {
1132 /* AMD762 writes I/O out of order */
1133 /* Setting bit 1 in 762's register 0x4C still doesn't work */
1135 pDevice
->Flags
|= FLUSH_POSTED_WRITE_FLAG
;
1136 pDevice
->Flags
&= ~NIC_SEND_BD_FLAG
;
1139 if (LM_GetAdapterInfo(pDevice
) != LM_STATUS_SUCCESS
) {
1144 if (pDevice
->Flags
& ROBO_SWITCH_FLAG
) {
1147 if ((robo
= bcm_robo_attach(sih
, pDevice
, NULL
,
1148 robo_miird
, robo_miiwr
)) == NULL
) {
1149 B57_ERR(("robo_setup: failed to attach robo switch \n"));
1153 if (bcm_robo_enable_device(robo
)) {
1154 B57_ERR(("robo_setup: failed to enable robo switch \n"));
1158 /* Configure the switch to do VLAN */
1159 if ((pUmDevice
->boardflags
& BFL_ENETVLAN
) &&
1160 bcm_robo_config_vlan(robo
, pDevice
->PermanentNodeAddress
)) {
1161 B57_ERR(("robo_setup: robo_config_vlan failed\n"));
1165 /* Enable the switch */
1166 if (bcm_robo_enable_switch(robo
)) {
1167 B57_ERR(("robo_setup: robo_enable_switch failed\n"));
1169 bcm_robo_detach(robo
);
1173 pUmDevice
->robo
= (void *)robo
;
1176 if ((pDevice
->Flags
& JUMBO_CAPABLE_FLAG
) == 0) {
1177 if (dev
->mtu
> 1500) {
1180 "%s-%d: Jumbo mtu sizes not supported, using mtu=1500\n",
1181 bcm5700_driver
, pUmDevice
->index
);
1185 pUmDevice
->do_global_lock
= 0;
1186 if (T3_ASIC_REV(pUmDevice
->lm_dev
.ChipRevId
) == T3_ASIC_REV_5700
) {
1187 /* The 5700 chip works best without interleaved register */
1188 /* accesses on certain machines. */
1189 pUmDevice
->do_global_lock
= 1;
1192 if ((T3_ASIC_REV(pUmDevice
->lm_dev
.ChipRevId
) == T3_ASIC_REV_5701
) &&
1193 ((pDevice
->PciState
& T3_PCI_STATE_NOT_PCI_X_BUS
) == 0)) {
1195 pUmDevice
->rx_buf_align
= 0;
1197 pUmDevice
->rx_buf_align
= 2;
1199 dev
->mem_start
= pci_resource_start(pdev
, 0);
1200 dev
->mem_end
= dev
->mem_start
+ sizeof(T3_STD_MEM_MAP
);
1201 dev
->irq
= pdev
->irq
;
1206 pUmDevice
->osh
= osl_attach(pdev
, PCI_BUS
, FALSE
);
1208 pUmDevice
->cih
= ctf_attach(pUmDevice
->osh
, dev
->name
, &b57_msg_level
, NULL
, NULL
);
1210 ctf_dev_register(pUmDevice
->cih
, dev
, FALSE
);
1211 ctf_enable(pUmDevice
->cih
, dev
, TRUE
);
1217 pci_release_regions(pdev
);
1218 bcm5700_freemem(dev
);
1221 #if (LINUX_VERSION_CODE < 0x020600)
1222 unregister_netdev(dev
);
/*
 * bcm5700_print_ver() - print the driver banner (name, optional NICE
 * extension notice, version and date) once at probe time.
 */
1230 static int __devinit
1231 bcm5700_print_ver(void)
1233 printk(KERN_INFO
"Broadcom Gigabit Ethernet Driver %s ",
/* Printed only when NICE support is compiled in (conditional lines are
 * missing from this extraction). */
1236 printk("with Broadcom NIC Extension (NICE) ");
1238 printk("ver. %s %s\n", bcm5700_version
, bcm5700_date
);
1242 static int __devinit
1243 bcm5700_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1245 struct net_device
*dev
= NULL
;
1246 PUM_DEVICE_BLOCK pUmDevice
;
1247 PLM_DEVICE_BLOCK pDevice
;
1249 static int board_idx
= -1;
1250 static int printed_version
= 0;
1251 struct pci_dev
*pci_dev
;
1255 if (!printed_version
) {
1256 bcm5700_print_ver();
1258 bcm5700_proc_create();
1260 printed_version
= 1;
1263 i
= bcm5700_init_board(pdev
, &dev
, board_idx
);
1272 if (atomic_read(&bcm5700_load_count
) == 0) {
1273 register_ioctl32_conversion(SIOCNICE
, bcm5700_ioctl32
);
1275 atomic_inc(&bcm5700_load_count
);
1277 dev
->open
= bcm5700_open
;
1278 dev
->hard_start_xmit
= bcm5700_start_xmit
;
1279 dev
->stop
= bcm5700_close
;
1280 dev
->get_stats
= bcm5700_get_stats
;
1281 dev
->set_multicast_list
= bcm5700_set_rx_mode
;
1282 dev
->do_ioctl
= bcm5700_ioctl
;
1283 dev
->set_mac_address
= &bcm5700_set_mac_addr
;
1284 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1285 dev
->change_mtu
= &bcm5700_change_mtu
;
1287 #if (LINUX_VERSION_CODE >= 0x20400)
1288 dev
->tx_timeout
= bcm5700_reset
;
1289 dev
->watchdog_timeo
= TX_TIMEOUT
;
1292 dev
->vlan_rx_register
= &bcm5700_vlan_rx_register
;
1293 dev
->vlan_rx_kill_vid
= &bcm5700_vlan_rx_kill_vid
;
1295 #ifdef BCM_NAPI_RXPOLL
1296 dev
->poll
= bcm5700_poll
;
1300 pUmDevice
= (PUM_DEVICE_BLOCK
) dev
->priv
;
1301 pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
1303 dev
->base_addr
= pci_resource_start(pdev
, 0);
1304 dev
->irq
= pdev
->irq
;
1305 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
1306 dev
->poll_controller
= poll_bcm5700
;
1309 #if (LINUX_VERSION_CODE >= 0x20600)
1310 if ((i
= register_netdev(dev
))) {
1311 printk(KERN_ERR
"%s: Cannot register net device\n",
1313 if (pUmDevice
->lm_dev
.pMappedMemBase
)
1314 iounmap(pUmDevice
->lm_dev
.pMappedMemBase
);
1315 pci_release_regions(pdev
);
1316 bcm5700_freemem(dev
);
1323 pci_set_drvdata(pdev
, dev
);
1325 memcpy(dev
->dev_addr
, pDevice
->NodeAddress
, 6);
1326 pUmDevice
->name
= board_info
[ent
->driver_data
].name
,
1327 printk(KERN_INFO
"%s: %s found at mem %lx, IRQ %d, ",
1328 dev
->name
, pUmDevice
->name
, dev
->base_addr
,
1330 printk("node addr ");
1331 for (i
= 0; i
< 6; i
++) {
1332 printk("%2.2x", dev
->dev_addr
[i
]);
1336 printk(KERN_INFO
"%s: ", dev
->name
);
1337 if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5400_PHY_ID
)
1338 printk("Broadcom BCM5400 Copper ");
1339 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5401_PHY_ID
)
1340 printk("Broadcom BCM5401 Copper ");
1341 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5411_PHY_ID
)
1342 printk("Broadcom BCM5411 Copper ");
1343 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5461_PHY_ID
)
1344 printk("Broadcom BCM5461 Copper ");
1345 else if (((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5701_PHY_ID
) &&
1346 !(pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)) {
1347 printk("Broadcom BCM5701 Integrated Copper ");
1349 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5703_PHY_ID
) {
1350 printk("Broadcom BCM5703 Integrated ");
1351 if (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)
1356 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5704_PHY_ID
) {
1357 printk("Broadcom BCM5704 Integrated ");
1358 if (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)
1363 else if (pDevice
->PhyFlags
& PHY_IS_FIBER
){
1364 if(( pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5780_PHY_ID
)
1365 printk("Broadcom BCM5780S Integrated Serdes ");
1368 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5705_PHY_ID
)
1369 printk("Broadcom BCM5705 Integrated Copper ");
1370 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5750_PHY_ID
)
1371 printk("Broadcom BCM5750 Integrated Copper ");
1373 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5714_PHY_ID
)
1374 printk("Broadcom BCM5714 Integrated Copper ");
1375 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5780_PHY_ID
)
1376 printk("Broadcom BCM5780 Integrated Copper ");
1378 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM5752_PHY_ID
)
1379 printk("Broadcom BCM5752 Integrated Copper ");
1380 else if ((pDevice
->PhyId
& PHY_ID_MASK
) == PHY_BCM8002_PHY_ID
)
1381 printk("Broadcom BCM8002 SerDes ");
1382 else if (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) {
1383 if (T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5703
) {
1384 printk("Broadcom BCM5703 Integrated SerDes ");
1386 else if (T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5704
) {
1387 printk("Broadcom BCM5704 Integrated SerDes ");
1390 printk("Agilent HDMP-1636 SerDes ");
1396 printk("transceiver found\n");
1398 #if (LINUX_VERSION_CODE >= 0x20400)
1399 if (scatter_gather
[board_idx
]) {
1400 dev
->features
|= NETIF_F_SG
;
1401 if (pUmDevice
->using_dac
&& !(pDevice
->Flags
& BCM5788_FLAG
))
1402 dev
->features
|= NETIF_F_HIGHDMA
;
1404 if ((pDevice
->TaskOffloadCap
& LM_TASK_OFFLOAD_TX_TCP_CHECKSUM
) &&
1405 tx_checksum
[board_idx
]) {
1407 dev
->features
|= get_csum_flag( pDevice
->ChipRevId
);
1410 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
1413 /* On 5714/15/80 chips, Jumbo Frames and TSO cannot both be enabled at
1414 the same time. Since only one of these features can be enable at a
1415 time, we'll enable only Jumbo Frames and disable TSO when the user
1416 tries to enable both.
1418 dev
->features
&= ~NETIF_F_TSO
;
1420 if ((pDevice
->TaskToOffload
& LM_TASK_OFFLOAD_TCP_SEGMENTATION
) &&
1421 (enable_tso
[board_idx
])) {
1422 if (T3_ASIC_5714_FAMILY(pDevice
->ChipRevId
) &&
1423 (dev
->mtu
> 1500)) {
1424 printk(KERN_ALERT
"%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev
->name
);
1426 dev
->features
|= NETIF_F_TSO
;
1430 printk(KERN_INFO
"%s: Scatter-gather %s, 64-bit DMA %s, Tx Checksum %s, ",
1432 (char *) ((dev
->features
& NETIF_F_SG
) ? "ON" : "OFF"),
1433 (char *) ((dev
->features
& NETIF_F_HIGHDMA
) ? "ON" : "OFF"),
1434 (char *) ((dev
->features
& get_csum_flag( pDevice
->ChipRevId
)) ? "ON" : "OFF"));
1436 if ((pDevice
->ChipRevId
!= T3_CHIP_ID_5700_B0
) &&
1437 rx_checksum
[board_idx
])
1438 printk("Rx Checksum ON");
1440 printk("Rx Checksum OFF");
1442 printk(", 802.1Q VLAN ON");
1445 if (dev
->features
& NETIF_F_TSO
) {
1450 #ifdef BCM_NAPI_RXPOLL
1451 printk(", NAPI ON");
1456 bcm5700_proc_create_dev(dev
);
1458 register_reboot_notifier(&bcm5700_reboot_notifier
);
1460 tasklet_init(&pUmDevice
->tasklet
, bcm5700_tasklet
,
1461 (unsigned long) pUmDevice
);
1463 if (T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5704
) {
1464 if ((REG_RD(pDevice
, PciCfg
.DualMacCtrl
) &
1465 T3_DUAL_MAC_CH_CTRL_MASK
) == 3) {
1467 printk(KERN_WARNING
"%s: Device is configured for Hardware Based Teaming which is not supported with this operating system. Please consult the user diagnostic guide to disable Turbo Teaming.\n", dev
->name
);
1471 #if (LINUX_VERSION_CODE > 0x20605)
1473 if ((pci_dev
= pci_get_device(0x1022, 0x700c, NULL
)))
1475 if ((pci_dev
= pci_find_device(0x1022, 0x700c, NULL
)))
1480 /* Found AMD 762 North bridge */
1481 pci_read_config_dword(pci_dev
, 0x4c, &val
);
1482 if ((val
& 0x02) == 0) {
1483 pci_write_config_dword(pci_dev
, 0x4c, val
| 0x02);
1484 printk(KERN_INFO
"%s: Setting AMD762 Northbridge to enable PCI ordering compliance\n", bcm5700_driver
);
1488 #if (LINUX_VERSION_CODE > 0x20605)
1490 pci_dev_put(pci_dev
);
1492 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1494 if ((pci_dev
= pci_get_device(0x1066, 0x0017, NULL
))) {
1495 bcm_msi_chipset_bug
= 1;
1497 pci_dev_put(pci_dev
);
/*
 * bcm5700_remove_one() - PCI hot-remove / driver-unload callback.
 * Tears down in reverse order of bcm5700_init_one: proc entries,
 * ioctl32 registration (when the last unit goes away), CTF hook,
 * netdev registration, register mapping, and PCI regions.
 */
1505 static void __devexit
1506 bcm5700_remove_one (struct pci_dev
*pdev
)
1508 struct net_device
*dev
= pci_get_drvdata (pdev
);
1509 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
1512 bcm5700_proc_remove_dev(dev
);
/* Unregister the 32-bit ioctl translation only when the last bcm5700
 * instance is removed. */
1515 atomic_dec(&bcm5700_load_count
);
1516 if (atomic_read(&bcm5700_load_count
) == 0)
1517 unregister_ioctl32_conversion(SIOCNICE
);
1520 ctf_dev_unregister(pUmDevice
->cih
, dev
);
1522 unregister_netdev(dev
);
/* Unmap the chip's register BAR if it was mapped at init time. */
1524 if (pUmDevice
->lm_dev
.pMappedMemBase
)
1525 iounmap(pUmDevice
->lm_dev
.pMappedMemBase
);
1527 pci_release_regions(pdev
);
1529 #if (LINUX_VERSION_CODE < 0x020600)
1535 pci_set_drvdata(pdev
, NULL
);
1539 int b57_test_intr(UM_DEVICE_BLOCK
*pUmDevice
);
1541 #ifdef BCM_WL_EMULATOR
1542 /* new transmit callback */
1543 static int bcm5700emu_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
);
1544 /* keep track of the 2 gige devices */
1545 static PLM_DEVICE_BLOCK pDev1
;
1546 static PLM_DEVICE_BLOCK pDev2
;
1549 bcm5700emu_open(struct net_device
*dev
)
1551 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
1552 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
1553 static int instance
= 0;
1554 static char *wlemu_if
= NULL
;
1555 char *wlemu_mode
= NULL
;
1556 //int wlemu_idx = 0;
1557 static int rx_enable
= 0;
1558 static int tx_enable
= 0;
1560 /* which interface is the emulator ? */
1562 wlemu_if
= nvram_get("wlemu_if");
1563 /* do we emulate rx, tx or both */
1564 wlemu_mode
= nvram_get("wlemu_mode");
1566 if (!strcmp(wlemu_mode
,"rx"))
1570 else if (!strcmp(wlemu_mode
,"tx"))
1576 else if (!strcmp(wlemu_mode
,"rx_tx"))
1587 /* The context is used for accessing the OSL for emulating devices */
1588 pDevice
->wlc
= NULL
;
1590 /* determines if this device is an emulator */
1591 pDevice
->wl_emulate_rx
= 0;
1592 pDevice
->wl_emulate_tx
= 0;
1594 if(wlemu_if
&& !strcmp(dev
->name
,wlemu_if
))
1596 /* create an emulator context. */
1597 pDevice
->wlc
= (void *)wlcemu_wlccreate((void *)dev
);
1598 B57_INFO(("Using %s for wl emulation \n", dev
->name
));
1601 B57_INFO(("Enabling wl RX emulation \n"));
1602 pDevice
->wl_emulate_rx
= 1;
1604 /* re-direct transmit callback to emulator */
1607 pDevice
->wl_emulate_tx
= 1;
1608 dev
->hard_start_xmit
= bcm5700emu_start_xmit
;
1609 B57_INFO(("Enabling wl TX emulation \n"));
1612 /* for debug access to configured devices only */
1615 else if (instance
== 2)
1619 /* Public API to get current emulation info */
1620 int bcm5700emu_get_info(char *buf
)
1625 /* look for an emulating device */
1628 len
+= sprintf(buf
+len
,"emulation device : eth0\n");
1630 else if (pDev2
->wlc
) {
1632 len
+= sprintf(buf
+len
,"emulation device : eth1\n");
1635 len
+= sprintf(buf
+len
,"emulation not activated\n");
1638 if(p
->wl_emulate_rx
)
1639 len
+= sprintf(buf
+len
,"RX emulation enabled\n");
1641 len
+= sprintf(buf
+len
,"RX emulation disabled\n");
1642 if(p
->wl_emulate_tx
)
1643 len
+= sprintf(buf
+len
,"TX emulation enabled\n");
1645 len
+= sprintf(buf
+len
,"TX emulation disabled\n");
1651 /* Public API to access the bcm5700_start_xmit callback */
/* Thin pass-through so external (emulator) code can inject frames into
 * the real transmit path. */
1654 bcm5700emu_forward_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1656 return bcm5700_start_xmit(skb
, dev
);
1660 /* hook to kernel txmit callback */
/*
 * bcm5700emu_start_xmit() - replacement hard_start_xmit installed by
 * bcm5700emu_open() when TX emulation is enabled; diverts outgoing
 * frames to the wl emulator context instead of the hardware.
 */
1662 bcm5700emu_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1665 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
1666 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
/* Hand the skb to the emulator created in bcm5700emu_open(). */
1667 return wlcemu_start_xmit(skb
,pDevice
->wlc
);
1670 #endif /* BCM_WL_EMULATOR */
1673 bcm5700_open(struct net_device
*dev
)
1675 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
1676 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
1679 if (pUmDevice
->suspended
){
1683 #ifdef BCM_WL_EMULATOR
1684 bcm5700emu_open(dev
);
1687 /* delay for 6 seconds */
1688 pUmDevice
->delayed_link_ind
= (6 * HZ
) / pUmDevice
->timer_interval
;
1691 #ifndef BCM_NAPI_RXPOLL
1692 pUmDevice
->adaptive_expiry
= HZ
/ pUmDevice
->timer_interval
;
1696 #ifdef INCLUDE_TBI_SUPPORT
1697 if ((pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) &&
1698 (pDevice
->TbiFlags
& TBI_POLLING_FLAGS
)) {
1699 pUmDevice
->poll_tbi_interval
= HZ
/ pUmDevice
->timer_interval
;
1700 if (T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5703
) {
1701 pUmDevice
->poll_tbi_interval
/= 4;
1703 pUmDevice
->poll_tbi_expiry
= pUmDevice
->poll_tbi_interval
;
1706 /* set this timer for 2 seconds */
1707 pUmDevice
->asf_heartbeat
= (2 * HZ
) / pUmDevice
->timer_interval
;
1709 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1712 if ( ( (T3_ASIC_IS_575X_PLUS(pDevice
->ChipRevId
) ) &&
1713 (T3_ASIC_REV(pDevice
->ChipRevId
) != T3_ASIC_REV_5714_A0
) &&
1714 (T3_CHIP_REV(pDevice
->ChipRevId
) != T3_CHIP_REV_5750_AX
) &&
1715 (T3_CHIP_REV(pDevice
->ChipRevId
) != T3_CHIP_REV_5750_BX
) ) &&
1716 !bcm_msi_chipset_bug
){
1718 if (disable_msi
[pUmDevice
->index
]==1){
1719 /* do nothing-it's not turned on */
1721 pDevice
->Flags
|= USING_MSI_FLAG
;
1723 REG_WR(pDevice
, Msi
.Mode
, 2 );
1725 rc
= pci_enable_msi(pUmDevice
->pdev
);
1728 pDevice
->Flags
&= ~ USING_MSI_FLAG
;
1729 REG_WR(pDevice
, Msi
.Mode
, 1 );
1737 if ((rc
= request_irq(pUmDevice
->pdev
->irq
, &bcm5700_interrupt
, IRQF_SHARED
, dev
->name
, dev
)))
1740 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1742 if(pDevice
->Flags
& USING_MSI_FLAG
) {
1744 pci_disable_msi(pUmDevice
->pdev
);
1745 pDevice
->Flags
&= ~USING_MSI_FLAG
;
1746 REG_WR(pDevice
, Msi
.Mode
, 1 );
1753 pUmDevice
->opened
= 1;
1754 if (LM_InitializeAdapter(pDevice
) != LM_STATUS_SUCCESS
) {
1755 pUmDevice
->opened
= 0;
1756 free_irq(dev
->irq
, dev
);
1757 bcm5700_freemem(dev
);
1761 bcm5700_set_vlan_mode(pUmDevice
);
1762 bcm5700_init_counters(pUmDevice
);
1764 if (pDevice
->Flags
& UNDI_FIX_FLAG
) {
1765 printk(KERN_INFO
"%s: Using indirect register access\n", dev
->name
);
1768 if (memcmp(dev
->dev_addr
, pDevice
->NodeAddress
, 6))
1770 /* Do not use invalid eth addrs: any multicast & all zeros */
1771 if( is_valid_ether_addr(dev
->dev_addr
) ){
1772 LM_SetMacAddress(pDevice
, dev
->dev_addr
);
1776 printk(KERN_INFO
"%s: Invalid administered node address\n",dev
->name
);
1777 memcpy(dev
->dev_addr
, pDevice
->NodeAddress
, 6);
1781 if (tigon3_debug
> 1)
1782 printk(KERN_DEBUG
"%s: tigon3_open() irq %d.\n", dev
->name
, dev
->irq
);
1784 QQ_InitQueue(&pUmDevice
->rx_out_of_buf_q
.Container
,
1785 MAX_RX_PACKET_DESC_COUNT
);
1788 #if (LINUX_VERSION_CODE < 0x020300)
1792 atomic_set(&pUmDevice
->intr_sem
, 0);
1794 LM_EnableInterrupt(pDevice
);
1796 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1798 if (pDevice
->Flags
& USING_MSI_FLAG
){
1800 /* int test to check support on older machines */
1801 if (b57_test_intr(pUmDevice
) != 1) {
1803 LM_DisableInterrupt(pDevice
);
1804 free_irq(pUmDevice
->pdev
->irq
, dev
);
1805 pci_disable_msi(pUmDevice
->pdev
);
1806 REG_WR(pDevice
, Msi
.Mode
, 1 );
1807 pDevice
->Flags
&= ~USING_MSI_FLAG
;
1809 rc
= LM_ResetAdapter(pDevice
);
1810 printk(KERN_ALERT
" The MSI support in this system is not functional.\n");
1812 if (rc
== LM_STATUS_SUCCESS
)
1818 rc
= request_irq(pUmDevice
->pdev
->irq
, &bcm5700_interrupt
,
1819 SA_SHIRQ
, dev
->name
, dev
);
1824 bcm5700_freemem(dev
);
1825 pUmDevice
->opened
= 0;
1830 pDevice
->InitDone
= TRUE
;
1831 atomic_set(&pUmDevice
->intr_sem
, 0);
1832 LM_EnableInterrupt(pDevice
);
1837 init_timer(&pUmDevice
->timer
);
1838 pUmDevice
->timer
.expires
= RUN_AT(pUmDevice
->timer_interval
);
1839 pUmDevice
->timer
.data
= (unsigned long)dev
;
1840 pUmDevice
->timer
.function
= &bcm5700_timer
;
1841 add_timer(&pUmDevice
->timer
);
1843 if (T3_ASIC_IS_5705_BEYOND(pDevice
->ChipRevId
)) {
1844 init_timer(&pUmDevice
->statstimer
);
1845 pUmDevice
->statstimer
.expires
= RUN_AT(pUmDevice
->statstimer_interval
);
1846 pUmDevice
->statstimer
.data
= (unsigned long)dev
;
1847 pUmDevice
->statstimer
.function
= &bcm5700_stats_timer
;
1848 add_timer(&pUmDevice
->statstimer
);
1851 if(pDevice
->Flags
& USING_MSI_FLAG
)
1852 printk(KERN_INFO
"%s: Using Message Signaled Interrupt (MSI) \n", dev
->name
);
1854 printk(KERN_INFO
"%s: Using PCI INTX interrupt \n", dev
->name
);
1856 netif_start_queue(dev
);
/*
 * bcm5700_stats_timer() - periodic kernel timer that pulls hardware
 * statistics while the link is up, then re-arms itself. Used on
 * 5705-and-later chips (armed from bcm5700_open).
 */
1863 bcm5700_stats_timer(unsigned long data
)
1865 struct net_device
*dev
= (struct net_device
*)data
;
1866 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
1867 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
1868 unsigned long flags
= 0;
/* Bail out (without re-arming) if the interface is closed. */
1870 if (!pUmDevice
->opened
)
/* Only read stats when interrupts are enabled, the device is not
 * suspended, and the link is active. */
1873 if (!atomic_read(&pUmDevice
->intr_sem
) &&
1874 !pUmDevice
->suspended
&&
1875 (pDevice
->LinkStatus
== LM_STATUS_LINK_ACTIVE
)) {
1876 BCM5700_LOCK(pUmDevice
, flags
);
1877 LM_GetStats(pDevice
);
1878 BCM5700_UNLOCK(pUmDevice
, flags
);
/* Re-arm for the next statistics interval. */
1881 pUmDevice
->statstimer
.expires
= RUN_AT(pUmDevice
->statstimer_interval
);
1883 add_timer(&pUmDevice
->statstimer
);
1888 bcm5700_timer(unsigned long data
)
1890 struct net_device
*dev
= (struct net_device
*)data
;
1891 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
1892 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
1893 unsigned long flags
= 0;
1896 if (!pUmDevice
->opened
)
1899 /* BCM4785: Flush posted writes from GbE to host memory. */
1900 if (pDevice
->Flags
& FLUSH_POSTED_WRITE_FLAG
)
1901 REG_RD(pDevice
, HostCoalesce
.Mode
);
1903 if (atomic_read(&pUmDevice
->intr_sem
) || pUmDevice
->suspended
) {
1904 pUmDevice
->timer
.expires
= RUN_AT(pUmDevice
->timer_interval
);
1905 add_timer(&pUmDevice
->timer
);
1909 #ifdef INCLUDE_TBI_SUPPORT
1910 if ((pDevice
->TbiFlags
& TBI_POLLING_FLAGS
) &&
1911 (--pUmDevice
->poll_tbi_expiry
<= 0)) {
1913 BCM5700_PHY_LOCK(pUmDevice
, flags
);
1914 value32
= REG_RD(pDevice
, MacCtrl
.Status
);
1915 if (((pDevice
->LinkStatus
== LM_STATUS_LINK_ACTIVE
) &&
1916 ((value32
& (MAC_STATUS_LINK_STATE_CHANGED
|
1917 MAC_STATUS_CFG_CHANGED
)) ||
1918 !(value32
& MAC_STATUS_PCS_SYNCED
)))
1920 ((pDevice
->LinkStatus
!= LM_STATUS_LINK_ACTIVE
) &&
1921 (value32
& (MAC_STATUS_PCS_SYNCED
|
1922 MAC_STATUS_SIGNAL_DETECTED
))))
1924 LM_SetupPhy(pDevice
);
1926 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
1927 pUmDevice
->poll_tbi_expiry
= pUmDevice
->poll_tbi_interval
;
1932 if (pUmDevice
->delayed_link_ind
> 0) {
1933 if (pUmDevice
->delayed_link_ind
== 1)
1934 MM_IndicateStatus(pDevice
, pDevice
->LinkStatus
);
1936 pUmDevice
->delayed_link_ind
--;
1939 if (pUmDevice
->crc_counter_expiry
> 0)
1940 pUmDevice
->crc_counter_expiry
--;
1942 if (!pUmDevice
->interrupt
) {
1943 if (!(pDevice
->Flags
& USE_TAGGED_STATUS_FLAG
)) {
1944 BCM5700_LOCK(pUmDevice
, flags
);
1945 if (pDevice
->pStatusBlkVirt
->Status
& STATUS_BLOCK_UPDATED
) {
1946 /* This will generate an interrupt */
1947 REG_WR(pDevice
, Grc
.LocalCtrl
,
1948 pDevice
->GrcLocalCtrl
|
1949 GRC_MISC_LOCAL_CTRL_SET_INT
);
1952 REG_WR(pDevice
, HostCoalesce
.Mode
,
1953 pDevice
->CoalesceMode
|
1954 HOST_COALESCE_ENABLE
|
1957 if (!(REG_RD(pDevice
, DmaWrite
.Mode
) &
1958 DMA_WRITE_MODE_ENABLE
)) {
1959 BCM5700_UNLOCK(pUmDevice
, flags
);
1963 BCM5700_UNLOCK(pUmDevice
, flags
);
1965 if (pUmDevice
->tx_queued
) {
1966 pUmDevice
->tx_queued
= 0;
1967 netif_wake_queue(dev
);
1970 #if (LINUX_VERSION_CODE < 0x02032b)
1971 if ((QQ_GetEntryCnt(&pDevice
->TxPacketFreeQ
.Container
) !=
1972 pDevice
->TxPacketDescCnt
) &&
1973 ((jiffies
- dev
->trans_start
) > TX_TIMEOUT
)) {
1975 printk(KERN_WARNING
"%s: Tx hung\n", dev
->name
);
1981 #ifndef BCM_NAPI_RXPOLL
1982 if (pUmDevice
->adaptive_coalesce
) {
1983 pUmDevice
->adaptive_expiry
--;
1984 if (pUmDevice
->adaptive_expiry
== 0) {
1985 pUmDevice
->adaptive_expiry
= HZ
/
1986 pUmDevice
->timer_interval
;
1987 bcm5700_adapt_coalesce(pUmDevice
);
1992 if (QQ_GetEntryCnt(&pUmDevice
->rx_out_of_buf_q
.Container
) >
1993 (unsigned int) pUmDevice
->rx_buf_repl_panic_thresh
) {
1994 /* Generate interrupt and let isr allocate buffers */
1995 REG_WR(pDevice
, HostCoalesce
.Mode
, pDevice
->CoalesceMode
|
1996 HOST_COALESCE_ENABLE
| HOST_COALESCE_NOW
);
2000 if (pDevice
->AsfFlags
& ASF_ENABLED
) {
2001 pUmDevice
->asf_heartbeat
--;
2002 if (pUmDevice
->asf_heartbeat
== 0) {
2003 if( (pDevice
->Flags
& UNDI_FIX_FLAG
) ||
2004 (pDevice
->Flags
& ENABLE_PCIX_FIX_FLAG
)) {
2005 MEM_WR_OFFSET(pDevice
, T3_CMD_MAILBOX
,
2006 T3_CMD_NICDRV_ALIVE2
);
2007 MEM_WR_OFFSET(pDevice
, T3_CMD_LENGTH_MAILBOX
,
2009 MEM_WR_OFFSET(pDevice
, T3_CMD_DATA_MAILBOX
, 5);
2012 (T3_NIC_MBUF_POOL_ADDR
+
2014 T3_CMD_NICDRV_ALIVE2
, 1);
2016 (T3_NIC_MBUF_POOL_ADDR
+
2017 T3_CMD_LENGTH_MAILBOX
),4,1);
2019 (T3_NIC_MBUF_POOL_ADDR
+
2020 T3_CMD_DATA_MAILBOX
),5,1);
2023 value32
= REG_RD(pDevice
, Grc
.RxCpuEvent
);
2024 REG_WR(pDevice
, Grc
.RxCpuEvent
, value32
| BIT_14
);
2025 pUmDevice
->asf_heartbeat
= (2 * HZ
) /
2026 pUmDevice
->timer_interval
;
2031 if (pDevice
->PhyFlags
& PHY_IS_FIBER
){
2032 BCM5700_PHY_LOCK(pUmDevice
, flags
);
2033 LM_5714_FamFiberCheckLink(pDevice
);
2034 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
2037 pUmDevice
->timer
.expires
= RUN_AT(pUmDevice
->timer_interval
);
2038 add_timer(&pUmDevice
->timer
);
/*
 * bcm5700_init_counters() - reset the driver's software counters and
 * seed the adaptive-coalescing state from the chip's current coalescing
 * parameters. Called on open and after every adapter reset.
 */
2042 bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice
)
/* Adaptive coalescing state is only kept when NAPI polling is not used. */
2045 #ifndef BCM_NAPI_RXPOLL
2046 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
2048 pUmDevice
->rx_curr_coalesce_frames
= pDevice
->RxMaxCoalescedFrames
;
2049 pUmDevice
->rx_curr_coalesce_ticks
= pDevice
->RxCoalescingTicks
;
2050 pUmDevice
->tx_curr_coalesce_frames
= pDevice
->TxMaxCoalescedFrames
;
2051 pUmDevice
->rx_last_cnt
= 0;
2052 pUmDevice
->tx_last_cnt
= 0;
/* Clear per-interface software statistics. */
2055 pUmDevice
->phy_crc_count
= 0;
2057 pUmDevice
->tx_zc_count
= 0;
2058 pUmDevice
->tx_chksum_count
= 0;
2059 pUmDevice
->tx_himem_count
= 0;
2060 pUmDevice
->rx_good_chksum_count
= 0;
2061 pUmDevice
->rx_bad_chksum_count
= 0;
2064 pUmDevice
->tso_pkt_count
= 0;
2070 #ifndef BCM_NAPI_RXPOLL
/*
 * bcm5700_do_adapt_coalesce() - program a new set of interrupt
 * coalescing parameters (rx/tx frame counts and rx ticks) into the
 * HostCoalesce block and mirror them in the driver state.
 * Skips the update if the global lock is already held elsewhere.
 */
2072 bcm5700_do_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice
,
2073 int rx_frames
, int rx_ticks
, int tx_frames
, int rx_frames_intr
)
2075 unsigned long flags
= 0;
2076 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
/* Avoid self-deadlock: if the global lock is currently taken, give up
 * on this adjustment rather than block in timer context. */
2078 if (pUmDevice
->do_global_lock
) {
2079 if (spin_is_locked(&pUmDevice
->global_lock
))
2081 spin_lock_irqsave(&pUmDevice
->global_lock
, flags
);
/* Cache the new parameters so bcm5700_adapt_coalesce can compare. */
2083 pUmDevice
->rx_curr_coalesce_frames
= rx_frames
;
2084 pUmDevice
->rx_curr_coalesce_ticks
= rx_ticks
;
2085 pUmDevice
->tx_curr_coalesce_frames
= tx_frames
;
2086 pUmDevice
->rx_curr_coalesce_frames_intr
= rx_frames_intr
;
/* Push the values into the chip's host-coalescing registers. */
2087 REG_WR(pDevice
, HostCoalesce
.RxMaxCoalescedFrames
, rx_frames
);
2089 REG_WR(pDevice
, HostCoalesce
.RxCoalescingTicks
, rx_ticks
);
2091 REG_WR(pDevice
, HostCoalesce
.TxMaxCoalescedFrames
, tx_frames
);
2093 REG_WR(pDevice
, HostCoalesce
.RxMaxCoalescedFramesDuringInt
,
2096 BCM5700_UNLOCK(pUmDevice
, flags
);
/*
 * bcm5700_adapt_coalesce() - called periodically from bcm5700_timer to
 * pick one of three coalescing profiles (LO / DEFAULT / HI) based on the
 * recent unicast packet rate read from the chip's statistics block.
 */
2101 bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice
)
2103 PLM_DEVICE_BLOCK pDevice
= &pUmDevice
->lm_dev
;
2104 uint rx_curr_cnt
, tx_curr_cnt
, rx_delta
, tx_delta
, total_delta
;
/* Snapshot the cumulative unicast rx/tx packet counters. */
2106 rx_curr_cnt
= pDevice
->pStatsBlkVirt
->ifHCInUcastPkts
.Low
;
2107 tx_curr_cnt
= pDevice
->pStatsBlkVirt
->ifHCOutUcastPkts
.Low
;
2108 if ((rx_curr_cnt
<= pUmDevice
->rx_last_cnt
) ||
2109 (tx_curr_cnt
< pUmDevice
->tx_last_cnt
)) {
2111 /* skip if there is counter rollover */
2112 pUmDevice
->rx_last_cnt
= rx_curr_cnt
;
2113 pUmDevice
->tx_last_cnt
= tx_curr_cnt
;
/* Packet deltas since the previous timer tick. */
2117 rx_delta
= rx_curr_cnt
- pUmDevice
->rx_last_cnt
;
2118 tx_delta
= tx_curr_cnt
- pUmDevice
->tx_last_cnt
;
/* Weighted average: rx counts double relative to tx, scaled by 2. */
2119 total_delta
= (((rx_delta
+ rx_delta
) + tx_delta
) / 3) << 1;
2121 pUmDevice
->rx_last_cnt
= rx_curr_cnt
;
2122 pUmDevice
->tx_last_cnt
= tx_curr_cnt
;
/* Low traffic: use the LO profile (least latency) if not already set. */
2124 if (total_delta
< ADAPTIVE_LO_PKT_THRESH
) {
2125 if (pUmDevice
->rx_curr_coalesce_frames
!=
2126 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES
) {
2128 bcm5700_do_adapt_coalesce(pUmDevice
,
2129 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES
,
2130 ADAPTIVE_LO_RX_COALESCING_TICKS
,
2131 ADAPTIVE_LO_TX_MAX_COALESCED_FRAMES
,
2132 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES_DURING_INT
);
/* Medium traffic: fall back to the DEFAULT profile. */
2135 else if (total_delta
< ADAPTIVE_HI_PKT_THRESH
) {
2136 if (pUmDevice
->rx_curr_coalesce_frames
!=
2137 DEFAULT_RX_MAX_COALESCED_FRAMES
) {
2139 bcm5700_do_adapt_coalesce(pUmDevice
,
2140 DEFAULT_RX_MAX_COALESCED_FRAMES
,
2141 DEFAULT_RX_COALESCING_TICKS
,
2142 DEFAULT_TX_MAX_COALESCED_FRAMES
,
2143 DEFAULT_RX_MAX_COALESCED_FRAMES_DURING_INT
);
/* High traffic: use the HI profile (fewest interrupts). */
2147 if (pUmDevice
->rx_curr_coalesce_frames
!=
2148 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES
) {
2150 bcm5700_do_adapt_coalesce(pUmDevice
,
2151 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES
,
2152 ADAPTIVE_HI_RX_COALESCING_TICKS
,
2153 ADAPTIVE_HI_TX_MAX_COALESCED_FRAMES
,
2154 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES_DURING_INT
);
/*
 * bcm5700_reset() - full adapter reset, also installed as the netdev
 * tx_timeout handler. Quiesces the queue and interrupts, reinitializes
 * the hardware via LM_ResetAdapter, restores rx mode / VLAN mode /
 * counters / MAC address, then re-enables everything.
 */
2163 bcm5700_reset(struct net_device
*dev
)
2165 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
2166 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
2167 unsigned long flags
;
/* If TSO was active while the tx ring filled up (suspected TSO-related
 * hang), disable TSO before bringing the adapter back. */
2171 if( (dev
->features
& NETIF_F_TSO
) &&
2172 (pUmDevice
->tx_full
) ) {
2174 dev
->features
&= ~NETIF_F_TSO
;
2178 netif_stop_queue(dev
);
2179 bcm5700_intr_off(pUmDevice
);
2180 BCM5700_PHY_LOCK(pUmDevice
, flags
);
2181 LM_ResetAdapter(pDevice
);
2182 pDevice
->InitDone
= TRUE
;
/* Re-apply software configuration lost across the hardware reset. */
2183 bcm5700_do_rx_mode(dev
);
2184 bcm5700_set_vlan_mode(pUmDevice
);
2185 bcm5700_init_counters(pUmDevice
);
/* Restore an administratively-set MAC address if it differs from the
 * permanent one. */
2186 if (memcmp(dev
->dev_addr
, pDevice
->NodeAddress
, 6)) {
2187 LM_SetMacAddress(pDevice
, dev
->dev_addr
);
2189 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
2190 atomic_set(&pUmDevice
->intr_sem
, 1);
2191 bcm5700_intr_on(pUmDevice
);
2192 netif_wake_queue(dev
);
/*
 * bcm5700_set_vlan_mode() - decide whether the chip should keep or strip
 * VLAN tags on receive, based on the configured tag mode, ASF state, an
 * attached VLAN group, and NICE rx usage; program the receive mask only
 * if it actually changed.
 */
2196 bcm5700_set_vlan_mode(UM_DEVICE_BLOCK
*pUmDevice
)
2198 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
2199 LM_UINT32 ReceiveMask
= pDevice
->ReceiveMask
;
2200 int vlan_tag_mode
= pUmDevice
->vlan_tag_mode
;
/* AUTO mode resolves to FORCED_STRIP when ASF management firmware is
 * active (ASF needs untagged frames), otherwise NORMAL_STRIP. */
2202 if (vlan_tag_mode
== VLAN_TAG_MODE_AUTO_STRIP
) {
2203 if (pDevice
->AsfFlags
& ASF_ENABLED
) {
2204 vlan_tag_mode
= VLAN_TAG_MODE_FORCED_STRIP
;
2207 vlan_tag_mode
= VLAN_TAG_MODE_NORMAL_STRIP
;
/* NORMAL_STRIP: keep tags by default, but strip in hardware when a
 * VLAN group (or NICE rx consumer) will handle them. */
2210 if (vlan_tag_mode
== VLAN_TAG_MODE_NORMAL_STRIP
) {
2211 ReceiveMask
|= LM_KEEP_VLAN_TAG
;
2213 if (pUmDevice
->vlgrp
)
2214 ReceiveMask
&= ~LM_KEEP_VLAN_TAG
;
2217 if (pUmDevice
->nice_rx
)
2218 ReceiveMask
&= ~LM_KEEP_VLAN_TAG
;
2221 else if (vlan_tag_mode
== VLAN_TAG_MODE_FORCED_STRIP
) {
2222 ReceiveMask
&= ~LM_KEEP_VLAN_TAG
;
/* Touch the hardware only on an actual change. */
2224 if (ReceiveMask
!= pDevice
->ReceiveMask
)
2226 LM_SetReceiveMask(pDevice
, ReceiveMask
);
/*
 * bcm5700_poll_wait() - with NAPI enabled, sleep (1 jiffy at a time)
 * until the device's rx-poll handler has finished running, so callers
 * such as the VLAN entry points can safely change state. No-op without
 * BCM_NAPI_RXPOLL.
 */
2231 bcm5700_poll_wait(UM_DEVICE_BLOCK
*pUmDevice
)
2233 #ifdef BCM_NAPI_RXPOLL
2234 while (pUmDevice
->lm_dev
.RxPoll
) {
2235 current
->state
= TASK_INTERRUPTIBLE
;
2236 schedule_timeout(1);
/*
 * bcm5700_vlan_rx_register() - netdev callback invoked when the 8021q
 * layer attaches a vlan_group. Quiesces interrupts and any in-flight
 * NAPI poll, records the group, reprograms the VLAN strip mode, then
 * re-enables interrupts.
 */
2244 bcm5700_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*vlgrp
)
2246 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) dev
->priv
;
2248 bcm5700_intr_off(pUmDevice
);
2249 bcm5700_poll_wait(pUmDevice
);
2250 pUmDevice
->vlgrp
= vlgrp
;
2251 bcm5700_set_vlan_mode(pUmDevice
);
2252 bcm5700_intr_on(pUmDevice
);
/*
 * bcm5700_vlan_rx_kill_vid() - netdev callback invoked when a VLAN id is
 * removed. Quiesces interrupts / NAPI polling, clears the per-vid device
 * slot in the vlan_group (API differs across kernel versions), then
 * re-enables interrupts.
 */
2256 bcm5700_vlan_rx_kill_vid(struct net_device
*dev
, uint16_t vid
)
2258 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) dev
->priv
;
2260 bcm5700_intr_off(pUmDevice
);
2261 bcm5700_poll_wait(pUmDevice
);
2262 if (pUmDevice
->vlgrp
) {
/* 2.6+: accessor function; older kernels expose the array directly. */
2263 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
2264 vlan_group_set_device(pUmDevice
->vlgrp
, vid
, NULL
);
2266 pUmDevice
->vlgrp
->vlan_devices
[vid
] = NULL
;
2269 bcm5700_intr_on(pUmDevice
);
2274 bcm5700_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
2276 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
2277 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
2279 PUM_PACKET pUmPacket
;
2280 unsigned long flags
= 0;
2283 vlan_tag_t
*vlan_tag
;
2287 uint16_t ip_tcp_len
, tcp_opt_len
, tcp_seg_flags
;
2289 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
2294 if ((pDevice
->LinkStatus
== LM_STATUS_LINK_DOWN
) ||
2295 !pDevice
->InitDone
|| pUmDevice
->suspended
)
2301 #if (LINUX_VERSION_CODE < 0x02032b)
2302 if (test_and_set_bit(0, &dev
->tbusy
)) {
2307 if (pUmDevice
->do_global_lock
&& pUmDevice
->interrupt
) {
2308 netif_stop_queue(dev
);
2309 pUmDevice
->tx_queued
= 1;
2310 if (!pUmDevice
->interrupt
) {
2311 netif_wake_queue(dev
);
2312 pUmDevice
->tx_queued
= 0;
2317 pPacket
= (PLM_PACKET
)
2318 QQ_PopHead(&pDevice
->TxPacketFreeQ
.Container
);
2320 netif_stop_queue(dev
);
2321 pUmDevice
->tx_full
= 1;
2322 if (QQ_GetEntryCnt(&pDevice
->TxPacketFreeQ
.Container
)) {
2323 netif_wake_queue(dev
);
2324 pUmDevice
->tx_full
= 0;
2328 pUmPacket
= (PUM_PACKET
) pPacket
;
2329 pUmPacket
->skbuff
= skb
;
2330 pUmDevice
->stats
.tx_bytes
+= skb
->len
;
2332 if (skb
->ip_summed
== CHECKSUM_HW
) {
2333 pPacket
->Flags
= SND_BD_FLAG_TCP_UDP_CKSUM
;
2335 pUmDevice
->tx_chksum_count
++;
2342 frag_no
= skb_shinfo(skb
)->nr_frags
;
2346 if (atomic_read(&pDevice
->SendBdLeft
) < (frag_no
+ 1)) {
2347 netif_stop_queue(dev
);
2348 pUmDevice
->tx_full
= 1;
2349 QQ_PushHead(&pDevice
->TxPacketFreeQ
.Container
, pPacket
);
2350 if (atomic_read(&pDevice
->SendBdLeft
) >= (frag_no
+ 1)) {
2351 netif_wake_queue(dev
);
2352 pUmDevice
->tx_full
= 0;
2357 pPacket
->u
.Tx
.FragCount
= frag_no
+ 1;
2359 if (pPacket
->u
.Tx
.FragCount
> 1)
2360 pUmDevice
->tx_zc_count
++;
2364 if (pUmDevice
->vlgrp
&& vlan_tx_tag_present(skb
)) {
2365 pPacket
->VlanTag
= vlan_tx_tag_get(skb
);
2366 pPacket
->Flags
|= SND_BD_FLAG_VLAN_TAG
;
2370 vlan_tag
= (vlan_tag_t
*) &skb
->cb
[0];
2371 if (vlan_tag
->signature
== 0x5555) {
2372 pPacket
->VlanTag
= vlan_tag
->tag
;
2373 pPacket
->Flags
|= SND_BD_FLAG_VLAN_TAG
;
2374 vlan_tag
->signature
= 0;
2379 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16)
2380 if ((mss
= (LM_UINT32
) skb_shinfo(skb
)->gso_size
) &&
2381 (skb
->len
> pDevice
->TxMtu
)) {
2383 if ((mss
= (LM_UINT32
) skb_shinfo(skb
)->tso_size
) &&
2384 (skb
->len
> pDevice
->TxMtu
)) {
2387 #if (LINUX_VERSION_CODE >= 0x02060c)
2389 if (skb_header_cloned(skb
) &&
2390 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
2396 pUmDevice
->tso_pkt_count
++;
2398 pPacket
->Flags
|= SND_BD_FLAG_CPU_PRE_DMA
|
2399 SND_BD_FLAG_CPU_POST_DMA
;
2402 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
2403 th
= (struct tcphdr
*)skb_transport_header(skb
);
2404 iph
= (struct iphdr
*)skb_network_header(skb
);
2406 ASSERT((iph
!= NULL
) && (th
!= NULL
));
2409 tcp_opt_len
= (th
->doff
- 5) << 2;
2411 ip_tcp_len
= (iph
->ihl
<< 2) + sizeof(struct tcphdr
);
2414 if ( T3_ASIC_IS_575X_PLUS(pDevice
->ChipRevId
) ){
2416 pPacket
->Flags
&= ~SND_BD_FLAG_TCP_UDP_CKSUM
;
2419 th
->check
= ~csum_tcpudp_magic(
2420 iph
->saddr
, iph
->daddr
,
2424 iph
->tot_len
= htons(mss
+ ip_tcp_len
+ tcp_opt_len
);
2427 if (tcp_opt_len
|| (iph
->ihl
> 5)) {
2428 if ( T3_ASIC_IS_5705_BEYOND(pDevice
->ChipRevId
) ){
2431 (tcp_opt_len
>> 2)) << 11;
2436 (tcp_opt_len
>> 2)) << 12;
2440 if (skb
->h
.th
->doff
> 5) {
2441 tcp_opt_len
= (skb
->h
.th
->doff
- 5) << 2;
2443 ip_tcp_len
= (skb
->nh
.iph
->ihl
<< 2) + sizeof(struct tcphdr
);
2444 skb
->nh
.iph
->check
= 0;
2446 if ( T3_ASIC_IS_575X_PLUS(pDevice
->ChipRevId
) ){
2447 skb
->h
.th
->check
= 0;
2448 pPacket
->Flags
&= ~SND_BD_FLAG_TCP_UDP_CKSUM
;
2451 skb
->h
.th
->check
= ~csum_tcpudp_magic(
2452 skb
->nh
.iph
->saddr
, skb
->nh
.iph
->daddr
,
2456 skb
->nh
.iph
->tot_len
= htons(mss
+ ip_tcp_len
+ tcp_opt_len
);
2459 if (tcp_opt_len
|| (skb
->nh
.iph
->ihl
> 5)) {
2460 if ( T3_ASIC_IS_5705_BEYOND(pDevice
->ChipRevId
) ){
2462 ((skb
->nh
.iph
->ihl
- 5) +
2463 (tcp_opt_len
>> 2)) << 11;
2467 ((skb
->nh
.iph
->ihl
- 5) +
2468 (tcp_opt_len
>> 2)) << 12;
2472 pPacket
->u
.Tx
.MaxSegmentSize
= mss
| tcp_seg_flags
;
2476 pPacket
->u
.Tx
.MaxSegmentSize
= 0;
2479 BCM5700_LOCK(pUmDevice
, flags
);
2480 LM_SendPacket(pDevice
, pPacket
);
2481 BCM5700_UNLOCK(pUmDevice
, flags
);
2483 #if (LINUX_VERSION_CODE < 0x02032b)
2484 netif_wake_queue(dev
);
2486 dev
->trans_start
= jiffies
;
2492 #ifdef BCM_NAPI_RXPOLL
2494 bcm5700_poll(struct net_device
*dev
, int *budget
)
2496 int orig_budget
= *budget
;
2498 UM_DEVICE_BLOCK
*pUmDevice
= (UM_DEVICE_BLOCK
*) dev
->priv
;
2499 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
2500 unsigned long flags
= 0;
2503 if (orig_budget
> dev
->quota
)
2504 orig_budget
= dev
->quota
;
2506 BCM5700_LOCK(pUmDevice
, flags
);
2507 /* BCM4785: Flush posted writes from GbE to host memory. */
2508 if (pDevice
->Flags
& FLUSH_POSTED_WRITE_FLAG
)
2509 REG_RD(pDevice
, HostCoalesce
.Mode
);
2510 work_done
= LM_ServiceRxPoll(pDevice
, orig_budget
);
2511 *budget
-= work_done
;
2512 dev
->quota
-= work_done
;
2514 if (QQ_GetEntryCnt(&pUmDevice
->rx_out_of_buf_q
.Container
)) {
2515 replenish_rx_buffers(pUmDevice
, 0);
2517 BCM5700_UNLOCK(pUmDevice
, flags
);
2519 MM_IndicateRxPackets(pDevice
);
2520 BCM5700_LOCK(pUmDevice
, flags
);
2521 LM_QueueRxPackets(pDevice
);
2522 BCM5700_UNLOCK(pUmDevice
, flags
);
2524 if ((work_done
< orig_budget
) || atomic_read(&pUmDevice
->intr_sem
) ||
2525 pUmDevice
->suspended
) {
2527 netif_rx_complete(dev
);
2528 BCM5700_LOCK(pUmDevice
, flags
);
2529 REG_WR(pDevice
, Grc
.Mode
, pDevice
->GrcMode
);
2530 pDevice
->RxPoll
= FALSE
;
2531 if (pDevice
->RxPoll
) {
2532 BCM5700_UNLOCK(pUmDevice
, flags
);
2535 /* Take care of possible missed rx interrupts */
2536 REG_RD_BACK(pDevice
, Grc
.Mode
); /* flush the register write */
2537 tag
= pDevice
->pStatusBlkVirt
->StatusTag
;
2538 if ((pDevice
->pStatusBlkVirt
->Status
& STATUS_BLOCK_UPDATED
) ||
2539 (pDevice
->pStatusBlkVirt
->Idx
[0].RcvProdIdx
!=
2540 pDevice
->RcvRetConIdx
)) {
2542 REG_WR(pDevice
, HostCoalesce
.Mode
,
2543 pDevice
->CoalesceMode
| HOST_COALESCE_ENABLE
|
2546 /* If a new status block is pending in the WDMA state machine */
2547 /* before the register write to enable the rx interrupt, */
2548 /* the new status block may DMA with no interrupt. In this */
2549 /* scenario, the tag read above will be older than the tag in */
2550 /* the pending status block and writing the older tag will */
2551 /* cause interrupt to be generated. */
2552 else if (pDevice
->Flags
& USE_TAGGED_STATUS_FLAG
) {
2553 MB_REG_WR(pDevice
, Mailbox
.Interrupt
[0].Low
,
2555 /* Make sure we service tx in case some tx interrupts */
2557 if (atomic_read(&pDevice
->SendBdLeft
) <
2558 (T3_SEND_RCB_ENTRY_COUNT
/ 2)) {
2559 REG_WR(pDevice
, HostCoalesce
.Mode
,
2560 pDevice
->CoalesceMode
|
2561 HOST_COALESCE_ENABLE
|
2565 BCM5700_UNLOCK(pUmDevice
, flags
);
2570 #endif /* BCM_NAPI_RXPOLL */
2572 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
2574 bcm5700_interrupt(int irq
, void *dev_instance
)
2577 bcm5700_interrupt(int irq
, void *dev_instance
, struct pt_regs
*regs
)
2580 struct net_device
*dev
= (struct net_device
*)dev_instance
;
2581 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
2582 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
2583 LM_UINT32 oldtag
, newtag
;
2584 int i
, max_intr_loop
;
2588 unsigned int handled
= 1;
2590 if (!pDevice
->InitDone
) {
2592 return IRQ_RETVAL(handled
);
2595 bcm5700_intr_lock(pUmDevice
);
2596 if (atomic_read(&pUmDevice
->intr_sem
)) {
2597 MB_REG_WR(pDevice
, Mailbox
.Interrupt
[0].Low
, 1);
2598 bcm5700_intr_unlock(pUmDevice
);
2600 return IRQ_RETVAL(handled
);
2603 if (test_and_set_bit(0, (void*)&pUmDevice
->interrupt
)) {
2604 printk(KERN_ERR
"%s: Duplicate entry of the interrupt handler\n",
2606 bcm5700_intr_unlock(pUmDevice
);
2608 return IRQ_RETVAL(handled
);
2611 /* BCM4785: Flush posted writes from GbE to host memory. */
2612 if (pDevice
->Flags
& FLUSH_POSTED_WRITE_FLAG
)
2613 REG_RD(pDevice
, HostCoalesce
.Mode
);
2615 if ((pDevice
->Flags
& USING_MSI_FLAG
) ||
2616 (pDevice
->pStatusBlkVirt
->Status
& STATUS_BLOCK_UPDATED
) ||
2617 !(REG_RD(pDevice
,PciCfg
.PciState
) & T3_PCI_STATE_INTERRUPT_NOT_ACTIVE
) )
2620 if (pUmDevice
->intr_test
) {
2621 if (!(REG_RD(pDevice
, PciCfg
.PciState
) &
2622 T3_PCI_STATE_INTERRUPT_NOT_ACTIVE
) ||
2623 pDevice
->Flags
& USING_MSI_FLAG
) {
2624 pUmDevice
->intr_test_result
= 1;
2626 pUmDevice
->intr_test
= 0;
2629 #ifdef BCM_NAPI_RXPOLL
2634 if (pDevice
->Flags
& USE_TAGGED_STATUS_FLAG
) {
2635 MB_REG_WR(pDevice
, Mailbox
.Interrupt
[0].Low
, 1);
2636 oldtag
= pDevice
->pStatusBlkVirt
->StatusTag
;
2638 for (i
= 0; ; i
++) {
2639 pDevice
->pStatusBlkVirt
->Status
&= ~STATUS_BLOCK_UPDATED
;
2641 LM_ServiceInterrupts(pDevice
);
2642 /* BCM4785: Flush GbE posted writes to host memory. */
2643 if (pDevice
->Flags
& FLUSH_POSTED_WRITE_FLAG
)
2644 MB_REG_RD(pDevice
, Mailbox
.Interrupt
[0].Low
);
2645 newtag
= pDevice
->pStatusBlkVirt
->StatusTag
;
2646 if ((newtag
== oldtag
) || (i
> max_intr_loop
)) {
2647 MB_REG_WR(pDevice
, Mailbox
.Interrupt
[0].Low
, oldtag
<< 24);
2648 pDevice
->LastTag
= oldtag
;
2649 if (pDevice
->Flags
& UNDI_FIX_FLAG
) {
2650 REG_WR(pDevice
, Grc
.LocalCtrl
,
2651 pDevice
->GrcLocalCtrl
| 0x2);
2664 MB_REG_WR(pDevice
, Mailbox
.Interrupt
[0].Low
, 1);
2665 pDevice
->pStatusBlkVirt
->Status
&= ~STATUS_BLOCK_UPDATED
;
2666 LM_ServiceInterrupts(pDevice
);
2667 MB_REG_WR(pDevice
, Mailbox
.Interrupt
[0].Low
, 0);
2668 dummy
= MB_REG_RD(pDevice
, Mailbox
.Interrupt
[0].Low
);
2671 while ((pDevice
->pStatusBlkVirt
->Status
& STATUS_BLOCK_UPDATED
) &&
2672 (i
< max_intr_loop
));
2674 if (pDevice
->Flags
& UNDI_FIX_FLAG
) {
2675 REG_WR(pDevice
, Grc
.LocalCtrl
,
2676 pDevice
->GrcLocalCtrl
| 0x2);
2682 /* not my interrupt */
2687 repl_buf_count
= QQ_GetEntryCnt(&pUmDevice
->rx_out_of_buf_q
.Container
);
2688 if (((repl_buf_count
> pUmDevice
->rx_buf_repl_panic_thresh
) ||
2689 pDevice
->QueueAgain
) &&
2690 (!test_and_set_bit(0, &pUmDevice
->tasklet_busy
))) {
2692 replenish_rx_buffers(pUmDevice
, pUmDevice
->rx_buf_repl_isr_limit
);
2693 clear_bit(0, (void*)&pUmDevice
->tasklet_busy
);
2695 else if ((repl_buf_count
> pUmDevice
->rx_buf_repl_thresh
) &&
2696 !pUmDevice
->tasklet_pending
) {
2698 pUmDevice
->tasklet_pending
= 1;
2699 tasklet_schedule(&pUmDevice
->tasklet
);
2702 #ifdef BCM_NAPI_RXPOLL
2703 if (!pDevice
->RxPoll
&&
2704 QQ_GetEntryCnt(&pUmDevice
->rx_out_of_buf_q
.Container
)) {
2705 pDevice
->RxPoll
= 1;
2706 MM_ScheduleRxPoll(pDevice
);
2709 if (QQ_GetEntryCnt(&pUmDevice
->rx_out_of_buf_q
.Container
)) {
2710 replenish_rx_buffers(pUmDevice
, 0);
2713 if (QQ_GetEntryCnt(&pDevice
->RxPacketFreeQ
.Container
) ||
2714 pDevice
->QueueAgain
) {
2716 LM_QueueRxPackets(pDevice
);
2721 clear_bit(0, (void*)&pUmDevice
->interrupt
);
2722 bcm5700_intr_unlock(pUmDevice
);
2723 if (pUmDevice
->tx_queued
) {
2724 pUmDevice
->tx_queued
= 0;
2725 netif_wake_queue(dev
);
2727 return IRQ_RETVAL(handled
);
2733 bcm5700_tasklet(unsigned long data
)
2735 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)data
;
2736 unsigned long flags
= 0;
2738 /* RH 7.2 Beta 3 tasklets are reentrant */
2739 if (test_and_set_bit(0, &pUmDevice
->tasklet_busy
)) {
2740 pUmDevice
->tasklet_pending
= 0;
2744 pUmDevice
->tasklet_pending
= 0;
2745 if (pUmDevice
->opened
&& !pUmDevice
->suspended
) {
2746 BCM5700_LOCK(pUmDevice
, flags
);
2747 replenish_rx_buffers(pUmDevice
, 0);
2748 BCM5700_UNLOCK(pUmDevice
, flags
);
2751 clear_bit(0, &pUmDevice
->tasklet_busy
);
2756 bcm5700_close(struct net_device
*dev
)
2759 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
2760 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
2762 #if (LINUX_VERSION_CODE < 0x02032b)
2765 netif_stop_queue(dev
);
2766 pUmDevice
->opened
= 0;
2769 if( !(pDevice
->AsfFlags
& ASF_ENABLED
) )
2772 if( enable_wol
[pUmDevice
->index
] == 0 )
2774 B57_INFO(("%s: %s NIC Link is DOWN\n", bcm5700_driver
, dev
->name
));
2776 if (tigon3_debug
> 1)
2777 printk(KERN_DEBUG
"%s: Shutting down Tigon3\n",
2780 LM_MulticastClear(pDevice
);
2781 bcm5700_shutdown(pUmDevice
);
2783 if (T3_ASIC_IS_5705_BEYOND(pDevice
->ChipRevId
)) {
2784 del_timer_sync(&pUmDevice
->statstimer
);
2787 del_timer_sync(&pUmDevice
->timer
);
2789 free_irq(pUmDevice
->pdev
->irq
, dev
);
2791 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
2793 if(pDevice
->Flags
& USING_MSI_FLAG
) {
2794 pci_disable_msi(pUmDevice
->pdev
);
2795 REG_WR(pDevice
, Msi
.Mode
, 1 );
2796 pDevice
->Flags
&= ~USING_MSI_FLAG
;
2802 #if (LINUX_VERSION_CODE < 0x020300)
2806 /* BCM4785: Don't go to low-power state because it will power down the smbus block. */
2807 if (!(pDevice
->Flags
& SB_CORE_FLAG
))
2808 LM_SetPowerState(pDevice
, LM_POWER_STATE_D3
);
2811 bcm5700_freemem(dev
);
2813 QQ_InitQueue(&pDevice
->RxPacketFreeQ
.Container
,
2814 MAX_RX_PACKET_DESC_COUNT
);
2820 bcm5700_freemem(struct net_device
*dev
)
2823 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
2824 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
2826 for (i
= 0; i
< pUmDevice
->mem_list_num
; i
++) {
2827 if (pUmDevice
->mem_size_list
[i
] == 0) {
2828 kfree(pUmDevice
->mem_list
[i
]);
2831 pci_free_consistent(pUmDevice
->pdev
,
2832 (size_t) pUmDevice
->mem_size_list
[i
],
2833 pUmDevice
->mem_list
[i
],
2834 pUmDevice
->dma_list
[i
]);
2838 pDevice
->pStatusBlkVirt
= 0;
2839 pDevice
->pStatsBlkVirt
= 0;
2840 pUmDevice
->mem_list_num
= 0;
2843 if (!pUmDevice
->opened
) {
2844 for (i
= 0; i
< MAX_MEM2
; i
++) {
2845 if (pUmDevice
->mem_size_list2
[i
]) {
2846 bcm5700_freemem2(pUmDevice
, i
);
2855 /* Frees consistent memory allocated through ioctl */
2856 /* The memory to be freed is in mem_list2[index] */
2858 bcm5700_freemem2(UM_DEVICE_BLOCK
*pUmDevice
, int index
)
2860 #if (LINUX_VERSION_CODE >= 0x020400)
2862 struct page
*pg
, *last_pg
;
2864 /* Probably won't work on some architectures */
2865 ptr
= pUmDevice
->mem_list2
[index
],
2866 pg
= virt_to_page(ptr
);
2867 last_pg
= virt_to_page(ptr
+ pUmDevice
->mem_size_list2
[index
] - 1);
2869 #if (LINUX_VERSION_CODE > 0x020500)
2870 ClearPageReserved(pg
);
2872 mem_map_unreserve(pg
);
2877 pci_free_consistent(pUmDevice
->pdev
,
2878 (size_t) pUmDevice
->mem_size_list2
[index
],
2879 pUmDevice
->mem_list2
[index
],
2880 pUmDevice
->dma_list2
[index
]);
2881 pUmDevice
->mem_size_list2
[index
] = 0;
2888 bcm5700_crc_count(PUM_DEVICE_BLOCK pUmDevice
)
2890 PLM_DEVICE_BLOCK pDevice
= &pUmDevice
->lm_dev
;
2892 PT3_STATS_BLOCK pStats
= (PT3_STATS_BLOCK
) pDevice
->pStatsBlkVirt
;
2893 unsigned long flags
;
2895 if ((T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5700
||
2896 T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5701
) &&
2897 !(pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)) {
2899 if (!pUmDevice
->opened
|| !pDevice
->InitDone
)
2905 /* regulate MDIO access during run time */
2906 if (pUmDevice
->crc_counter_expiry
> 0)
2907 return pUmDevice
->phy_crc_count
;
2909 pUmDevice
->crc_counter_expiry
= (5 * HZ
) /
2910 pUmDevice
->timer_interval
;
2912 BCM5700_PHY_LOCK(pUmDevice
, flags
);
2913 LM_ReadPhy(pDevice
, 0x1e, &Value32
);
2914 if ((Value32
& 0x8000) == 0)
2915 LM_WritePhy(pDevice
, 0x1e, Value32
| 0x8000);
2916 LM_ReadPhy(pDevice
, 0x14, &Value32
);
2917 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
2918 /* Sometimes data on the MDIO bus can be corrupted */
2919 if (Value32
!= 0xffff)
2920 pUmDevice
->phy_crc_count
+= Value32
;
2921 return pUmDevice
->phy_crc_count
;
2923 else if (pStats
== 0) {
2927 return (MM_GETSTATS64(pStats
->dot3StatsFCSErrors
));
2932 bcm5700_rx_err_count(UM_DEVICE_BLOCK
*pUmDevice
)
2934 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
2935 T3_STATS_BLOCK
*pStats
= (T3_STATS_BLOCK
*) pDevice
->pStatsBlkVirt
;
2939 return (bcm5700_crc_count(pUmDevice
) +
2940 MM_GETSTATS64(pStats
->dot3StatsAlignmentErrors
) +
2941 MM_GETSTATS64(pStats
->etherStatsUndersizePkts
) +
2942 MM_GETSTATS64(pStats
->etherStatsFragments
) +
2943 MM_GETSTATS64(pStats
->dot3StatsFramesTooLong
) +
2944 MM_GETSTATS64(pStats
->etherStatsJabbers
));
2947 STATIC
struct net_device_stats
*
2948 bcm5700_get_stats(struct net_device
*dev
)
2950 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
2951 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
2952 PT3_STATS_BLOCK pStats
= (PT3_STATS_BLOCK
) pDevice
->pStatsBlkVirt
;
2953 struct net_device_stats
*p_netstats
= &pUmDevice
->stats
;
2958 /* Get stats from LM */
2959 p_netstats
->rx_packets
=
2960 MM_GETSTATS(pStats
->ifHCInUcastPkts
) +
2961 MM_GETSTATS(pStats
->ifHCInMulticastPkts
) +
2962 MM_GETSTATS(pStats
->ifHCInBroadcastPkts
);
2963 p_netstats
->tx_packets
=
2964 MM_GETSTATS(pStats
->ifHCOutUcastPkts
) +
2965 MM_GETSTATS(pStats
->ifHCOutMulticastPkts
) +
2966 MM_GETSTATS(pStats
->ifHCOutBroadcastPkts
);
2967 /* There counters seem to be innacurate. Use byte number accumulation
2969 p_netstats->rx_bytes = MM_GETSTATS(pStats->ifHCInOctets);
2970 p_netstats->tx_bytes = MM_GETSTATS(pStats->ifHCOutOctets);
2972 p_netstats
->tx_errors
=
2973 MM_GETSTATS(pStats
->dot3StatsInternalMacTransmitErrors
) +
2974 MM_GETSTATS(pStats
->dot3StatsCarrierSenseErrors
) +
2975 MM_GETSTATS(pStats
->ifOutDiscards
) +
2976 MM_GETSTATS(pStats
->ifOutErrors
);
2977 p_netstats
->multicast
= MM_GETSTATS(pStats
->ifHCInMulticastPkts
);
2978 p_netstats
->collisions
= MM_GETSTATS(pStats
->etherStatsCollisions
);
2979 p_netstats
->rx_length_errors
=
2980 MM_GETSTATS(pStats
->dot3StatsFramesTooLong
) +
2981 MM_GETSTATS(pStats
->etherStatsUndersizePkts
);
2982 p_netstats
->rx_over_errors
= MM_GETSTATS(pStats
->nicNoMoreRxBDs
);
2983 p_netstats
->rx_frame_errors
=
2984 MM_GETSTATS(pStats
->dot3StatsAlignmentErrors
);
2985 p_netstats
->rx_crc_errors
= (unsigned long)
2986 bcm5700_crc_count(pUmDevice
);
2987 p_netstats
->rx_errors
= (unsigned long)
2988 bcm5700_rx_err_count(pUmDevice
);
2990 p_netstats
->tx_aborted_errors
= MM_GETSTATS(pStats
->ifOutDiscards
);
2991 p_netstats
->tx_carrier_errors
=
2992 MM_GETSTATS(pStats
->dot3StatsCarrierSenseErrors
);
2998 b57_suspend_chip(UM_DEVICE_BLOCK
*pUmDevice
)
3000 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
3002 if (pUmDevice
->opened
) {
3003 bcm5700_intr_off(pUmDevice
);
3004 netif_carrier_off(pUmDevice
->dev
);
3005 netif_stop_queue(pUmDevice
->dev
);
3007 tasklet_kill(&pUmDevice
->tasklet
);
3009 bcm5700_poll_wait(pUmDevice
);
3011 pUmDevice
->suspended
= 1;
3012 LM_ShutdownChip(pDevice
, LM_SUSPEND_RESET
);
3016 b57_resume_chip(UM_DEVICE_BLOCK
*pUmDevice
)
3018 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
3020 if (pUmDevice
->suspended
) {
3021 pUmDevice
->suspended
= 0;
3022 if (pUmDevice
->opened
) {
3023 bcm5700_reset(pUmDevice
->dev
);
3026 LM_ShutdownChip(pDevice
, LM_SHUTDOWN_RESET
);
3031 /* Returns 0 on failure, 1 on success */
3033 b57_test_intr(UM_DEVICE_BLOCK
*pUmDevice
)
3035 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
3038 if (!pUmDevice
->opened
)
3040 pUmDevice
->intr_test_result
= 0;
3041 pUmDevice
->intr_test
= 1;
3043 REG_WR(pDevice
, HostCoalesce
.Mode
,
3044 pDevice
->CoalesceMode
| HOST_COALESCE_ENABLE
|
3047 for (j
= 0; j
< 10; j
++) {
3048 if (pUmDevice
->intr_test_result
){
3052 REG_WR(pDevice
, HostCoalesce
.Mode
,
3053 pDevice
->CoalesceMode
| HOST_COALESCE_ENABLE
|
3056 MM_Sleep(pDevice
, 1);
3059 return pUmDevice
->intr_test_result
;
3065 #ifdef ETHTOOL_GSTRINGS
3067 #define ETH_NUM_STATS 30
3068 #define RX_CRC_IDX 5
3069 #define RX_MAC_ERR_IDX 14
3072 char string
[ETH_GSTRING_LEN
];
3073 } bcm5700_stats_str_arr
[ETH_NUM_STATS
] = {
3074 { "rx_unicast_packets" },
3075 { "rx_multicast_packets" },
3076 { "rx_broadcast_packets" },
3079 { "rx_crc_errors" }, /* this needs to be calculated */
3080 { "rx_align_errors" },
3081 { "rx_xon_frames" },
3082 { "rx_xoff_frames" },
3083 { "rx_long_frames" },
3084 { "rx_short_frames" },
3088 { "rx_mac_errors" }, /* this needs to be calculated */
3089 { "tx_unicast_packets" },
3090 { "tx_multicast_packets" },
3091 { "tx_broadcast_packets" },
3094 { "tx_single_collisions" },
3095 { "tx_multi_collisions" },
3096 { "tx_total_collisions" },
3097 { "tx_excess_collisions" },
3098 { "tx_late_collisions" },
3099 { "tx_xon_frames" },
3100 { "tx_xoff_frames" },
3101 { "tx_internal_mac_errors" },
3102 { "tx_carrier_errors" },
3106 #define STATS_OFFSET(offset_name) ((OFFSETOF(T3_STATS_BLOCK, offset_name)) / sizeof(uint64_t))
3109 #define SWAP_DWORD_64(x) (x)
3111 #define SWAP_DWORD_64(x) ((x << 32) | (x >> 32))
3114 unsigned long bcm5700_stats_offset_arr
[ETH_NUM_STATS
] = {
3115 STATS_OFFSET(ifHCInUcastPkts
),
3116 STATS_OFFSET(ifHCInMulticastPkts
),
3117 STATS_OFFSET(ifHCInBroadcastPkts
),
3118 STATS_OFFSET(ifHCInOctets
),
3119 STATS_OFFSET(etherStatsFragments
),
3121 STATS_OFFSET(dot3StatsAlignmentErrors
),
3122 STATS_OFFSET(xonPauseFramesReceived
),
3123 STATS_OFFSET(xoffPauseFramesReceived
),
3124 STATS_OFFSET(dot3StatsFramesTooLong
),
3125 STATS_OFFSET(etherStatsUndersizePkts
),
3126 STATS_OFFSET(etherStatsJabbers
),
3127 STATS_OFFSET(ifInDiscards
),
3128 STATS_OFFSET(ifInErrors
),
3130 STATS_OFFSET(ifHCOutUcastPkts
),
3131 STATS_OFFSET(ifHCOutMulticastPkts
),
3132 STATS_OFFSET(ifHCOutBroadcastPkts
),
3133 STATS_OFFSET(ifHCOutOctets
),
3134 STATS_OFFSET(dot3StatsDeferredTransmissions
),
3135 STATS_OFFSET(dot3StatsSingleCollisionFrames
),
3136 STATS_OFFSET(dot3StatsMultipleCollisionFrames
),
3137 STATS_OFFSET(etherStatsCollisions
),
3138 STATS_OFFSET(dot3StatsExcessiveCollisions
),
3139 STATS_OFFSET(dot3StatsLateCollisions
),
3140 STATS_OFFSET(outXonSent
),
3141 STATS_OFFSET(outXoffSent
),
3142 STATS_OFFSET(dot3StatsInternalMacTransmitErrors
),
3143 STATS_OFFSET(dot3StatsCarrierSenseErrors
),
3144 STATS_OFFSET(ifOutErrors
),
3147 #endif /* ETHTOOL_GSTRINGS */
3150 #define ETH_NUM_TESTS 6
3152 char string
[ETH_GSTRING_LEN
];
3153 } bcm5700_tests_str_arr
[ETH_NUM_TESTS
] = {
3154 { "register test (offline)" },
3155 { "memory test (offline)" },
3156 { "loopback test (offline)" },
3157 { "nvram test (online)" },
3158 { "interrupt test (online)" },
3159 { "link test (online)" },
3162 extern LM_STATUS
b57_test_registers(UM_DEVICE_BLOCK
*pUmDevice
);
3163 extern LM_STATUS
b57_test_memory(UM_DEVICE_BLOCK
*pUmDevice
);
3164 extern LM_STATUS
b57_test_nvram(UM_DEVICE_BLOCK
*pUmDevice
);
3165 extern LM_STATUS
b57_test_link(UM_DEVICE_BLOCK
*pUmDevice
);
3166 extern LM_STATUS
b57_test_loopback(UM_DEVICE_BLOCK
*pUmDevice
, int looptype
, int linespeed
);
3169 #ifdef ETHTOOL_GREGS
3170 #if (LINUX_VERSION_CODE >= 0x02040f)
3172 bcm5700_get_reg_blk(UM_DEVICE_BLOCK
*pUmDevice
, u32
**buf
, u32 start
, u32 end
,
3176 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
3179 memset(*buf
, 0, end
- start
);
3180 *buf
= *buf
+ (end
- start
)/4;
3183 for (offset
= start
; offset
< end
; offset
+=4, *buf
= *buf
+ 1) {
3184 if (T3_ASIC_IS_5705_BEYOND(pDevice
->ChipRevId
)){
3185 if (((offset
>= 0x3400) && (offset
< 0x3c00)) ||
3186 ((offset
>= 0x5400) && (offset
< 0x5800)) ||
3187 ((offset
>= 0x6400) && (offset
< 0x6800))) {
3192 **buf
= REG_RD_OFFSET(pDevice
, offset
);
3198 static int netdev_ethtool_ioctl(struct net_device
*dev
, void *useraddr
)
3200 struct ethtool_cmd ethcmd
;
3201 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
3202 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
3204 if (mm_copy_from_user(ðcmd
, useraddr
, sizeof(ethcmd
)))
3207 switch (ethcmd
.cmd
) {
3208 #ifdef ETHTOOL_GDRVINFO
3209 case ETHTOOL_GDRVINFO
: {
3210 struct ethtool_drvinfo info
= {ETHTOOL_GDRVINFO
};
3212 strcpy(info
.driver
, bcm5700_driver
);
3213 #ifdef INCLUDE_5701_AX_FIX
3214 if(pDevice
->ChipRevId
== T3_CHIP_ID_5701_A0
) {
3215 extern int t3FwReleaseMajor
;
3216 extern int t3FwReleaseMinor
;
3217 extern int t3FwReleaseFix
;
3219 sprintf(info
.fw_version
, "%i.%i.%i",
3220 t3FwReleaseMajor
, t3FwReleaseMinor
,
3224 strcpy(info
.fw_version
, pDevice
->BootCodeVer
);
3225 strcpy(info
.version
, bcm5700_version
);
3226 #if (LINUX_VERSION_CODE <= 0x020422)
3227 strcpy(info
.bus_info
, pUmDevice
->pdev
->slot_name
);
3229 strcpy(info
.bus_info
, pci_name(pUmDevice
->pdev
));
3234 #ifdef ETHTOOL_GEEPROM
3235 BCM_EEDUMP_LEN(&info
, pDevice
->NvramSize
);
3237 #ifdef ETHTOOL_GREGS
3238 /* dump everything, including holes in the register space */
3239 info
.regdump_len
= 0x6c00;
3241 #ifdef ETHTOOL_GSTATS
3242 info
.n_stats
= ETH_NUM_STATS
;
3245 info
.testinfo_len
= ETH_NUM_TESTS
;
3247 if (mm_copy_to_user(useraddr
, &info
, sizeof(info
)))
3252 case ETHTOOL_GSET
: {
3253 if ((pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)||
3254 (pDevice
->PhyFlags
& PHY_IS_FIBER
)) {
3256 (SUPPORTED_1000baseT_Full
|
3258 ethcmd
.supported
|= SUPPORTED_FIBRE
;
3259 ethcmd
.port
= PORT_FIBRE
;
3262 (SUPPORTED_10baseT_Half
|
3263 SUPPORTED_10baseT_Full
|
3264 SUPPORTED_100baseT_Half
|
3265 SUPPORTED_100baseT_Full
|
3266 SUPPORTED_1000baseT_Half
|
3267 SUPPORTED_1000baseT_Full
|
3269 ethcmd
.supported
|= SUPPORTED_TP
;
3270 ethcmd
.port
= PORT_TP
;
3273 ethcmd
.transceiver
= XCVR_INTERNAL
;
3274 ethcmd
.phy_address
= 0;
3276 if (pDevice
->LineSpeed
== LM_LINE_SPEED_1000MBPS
)
3277 ethcmd
.speed
= SPEED_1000
;
3278 else if (pDevice
->LineSpeed
== LM_LINE_SPEED_100MBPS
)
3279 ethcmd
.speed
= SPEED_100
;
3280 else if (pDevice
->LineSpeed
== LM_LINE_SPEED_10MBPS
)
3281 ethcmd
.speed
= SPEED_10
;
3285 if (pDevice
->DuplexMode
== LM_DUPLEX_MODE_FULL
)
3286 ethcmd
.duplex
= DUPLEX_FULL
;
3288 ethcmd
.duplex
= DUPLEX_HALF
;
3290 if (pDevice
->DisableAutoNeg
== FALSE
) {
3291 ethcmd
.autoneg
= AUTONEG_ENABLE
;
3292 ethcmd
.advertising
= ADVERTISED_Autoneg
;
3293 if ((pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) ||
3294 (pDevice
->PhyFlags
& PHY_IS_FIBER
)) {
3295 ethcmd
.advertising
|=
3296 ADVERTISED_1000baseT_Full
|
3300 ethcmd
.advertising
|=
3302 if (pDevice
->advertising
&
3303 PHY_AN_AD_10BASET_HALF
) {
3305 ethcmd
.advertising
|=
3306 ADVERTISED_10baseT_Half
;
3308 if (pDevice
->advertising
&
3309 PHY_AN_AD_10BASET_FULL
) {
3311 ethcmd
.advertising
|=
3312 ADVERTISED_10baseT_Full
;
3314 if (pDevice
->advertising
&
3315 PHY_AN_AD_100BASETX_HALF
) {
3317 ethcmd
.advertising
|=
3318 ADVERTISED_100baseT_Half
;
3320 if (pDevice
->advertising
&
3321 PHY_AN_AD_100BASETX_FULL
) {
3323 ethcmd
.advertising
|=
3324 ADVERTISED_100baseT_Full
;
3326 if (pDevice
->advertising1000
&
3327 BCM540X_AN_AD_1000BASET_HALF
) {
3329 ethcmd
.advertising
|=
3330 ADVERTISED_1000baseT_Half
;
3332 if (pDevice
->advertising1000
&
3333 BCM540X_AN_AD_1000BASET_FULL
) {
3335 ethcmd
.advertising
|=
3336 ADVERTISED_1000baseT_Full
;
3341 ethcmd
.autoneg
= AUTONEG_DISABLE
;
3342 ethcmd
.advertising
= 0;
3345 ethcmd
.maxtxpkt
= pDevice
->TxMaxCoalescedFrames
;
3346 ethcmd
.maxrxpkt
= pDevice
->RxMaxCoalescedFrames
;
3348 if(mm_copy_to_user(useraddr
, ðcmd
, sizeof(ethcmd
)))
3352 case ETHTOOL_SSET
: {
3353 unsigned long flags
;
3355 if(!capable(CAP_NET_ADMIN
))
3357 if (ethcmd
.autoneg
== AUTONEG_ENABLE
) {
3358 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_AUTO
;
3359 pDevice
->RequestedDuplexMode
= LM_DUPLEX_MODE_UNKNOWN
;
3360 pDevice
->DisableAutoNeg
= FALSE
;
3363 if (ethcmd
.speed
== SPEED_1000
&&
3364 pDevice
->PhyFlags
& PHY_NO_GIGABIT
)
3367 if (ethcmd
.speed
== SPEED_1000
&&
3368 (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
||
3369 pDevice
->PhyFlags
& PHY_IS_FIBER
) ) {
3371 pDevice
->RequestedLineSpeed
=
3372 LM_LINE_SPEED_1000MBPS
;
3374 pDevice
->RequestedDuplexMode
=
3375 LM_DUPLEX_MODE_FULL
;
3377 else if (ethcmd
.speed
== SPEED_100
&&
3378 !(pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) &&
3379 !(pDevice
->PhyFlags
& PHY_IS_FIBER
)) {
3381 pDevice
->RequestedLineSpeed
=
3382 LM_LINE_SPEED_100MBPS
;
3384 else if (ethcmd
.speed
== SPEED_10
&&
3385 !(pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) &&
3386 !(pDevice
->PhyFlags
& PHY_IS_FIBER
)) {
3388 pDevice
->RequestedLineSpeed
=
3389 LM_LINE_SPEED_10MBPS
;
3395 pDevice
->DisableAutoNeg
= TRUE
;
3396 if (ethcmd
.duplex
== DUPLEX_FULL
) {
3397 pDevice
->RequestedDuplexMode
=
3398 LM_DUPLEX_MODE_FULL
;
3401 if (!(pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) &&
3402 !(pDevice
->PhyFlags
& PHY_IS_FIBER
) ) {
3404 pDevice
->RequestedDuplexMode
=
3405 LM_DUPLEX_MODE_HALF
;
3409 if (netif_running(dev
)) {
3410 BCM5700_PHY_LOCK(pUmDevice
, flags
);
3411 LM_SetupPhy(pDevice
);
3412 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
3418 case ETHTOOL_GWOL
: {
3419 struct ethtool_wolinfo wol
= {ETHTOOL_GWOL
};
3421 if (((pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) &&
3422 !(pDevice
->Flags
& FIBER_WOL_CAPABLE_FLAG
)) ||
3423 (pDevice
->Flags
& DISABLE_D3HOT_FLAG
)) {
3428 wol
.supported
= WAKE_MAGIC
;
3429 if (pDevice
->WakeUpMode
== LM_WAKE_UP_MODE_MAGIC_PACKET
)
3431 wol
.wolopts
= WAKE_MAGIC
;
3437 if (mm_copy_to_user(useraddr
, &wol
, sizeof(wol
)))
3441 case ETHTOOL_SWOL
: {
3442 struct ethtool_wolinfo wol
;
3444 if(!capable(CAP_NET_ADMIN
))
3446 if (mm_copy_from_user(&wol
, useraddr
, sizeof(wol
)))
3448 if ((((pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) &&
3449 !(pDevice
->Flags
& FIBER_WOL_CAPABLE_FLAG
)) ||
3450 (pDevice
->Flags
& DISABLE_D3HOT_FLAG
)) &&
3455 if ((wol
.wolopts
& ~WAKE_MAGIC
) != 0) {
3458 if (wol
.wolopts
& WAKE_MAGIC
) {
3459 pDevice
->WakeUpModeCap
= LM_WAKE_UP_MODE_MAGIC_PACKET
;
3460 pDevice
->WakeUpMode
= LM_WAKE_UP_MODE_MAGIC_PACKET
;
3463 pDevice
->WakeUpModeCap
= LM_WAKE_UP_MODE_NONE
;
3464 pDevice
->WakeUpMode
= LM_WAKE_UP_MODE_NONE
;
3470 #ifdef ETHTOOL_GLINK
3471 case ETHTOOL_GLINK
: {
3472 struct ethtool_value edata
= {ETHTOOL_GLINK
};
3474 /* ifup only waits for 5 seconds for link up */
3475 /* NIC may take more than 5 seconds to establish link */
3476 if ((pUmDevice
->delayed_link_ind
> 0) &&
3477 delay_link
[pUmDevice
->index
])
3480 if (pDevice
->LinkStatus
== LM_STATUS_LINK_ACTIVE
) {
3486 if (mm_copy_to_user(useraddr
, &edata
, sizeof(edata
)))
3491 #ifdef ETHTOOL_NWAY_RST
3492 case ETHTOOL_NWAY_RST
: {
3494 unsigned long flags
;
3496 if(!capable(CAP_NET_ADMIN
))
3498 if (pDevice
->DisableAutoNeg
) {
3501 if (!netif_running(dev
))
3503 BCM5700_PHY_LOCK(pUmDevice
, flags
);
3504 if (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) {
3505 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_1000MBPS
;
3506 pDevice
->DisableAutoNeg
= TRUE
;
3507 LM_SetupPhy(pDevice
);
3509 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_AUTO
;
3510 pDevice
->DisableAutoNeg
= FALSE
;
3511 LM_SetupPhy(pDevice
);
3514 if ((T3_ASIC_REV(pDevice
->ChipRevId
) ==
3515 T3_ASIC_REV_5703
) ||
3516 (T3_ASIC_REV(pDevice
->ChipRevId
) ==
3517 T3_ASIC_REV_5704
) ||
3518 (T3_ASIC_REV(pDevice
->ChipRevId
) ==
3521 LM_ResetPhy(pDevice
);
3522 LM_SetupPhy(pDevice
);
3524 pDevice
->PhyFlags
&= ~PHY_FIBER_FALLBACK
;
3525 LM_ReadPhy(pDevice
, PHY_CTRL_REG
, &phyctrl
);
3526 LM_WritePhy(pDevice
, PHY_CTRL_REG
, phyctrl
|
3527 PHY_CTRL_AUTO_NEG_ENABLE
|
3528 PHY_CTRL_RESTART_AUTO_NEG
);
3530 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
3534 #ifdef ETHTOOL_GEEPROM
3535 case ETHTOOL_GEEPROM
: {
3536 struct ethtool_eeprom eeprom
;
3538 LM_UINT32 buf1
[64/4];
3539 int i
, j
, offset
, len
;
3541 if (mm_copy_from_user(&eeprom
, useraddr
, sizeof(eeprom
)))
3544 if (eeprom
.offset
>= pDevice
->NvramSize
)
3547 /* maximum data limited */
3548 /* to read more, call again with a different offset */
3549 if (eeprom
.len
> 0x800) {
3551 if (mm_copy_to_user(useraddr
, &eeprom
, sizeof(eeprom
)))
3555 if (eeprom
.len
> 64) {
3556 buf
= kmalloc(eeprom
.len
, GFP_KERNEL
);
3563 useraddr
+= offsetof(struct ethtool_eeprom
, data
);
3565 offset
= eeprom
.offset
;
3568 offset
&= 0xfffffffc;
3569 len
+= (offset
& 3);
3571 len
= (len
+ 3) & 0xfffffffc;
3572 for (i
= 0, j
= 0; j
< len
; i
++, j
+= 4) {
3573 if (LM_NvramRead(pDevice
, offset
+ j
, buf
+ i
) !=
3574 LM_STATUS_SUCCESS
) {
3579 buf
+= (eeprom
.offset
& 3);
3580 i
= mm_copy_to_user(useraddr
, buf
, eeprom
.len
);
3582 if (eeprom
.len
> 64) {
3589 case ETHTOOL_SEEPROM
: {
3590 struct ethtool_eeprom eeprom
;
3591 LM_UINT32 buf
[64/4];
3594 if(!capable(CAP_NET_ADMIN
))
3596 if (mm_copy_from_user(&eeprom
, useraddr
, sizeof(eeprom
)))
3599 if ((eeprom
.offset
& 3) || (eeprom
.len
& 3) ||
3600 (eeprom
.offset
>= pDevice
->NvramSize
)) {
3604 if ((eeprom
.offset
+ eeprom
.len
) >= pDevice
->NvramSize
) {
3605 eeprom
.len
= pDevice
->NvramSize
- eeprom
.offset
;
3608 useraddr
+= offsetof(struct ethtool_eeprom
, data
);
3611 offset
= eeprom
.offset
;
3617 if (mm_copy_from_user(&buf
, useraddr
, i
))
3620 bcm5700_intr_off(pUmDevice
);
3621 /* Prevent race condition on Grc.Mode register */
3622 bcm5700_poll_wait(pUmDevice
);
3624 if (LM_NvramWriteBlock(pDevice
, offset
, buf
, i
/4) !=
3625 LM_STATUS_SUCCESS
) {
3626 bcm5700_intr_on(pUmDevice
);
3629 bcm5700_intr_on(pUmDevice
);
3637 #ifdef ETHTOOL_GREGS
3638 #if (LINUX_VERSION_CODE >= 0x02040f)
3639 case ETHTOOL_GREGS
: {
3640 struct ethtool_regs eregs
;
3641 LM_UINT32
*buf
, *buf1
;
3644 if(!capable(CAP_NET_ADMIN
))
3646 if (pDevice
->Flags
& UNDI_FIX_FLAG
)
3648 if (mm_copy_from_user(&eregs
, useraddr
, sizeof(eregs
)))
3650 if (eregs
.len
> 0x6c00)
3652 eregs
.version
= 0x0;
3653 if (mm_copy_to_user(useraddr
, &eregs
, sizeof(eregs
)))
3655 buf
= buf1
= kmalloc(eregs
.len
, GFP_KERNEL
);
3658 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0, 0xb0, 0);
3659 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0xb0, 0x200, 1);
3660 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x200, 0x8f0, 0);
3661 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x8f0, 0xc00, 1);
3662 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0xc00, 0xce0, 0);
3663 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0xce0, 0x1000, 1);
3664 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x1000, 0x1004, 0);
3665 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x1004, 0x1400, 1);
3666 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x1400, 0x1480, 0);
3667 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x1480, 0x1800, 1);
3668 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x1800, 0x1848, 0);
3669 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x1848, 0x1c00, 1);
3670 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x1c00, 0x1c04, 0);
3671 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x1c04, 0x2000, 1);
3672 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x2000, 0x225c, 0);
3673 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x225c, 0x2400, 1);
3674 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x2400, 0x24c4, 0);
3675 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x24c4, 0x2800, 1);
3676 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x2800, 0x2804, 0);
3677 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x2804, 0x2c00, 1);
3678 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x2c00, 0x2c20, 0);
3679 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x2c20, 0x3000, 1);
3680 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x3000, 0x3014, 0);
3681 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x3014, 0x3400, 1);
3682 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x3400, 0x3408, 0);
3683 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x3408, 0x3800, 1);
3684 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x3800, 0x3808, 0);
3685 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x3808, 0x3c00, 1);
3686 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x3c00, 0x3d00, 0);
3687 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x3d00, 0x4000, 1);
3688 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x4000, 0x4010, 0);
3689 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x4010, 0x4400, 1);
3690 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x4400, 0x4458, 0);
3691 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x4458, 0x4800, 1);
3692 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x4800, 0x4808, 0);
3693 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x4808, 0x4c00, 1);
3694 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x4c00, 0x4c08, 0);
3695 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x4c08, 0x5000, 1);
3696 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x5000, 0x5050, 0);
3697 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x5050, 0x5400, 1);
3698 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x5400, 0x5450, 0);
3699 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x5450, 0x5800, 1);
3700 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x5800, 0x5a10, 0);
3701 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x5a10, 0x6000, 1);
3702 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x6000, 0x600c, 0);
3703 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x600c, 0x6400, 1);
3704 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x6400, 0x6404, 0);
3705 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x6404, 0x6800, 1);
3706 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x6800, 0x6848, 0);
3707 bcm5700_get_reg_blk(pUmDevice
, &buf
, 0x6848, 0x6c00, 1);
3709 i
= mm_copy_to_user(useraddr
+ sizeof(eregs
), buf1
, eregs
.len
);
3717 #ifdef ETHTOOL_GPAUSEPARAM
3718 case ETHTOOL_GPAUSEPARAM
: {
3719 struct ethtool_pauseparam epause
= { ETHTOOL_GPAUSEPARAM
};
3721 if (!pDevice
->DisableAutoNeg
) {
3722 epause
.autoneg
= (pDevice
->FlowControlCap
&
3723 LM_FLOW_CONTROL_AUTO_PAUSE
) != 0;
3729 (pDevice
->FlowControl
&
3730 LM_FLOW_CONTROL_RECEIVE_PAUSE
) != 0;
3732 (pDevice
->FlowControl
&
3733 LM_FLOW_CONTROL_TRANSMIT_PAUSE
) != 0;
3734 if (mm_copy_to_user(useraddr
, &epause
, sizeof(epause
)))
3739 case ETHTOOL_SPAUSEPARAM
: {
3740 struct ethtool_pauseparam epause
;
3741 unsigned long flags
;
3743 if(!capable(CAP_NET_ADMIN
))
3745 if (mm_copy_from_user(&epause
, useraddr
, sizeof(epause
)))
3747 pDevice
->FlowControlCap
= 0;
3748 if (epause
.autoneg
&& !pDevice
->DisableAutoNeg
) {
3749 pDevice
->FlowControlCap
|= LM_FLOW_CONTROL_AUTO_PAUSE
;
3751 if (epause
.rx_pause
) {
3752 pDevice
->FlowControlCap
|=
3753 LM_FLOW_CONTROL_RECEIVE_PAUSE
;
3755 if (epause
.tx_pause
) {
3756 pDevice
->FlowControlCap
|=
3757 LM_FLOW_CONTROL_TRANSMIT_PAUSE
;
3759 if (netif_running(dev
)) {
3760 BCM5700_PHY_LOCK(pUmDevice
, flags
);
3761 LM_SetupPhy(pDevice
);
3762 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
3768 #ifdef ETHTOOL_GRXCSUM
3769 case ETHTOOL_GRXCSUM
: {
3770 struct ethtool_value edata
= { ETHTOOL_GRXCSUM
};
3773 (pDevice
->TaskToOffload
&
3774 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM
) != 0;
3775 if (mm_copy_to_user(useraddr
, &edata
, sizeof(edata
)))
3780 case ETHTOOL_SRXCSUM
: {
3781 struct ethtool_value edata
;
3783 if(!capable(CAP_NET_ADMIN
))
3785 if (mm_copy_from_user(&edata
, useraddr
, sizeof(edata
)))
3788 if (!(pDevice
->TaskOffloadCap
&
3789 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM
)) {
3793 pDevice
->TaskToOffload
|=
3794 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM
|
3795 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM
;
3798 pDevice
->TaskToOffload
&=
3799 ~(LM_TASK_OFFLOAD_RX_TCP_CHECKSUM
|
3800 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM
);
3804 case ETHTOOL_GTXCSUM
: {
3805 struct ethtool_value edata
= { ETHTOOL_GTXCSUM
};
3808 (dev
->features
& get_csum_flag( pDevice
->ChipRevId
)) != 0;
3809 if (mm_copy_to_user(useraddr
, &edata
, sizeof(edata
)))
3814 case ETHTOOL_STXCSUM
: {
3815 struct ethtool_value edata
;
3817 if(!capable(CAP_NET_ADMIN
))
3819 if (mm_copy_from_user(&edata
, useraddr
, sizeof(edata
)))
3822 if (!(pDevice
->TaskOffloadCap
&
3823 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM
)) {
3827 dev
->features
|= get_csum_flag( pDevice
->ChipRevId
);
3828 pDevice
->TaskToOffload
|=
3829 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM
|
3830 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM
;
3833 dev
->features
&= ~get_csum_flag( pDevice
->ChipRevId
);
3834 pDevice
->TaskToOffload
&=
3835 ~(LM_TASK_OFFLOAD_TX_TCP_CHECKSUM
|
3836 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM
);
3841 struct ethtool_value edata
= { ETHTOOL_GSG
};
3844 (dev
->features
& NETIF_F_SG
) != 0;
3845 if (mm_copy_to_user(useraddr
, &edata
, sizeof(edata
)))
3850 struct ethtool_value edata
;
3852 if(!capable(CAP_NET_ADMIN
))
3854 if (mm_copy_from_user(&edata
, useraddr
, sizeof(edata
)))
3857 dev
->features
|= NETIF_F_SG
;
3860 dev
->features
&= ~NETIF_F_SG
;
3865 #ifdef ETHTOOL_GRINGPARAM
3866 case ETHTOOL_GRINGPARAM
: {
3867 struct ethtool_ringparam ering
= { ETHTOOL_GRINGPARAM
};
3869 ering
.rx_max_pending
= T3_STD_RCV_RCB_ENTRY_COUNT
- 1;
3870 ering
.rx_pending
= pDevice
->RxStdDescCnt
;
3871 ering
.rx_mini_max_pending
= 0;
3872 ering
.rx_mini_pending
= 0;
3873 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
3874 ering
.rx_jumbo_max_pending
= T3_JUMBO_RCV_RCB_ENTRY_COUNT
- 1;
3875 ering
.rx_jumbo_pending
= pDevice
->RxJumboDescCnt
;
3877 ering
.rx_jumbo_max_pending
= 0;
3878 ering
.rx_jumbo_pending
= 0;
3880 ering
.tx_max_pending
= MAX_TX_PACKET_DESC_COUNT
- 1;
3881 ering
.tx_pending
= pDevice
->TxPacketDescCnt
;
3882 if (mm_copy_to_user(useraddr
, &ering
, sizeof(ering
)))
3887 #ifdef ETHTOOL_PHYS_ID
3888 case ETHTOOL_PHYS_ID
: {
3889 struct ethtool_value edata
;
3891 if(!capable(CAP_NET_ADMIN
))
3893 if (mm_copy_from_user(&edata
, useraddr
, sizeof(edata
)))
3895 if (LM_BlinkLED(pDevice
, edata
.data
) == LM_STATUS_SUCCESS
)
3900 #ifdef ETHTOOL_GSTRINGS
3901 case ETHTOOL_GSTRINGS
: {
3902 struct ethtool_gstrings egstr
= { ETHTOOL_GSTRINGS
};
3904 if (mm_copy_from_user(&egstr
, useraddr
, sizeof(egstr
)))
3906 switch(egstr
.string_set
) {
3907 #ifdef ETHTOOL_GSTATS
3909 egstr
.len
= ETH_NUM_STATS
;
3910 if (mm_copy_to_user(useraddr
, &egstr
, sizeof(egstr
)))
3912 if (mm_copy_to_user(useraddr
+ sizeof(egstr
),
3913 bcm5700_stats_str_arr
,
3914 sizeof(bcm5700_stats_str_arr
)))
3920 egstr
.len
= ETH_NUM_TESTS
;
3921 if (mm_copy_to_user(useraddr
, &egstr
, sizeof(egstr
)))
3923 if (mm_copy_to_user(useraddr
+ sizeof(egstr
),
3924 bcm5700_tests_str_arr
,
3925 sizeof(bcm5700_tests_str_arr
)))
3934 #ifdef ETHTOOL_GSTATS
3935 case ETHTOOL_GSTATS
: {
3936 struct ethtool_stats estats
= { ETHTOOL_GSTATS
};
3937 uint64_t stats
[ETH_NUM_STATS
];
3940 (uint64_t *) pDevice
->pStatsBlkVirt
;
3942 estats
.n_stats
= ETH_NUM_STATS
;
3944 memset(stats
, 0, sizeof(stats
));
3948 for (i
= 0; i
< ETH_NUM_STATS
; i
++) {
3949 if (bcm5700_stats_offset_arr
[i
] != 0) {
3950 stats
[i
] = SWAP_DWORD_64(*(pStats
+
3951 bcm5700_stats_offset_arr
[i
]));
3953 else if (i
== RX_CRC_IDX
) {
3955 bcm5700_crc_count(pUmDevice
);
3957 else if (i
== RX_MAC_ERR_IDX
) {
3959 bcm5700_rx_err_count(pUmDevice
);
3963 if (mm_copy_to_user(useraddr
, &estats
, sizeof(estats
))) {
3966 if (mm_copy_to_user(useraddr
+ sizeof(estats
), &stats
,
3974 case ETHTOOL_TEST
: {
3975 struct ethtool_test etest
;
3976 uint64_t tests
[ETH_NUM_TESTS
] = {0, 0, 0, 0, 0, 0};
3977 LM_POWER_STATE old_power_level
;
3979 printk( KERN_ALERT
"Performing ethtool test.\n"
3980 "This test will take a few seconds to complete.\n" );
3982 if (mm_copy_from_user(&etest
, useraddr
, sizeof(etest
)))
3985 etest
.len
= ETH_NUM_TESTS
;
3986 old_power_level
= pDevice
->PowerLevel
;
3987 if (old_power_level
!= LM_POWER_STATE_D0
) {
3988 LM_SetPowerState(pDevice
, LM_POWER_STATE_D0
);
3989 LM_SwitchClocks(pDevice
);
3991 MM_Sleep(pDevice
, 1000);
3992 if (etest
.flags
& ETH_TEST_FL_OFFLINE
) {
3993 b57_suspend_chip(pUmDevice
);
3994 MM_Sleep(pDevice
, 1000);
3995 LM_HaltCpu(pDevice
,T3_RX_CPU_ID
| T3_TX_CPU_ID
);
3996 MM_Sleep(pDevice
, 1000);
3997 if (b57_test_registers(pUmDevice
) == 0) {
3998 etest
.flags
|= ETH_TEST_FL_FAILED
;
4001 MM_Sleep(pDevice
, 1000);
4002 if (b57_test_memory(pUmDevice
) == 0) {
4003 etest
.flags
|= ETH_TEST_FL_FAILED
;
4006 MM_Sleep(pDevice
, 1000);
4007 if (b57_test_loopback(pUmDevice
, NICE_LOOPBACK_TESTTYPE_MAC
, 0) == 0) {
4008 etest
.flags
|= ETH_TEST_FL_FAILED
;
4011 MM_Sleep(pDevice
, 1000);
4012 b57_resume_chip(pUmDevice
);
4013 /* wait for link to come up for the link test */
4014 MM_Sleep(pDevice
, 4000);
4015 if ((pDevice
->LinkStatus
!= LM_STATUS_LINK_ACTIVE
) &&
4016 !(pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)) {
4018 /* wait a little longer for linkup on copper */
4019 MM_Sleep(pDevice
, 3000);
4022 if (b57_test_nvram(pUmDevice
) == 0) {
4023 etest
.flags
|= ETH_TEST_FL_FAILED
;
4026 MM_Sleep(pDevice
, 1000);
4027 if (b57_test_intr(pUmDevice
) == 0) {
4028 etest
.flags
|= ETH_TEST_FL_FAILED
;
4031 MM_Sleep(pDevice
, 1000);
4032 if (b57_test_link(pUmDevice
) == 0) {
4033 etest
.flags
|= ETH_TEST_FL_FAILED
;
4036 MM_Sleep(pDevice
, 1000);
4037 if (old_power_level
!= LM_POWER_STATE_D0
) {
4038 LM_SetPowerState(pDevice
, old_power_level
);
4040 if (mm_copy_to_user(useraddr
, &etest
, sizeof(etest
))) {
4043 if (mm_copy_to_user(useraddr
+ sizeof(etest
), tests
,
4051 case ETHTOOL_GTSO
: {
4052 struct ethtool_value edata
= { ETHTOOL_GTSO
};
4056 (dev
->features
& NETIF_F_TSO
) != 0;
4060 if (mm_copy_to_user(useraddr
, &edata
, sizeof(edata
)))
4066 case ETHTOOL_STSO
: {
4068 struct ethtool_value edata
;
4070 if (!capable(CAP_NET_ADMIN
))
4073 if (mm_copy_from_user(&edata
, useraddr
, sizeof(edata
)))
4076 if (!(pDevice
->TaskToOffload
&
4077 LM_TASK_OFFLOAD_TCP_SEGMENTATION
)) {
4081 dev
->features
&= ~NETIF_F_TSO
;
4084 if (T3_ASIC_5714_FAMILY(pDevice
->ChipRevId
) &&
4085 (dev
->mtu
> 1500)) {
4086 printk(KERN_ALERT
"%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev
->name
);
4089 dev
->features
|= NETIF_F_TSO
;
4102 #endif /* #ifdef SIOCETHTOOL */
4104 #if (LINUX_VERSION_CODE >= 0x20400) && (LINUX_VERSION_CODE < 0x20600)
4105 #include <linux/iobuf.h>
4109 /* Provide ioctl() calls to examine the MII xcvr state. */
4110 STATIC
int bcm5700_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
4112 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
4113 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
4114 u16
*data
= (u16
*)&rq
->ifr_data
;
4116 unsigned long flags
;
4120 case SIOCGMIIPHY
: /* Get the address of the PHY in use. */
4122 data
[0] = pDevice
->PhyAddr
;
4127 case SIOCGMIIREG
: /* Read the specified MII register. */
4129 uint32 savephyaddr
= 0;
4131 if (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)
4134 /* ifup only waits for 5 seconds for link up */
4135 /* NIC may take more than 5 seconds to establish link */
4136 if ((pUmDevice
->delayed_link_ind
> 0) &&
4137 delay_link
[pUmDevice
->index
]) {
4141 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4142 if (data
[0] != 0xffff) {
4143 savephyaddr
= pDevice
->PhyAddr
;
4144 pDevice
->PhyAddr
= data
[0];
4146 LM_ReadPhy(pDevice
, data
[1] & 0x1f, (LM_UINT32
*)&value
);
4147 if (data
[0] != 0xffff)
4148 pDevice
->PhyAddr
= savephyaddr
;
4149 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4150 data
[3] = value
& 0xffff;
4155 case SIOCGETCPHYRD
: /* Read the specified MII register. */
4156 case SIOCGETCPHYRD2
:
4159 uint32 savephyaddr
= 0;
4161 if (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)
4164 /* ifup only waits for 5 seconds for link up */
4165 /* NIC may take more than 5 seconds to establish link */
4166 if ((pUmDevice
->delayed_link_ind
> 0) &&
4167 delay_link
[pUmDevice
->index
]) {
4171 if (mm_copy_from_user(&args
, rq
->ifr_data
, sizeof(args
)))
4174 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4175 if (cmd
== SIOCGETCPHYRD2
) {
4176 savephyaddr
= pDevice
->PhyAddr
;
4177 pDevice
->PhyAddr
= (args
[0] >> 16) & 0xffff;
4179 LM_ReadPhy(pDevice
, args
[0] & 0xffff, (LM_UINT32
*)&value
);
4180 if (cmd
== SIOCGETCPHYRD2
)
4181 pDevice
->PhyAddr
= savephyaddr
;
4182 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4184 args
[1] = value
& 0xffff;
4185 if (mm_copy_to_user(rq
->ifr_data
, &args
, sizeof(args
)))
4192 case SIOCSMIIREG
: /* Write the specified MII register */
4194 uint32 savephyaddr
= 0;
4196 if (!capable(CAP_NET_ADMIN
))
4199 if (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)
4202 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4203 if (data
[0] != 0xffff) {
4204 savephyaddr
= pDevice
->PhyAddr
;
4205 pDevice
->PhyAddr
= data
[0];
4207 LM_WritePhy(pDevice
, data
[1] & 0x1f, data
[2]);
4208 if (data
[0] != 0xffff)
4209 pDevice
->PhyAddr
= savephyaddr
;
4210 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4216 case SIOCSETCPHYWR
: /* Write the specified MII register */
4217 case SIOCSETCPHYWR2
:
4220 uint32 savephyaddr
= 0;
4222 if (!capable(CAP_NET_ADMIN
))
4225 if (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)
4228 if (mm_copy_from_user(&args
, rq
->ifr_data
, sizeof(args
)))
4231 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4232 if (cmd
== SIOCSETCPHYWR2
) {
4233 savephyaddr
= pDevice
->PhyAddr
;
4234 pDevice
->PhyAddr
= (args
[0] >> 16) & 0xffff;
4236 LM_WritePhy(pDevice
, args
[0] & 0xffff, args
[1]);
4237 if (cmd
== SIOCSETCPHYWR2
)
4238 pDevice
->PhyAddr
= savephyaddr
;
4239 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4243 case SIOCGETCROBORD
: /* Read the specified ROBO register. */
4246 robo_info_t
*robo
= (robo_info_t
*)pUmDevice
->robo
;
4248 if (((pDevice
->Flags
& ROBO_SWITCH_FLAG
) == 0) || (robo
== NULL
))
4251 if (mm_copy_from_user(&args
, rq
->ifr_data
, sizeof(args
)))
4254 if (robo
->ops
->read_reg(robo
, (args
[0] >> 16) & 0xffff, args
[0] & 0xffff, &value
, 2))
4257 args
[1] = value
& 0xffff;
4258 if (mm_copy_to_user(rq
->ifr_data
, &args
, sizeof(args
)))
4264 case SIOCSETCROBOWR
: /* Write the specified ROBO register. */
4267 robo_info_t
*robo
= (robo_info_t
*)pUmDevice
->robo
;
4269 if (!capable(CAP_NET_ADMIN
))
4272 if (((pDevice
->Flags
& ROBO_SWITCH_FLAG
) == 0) || (robo
== NULL
))
4275 if (mm_copy_from_user(&args
, rq
->ifr_data
, sizeof(args
)))
4278 if (robo
->ops
->write_reg(robo
, (args
[0] >> 16) & 0xffff, args
[0] & 0xffff,
4285 case SIOCSETCSETMSGLEVEL
:
4286 if (mm_copy_from_user(&value
, rq
->ifr_data
, sizeof(value
)))
4289 b57_msg_level
= value
;
4290 printf("%s: msglevel set to 0x%x\n", __FUNCTION__
, b57_msg_level
);
4293 case SIOCSETCQOS
: /* Set the qos flag */
4294 if (mm_copy_from_user(&value
, rq
->ifr_data
, sizeof(value
)))
4297 pUmDevice
->qos
= value
;
4298 B57_INFO(("Qos flag now: %d\n", pUmDevice
->qos
));
4305 if ((buf
= MALLOC(SI_OSH
, 4096)) == NULL
) {
4306 B57_ERR(("%s: out of memory, malloced %d bytes\n", __FUNCTION__
,
4311 if (b57_msg_level
& 0x10000)
4312 bcmdumplog(buf
, 4096);
4313 value
= mm_copy_to_user(rq
->ifr_data
, buf
, 4096);
4315 MFREE(SI_OSH
, buf
, 4096);
4326 struct nice_req
* nrq
;
4328 if (!capable(CAP_NET_ADMIN
))
4331 nrq
= (struct nice_req
*)&rq
->ifr_ifru
;
4332 if( nrq
->cmd
== NICE_CMD_QUERY_SUPPORT
) {
4333 nrq
->nrq_magic
= NICE_DEVICE_MAGIC
;
4334 nrq
->nrq_support_rx
= 1;
4335 nrq
->nrq_support_vlan
= 1;
4336 nrq
->nrq_support_get_speed
= 1;
4337 #ifdef BCM_NAPI_RXPOLL
4338 nrq
->nrq_support_rx_napi
= 1;
4342 #ifdef BCM_NAPI_RXPOLL
4343 else if( nrq
->cmd
== NICE_CMD_SET_RX_NAPI
)
4345 else if( nrq
->cmd
== NICE_CMD_SET_RX
)
4348 pUmDevice
->nice_rx
= nrq
->nrq_rx
;
4349 pUmDevice
->nice_ctx
= nrq
->nrq_ctx
;
4350 bcm5700_set_vlan_mode(pUmDevice
);
4353 #ifdef BCM_NAPI_RXPOLL
4354 else if( nrq
->cmd
== NICE_CMD_GET_RX_NAPI
)
4356 else if( nrq
->cmd
== NICE_CMD_GET_RX
)
4359 nrq
->nrq_rx
= pUmDevice
->nice_rx
;
4360 nrq
->nrq_ctx
= pUmDevice
->nice_ctx
;
4363 else if( nrq
->cmd
== NICE_CMD_GET_SPEED
) {
4364 if(pDevice
->LinkStatus
!= LM_STATUS_LINK_ACTIVE
){
4367 else if (pDevice
->LineSpeed
== LM_LINE_SPEED_1000MBPS
) {
4368 nrq
->nrq_speed
= SPEED_1000
;
4369 } else if (pDevice
->LineSpeed
== LM_LINE_SPEED_100MBPS
) {
4370 nrq
->nrq_speed
= SPEED_100
;
4371 } else if (pDevice
->LineSpeed
== LM_LINE_SPEED_10MBPS
) {
4372 nrq
->nrq_speed
= SPEED_100
;
4379 if (!pUmDevice
->opened
)
4383 case NICE_CMD_BLINK_LED
:
4384 if (LM_BlinkLED(pDevice
, nrq
->nrq_blink_time
) ==
4385 LM_STATUS_SUCCESS
) {
4390 case NICE_CMD_DIAG_SUSPEND
:
4391 b57_suspend_chip(pUmDevice
);
4394 case NICE_CMD_DIAG_RESUME
:
4395 b57_resume_chip(pUmDevice
);
4398 case NICE_CMD_REG_READ
:
4399 if (nrq
->nrq_offset
>= 0x10000) {
4400 nrq
->nrq_data
= LM_RegRdInd(pDevice
,
4404 nrq
->nrq_data
= LM_RegRd(pDevice
,
4409 case NICE_CMD_REG_WRITE
:
4410 if (nrq
->nrq_offset
>= 0x10000) {
4411 LM_RegWrInd(pDevice
, nrq
->nrq_offset
,
4415 LM_RegWr(pDevice
, nrq
->nrq_offset
,
4416 nrq
->nrq_data
, FALSE
);
4420 case NICE_CMD_REG_READ_DIRECT
:
4421 case NICE_CMD_REG_WRITE_DIRECT
:
4422 if ((nrq
->nrq_offset
>= 0x10000) ||
4423 (pDevice
->Flags
& UNDI_FIX_FLAG
)) {
4427 if (nrq
->cmd
== NICE_CMD_REG_READ_DIRECT
) {
4428 nrq
->nrq_data
= REG_RD_OFFSET(pDevice
,
4432 REG_WR_OFFSET(pDevice
, nrq
->nrq_offset
,
4437 case NICE_CMD_MEM_READ
:
4438 nrq
->nrq_data
= LM_MemRdInd(pDevice
,
4442 case NICE_CMD_MEM_WRITE
:
4443 LM_MemWrInd(pDevice
, nrq
->nrq_offset
,
4447 case NICE_CMD_CFG_READ32
:
4448 pci_read_config_dword(pUmDevice
->pdev
,
4449 nrq
->nrq_offset
, (u32
*)&nrq
->nrq_data
);
4452 case NICE_CMD_CFG_READ16
:
4453 pci_read_config_word(pUmDevice
->pdev
,
4454 nrq
->nrq_offset
, (u16
*)&nrq
->nrq_data
);
4457 case NICE_CMD_CFG_READ8
:
4458 pci_read_config_byte(pUmDevice
->pdev
,
4459 nrq
->nrq_offset
, (u8
*)&nrq
->nrq_data
);
4462 case NICE_CMD_CFG_WRITE32
:
4463 pci_write_config_dword(pUmDevice
->pdev
,
4464 nrq
->nrq_offset
, (u32
)nrq
->nrq_data
);
4467 case NICE_CMD_CFG_WRITE16
:
4468 pci_write_config_word(pUmDevice
->pdev
,
4469 nrq
->nrq_offset
, (u16
)nrq
->nrq_data
);
4472 case NICE_CMD_CFG_WRITE8
:
4473 pci_write_config_byte(pUmDevice
->pdev
,
4474 nrq
->nrq_offset
, (u8
)nrq
->nrq_data
);
4477 case NICE_CMD_RESET
:
4481 case NICE_CMD_ENABLE_MAC_LOOPBACK
:
4482 if (pDevice
->LoopBackMode
!= 0) {
4486 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4487 LM_EnableMacLoopBack(pDevice
);
4488 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4491 case NICE_CMD_DISABLE_MAC_LOOPBACK
:
4492 if (pDevice
->LoopBackMode
!=
4493 LM_MAC_LOOP_BACK_MODE
) {
4497 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4498 LM_DisableMacLoopBack(pDevice
);
4499 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4502 case NICE_CMD_ENABLE_PHY_LOOPBACK
:
4503 if (pDevice
->LoopBackMode
!= 0) {
4507 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4508 LM_EnablePhyLoopBack(pDevice
);
4509 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4512 case NICE_CMD_DISABLE_PHY_LOOPBACK
:
4513 if (pDevice
->LoopBackMode
!=
4514 LM_PHY_LOOP_BACK_MODE
) {
4518 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4519 LM_DisablePhyLoopBack(pDevice
);
4520 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4523 case NICE_CMD_ENABLE_EXT_LOOPBACK
:
4524 if (pDevice
->LoopBackMode
!= 0) {
4528 if (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) {
4529 if (nrq
->nrq_speed
!= 1000)
4533 if ((nrq
->nrq_speed
!= 1000) &&
4534 (nrq
->nrq_speed
!= 100) &&
4535 (nrq
->nrq_speed
!= 10)) {
4539 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4540 LM_EnableExtLoopBack(pDevice
, nrq
->nrq_speed
);
4541 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4544 case NICE_CMD_DISABLE_EXT_LOOPBACK
:
4545 if (pDevice
->LoopBackMode
!=
4546 LM_EXT_LOOP_BACK_MODE
) {
4550 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4551 LM_DisableExtLoopBack(pDevice
);
4552 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4555 case NICE_CMD_INTERRUPT_TEST
:
4556 nrq
->nrq_intr_test_result
=
4557 b57_test_intr(pUmDevice
);
4560 case NICE_CMD_LOOPBACK_TEST
:
4562 switch (nrq
->nrq_looptype
) {
4563 case NICE_LOOPBACK_TESTTYPE_EXT
:
4564 if ((nrq
->nrq_loopspeed
& ~NICE_LOOPBACK_TEST_SPEEDMASK
) ||
4565 !(nrq
->nrq_loopspeed
& NICE_LOOPBACK_TEST_SPEEDMASK
))
4567 switch (nrq
->nrq_loopspeed
) {
4568 case NICE_LOOPBACK_TEST_10MBPS
:
4569 value
= LM_LINE_SPEED_10MBPS
;
4571 case NICE_LOOPBACK_TEST_100MBPS
:
4572 value
= LM_LINE_SPEED_100MBPS
;
4574 case NICE_LOOPBACK_TEST_1000MBPS
:
4575 value
= LM_LINE_SPEED_1000MBPS
;
4580 case NICE_LOOPBACK_TESTTYPE_MAC
:
4581 case NICE_LOOPBACK_TESTTYPE_PHY
:
4582 b57_suspend_chip(pUmDevice
);
4583 value
= b57_test_loopback(pUmDevice
,
4584 nrq
->nrq_looptype
, value
);
4585 b57_resume_chip(pUmDevice
);
4590 /* A '1' indicates success */
4598 case NICE_CMD_KMALLOC_PHYS
: {
4599 #if (LINUX_VERSION_CODE >= 0x020400)
4604 struct page
*pg
, *last_pg
;
4606 for (i
= 0; i
< MAX_MEM2
; i
++) {
4607 if (pUmDevice
->mem_size_list2
[i
] == 0)
4612 ptr
= pci_alloc_consistent(pUmDevice
->pdev
,
4613 nrq
->nrq_size
, &mapping
);
4617 pUmDevice
->mem_size_list2
[i
] = nrq
->nrq_size
;
4618 pUmDevice
->mem_list2
[i
] = ptr
;
4619 pUmDevice
->dma_list2
[i
] = mapping
;
4621 /* put pci mapping at the beginning of buffer */
4622 *((__u64
*) ptr
) = (__u64
) mapping
;
4624 /* Probably won't work on some architectures */
4625 /* get CPU mapping */
4626 cpu_pa
= (__u64
) virt_to_phys(ptr
);
4627 pUmDevice
->cpu_pa_list2
[i
] = cpu_pa
;
4628 nrq
->nrq_phys_addr_lo
= (__u32
) cpu_pa
;
4629 nrq
->nrq_phys_addr_hi
= (__u32
) (cpu_pa
>> 32);
4631 pg
= virt_to_page(ptr
);
4632 last_pg
= virt_to_page(ptr
+ nrq
->nrq_size
- 1);
4634 #if (LINUX_VERSION_CODE > 0x020500)
4635 SetPageReserved(pg
);
4637 mem_map_reserve(pg
);
4648 case NICE_CMD_KFREE_PHYS
: {
4652 cpu_pa
= (__u64
) nrq
->nrq_phys_addr_lo
+
4653 ((__u64
) nrq
->nrq_phys_addr_hi
<< 32);
4654 for (i
= 0; i
< MAX_MEM2
; i
++) {
4655 if (pUmDevice
->cpu_pa_list2
[i
] ==
4664 bcm5700_freemem2(pUmDevice
, i
);
4668 case NICE_CMD_SET_WRITE_PROTECT
:
4669 if (nrq
->nrq_write_protect
)
4670 pDevice
->Flags
|= EEPROM_WP_FLAG
;
4672 pDevice
->Flags
&= ~EEPROM_WP_FLAG
;
4674 case NICE_CMD_GET_STATS_BLOCK
: {
4675 PT3_STATS_BLOCK pStats
=
4676 (PT3_STATS_BLOCK
)pDevice
->pStatsBlkVirt
;
4677 if (mm_copy_to_user(nrq
->nrq_stats_useraddr
,
4678 pStats
, nrq
->nrq_stats_size
)) {
4683 case NICE_CMD_CLR_STATS_BLOCK
: {
4685 PT3_STATS_BLOCK pStats
=
4686 (PT3_STATS_BLOCK
)pDevice
->pStatsBlkVirt
;
4688 memset(pStats
, 0, sizeof(T3_STATS_BLOCK
));
4689 if (T3_ASIC_REV(pDevice
->ChipRevId
) ==
4693 for(j
= 0x0300; j
< 0x0b00; j
= j
+ 4) {
4694 MEM_WR_OFFSET(pDevice
, j
, 0);
4704 #endif /* NICE_SUPPORT */
4707 return netdev_ethtool_ioctl(dev
, (void *) rq
->ifr_data
);
4715 STATIC
void bcm5700_do_rx_mode(struct net_device
*dev
)
4717 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
4718 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
4720 struct dev_mc_list
*mclist
;
4722 LM_MulticastClear(pDevice
);
4723 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
4724 i
++, mclist
= mclist
->next
) {
4725 LM_MulticastAdd(pDevice
, (PLM_UINT8
) &mclist
->dmi_addr
);
4727 if (dev
->flags
& IFF_ALLMULTI
) {
4728 if (!(pDevice
->ReceiveMask
& LM_ACCEPT_ALL_MULTICAST
)) {
4729 LM_SetReceiveMask(pDevice
,
4730 pDevice
->ReceiveMask
| LM_ACCEPT_ALL_MULTICAST
);
4733 else if (pDevice
->ReceiveMask
& LM_ACCEPT_ALL_MULTICAST
) {
4734 LM_SetReceiveMask(pDevice
,
4735 pDevice
->ReceiveMask
& ~LM_ACCEPT_ALL_MULTICAST
);
4737 if (dev
->flags
& IFF_PROMISC
) {
4738 if (!(pDevice
->ReceiveMask
& LM_PROMISCUOUS_MODE
)) {
4739 LM_SetReceiveMask(pDevice
,
4740 pDevice
->ReceiveMask
| LM_PROMISCUOUS_MODE
);
4743 else if (pDevice
->ReceiveMask
& LM_PROMISCUOUS_MODE
) {
4744 LM_SetReceiveMask(pDevice
,
4745 pDevice
->ReceiveMask
& ~LM_PROMISCUOUS_MODE
);
4750 STATIC
void bcm5700_set_rx_mode(struct net_device
*dev
)
4752 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
4753 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
4755 struct dev_mc_list
*mclist
;
4756 unsigned long flags
;
4758 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4760 LM_MulticastClear(pDevice
);
4761 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
4762 i
++, mclist
= mclist
->next
) {
4763 LM_MulticastAdd(pDevice
, (PLM_UINT8
) &mclist
->dmi_addr
);
4765 if (dev
->flags
& IFF_ALLMULTI
) {
4766 if (!(pDevice
->ReceiveMask
& LM_ACCEPT_ALL_MULTICAST
)) {
4767 LM_SetReceiveMask(pDevice
,
4768 pDevice
->ReceiveMask
| LM_ACCEPT_ALL_MULTICAST
);
4771 else if (pDevice
->ReceiveMask
& LM_ACCEPT_ALL_MULTICAST
) {
4772 LM_SetReceiveMask(pDevice
,
4773 pDevice
->ReceiveMask
& ~LM_ACCEPT_ALL_MULTICAST
);
4775 if (dev
->flags
& IFF_PROMISC
) {
4776 if (!(pDevice
->ReceiveMask
& LM_PROMISCUOUS_MODE
)) {
4777 LM_SetReceiveMask(pDevice
,
4778 pDevice
->ReceiveMask
| LM_PROMISCUOUS_MODE
);
4781 else if (pDevice
->ReceiveMask
& LM_PROMISCUOUS_MODE
) {
4782 LM_SetReceiveMask(pDevice
,
4783 pDevice
->ReceiveMask
& ~LM_PROMISCUOUS_MODE
);
4786 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4790 * Set the hardware MAC address.
4792 STATIC
int bcm5700_set_mac_addr(struct net_device
*dev
, void *p
)
4794 struct sockaddr
*addr
=p
;
4795 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) dev
->priv
;
4796 UM_DEVICE_BLOCK
*pUmDevice
= (UM_DEVICE_BLOCK
*) pDevice
;
4798 if(is_valid_ether_addr(addr
->sa_data
)){
4800 memcpy(dev
->dev_addr
, addr
->sa_data
,dev
->addr_len
);
4801 if (pUmDevice
->opened
)
4802 LM_SetMacAddress(pDevice
, dev
->dev_addr
);
4808 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
4809 STATIC
int bcm5700_change_mtu(struct net_device
*dev
, int new_mtu
)
4811 int pkt_size
= new_mtu
+ ETHERNET_PACKET_HEADER_SIZE
;
4812 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
)dev
->priv
;
4813 PLM_DEVICE_BLOCK pDevice
= &pUmDevice
->lm_dev
;
4814 unsigned long flags
;
4817 if ((pkt_size
< MIN_ETHERNET_PACKET_SIZE_NO_CRC
) ||
4818 (pkt_size
> MAX_ETHERNET_JUMBO_PACKET_SIZE_NO_CRC
)) {
4822 if ( !(pDevice
->Flags
& JUMBO_CAPABLE_FLAG
) &&
4823 (pkt_size
> MAX_ETHERNET_PACKET_SIZE_NO_CRC
) ) {
4827 if (pUmDevice
->suspended
)
4830 if (pUmDevice
->opened
&& (new_mtu
!= dev
->mtu
) &&
4831 (pDevice
->Flags
& JUMBO_CAPABLE_FLAG
)) {
4835 BCM5700_PHY_LOCK(pUmDevice
, flags
);
4837 netif_stop_queue(dev
);
4838 bcm5700_shutdown(pUmDevice
);
4839 bcm5700_freemem(dev
);
4843 if (pkt_size
< MAX_ETHERNET_PACKET_SIZE_NO_CRC
) {
4844 pDevice
->RxMtu
= pDevice
->TxMtu
=
4845 MAX_ETHERNET_PACKET_SIZE_NO_CRC
;
4848 pDevice
->RxMtu
= pDevice
->TxMtu
= pkt_size
;
4851 if (dev
->mtu
<= 1514) {
4852 pDevice
->RxJumboDescCnt
= 0;
4854 else if (pDevice
->Flags
& JUMBO_CAPABLE_FLAG
){
4855 pDevice
->RxJumboDescCnt
=
4856 rx_jumbo_desc_cnt
[pUmDevice
->index
];
4858 pDevice
->RxPacketDescCnt
= pDevice
->RxJumboDescCnt
+
4859 pDevice
->RxStdDescCnt
;
4861 pDevice
->RxJumboBufferSize
= (pDevice
->RxMtu
+ 8 /* CRC + VLAN */ +
4862 COMMON_CACHE_LINE_SIZE
-1) & ~COMMON_CACHE_LINE_MASK
;
4865 if (T3_ASIC_5714_FAMILY(pDevice
->ChipRevId
) &&
4866 (dev
->mtu
> 1514) ) {
4867 if (dev
->features
& NETIF_F_TSO
) {
4868 dev
->features
&= ~NETIF_F_TSO
;
4869 printk(KERN_ALERT
"%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev
->name
);
4875 LM_InitializeAdapter(pDevice
);
4876 bcm5700_do_rx_mode(dev
);
4877 bcm5700_set_vlan_mode(pUmDevice
);
4878 bcm5700_init_counters(pUmDevice
);
4879 if (memcmp(dev
->dev_addr
, pDevice
->NodeAddress
, 6)) {
4880 LM_SetMacAddress(pDevice
, dev
->dev_addr
);
4882 netif_start_queue(dev
);
4883 bcm5700_intr_on(pUmDevice
);
4885 BCM5700_PHY_UNLOCK(pUmDevice
, flags
);
4892 #if (LINUX_VERSION_CODE < 0x020300)
4894 bcm5700_probe(struct net_device
*dev
)
4896 int cards_found
= 0;
4897 struct pci_dev
*pdev
= NULL
;
4898 struct pci_device_id
*pci_tbl
;
4901 if ( ! pci_present())
4904 pci_tbl
= bcm5700_pci_tbl
;
4905 while ((pdev
= pci_find_class(PCI_CLASS_NETWORK_ETHERNET
<< 8, pdev
))) {
4908 pci_read_config_word(pdev
, PCI_SUBSYSTEM_VENDOR_ID
, &ssvid
);
4909 pci_read_config_word(pdev
, PCI_SUBSYSTEM_ID
, &ssid
);
4910 for (idx
= 0; pci_tbl
[idx
].vendor
; idx
++) {
4911 if ((pci_tbl
[idx
].vendor
== PCI_ANY_ID
||
4912 pci_tbl
[idx
].vendor
== pdev
->vendor
) &&
4913 (pci_tbl
[idx
].device
== PCI_ANY_ID
||
4914 pci_tbl
[idx
].device
== pdev
->device
) &&
4915 (pci_tbl
[idx
].subvendor
== PCI_ANY_ID
||
4916 pci_tbl
[idx
].subvendor
== ssvid
) &&
4917 (pci_tbl
[idx
].subdevice
== PCI_ANY_ID
||
4918 pci_tbl
[idx
].subdevice
== ssid
))
4924 if (pci_tbl
[idx
].vendor
== 0)
4928 if (bcm5700_init_one(pdev
, &pci_tbl
[idx
]) == 0)
4932 return cards_found
? 0 : -ENODEV
;
4936 int init_module(void)
4938 return bcm5700_probe(NULL
);
4941 void cleanup_module(void)
4943 struct net_device
*next_dev
;
4944 PUM_DEVICE_BLOCK pUmDevice
;
4947 bcm5700_proc_remove_notifier();
4949 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
4950 while (root_tigon3_dev
) {
4951 pUmDevice
= (PUM_DEVICE_BLOCK
)root_tigon3_dev
->priv
;
4953 bcm5700_proc_remove_dev(root_tigon3_dev
);
4955 next_dev
= pUmDevice
->next_module
;
4956 unregister_netdev(root_tigon3_dev
);
4957 if (pUmDevice
->lm_dev
.pMappedMemBase
)
4958 iounmap(pUmDevice
->lm_dev
.pMappedMemBase
);
4959 #if (LINUX_VERSION_CODE < 0x020600)
4960 kfree(root_tigon3_dev
);
4962 free_netdev(root_tigon3_dev
);
4964 root_tigon3_dev
= next_dev
;
4967 unregister_ioctl32_conversion(SIOCNICE
);
4972 #else /* LINUX_VERSION_CODE < 0x020300 */
4974 #if (LINUX_VERSION_CODE >= 0x020406)
4975 static int bcm5700_suspend (struct pci_dev
*pdev
, DRV_SUSPEND_STATE_TYPE state
)
4977 static void bcm5700_suspend (struct pci_dev
*pdev
)
4980 struct net_device
*dev
= (struct net_device
*) pci_get_drvdata(pdev
);
4981 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) dev
->priv
;
4982 PLM_DEVICE_BLOCK pDevice
= &pUmDevice
->lm_dev
;
4984 if (!netif_running(dev
))
4985 #if (LINUX_VERSION_CODE >= 0x020406)
4991 netif_device_detach (dev
);
4992 bcm5700_shutdown(pUmDevice
);
4994 LM_SetPowerState(pDevice
, LM_POWER_STATE_D3
);
4996 /* pci_power_off(pdev, -1);*/
4997 #if (LINUX_VERSION_CODE >= 0x020406)
5003 #if (LINUX_VERSION_CODE >= 0x020406)
5004 static int bcm5700_resume(struct pci_dev
*pdev
)
5006 static void bcm5700_resume(struct pci_dev
*pdev
)
5009 struct net_device
*dev
= (struct net_device
*) pci_get_drvdata(pdev
);
5010 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) dev
->priv
;
5011 PLM_DEVICE_BLOCK pDevice
= &pUmDevice
->lm_dev
;
5013 if (!netif_running(dev
))
5014 #if (LINUX_VERSION_CODE >= 0x020406)
5019 /* pci_power_on(pdev);*/
5020 netif_device_attach(dev
);
5021 LM_SetPowerState(pDevice
, LM_POWER_STATE_D0
);
5022 MM_InitializeUmPackets(pDevice
);
5024 #if (LINUX_VERSION_CODE >= 0x020406)
5030 static struct pci_driver bcm5700_pci_driver
= {
5031 name
: bcm5700_driver
,
5032 id_table
: bcm5700_pci_tbl
,
5033 probe
: bcm5700_init_one
,
5034 remove
: __devexit_p(bcm5700_remove_one
),
5035 suspend
: bcm5700_suspend
,
5036 resume
: bcm5700_resume
,
5040 bcm5700_notify_reboot(struct notifier_block
*this, unsigned long event
, void *unused
)
5051 B57_INFO(("bcm5700 reboot notification\n"));
5052 pci_unregister_driver(&bcm5700_pci_driver
);
5056 static int __init
bcm5700_init_module (void)
5058 if (msglevel
!= 0xdeadbeef) {
5059 b57_msg_level
= msglevel
;
5060 printf("%s: msglevel set to 0x%x\n", __FUNCTION__
, b57_msg_level
);
5062 b57_msg_level
= B57_ERR_VAL
;
5064 return pci_module_init(&bcm5700_pci_driver
);
5067 static void __exit
bcm5700_cleanup_module (void)
5070 bcm5700_proc_remove_notifier();
5072 unregister_reboot_notifier(&bcm5700_reboot_notifier
);
5073 pci_unregister_driver(&bcm5700_pci_driver
);
/* Register the module entry/exit points with the kernel. */
module_init(bcm5700_init_module);
module_exit(bcm5700_cleanup_module);
#ifdef BCM_NAPI_RXPOLL
/*
 * Hand RX processing to the NAPI poll loop.  Returns SUCCESS when the
 * device was successfully scheduled, FAILURE if it was already queued.
 * NOTE(review): return type reconstructed as LM_STATUS from the
 * LM_STATUS_* return values -- confirm against the header prototype.
 */
LM_STATUS
MM_ScheduleRxPoll(LM_DEVICE_BLOCK *pDevice)
{
	struct net_device *dev = ((UM_DEVICE_BLOCK *) pDevice)->dev;

	if (netif_rx_schedule_prep(dev)) {
		__netif_rx_schedule(dev);
		return LM_STATUS_SUCCESS;
	}
	return LM_STATUS_FAILURE;
}
#endif
5101 MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 Offset
,
5102 LM_UINT16
*pValue16
)
5104 UM_DEVICE_BLOCK
*pUmDevice
;
5106 pUmDevice
= (UM_DEVICE_BLOCK
*) pDevice
;
5107 pci_read_config_word(pUmDevice
->pdev
, Offset
, (u16
*) pValue16
);
5108 return LM_STATUS_SUCCESS
;
5112 MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 Offset
,
5113 LM_UINT32
*pValue32
)
5115 UM_DEVICE_BLOCK
*pUmDevice
;
5117 pUmDevice
= (UM_DEVICE_BLOCK
*) pDevice
;
5118 pci_read_config_dword(pUmDevice
->pdev
, Offset
, (u32
*) pValue32
);
5119 return LM_STATUS_SUCCESS
;
5123 MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 Offset
,
5126 UM_DEVICE_BLOCK
*pUmDevice
;
5128 pUmDevice
= (UM_DEVICE_BLOCK
*) pDevice
;
5129 pci_write_config_word(pUmDevice
->pdev
, Offset
, Value16
);
5130 return LM_STATUS_SUCCESS
;
5134 MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 Offset
,
5137 UM_DEVICE_BLOCK
*pUmDevice
;
5139 pUmDevice
= (UM_DEVICE_BLOCK
*) pDevice
;
5140 pci_write_config_dword(pUmDevice
->pdev
, Offset
, Value32
);
5141 return LM_STATUS_SUCCESS
;
5145 MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 BlockSize
,
5146 PLM_VOID
*pMemoryBlockVirt
, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy
,
5150 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) pDevice
;
5153 pvirt
= pci_alloc_consistent(pUmDevice
->pdev
, BlockSize
,
5156 return LM_STATUS_FAILURE
;
5158 pUmDevice
->mem_list
[pUmDevice
->mem_list_num
] = pvirt
;
5159 pUmDevice
->dma_list
[pUmDevice
->mem_list_num
] = mapping
;
5160 pUmDevice
->mem_size_list
[pUmDevice
->mem_list_num
++] = BlockSize
;
5161 memset(pvirt
, 0, BlockSize
);
5162 *pMemoryBlockVirt
= (PLM_VOID
) pvirt
;
5163 MM_SetAddr(pMemoryBlockPhy
, mapping
);
5164 return LM_STATUS_SUCCESS
;
5168 MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice
, LM_UINT32 BlockSize
,
5169 PLM_VOID
*pMemoryBlockVirt
)
5172 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) pDevice
;
5175 /* Maximum in slab.c */
5176 if (BlockSize
> 131072) {
5177 goto MM_Alloc_error
;
5180 pvirt
= kmalloc(BlockSize
, GFP_ATOMIC
);
5182 goto MM_Alloc_error
;
5184 pUmDevice
->mem_list
[pUmDevice
->mem_list_num
] = pvirt
;
5185 pUmDevice
->dma_list
[pUmDevice
->mem_list_num
] = 0;
5186 pUmDevice
->mem_size_list
[pUmDevice
->mem_list_num
++] = 0;
5187 /* mem_size_list[i] == 0 indicates that the memory should be freed */
5189 memset(pvirt
, 0, BlockSize
);
5190 *pMemoryBlockVirt
= pvirt
;
5191 return LM_STATUS_SUCCESS
;
5194 printk(KERN_WARNING
"%s: Memory allocation failed - buffer parameters may be set too high\n", pUmDevice
->dev
->name
);
5195 return LM_STATUS_FAILURE
;
5199 MM_MapMemBase(PLM_DEVICE_BLOCK pDevice
)
5201 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) pDevice
;
5203 pDevice
->pMappedMemBase
= ioremap_nocache(
5204 pci_resource_start(pUmDevice
->pdev
, 0), sizeof(T3_STD_MEM_MAP
));
5205 if (pDevice
->pMappedMemBase
== 0)
5206 return LM_STATUS_FAILURE
;
5208 return LM_STATUS_SUCCESS
;
5212 MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice
)
5215 struct sk_buff
*skb
;
5216 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) pDevice
;
5217 PUM_PACKET pUmPacket
;
5220 for (i
= 0; i
< pDevice
->RxPacketDescCnt
; i
++) {
5221 pPacket
= QQ_PopHead(&pDevice
->RxPacketFreeQ
.Container
);
5222 pUmPacket
= (PUM_PACKET
) pPacket
;
5224 printk(KERN_DEBUG
"Bad RxPacketFreeQ\n");
5226 if (pUmPacket
->skbuff
== 0) {
5227 #ifdef BCM_WL_EMULATOR
5228 skb
= (struct sk_buff
*)wlcemu_pktget(pDevice
->wlc
,pPacket
->u
.Rx
.RxBufferSize
+ 2);
5230 skb
= dev_alloc_skb(pPacket
->u
.Rx
.RxBufferSize
+ 2 + EXTRA_HDR
);
5233 pUmPacket
->skbuff
= 0;
5235 &pUmDevice
->rx_out_of_buf_q
.Container
,
5239 pUmPacket
->skbuff
= skb
;
5240 skb
->dev
= pUmDevice
->dev
;
5241 #ifndef BCM_WL_EMULATOR
5242 skb_reserve(skb
, EXTRA_HDR
- pUmDevice
->rx_buf_align
);
5245 QQ_PushTail(&pDevice
->RxPacketFreeQ
.Container
, pPacket
);
5247 if (T3_ASIC_REV(pUmDevice
->lm_dev
.ChipRevId
) == T3_ASIC_REV_5700
) {
5248 /* reallocate buffers in the ISR */
5249 pUmDevice
->rx_buf_repl_thresh
= 0;
5250 pUmDevice
->rx_buf_repl_panic_thresh
= 0;
5251 pUmDevice
->rx_buf_repl_isr_limit
= 0;
5254 pUmDevice
->rx_buf_repl_thresh
= pDevice
->RxPacketDescCnt
/ 8;
5255 pUmDevice
->rx_buf_repl_panic_thresh
=
5256 pDevice
->RxPacketDescCnt
* 7 / 8;
5258 /* This limits the time spent in the ISR when the receiver */
5259 /* is in a steady state of being overrun. */
5260 pUmDevice
->rx_buf_repl_isr_limit
= pDevice
->RxPacketDescCnt
/ 8;
5262 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
5263 if (pDevice
->RxJumboDescCnt
!= 0) {
5264 if (pUmDevice
->rx_buf_repl_thresh
>=
5265 pDevice
->RxJumboDescCnt
) {
5267 pUmDevice
->rx_buf_repl_thresh
=
5268 pUmDevice
->rx_buf_repl_panic_thresh
=
5269 pDevice
->RxJumboDescCnt
- 1;
5271 if (pUmDevice
->rx_buf_repl_thresh
>=
5272 pDevice
->RxStdDescCnt
) {
5274 pUmDevice
->rx_buf_repl_thresh
=
5275 pUmDevice
->rx_buf_repl_panic_thresh
=
5276 pDevice
->RxStdDescCnt
- 1;
5281 return LM_STATUS_SUCCESS
;
5285 MM_GetConfig(PLM_DEVICE_BLOCK pDevice
)
5287 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) pDevice
;
5288 int index
= pUmDevice
->index
;
5289 struct net_device
*dev
= pUmDevice
->dev
;
5291 if (index
>= MAX_UNITS
)
5292 return LM_STATUS_SUCCESS
;
5294 #if LINUX_KERNEL_VERSION < 0x0020609
5296 bcm5700_validate_param_range(pUmDevice
, &auto_speed
[index
], "auto_speed",
5298 if (auto_speed
[index
] == 0)
5299 pDevice
->DisableAutoNeg
= TRUE
;
5301 pDevice
->DisableAutoNeg
= FALSE
;
5303 if (line_speed
[index
] == 0) {
5304 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_AUTO
;
5305 pDevice
->DisableAutoNeg
= FALSE
;
5308 bcm5700_validate_param_range(pUmDevice
, &full_duplex
[index
],
5309 "full_duplex", 0, 1, 1);
5310 if (full_duplex
[index
]) {
5311 pDevice
->RequestedDuplexMode
= LM_DUPLEX_MODE_FULL
;
5314 pDevice
->RequestedDuplexMode
= LM_DUPLEX_MODE_HALF
;
5317 if (line_speed
[index
] == 1000) {
5318 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_1000MBPS
;
5319 if (pDevice
->PhyFlags
& PHY_NO_GIGABIT
) {
5320 pDevice
->RequestedLineSpeed
=
5321 LM_LINE_SPEED_100MBPS
;
5322 printk(KERN_WARNING
"%s-%d: Invalid line_speed parameter (1000), using 100\n", bcm5700_driver
, index
);
5325 if ((pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) &&
5326 !full_duplex
[index
]) {
5327 printk(KERN_WARNING
"%s-%d: Invalid full_duplex parameter (0) for fiber, using 1\n", bcm5700_driver
, index
);
5328 pDevice
->RequestedDuplexMode
=
5329 LM_DUPLEX_MODE_FULL
;
5332 if (!(pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) &&
5333 !auto_speed
[index
] && !(pDevice
->PhyFlags
& PHY_IS_FIBER
) ) {
5334 printk(KERN_WARNING
"%s-%d: Invalid auto_speed parameter (0) for copper, using 1\n", bcm5700_driver
, index
);
5335 pDevice
->DisableAutoNeg
= FALSE
;
5339 else if ((pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) ||
5340 (pDevice
->PhyFlags
& PHY_IS_FIBER
)){
5341 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_AUTO
;
5342 pDevice
->RequestedDuplexMode
= LM_DUPLEX_MODE_FULL
;
5343 pDevice
->DisableAutoNeg
= FALSE
;
5344 printk(KERN_WARNING
"%s-%d: Invalid line_speed parameter (%d), using auto\n", bcm5700_driver
, index
, line_speed
[index
]);
5346 else if (line_speed
[index
] == 100) {
5348 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_100MBPS
;
5350 else if (line_speed
[index
] == 10) {
5352 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_10MBPS
;
5355 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_AUTO
;
5356 pDevice
->DisableAutoNeg
= FALSE
;
5357 printk(KERN_WARNING
"%s-%d: Invalid line_speed parameter (%d), using 0\n", bcm5700_driver
, index
, line_speed
[index
]);
5362 #endif /* LINUX_KERNEL_VERSION */
5364 /* This is an unmanageable switch nic and will have link problems if
5367 if(pDevice
->SubsystemVendorId
==0x103c && pDevice
->SubsystemId
==0x3226)
5369 if(pDevice
->RequestedLineSpeed
!= LM_LINE_SPEED_AUTO
)
5371 printk(KERN_WARNING
"%s-%d: Invalid line_speed parameter (%d), using 0\n",
5372 bcm5700_driver
, index
, line_speed
[index
]);
5374 pDevice
->RequestedLineSpeed
= LM_LINE_SPEED_AUTO
;
5375 pDevice
->DisableAutoNeg
= FALSE
;
5378 #if LINUX_KERNEL_VERSION < 0x0020609
5380 pDevice
->FlowControlCap
= 0;
5381 bcm5700_validate_param_range(pUmDevice
, &rx_flow_control
[index
],
5382 "rx_flow_control", 0, 1, 0);
5383 if (rx_flow_control
[index
] != 0) {
5384 pDevice
->FlowControlCap
|= LM_FLOW_CONTROL_RECEIVE_PAUSE
;
5386 bcm5700_validate_param_range(pUmDevice
, &tx_flow_control
[index
],
5387 "tx_flow_control", 0, 1, 0);
5388 if (tx_flow_control
[index
] != 0) {
5389 pDevice
->FlowControlCap
|= LM_FLOW_CONTROL_TRANSMIT_PAUSE
;
5391 bcm5700_validate_param_range(pUmDevice
, &auto_flow_control
[index
],
5392 "auto_flow_control", 0, 1, 0);
5393 if (auto_flow_control
[index
] != 0) {
5394 if (pDevice
->DisableAutoNeg
== FALSE
) {
5396 pDevice
->FlowControlCap
|= LM_FLOW_CONTROL_AUTO_PAUSE
;
5397 if ((tx_flow_control
[index
] == 0) &&
5398 (rx_flow_control
[index
] == 0)) {
5400 pDevice
->FlowControlCap
|=
5401 LM_FLOW_CONTROL_TRANSMIT_PAUSE
|
5402 LM_FLOW_CONTROL_RECEIVE_PAUSE
;
5407 if (dev
->mtu
> 1500) {
5409 if (T3_ASIC_5714_FAMILY(pDevice
->ChipRevId
) &&
5410 (dev
->features
& NETIF_F_TSO
)) {
5411 dev
->features
&= ~NETIF_F_TSO
;
5412 printk(KERN_ALERT
"%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev
->name
);
5415 pDevice
->RxMtu
= dev
->mtu
+ 14;
5418 if ((T3_ASIC_REV(pDevice
->ChipRevId
) != T3_ASIC_REV_5700
) &&
5419 !(pDevice
->Flags
& BCM5788_FLAG
)) {
5420 pDevice
->Flags
|= USE_TAGGED_STATUS_FLAG
;
5421 pUmDevice
->timer_interval
= HZ
;
5422 if ((T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5703
) &&
5423 (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
)) {
5424 pUmDevice
->timer_interval
= HZ
/4;
5428 pUmDevice
->timer_interval
= HZ
/10;
5431 bcm5700_validate_param_range(pUmDevice
, &tx_pkt_desc_cnt
[index
],
5432 "tx_pkt_desc_cnt", 1, MAX_TX_PACKET_DESC_COUNT
-1, TX_DESC_CNT
);
5433 pDevice
->TxPacketDescCnt
= tx_pkt_desc_cnt
[index
];
5434 bcm5700_validate_param_range(pUmDevice
, &rx_std_desc_cnt
[index
],
5435 "rx_std_desc_cnt", 1, T3_STD_RCV_RCB_ENTRY_COUNT
-1,
5437 pDevice
->RxStdDescCnt
= rx_std_desc_cnt
[index
];
5439 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
5440 bcm5700_validate_param_range(pUmDevice
, &rx_jumbo_desc_cnt
[index
],
5441 "rx_jumbo_desc_cnt", 1, T3_JUMBO_RCV_RCB_ENTRY_COUNT
-1,
5444 if (mtu
[index
] <= 1514)
5445 pDevice
->RxJumboDescCnt
= 0;
5446 else if(!T3_ASIC_IS_5705_BEYOND(pDevice
->ChipRevId
)){
5447 pDevice
->RxJumboDescCnt
= rx_jumbo_desc_cnt
[index
];
5452 bcm5700_validate_param_range(pUmDevice
, &adaptive_coalesce
[index
],
5453 "adaptive_coalesce", 0, 1, 1);
5454 #ifdef BCM_NAPI_RXPOLL
5455 if (adaptive_coalesce
[index
]) {
5456 printk(KERN_WARNING
"%s-%d: adaptive_coalesce not used in NAPI mode\n", bcm5700_driver
, index
);
5457 adaptive_coalesce
[index
] = 0;
5461 pUmDevice
->adaptive_coalesce
= adaptive_coalesce
[index
];
5462 if (!pUmDevice
->adaptive_coalesce
) {
5463 bcm5700_validate_param_range(pUmDevice
,
5464 &rx_coalesce_ticks
[index
], "rx_coalesce_ticks", 0,
5465 MAX_RX_COALESCING_TICKS
, RX_COAL_TK
);
5466 if ((rx_coalesce_ticks
[index
] == 0) &&
5467 (rx_max_coalesce_frames
[index
] == 0)) {
5469 printk(KERN_WARNING
"%s-%d: Conflicting rx_coalesce_ticks (0) and rx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5470 bcm5700_driver
, index
, RX_COAL_TK
, RX_COAL_FM
);
5472 rx_coalesce_ticks
[index
] = RX_COAL_TK
;
5473 rx_max_coalesce_frames
[index
] = RX_COAL_FM
;
5475 pDevice
->RxCoalescingTicks
= pUmDevice
->rx_curr_coalesce_ticks
=
5476 rx_coalesce_ticks
[index
];
5477 #ifdef BCM_NAPI_RXPOLL
5478 pDevice
->RxCoalescingTicksDuringInt
= rx_coalesce_ticks
[index
];
5481 bcm5700_validate_param_range(pUmDevice
,
5482 &rx_max_coalesce_frames
[index
],
5483 "rx_max_coalesce_frames", 0,
5484 MAX_RX_MAX_COALESCED_FRAMES
, RX_COAL_FM
);
5486 pDevice
->RxMaxCoalescedFrames
=
5487 pUmDevice
->rx_curr_coalesce_frames
=
5488 rx_max_coalesce_frames
[index
];
5489 #ifdef BCM_NAPI_RXPOLL
5490 pDevice
->RxMaxCoalescedFramesDuringInt
=
5491 rx_max_coalesce_frames
[index
];
5494 bcm5700_validate_param_range(pUmDevice
,
5495 &tx_coalesce_ticks
[index
], "tx_coalesce_ticks", 0,
5496 MAX_TX_COALESCING_TICKS
, TX_COAL_TK
);
5497 if ((tx_coalesce_ticks
[index
] == 0) &&
5498 (tx_max_coalesce_frames
[index
] == 0)) {
5500 printk(KERN_WARNING
"%s-%d: Conflicting tx_coalesce_ticks (0) and tx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5501 bcm5700_driver
, index
, TX_COAL_TK
, TX_COAL_FM
);
5503 tx_coalesce_ticks
[index
] = TX_COAL_TK
;
5504 tx_max_coalesce_frames
[index
] = TX_COAL_FM
;
5506 pDevice
->TxCoalescingTicks
= tx_coalesce_ticks
[index
];
5507 bcm5700_validate_param_range(pUmDevice
,
5508 &tx_max_coalesce_frames
[index
],
5509 "tx_max_coalesce_frames", 0,
5510 MAX_TX_MAX_COALESCED_FRAMES
, TX_COAL_FM
);
5511 pDevice
->TxMaxCoalescedFrames
= tx_max_coalesce_frames
[index
];
5512 pUmDevice
->tx_curr_coalesce_frames
=
5513 pDevice
->TxMaxCoalescedFrames
;
5515 bcm5700_validate_param_range(pUmDevice
,
5516 &stats_coalesce_ticks
[index
], "stats_coalesce_ticks",
5517 0, MAX_STATS_COALESCING_TICKS
, ST_COAL_TK
);
5518 if (adaptive_coalesce
[index
]) {
5519 printk(KERN_WARNING
"%s-%d: Invalid stats_coalesce_ticks parameter set with with adaptive_coalesce parameter. Using adaptive_coalesce.\n", bcm5700_driver
, index
);
5521 if ((stats_coalesce_ticks
[index
] > 0) &&
5522 (stats_coalesce_ticks
[index
] < 100)) {
5523 printk(KERN_WARNING
"%s-%d: Invalid stats_coalesce_ticks parameter (%u), using 100\n", bcm5700_driver
, index
, (unsigned int) stats_coalesce_ticks
[index
]);
5524 stats_coalesce_ticks
[index
] = 100;
5525 pDevice
->StatsCoalescingTicks
= stats_coalesce_ticks
[index
];
5526 pDevice
->StatsCoalescingTicks
= stats_coalesce_ticks
[index
];
5531 pUmDevice
->rx_curr_coalesce_frames
= RX_COAL_FM
;
5532 pUmDevice
->rx_curr_coalesce_ticks
= RX_COAL_TK
;
5533 pUmDevice
->tx_curr_coalesce_frames
= TX_COAL_FM
;
5537 if (T3_ASIC_IS_5705_BEYOND(pDevice
->ChipRevId
)) {
5538 unsigned int tmpvar
;
5540 tmpvar
= pDevice
->StatsCoalescingTicks
/ BCM_TIMER_GRANULARITY
;
5543 * If the result is zero, the request is too demanding.
5549 pDevice
->StatsCoalescingTicks
= tmpvar
* BCM_TIMER_GRANULARITY
;
5551 pUmDevice
->statstimer_interval
= tmpvar
;
5555 bcm5700_validate_param_range(pUmDevice
, &enable_wol
[index
],
5556 "enable_wol", 0, 1, 0);
5557 if (enable_wol
[index
]) {
5558 pDevice
->WakeUpModeCap
= LM_WAKE_UP_MODE_MAGIC_PACKET
;
5559 pDevice
->WakeUpMode
= LM_WAKE_UP_MODE_MAGIC_PACKET
;
5562 #ifdef INCLUDE_TBI_SUPPORT
5563 if (pDevice
->TbiFlags
& ENABLE_TBI_FLAG
) {
5564 if ((T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5704
) ||
5565 (T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5703
)) {
5566 /* just poll since we have hardware autoneg. in 5704 */
5567 pDevice
->TbiFlags
|= TBI_PURE_POLLING_FLAG
;
5570 pDevice
->TbiFlags
|= TBI_POLLING_INTR_FLAG
;
5574 bcm5700_validate_param_range(pUmDevice
, &scatter_gather
[index
],
5575 "scatter_gather", 0, 1, 1);
5576 bcm5700_validate_param_range(pUmDevice
, &tx_checksum
[index
],
5577 "tx_checksum", 0, 1, 1);
5578 bcm5700_validate_param_range(pUmDevice
, &rx_checksum
[index
],
5579 "rx_checksum", 0, 1, 1);
5580 if (!(pDevice
->TaskOffloadCap
& LM_TASK_OFFLOAD_TX_TCP_CHECKSUM
)) {
5581 if (tx_checksum
[index
] || rx_checksum
[index
]) {
5583 pDevice
->TaskToOffload
= LM_TASK_OFFLOAD_NONE
;
5584 printk(KERN_WARNING
"%s-%d: Checksum offload not available on this NIC\n", bcm5700_driver
, index
);
5588 if (rx_checksum
[index
]) {
5589 pDevice
->TaskToOffload
|=
5590 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM
|
5591 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM
;
5593 if (tx_checksum
[index
]) {
5594 pDevice
->TaskToOffload
|=
5595 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM
|
5596 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM
;
5597 pDevice
->Flags
|= NO_TX_PSEUDO_HDR_CSUM_FLAG
;
5601 bcm5700_validate_param_range(pUmDevice
, &enable_tso
[index
],
5602 "enable_tso", 0, 1, 1);
5604 /* Always enable TSO firmware if supported */
5605 /* This way we can turn it on or off on the fly */
5606 if (pDevice
->TaskOffloadCap
& LM_TASK_OFFLOAD_TCP_SEGMENTATION
)
5608 pDevice
->TaskToOffload
|=
5609 LM_TASK_OFFLOAD_TCP_SEGMENTATION
;
5611 if (enable_tso
[index
] &&
5612 !(pDevice
->TaskToOffload
& LM_TASK_OFFLOAD_TCP_SEGMENTATION
))
5614 printk(KERN_WARNING
"%s-%d: TSO not available on this NIC\n", bcm5700_driver
, index
);
5618 bcm5700_validate_param_range(pUmDevice
, &vlan_tag_mode
[index
],
5619 "vlan_strip_mode", 0, 2, 0);
5620 pUmDevice
->vlan_tag_mode
= vlan_tag_mode
[index
];
5622 pUmDevice
->vlan_tag_mode
= VLAN_TAG_MODE_NORMAL_STRIP
;
5625 #endif /* LINUX_KERNEL_VERSION */
5627 #ifdef BCM_NIC_SEND_BD
5628 bcm5700_validate_param_range(pUmDevice
, &nic_tx_bd
[index
], "nic_tx_bd",
5630 if (nic_tx_bd
[index
])
5631 pDevice
->Flags
|= NIC_SEND_BD_FLAG
;
5632 if ((pDevice
->Flags
& ENABLE_PCIX_FIX_FLAG
) ||
5633 (T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5705
)) {
5634 if (pDevice
->Flags
& NIC_SEND_BD_FLAG
) {
5635 pDevice
->Flags
&= ~NIC_SEND_BD_FLAG
;
5636 printk(KERN_WARNING
"%s-%d: Nic Send BDs not available on this NIC or not possible on this system\n", bcm5700_driver
, index
);
5640 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
5641 bcm5700_validate_param_range(pUmDevice
, &disable_msi
[pUmDevice
->index
],
5642 "disable_msi", 0, 1, 0);
5645 bcm5700_validate_param_range(pUmDevice
, &delay_link
[index
],
5646 "delay_link", 0, 1, 0);
5648 bcm5700_validate_param_range(pUmDevice
, &disable_d3hot
[index
],
5649 "disable_d3hot", 0, 1, 0);
5650 if (disable_d3hot
[index
]) {
5653 if (enable_wol
[index
]) {
5654 pDevice
->WakeUpModeCap
= LM_WAKE_UP_MODE_NONE
;
5655 pDevice
->WakeUpMode
= LM_WAKE_UP_MODE_NONE
;
5656 printk(KERN_WARNING
"%s-%d: Wake-On-Lan disabled because D3Hot is disabled\n", bcm5700_driver
, index
);
5659 pDevice
->Flags
|= DISABLE_D3HOT_FLAG
;
5662 return LM_STATUS_SUCCESS
;
/* From include/proto/ethernet.h */
#define ETHER_TYPE_8021Q	0x8100		/* 802.1Q */

/* From include/proto/vlan.h */
#define VLAN_PRI_MASK		7		/* 3 bits of priority */
#define VLAN_PRI_SHIFT		13

/* Replace the priority field (bits 15:13) in a vlan tag with prio.
 * Arguments are parenthesized so that expression arguments (e.g.
 * "a | b" for prio) bind correctly against the shift/mask operators. */
#define UPD_VLANTAG_PRIO(tag, prio) do { \
	(tag) &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); \
	(tag) |= (prio) << VLAN_PRI_SHIFT; \
} while (0)
5679 MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice
)
5681 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) pDevice
;
5683 PUM_PACKET pUmPacket
;
5684 struct sk_buff
*skb
;
5686 int vlan_tag_size
= 0;
5689 if (pDevice
->ReceiveMask
& LM_KEEP_VLAN_TAG
)
5693 pPacket
= (PLM_PACKET
)
5694 QQ_PopHead(&pDevice
->RxPacketReceivedQ
.Container
);
5697 pUmPacket
= (PUM_PACKET
) pPacket
;
5698 #if !defined(NO_PCI_UNMAP)
5699 pci_unmap_single(pUmDevice
->pdev
,
5700 pci_unmap_addr(pUmPacket
, map
[0]),
5701 pPacket
->u
.Rx
.RxBufferSize
,
5702 PCI_DMA_FROMDEVICE
);
5704 if ((pPacket
->PacketStatus
!= LM_STATUS_SUCCESS
) ||
5705 ((size
= pPacket
->PacketSize
) >
5706 (pDevice
->RxMtu
+ vlan_tag_size
))) {
5710 QQ_PushTail(&pUmDevice
->rx_out_of_buf_q
.Container
, pPacket
);
5712 QQ_PushTail(&pDevice
->RxPacketFreeQ
.Container
, pPacket
);
5714 pUmDevice
->rx_misc_errors
++;
5717 skb
= pUmPacket
->skbuff
;
5722 if (CTF_ENAB(pUmDevice
->cih
)) {
5723 if (ctf_forward(pUmDevice
->cih
, skb
) != BCME_ERROR
) {
5724 pUmDevice
->dev
->last_rx
= jiffies
;
5725 pUmDevice
->stats
.rx_bytes
+= skb
->len
;
5729 /* clear skipct flag before sending up */
5730 PKTCLRSKIPCT(pUmDevice
->osh
, skb
);
5734 /* Extract priority from payload and put it in skb->priority */
5736 if (pUmDevice
->qos
) {
5739 rc
= pktsetprio(skb
, TRUE
);
5740 if (rc
& (PKTPRIO_VDSCP
| PKTPRIO_DSCP
))
5741 dscp_prio
= rc
& VLAN_PRI_MASK
;
5743 B57_INFO(("pktsetprio returned 0x%x, skb->priority: %d\n",
5744 rc
, skb
->priority
));
5746 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
5747 if (size
> pDevice
->RxMtu
) {
5748 /* Make sure we have a valid VLAN tag */
5749 if (htons(skb
->protocol
) != ETHER_TYPE_8021Q
) {
5750 dev_kfree_skb_irq(skb
);
5751 pUmDevice
->rx_misc_errors
++;
5756 pUmDevice
->stats
.rx_bytes
+= skb
->len
;
5758 if ((pPacket
->Flags
& RCV_BD_FLAG_TCP_UDP_CHKSUM_FIELD
) &&
5759 (pDevice
->TaskToOffload
&
5760 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM
)) {
5761 if (pPacket
->u
.Rx
.TcpUdpChecksum
== 0xffff) {
5763 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
5765 pUmDevice
->rx_good_chksum_count
++;
5769 skb
->ip_summed
= CHECKSUM_NONE
;
5770 pUmDevice
->rx_bad_chksum_count
++;
5774 skb
->ip_summed
= CHECKSUM_NONE
;
5777 if( pUmDevice
->nice_rx
) {
5778 vlan_tag_t
*vlan_tag
;
5780 vlan_tag
= (vlan_tag_t
*) &skb
->cb
[0];
5781 if (pPacket
->Flags
& RCV_BD_FLAG_VLAN_TAG
) {
5782 vlan_tag
->signature
= 0x7777;
5783 vlan_tag
->tag
= pPacket
->VlanTag
;
5784 /* Override vlan priority with dscp priority */
5786 UPD_VLANTAG_PRIO(vlan_tag
->tag
, dscp_prio
);
5788 vlan_tag
->signature
= 0;
5790 pUmDevice
->nice_rx(skb
, pUmDevice
->nice_ctx
);
5795 if (pUmDevice
->vlgrp
&&
5796 (pPacket
->Flags
& RCV_BD_FLAG_VLAN_TAG
)) {
5797 /* Override vlan priority with dscp priority */
5799 UPD_VLANTAG_PRIO(pPacket
->VlanTag
, dscp_prio
);
5800 #ifdef BCM_NAPI_RXPOLL
5801 vlan_hwaccel_receive_skb(skb
, pUmDevice
->vlgrp
,
5804 vlan_hwaccel_rx(skb
, pUmDevice
->vlgrp
,
5810 #ifdef BCM_WL_EMULATOR
5811 if(pDevice
->wl_emulate_rx
) {
5812 /* bcmstats("emu recv %d %d"); */
5813 wlcemu_receive_skb(pDevice
->wlc
, skb
);
5814 /* bcmstats("emu recv end %d %d"); */
5817 #endif /* BCM_WL_EMULATOR */
5819 #ifdef BCM_NAPI_RXPOLL
5820 netif_receive_skb(skb
);
5827 pUmDevice
->dev
->last_rx
= jiffies
;
5831 pUmPacket
->skbuff
= 0;
5832 QQ_PushTail(&pUmDevice
->rx_out_of_buf_q
.Container
, pPacket
);
5834 #ifdef BCM_WL_EMULATOR
5835 skb
= (struct sk_buff
*)wlcemu_pktget(pDevice
->wlc
,pPacket
->u
.Rx
.RxBufferSize
+ 2);
5837 skb
= dev_alloc_skb(pPacket
->u
.Rx
.RxBufferSize
+ 2 + EXTRA_HDR
);
5838 #endif /* BCM_WL_EMULATOR */
5840 pUmPacket
->skbuff
= 0;
5841 QQ_PushTail(&pUmDevice
->rx_out_of_buf_q
.Container
, pPacket
);
5844 pUmPacket
->skbuff
= skb
;
5845 skb
->dev
= pUmDevice
->dev
;
5846 #ifndef BCM_WL_EMULATOR
5847 skb_reserve(skb
, EXTRA_HDR
- pUmDevice
->rx_buf_align
);
5849 QQ_PushTail(&pDevice
->RxPacketFreeQ
.Container
, pPacket
);
5853 return LM_STATUS_SUCCESS
;
5857 MM_CoalesceTxBuffer(PLM_DEVICE_BLOCK pDevice
, PLM_PACKET pPacket
)
5859 PUM_PACKET pUmPacket
= (PUM_PACKET
) pPacket
;
5860 struct sk_buff
*skb
= pUmPacket
->skbuff
;
5861 struct sk_buff
*nskb
;
5862 #if !defined(NO_PCI_UNMAP)
5863 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) pDevice
;
5865 pci_unmap_single(pUmDevice
->pdev
,
5866 pci_unmap_addr(pUmPacket
, map
[0]),
5867 pci_unmap_len(pUmPacket
, map_len
[0]),
5873 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
5874 pci_unmap_page(pUmDevice
->pdev
,
5875 pci_unmap_addr(pUmPacket
, map
[i
+ 1]),
5876 pci_unmap_len(pUmPacket
, map_len
[i
+ 1]),
5882 if ((nskb
= skb_copy(skb
, GFP_ATOMIC
))) {
5883 pUmPacket
->lm_packet
.u
.Tx
.FragCount
= 1;
5885 pUmPacket
->skbuff
= nskb
;
5886 return LM_STATUS_SUCCESS
;
5889 pUmPacket
->skbuff
= 0;
5890 return LM_STATUS_FAILURE
;
5893 /* Returns 1 if not all buffers are allocated */
5895 replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice
, int max
)
5898 PUM_PACKET pUmPacket
;
5899 PLM_DEVICE_BLOCK pDevice
= (PLM_DEVICE_BLOCK
) pUmDevice
;
5900 struct sk_buff
*skb
;
5905 while ((pUmPacket
= (PUM_PACKET
)
5906 QQ_PopHead(&pUmDevice
->rx_out_of_buf_q
.Container
)) != 0) {
5907 pPacket
= (PLM_PACKET
) pUmPacket
;
5908 if (pUmPacket
->skbuff
) {
5909 /* reuse an old skb */
5910 QQ_PushTail(&pDevice
->RxPacketFreeQ
.Container
, pPacket
);
5914 #ifdef BCM_WL_EMULATOR
5915 if ((skb
= (struct sk_buff
*)wlcemu_pktget(pDevice
->wlc
,pPacket
->u
.Rx
.RxBufferSize
+ 2)) == 0)
5917 if ((skb
= dev_alloc_skb(pPacket
->u
.Rx
.RxBufferSize
+ 2 + EXTRA_HDR
)) == 0)
5918 #endif /* BCM_WL_EMULATOR */
5920 QQ_PushHead(&pUmDevice
->rx_out_of_buf_q
.Container
,
5925 pUmPacket
->skbuff
= skb
;
5926 skb
->dev
= pUmDevice
->dev
;
5927 #ifndef BCM_WL_EMULATOR
5928 skb_reserve(skb
, EXTRA_HDR
- pUmDevice
->rx_buf_align
);
5930 QQ_PushTail(&pDevice
->RxPacketFreeQ
.Container
, pPacket
);
5934 if (alloc_cnt
>= max
)
5938 if (queue_rx
|| pDevice
->QueueAgain
) {
5939 LM_QueueRxPackets(pDevice
);
5945 MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice
)
5947 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) pDevice
;
5949 PUM_PACKET pUmPacket
;
5950 struct sk_buff
*skb
;
5951 #if !defined(NO_PCI_UNMAP) && MAX_SKB_FRAGS
5956 pPacket
= (PLM_PACKET
)
5957 QQ_PopHead(&pDevice
->TxPacketXmittedQ
.Container
);
5960 pUmPacket
= (PUM_PACKET
) pPacket
;
5961 skb
= pUmPacket
->skbuff
;
5962 #if !defined(NO_PCI_UNMAP)
5963 pci_unmap_single(pUmDevice
->pdev
,
5964 pci_unmap_addr(pUmPacket
, map
[0]),
5965 pci_unmap_len(pUmPacket
, map_len
[0]),
5968 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
5969 pci_unmap_page(pUmDevice
->pdev
,
5970 pci_unmap_addr(pUmPacket
, map
[i
+ 1]),
5971 pci_unmap_len(pUmPacket
, map_len
[i
+ 1]),
5976 dev_kfree_skb_irq(skb
);
5977 pUmPacket
->skbuff
= 0;
5978 QQ_PushTail(&pDevice
->TxPacketFreeQ
.Container
, pPacket
);
5980 if (pUmDevice
->tx_full
) {
5981 if (QQ_GetEntryCnt(&pDevice
->TxPacketFreeQ
.Container
) >=
5982 (pDevice
->TxPacketDescCnt
>> 1)) {
5984 pUmDevice
->tx_full
= 0;
5985 netif_wake_queue(pUmDevice
->dev
);
5988 return LM_STATUS_SUCCESS
;
5992 MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice
, LM_STATUS Status
)
5994 PUM_DEVICE_BLOCK pUmDevice
= (PUM_DEVICE_BLOCK
) pDevice
;
5995 struct net_device
*dev
= pUmDevice
->dev
;
5996 LM_FLOW_CONTROL flow_control
;
5999 if (!pUmDevice
->opened
)
6000 return LM_STATUS_SUCCESS
;
6002 if (!pUmDevice
->suspended
) {
6003 if (Status
== LM_STATUS_LINK_DOWN
) {
6004 netif_carrier_off(dev
);
6006 else if (Status
== LM_STATUS_LINK_ACTIVE
) {
6007 netif_carrier_on(dev
);
6011 if (pUmDevice
->delayed_link_ind
> 0) {
6012 pUmDevice
->delayed_link_ind
= 0;
6013 if (Status
== LM_STATUS_LINK_DOWN
) {
6014 B57_INFO(("%s: %s NIC Link is DOWN\n", bcm5700_driver
, dev
->name
));
6016 else if (Status
== LM_STATUS_LINK_ACTIVE
) {
6017 B57_INFO(("%s: %s NIC Link is UP, ", bcm5700_driver
, dev
->name
));
6021 if (Status
== LM_STATUS_LINK_DOWN
) {
6022 B57_INFO(("%s: %s NIC Link is Down\n", bcm5700_driver
, dev
->name
));
6024 else if (Status
== LM_STATUS_LINK_ACTIVE
) {
6025 B57_INFO(("%s: %s NIC Link is Up, ", bcm5700_driver
, dev
->name
));
6029 if (Status
== LM_STATUS_LINK_ACTIVE
) {
6030 if (pDevice
->LineSpeed
== LM_LINE_SPEED_1000MBPS
)
6032 else if (pDevice
->LineSpeed
== LM_LINE_SPEED_100MBPS
)
6034 else if (pDevice
->LineSpeed
== LM_LINE_SPEED_10MBPS
)
6037 B57_INFO(("%d Mbps ", speed
));
6039 if (pDevice
->DuplexMode
== LM_DUPLEX_MODE_FULL
)
6040 B57_INFO(("full duplex"));
6042 B57_INFO(("half duplex"));
6044 flow_control
= pDevice
->FlowControl
&
6045 (LM_FLOW_CONTROL_RECEIVE_PAUSE
|
6046 LM_FLOW_CONTROL_TRANSMIT_PAUSE
);
6048 if (flow_control
& LM_FLOW_CONTROL_RECEIVE_PAUSE
) {
6049 B57_INFO((", receive "));
6050 if (flow_control
& LM_FLOW_CONTROL_TRANSMIT_PAUSE
)
6051 B57_INFO(("& transmit "));
6054 B57_INFO((", transmit "));
6056 B57_INFO(("flow control ON"));
6060 return LM_STATUS_SUCCESS
;
6064 MM_UnmapRxDma(LM_DEVICE_BLOCK
*pDevice
, LM_PACKET
*pPacket
)
6066 #if !defined(NO_PCI_UNMAP)
6067 UM_DEVICE_BLOCK
*pUmDevice
= (UM_DEVICE_BLOCK
*) pDevice
;
6068 UM_PACKET
*pUmPacket
= (UM_PACKET
*) pPacket
;
6070 if (!pUmPacket
->skbuff
)
6073 pci_unmap_single(pUmDevice
->pdev
,
6074 pci_unmap_addr(pUmPacket
, map
[0]),
6075 pPacket
->u
.Rx
.RxBufferSize
,
6076 PCI_DMA_FROMDEVICE
);
6081 MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice
, PLM_PACKET pPacket
)
6083 PUM_PACKET pUmPacket
;
6084 struct sk_buff
*skb
;
6087 return LM_STATUS_SUCCESS
;
6088 pUmPacket
= (PUM_PACKET
) pPacket
;
6089 if ((skb
= pUmPacket
->skbuff
)) {
6090 /* DMA address already unmapped */
6093 pUmPacket
->skbuff
= 0;
6094 return LM_STATUS_SUCCESS
;
6098 MM_Sleep(LM_DEVICE_BLOCK
*pDevice
, LM_UINT32 msec
)
6100 current
->state
= TASK_INTERRUPTIBLE
;
6101 if (schedule_timeout(HZ
* msec
/ 1000) != 0) {
6102 return LM_STATUS_FAILURE
;
6104 if (signal_pending(current
))
6105 return LM_STATUS_FAILURE
;
6107 return LM_STATUS_SUCCESS
;
6111 bcm5700_shutdown(UM_DEVICE_BLOCK
*pUmDevice
)
6113 LM_DEVICE_BLOCK
*pDevice
= (LM_DEVICE_BLOCK
*) pUmDevice
;
6115 bcm5700_intr_off(pUmDevice
);
6116 netif_carrier_off(pUmDevice
->dev
);
6118 tasklet_kill(&pUmDevice
->tasklet
);
6120 bcm5700_poll_wait(pUmDevice
);
6124 pDevice
->InitDone
= 0;
6125 bcm5700_free_remaining_rx_bufs(pUmDevice
);
6129 bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK
*pUmDevice
)
6131 LM_DEVICE_BLOCK
*pDevice
= &pUmDevice
->lm_dev
;
6132 UM_PACKET
*pUmPacket
;
6135 cnt
= QQ_GetEntryCnt(&pUmDevice
->rx_out_of_buf_q
.Container
);
6136 for (i
= 0; i
< cnt
; i
++) {
6138 QQ_PopHead(&pUmDevice
->rx_out_of_buf_q
.Container
))
6141 MM_UnmapRxDma(pDevice
, (LM_PACKET
*) pUmPacket
);
6142 MM_FreeRxBuffer(pDevice
, &pUmPacket
->lm_packet
);
6143 QQ_PushTail(&pDevice
->RxPacketFreeQ
.Container
,
6150 bcm5700_validate_param_range(UM_DEVICE_BLOCK
*pUmDevice
, int *param
,
6151 char *param_name
, int min
, int max
, int deflt
)
6153 if (((unsigned int) *param
< (unsigned int) min
) ||
6154 ((unsigned int) *param
> (unsigned int) max
)) {
6156 printk(KERN_WARNING
"%s-%d: Invalid %s parameter (%u), using %u\n", bcm5700_driver
, pUmDevice
->index
, param_name
, (unsigned int) *param
, (unsigned int) deflt
);
6162 bcm5700_find_peer(struct net_device
*dev
)
6164 struct net_device
*tmp_dev
;
6165 UM_DEVICE_BLOCK
*pUmDevice
, *pUmTmp
;
6166 LM_DEVICE_BLOCK
*pDevice
;
6169 pUmDevice
= (UM_DEVICE_BLOCK
*) dev
->priv
;
6170 pDevice
= &pUmDevice
->lm_dev
;
6171 if (T3_ASIC_REV(pDevice
->ChipRevId
) == T3_ASIC_REV_5704
) {
6172 tmp_dev
= root_tigon3_dev
;
6174 pUmTmp
= (PUM_DEVICE_BLOCK
) tmp_dev
->priv
;
6175 if ((tmp_dev
!= dev
) &&
6176 (pUmDevice
->pdev
->bus
->number
==
6177 pUmTmp
->pdev
->bus
->number
) &&
6178 PCI_SLOT(pUmDevice
->pdev
->devfn
) ==
6179 PCI_SLOT(pUmTmp
->pdev
->devfn
)) {
6183 tmp_dev
= pUmTmp
->next_module
;
6190 MM_FindPeerDev(LM_DEVICE_BLOCK
*pDevice
)
6192 UM_DEVICE_BLOCK
*pUmDevice
= (UM_DEVICE_BLOCK
*) pDevice
;
6193 struct net_device
*dev
= pUmDevice
->dev
;
6194 struct net_device
*peer_dev
;
6196 peer_dev
= bcm5700_find_peer(dev
);
6199 return ((LM_DEVICE_BLOCK
*) peer_dev
->priv
);
6202 int MM_FindCapability(LM_DEVICE_BLOCK
*pDevice
, int capability
)
6204 UM_DEVICE_BLOCK
*pUmDevice
= (UM_DEVICE_BLOCK
*) pDevice
;
6205 return (pci_find_capability(pUmDevice
->pdev
, capability
));
6208 #if defined(HAVE_POLL_CONTROLLER)||defined(CONFIG_NET_POLL_CONTROLLER)
6210 poll_bcm5700(struct net_device
*dev
)
6212 UM_DEVICE_BLOCK
*pUmDevice
= dev
->priv
;
6214 #if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020605)
6216 bcm5700_interrupt(pUmDevice
->pdev
->irq
, dev
, NULL
);
6217 #ifdef BCM_NAPI_RXPOLL
6218 if (dev
->poll_list
.prev
) {
6221 bcm5700_poll(dev
, &budget
);
6228 disable_irq(pUmDevice
->pdev
->irq
);
6229 bcm5700_interrupt(pUmDevice
->pdev
->irq
, dev
, NULL
);
6230 enable_irq(pUmDevice
->pdev
->irq
);