Fix 'Interference Mitigation' option
[tomato.git] / release / src-rt / bcm57xx / linux / 757um.c
blob4812ed15b8774d350373f6bce815f3c00ba2df57
1 /******************************************************************************/
2 /* */
3 /* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2005 Broadcom */
4 /* Corporation. */
5 /* All rights reserved. */
6 /* */
7 /* This program is free software; you can redistribute it and/or modify */
8 /* it under the terms of the GNU General Public License as published by */
9 /* the Free Software Foundation, located in the file LICENSE. */
10 /* */
11 /******************************************************************************/
13 /* $Id: b57um.c,v 1.34.10.1 2010-10-09 01:46:48 Exp $ */
15 char bcm5700_driver[] = "bcm5700";
16 char bcm5700_version[] = "8.3.14";
17 char bcm5700_date[] = "(11/2/05)";
19 #define B57UM
20 #include "mm.h"
22 #include "typedefs.h"
23 #include <epivers.h>
24 #include "osl.h"
25 #include "bcmdefs.h"
26 #include "bcmdevs.h"
27 #include "bcmutils.h"
28 #include "hndsoc.h"
29 #include "siutils.h"
30 #include "hndgige.h"
31 #include "etioctl.h"
32 #include "bcmrobo.h"
34 /* this is needed to get good and stable performances */
35 #define EXTRA_HDR BCMEXTRAHDROOM
37 /* A few user-configurable values. */
39 #define MAX_UNITS 16
40 /* Used to pass the full-duplex flag, etc. */
41 static int line_speed[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
42 static int auto_speed[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
43 static int full_duplex[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
44 static int rx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
45 static int tx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
46 static int auto_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
47 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
48 static int mtu[MAX_UNITS] = {1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500}; /* Jumbo MTU for interfaces. */
49 #endif
50 static int tx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
51 static int rx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
52 static int scatter_gather[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
54 #define TX_DESC_CNT DEFAULT_TX_PACKET_DESC_COUNT
55 static unsigned int tx_pkt_desc_cnt[MAX_UNITS] =
56 {TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
57 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
58 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
59 TX_DESC_CNT};
61 #define RX_DESC_CNT DEFAULT_STD_RCV_DESC_COUNT
62 static unsigned int rx_std_desc_cnt[MAX_UNITS] =
63 {RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
64 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
65 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
66 RX_DESC_CNT };
68 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
69 #define JBO_DESC_CNT DEFAULT_JUMBO_RCV_DESC_COUNT
70 static unsigned int rx_jumbo_desc_cnt[MAX_UNITS] =
71 {JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
72 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
73 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
74 JBO_DESC_CNT };
75 #endif
77 #ifdef BCM_INT_COAL
78 #ifdef BCM_NAPI_RXPOLL
79 static unsigned int adaptive_coalesce[MAX_UNITS] =
80 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
81 #else
82 static unsigned int adaptive_coalesce[MAX_UNITS] =
83 {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
84 #endif
86 #define RX_COAL_TK DEFAULT_RX_COALESCING_TICKS
87 static unsigned int rx_coalesce_ticks[MAX_UNITS] =
88 {RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
89 RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
90 RX_COAL_TK,RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
91 RX_COAL_TK};
93 #define RX_COAL_FM DEFAULT_RX_MAX_COALESCED_FRAMES
94 static unsigned int rx_max_coalesce_frames[MAX_UNITS] =
95 {RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
96 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
97 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
98 RX_COAL_FM};
100 #define TX_COAL_TK DEFAULT_TX_COALESCING_TICKS
101 static unsigned int tx_coalesce_ticks[MAX_UNITS] =
102 {TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
103 TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
104 TX_COAL_TK,TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
105 TX_COAL_TK};
107 #define TX_COAL_FM DEFAULT_TX_MAX_COALESCED_FRAMES
108 static unsigned int tx_max_coalesce_frames[MAX_UNITS] =
109 {TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
110 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
111 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
112 TX_COAL_FM};
114 #define ST_COAL_TK DEFAULT_STATS_COALESCING_TICKS
115 static unsigned int stats_coalesce_ticks[MAX_UNITS] =
116 {ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
117 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
118 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
119 ST_COAL_TK,};
121 #endif
122 #ifdef BCM_WOL
123 static int enable_wol[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
124 #endif
125 #ifdef BCM_TSO
126 static int enable_tso[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
127 #endif
128 #ifdef BCM_NIC_SEND_BD
129 static int nic_tx_bd[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
130 #endif
131 #ifdef BCM_ASF
132 static int vlan_tag_mode[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
133 #endif
134 static int delay_link[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
135 static int disable_d3hot[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
137 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
138 static int disable_msi[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
139 static int bcm_msi_chipset_bug = 0;
140 #endif
142 #define BCM_TIMER_GRANULARITY (1000000 / HZ)
144 /* Hack to hook the data path to the BCM WL dirver */
145 #ifdef BCM_WL_EMULATOR
146 #include "bcmnvram.h"
147 #include "wl_bcm57emu.h"
148 #ifdef SKB_MANAGER
149 int skb_old_alloc = 0;
150 #endif
151 #endif /* BCM_WL_EMULATOR */
153 /* Operational parameters that usually are not changed. */
154 /* Time in jiffies before concluding the transmitter is hung. */
155 #define TX_TIMEOUT (2*HZ)
157 #if (LINUX_VERSION_CODE < 0x02030d)
158 #define pci_resource_start(dev, bar) (dev->base_address[bar] & PCI_BASE_ADDRESS_MEM_MASK)
159 #elif (LINUX_VERSION_CODE < 0x02032b)
160 #define pci_resource_start(dev, bar) (dev->resource[bar] & PCI_BASE_ADDRESS_MEM_MASK)
161 #endif
163 #if (LINUX_VERSION_CODE < 0x02032b)
164 #define dev_kfree_skb_irq(skb) dev_kfree_skb(skb)
165 #define netif_wake_queue(dev) clear_bit(0, &dev->tbusy); mark_bh(NET_BH)
166 #define netif_stop_queue(dev) set_bit(0, &dev->tbusy)
168 static inline void netif_start_queue(struct net_device *dev)
170 dev->tbusy = 0;
171 dev->interrupt = 0;
172 dev->start = 1;
175 #define netif_queue_stopped(dev) dev->tbusy
176 #define netif_running(dev) dev->start
178 static inline void tasklet_schedule(struct tasklet_struct *tasklet)
180 queue_task(tasklet, &tq_immediate);
181 mark_bh(IMMEDIATE_BH);
184 static inline void tasklet_init(struct tasklet_struct *tasklet,
185 void (*func)(unsigned long),
186 unsigned long data)
188 tasklet->next = NULL;
189 tasklet->sync = 0;
190 tasklet->routine = (void (*)(void *))func;
191 tasklet->data = (void *)data;
194 #define tasklet_kill(tasklet)
196 #endif
198 #if (LINUX_VERSION_CODE < 0x020300)
199 struct pci_device_id {
200 unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */
201 unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
202 unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */
203 unsigned long driver_data; /* Data private to the driver */
206 #define PCI_ANY_ID 0
208 #define pci_set_drvdata(pdev, dev)
209 #define pci_get_drvdata(pdev) 0
211 #define pci_enable_device(pdev) 0
213 #define __devinit __init
214 #define __devinitdata __initdata
215 #define __devexit
217 #define SET_MODULE_OWNER(dev)
218 #define MODULE_DEVICE_TABLE(pci, pci_tbl)
220 #endif
222 #if (LINUX_VERSION_CODE < 0x020411)
223 #ifndef __devexit_p
224 #define __devexit_p(x) x
225 #endif
226 #endif
228 #ifndef MODULE_LICENSE
229 #define MODULE_LICENSE(license)
230 #endif
232 #ifndef IRQ_RETVAL
233 typedef void irqreturn_t;
234 #define IRQ_RETVAL(x)
235 #endif
237 #if (LINUX_VERSION_CODE < 0x02032a)
238 static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
239 dma_addr_t *dma_handle)
241 void *virt_ptr;
243 /* Maximum in slab.c */
244 if (size > 131072)
245 return 0;
247 virt_ptr = kmalloc(size, GFP_KERNEL);
248 *dma_handle = virt_to_bus(virt_ptr);
249 return virt_ptr;
251 #define pci_free_consistent(dev, size, ptr, dma_ptr) kfree(ptr)
253 #endif /*#if (LINUX_VERSION_CODE < 0x02032a) */
256 #if (LINUX_VERSION_CODE < 0x02040d)
258 #if (LINUX_VERSION_CODE >= 0x020409) && defined(RED_HAT_LINUX_KERNEL)
260 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
261 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
263 #else
264 /* pci_set_dma_mask is using dma_addr_t */
266 #define BCM_32BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
267 #define BCM_64BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
269 #endif
271 #else /* (LINUX_VERSION_CODE < 0x02040d) */
273 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
274 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
275 #endif
277 #if (LINUX_VERSION_CODE < 0x020329)
278 #define pci_set_dma_mask(pdev, mask) (0)
279 #else
280 #if (LINUX_VERSION_CODE < 0x020403)
282 pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
284 if(! pci_dma_supported(dev, mask))
285 return -EIO;
287 dev->dma_mask = mask;
289 return 0;
291 #endif
292 #endif
294 #if (LINUX_VERSION_CODE < 0x020547)
295 #define pci_set_consistent_dma_mask(pdev, mask) (0)
296 #endif
298 #if (LINUX_VERSION_CODE < 0x020402)
299 #define pci_request_regions(pdev, name) (0)
300 #define pci_release_regions(pdev)
301 #endif
303 #if !defined(spin_is_locked)
304 #define spin_is_locked(lock) (test_bit(0,(lock)))
305 #endif
307 #define BCM5700_LOCK(pUmDevice, flags) \
308 if ((pUmDevice)->do_global_lock) { \
309 spin_lock_irqsave(&(pUmDevice)->global_lock, flags); \
312 #define BCM5700_UNLOCK(pUmDevice, flags) \
313 if ((pUmDevice)->do_global_lock) { \
314 spin_unlock_irqrestore(&(pUmDevice)->global_lock, flags);\
317 inline void
318 bcm5700_intr_lock(PUM_DEVICE_BLOCK pUmDevice)
320 if (pUmDevice->do_global_lock) {
321 spin_lock(&pUmDevice->global_lock);
325 inline void
326 bcm5700_intr_unlock(PUM_DEVICE_BLOCK pUmDevice)
328 if (pUmDevice->do_global_lock) {
329 spin_unlock(&pUmDevice->global_lock);
333 void
334 bcm5700_intr_off(PUM_DEVICE_BLOCK pUmDevice)
336 atomic_inc(&pUmDevice->intr_sem);
337 LM_DisableInterrupt(&pUmDevice->lm_dev);
338 #if (LINUX_VERSION_CODE >= 0x2051c)
339 synchronize_irq(pUmDevice->dev->irq);
340 #else
341 synchronize_irq();
342 #endif
343 LM_DisableInterrupt(&pUmDevice->lm_dev);
346 void
347 bcm5700_intr_on(PUM_DEVICE_BLOCK pUmDevice)
349 if (atomic_dec_and_test(&pUmDevice->intr_sem)) {
350 LM_EnableInterrupt(&pUmDevice->lm_dev);
355 * Broadcom NIC Extension support
356 * -ffan
358 #ifdef NICE_SUPPORT
359 #include "nicext.h"
361 typedef struct {
362 ushort tag;
363 ushort signature;
364 } vlan_tag_t;
366 #endif /* NICE_SUPPORT */
368 int MM_Packet_Desc_Size = sizeof(UM_PACKET);
370 #if defined(MODULE)
371 MODULE_AUTHOR("Michael Chan <mchan at broadcom dot com> and Gary Zambrano <zambrano at broadcom dot com>");
372 MODULE_DESCRIPTION("BCM5700 Driver");
373 MODULE_LICENSE("GPL");
375 #if (LINUX_VERSION_CODE < 0x020605)
377 MODULE_PARM(debug, "i");
378 MODULE_PARM(msglevel, "i");
379 MODULE_PARM(line_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
380 MODULE_PARM(auto_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
381 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
382 MODULE_PARM(rx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
383 MODULE_PARM(tx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
384 MODULE_PARM(auto_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
385 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
386 MODULE_PARM(mtu, "1-" __MODULE_STRING(MAX_UNITS) "i");
387 #endif
388 MODULE_PARM(tx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
389 MODULE_PARM(rx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
390 MODULE_PARM(scatter_gather, "1-" __MODULE_STRING(MAX_UNITS) "i");
391 MODULE_PARM(tx_pkt_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
392 MODULE_PARM(rx_std_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
393 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
394 MODULE_PARM(rx_jumbo_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
395 #endif
396 #ifdef BCM_INT_COAL
397 MODULE_PARM(adaptive_coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
398 MODULE_PARM(rx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
399 MODULE_PARM(rx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
400 MODULE_PARM(tx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
401 MODULE_PARM(tx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
402 MODULE_PARM(stats_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
403 #endif
404 #ifdef BCM_WOL
405 MODULE_PARM(enable_wol, "1-" __MODULE_STRING(MAX_UNITS) "i");
406 #endif
407 #ifdef BCM_TSO
408 MODULE_PARM(enable_tso, "1-" __MODULE_STRING(MAX_UNITS) "i");
409 #endif
410 #ifdef BCM_NIC_SEND_BD
411 MODULE_PARM(nic_tx_bd, "1-" __MODULE_STRING(MAX_UNITS) "i");
412 #endif
413 #ifdef BCM_ASF
414 MODULE_PARM(vlan_tag_mode, "1-" __MODULE_STRING(MAX_UNITS) "i");
415 #endif
416 MODULE_PARM(delay_link, "1-" __MODULE_STRING(MAX_UNITS) "i");
417 MODULE_PARM(disable_d3hot, "1-" __MODULE_STRING(MAX_UNITS) "i");
419 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
420 MODULE_PARM(disable_msi, "1-" __MODULE_STRING(MAX_UNITS) "i");
421 #endif
423 #else /* parms*/
425 #if (LINUX_VERSION_CODE >= 0x020605) && (LINUX_VERSION_CODE < 0x02060a)
427 static int var;
429 #define numvar var
431 #endif
433 #if (LINUX_VERSION_CODE >= 0x2060a)
435 #define numvar NULL
437 #endif
439 module_param_array(line_speed, int, numvar, 0);
440 module_param_array(auto_speed, int, numvar, 0);
441 module_param_array(full_duplex, int, numvar, 0);
442 module_param_array(rx_flow_control, int, numvar, 0);
443 module_param_array(tx_flow_control, int, numvar, 0);
444 module_param_array(auto_flow_control, int, numvar, 0);
445 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
446 module_param_array(mtu, int, numvar, 0);
447 #endif
448 module_param_array(tx_checksum, int, numvar, 0);
449 module_param_array(rx_checksum, int, numvar, 0);
450 module_param_array(scatter_gather, int, numvar, 0);
451 module_param_array(tx_pkt_desc_cnt, int, numvar, 0);
452 module_param_array(rx_std_desc_cnt, int, numvar, 0);
453 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
454 module_param_array(rx_jumbo_desc_cnt, int, numvar, 0);
455 #endif
456 #ifdef BCM_INT_COAL
457 module_param_array(adaptive_coalesce, int, numvar, 0);
458 module_param_array(rx_coalesce_ticks, int, numvar, 0);
459 module_param_array(rx_max_coalesce_frames, int, numvar, 0);
460 module_param_array(tx_coalesce_ticks, int, numvar, 0);
461 module_param_array(tx_max_coalesce_frames, int, numvar, 0);
462 module_param_array(stats_coalesce_ticks, int, numvar, 0);
463 #endif
464 #ifdef BCM_WOL
465 module_param_array(enable_wol, int, numvar, 0);
466 #endif
467 #ifdef BCM_TSO
468 module_param_array(enable_tso, int, numvar, 0);
469 #endif
470 #ifdef BCM_NIC_SEND_BD
471 module_param_array(nic_tx_bd, int, numvar, 0);
472 #endif
473 #ifdef BCM_ASF
474 module_param_array(vlan_tag_mode, int, numvar, 0);
475 #endif
476 module_param_array(delay_link, int, numvar, 0);
477 module_param_array(disable_d3hot, int, numvar, 0);
479 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
480 module_param_array(disable_msi, int, numvar, 0);
481 #endif
484 #endif /* params */
487 #endif
489 #define RUN_AT(x) (jiffies + (x))
491 char kernel_version[] = UTS_RELEASE;
493 #define PCI_SUPPORT_VER2
495 #if !defined(CAP_NET_ADMIN)
496 #define capable(CAP_XXX) (suser())
497 #endif
499 #define tigon3_debug debug
500 #if TIGON3_DEBUG
501 static int tigon3_debug = TIGON3_DEBUG;
502 #else
503 static int tigon3_debug = 0;
504 #endif
505 static int msglevel = 0xdeadbeef;
506 int b57_msg_level;
508 int bcm5700_open(struct net_device *dev);
509 STATIC void bcm5700_timer(unsigned long data);
510 STATIC void bcm5700_stats_timer(unsigned long data);
511 STATIC void bcm5700_reset(struct net_device *dev);
512 STATIC int bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev);
513 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
514 STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance);
515 #else
516 STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
517 #endif
518 #ifdef BCM_TASKLET
519 STATIC void bcm5700_tasklet(unsigned long data);
520 #endif
521 STATIC int bcm5700_close(struct net_device *dev);
522 STATIC struct net_device_stats *bcm5700_get_stats(struct net_device *dev);
523 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
524 STATIC void bcm5700_do_rx_mode(struct net_device *dev);
525 STATIC void bcm5700_set_rx_mode(struct net_device *dev);
526 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p);
527 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
528 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu);
529 #endif
530 #ifdef BCM_NAPI_RXPOLL
531 STATIC int bcm5700_poll(struct net_device *dev, int *budget);
532 #endif
533 STATIC int replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max);
534 STATIC int bcm5700_freemem(struct net_device *dev);
535 #ifdef NICE_SUPPORT
536 STATIC int bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index);
537 #endif
538 #ifdef BCM_INT_COAL
539 #ifndef BCM_NAPI_RXPOLL
540 STATIC int bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice);
541 #endif
542 #endif
543 STATIC void bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice);
544 STATIC int bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice);
545 #ifdef BCM_VLAN
546 STATIC void bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
547 STATIC void bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid);
548 #endif
549 void bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice);
550 void bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice);
551 void bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
552 char *param_name, int min, int max, int deflt);
554 static int bcm5700_notify_reboot(struct notifier_block *this, unsigned long event, void *unused);
555 static struct notifier_block bcm5700_reboot_notifier = {
556 bcm5700_notify_reboot,
557 NULL,
561 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
562 STATIC void poll_bcm5700(struct net_device *dev);
563 #endif
565 /* A list of all installed bcm5700 devices. */
566 static struct net_device *root_tigon3_dev = NULL;
568 #if defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) ||defined(CONFIG_PPC64)
570 #ifdef NICE_SUPPORT
571 #if (LINUX_VERSION_CODE < 0x20500)
572 extern int register_ioctl32_conversion(unsigned int cmd,
573 int (*handler)(unsigned int, unsigned int, unsigned long,
574 struct file *));
575 int unregister_ioctl32_conversion(unsigned int cmd);
576 #else
577 #include <linux/ioctl32.h>
578 #endif
580 #define BCM_IOCTL32 1
582 atomic_t bcm5700_load_count = ATOMIC_INIT(0);
584 static int
585 bcm5700_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg,
586 struct file *filep)
588 struct ifreq rq;
589 struct net_device *tmp_dev = root_tigon3_dev;
590 int ret;
591 struct nice_req* nrq;
592 struct ifreq_nice32 {
593 char ifnr_name[16];
594 __u32 cmd;
595 __u32 nrq1;
596 __u32 nrq2;
597 __u32 nrq3;
598 } nrq32;
600 if (!capable(CAP_NET_ADMIN))
601 return -EPERM;
603 if (mm_copy_from_user(&nrq32, (char *) arg, 32))
604 return -EFAULT;
606 memcpy(rq.ifr_name, nrq32.ifnr_name, 16);
608 nrq = (struct nice_req*) &rq.ifr_ifru;
609 nrq->cmd = nrq32.cmd;
610 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK) {
611 nrq->nrq_stats_useraddr = (void *) ((__u64) nrq32.nrq1);
612 nrq->nrq_stats_size = nrq32.nrq2;
614 else {
615 memcpy(&nrq->nrq_speed, &nrq32.nrq1, 12);
617 while (tmp_dev) {
618 if (strcmp(rq.ifr_name, tmp_dev->name) == 0) {
619 ret = bcm5700_ioctl(tmp_dev, &rq, cmd);
620 if (ret == 0) {
621 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK)
622 return ret;
624 memcpy(&nrq32.nrq1, &nrq->nrq_speed, 12);
625 if (mm_copy_to_user((char *) arg, &nrq32, 32))
626 return -EFAULT;
628 return ret;
630 tmp_dev = ((UM_DEVICE_BLOCK *)(tmp_dev->priv))->next_module;
632 return -ENODEV;
634 #endif /* NICE_SUPPORT */
635 #endif
637 typedef enum {
638 BCM5700A6 = 0,
639 BCM5700T6,
640 BCM5700A9,
641 BCM5700T9,
642 BCM5700,
643 BCM5701A5,
644 BCM5701T1,
645 BCM5701T8,
646 BCM5701A7,
647 BCM5701A10,
648 BCM5701A12,
649 BCM5701,
650 BCM5702,
651 BCM5703,
652 BCM5703A31,
653 BCM5703ARBUCKLE,
654 TC996T,
655 TC996ST,
656 TC996SSX,
657 TC996SX,
658 TC996BT,
659 TC997T,
660 TC997SX,
661 TC1000T,
662 TC1000BT,
663 TC940BR01,
664 TC942BR01,
665 TC998T,
666 TC998SX,
667 TC999T,
668 NC6770,
669 NC1020,
670 NC150T,
671 NC7760,
672 NC7761,
673 NC7770,
674 NC7771,
675 NC7780,
676 NC7781,
677 NC7772,
678 NC7782,
679 NC7783,
680 NC320T,
681 NC320I,
682 NC325I,
683 NC324I,
684 NC326I,
685 BCM5704CIOBE,
686 BCM5704,
687 BCM5704S,
688 BCM5705,
689 BCM5705M,
690 BCM5705F,
691 BCM5901,
692 BCM5782,
693 BCM5788,
694 BCM5789,
695 BCM5750,
696 BCM5750M,
697 BCM5720,
698 BCM5751,
699 BCM5751M,
700 BCM5751F,
701 BCM5721,
702 BCM5753,
703 BCM5753M,
704 BCM5753F,
705 BCM5781,
706 BCM5752,
707 BCM5752M,
708 BCM5714,
709 BCM5780,
710 BCM5780S,
711 BCM5715,
712 BCM4785,
713 BCM5903M,
714 UNK5788
715 } board_t;
718 /* indexed by board_t, above */
719 static struct {
720 char *name;
721 } board_info[] __devinitdata = {
722 { "Broadcom BCM5700 1000Base-T" },
723 { "Broadcom BCM5700 1000Base-SX" },
724 { "Broadcom BCM5700 1000Base-SX" },
725 { "Broadcom BCM5700 1000Base-T" },
726 { "Broadcom BCM5700" },
727 { "Broadcom BCM5701 1000Base-T" },
728 { "Broadcom BCM5701 1000Base-T" },
729 { "Broadcom BCM5701 1000Base-T" },
730 { "Broadcom BCM5701 1000Base-SX" },
731 { "Broadcom BCM5701 1000Base-T" },
732 { "Broadcom BCM5701 1000Base-T" },
733 { "Broadcom BCM5701" },
734 { "Broadcom BCM5702 1000Base-T" },
735 { "Broadcom BCM5703 1000Base-T" },
736 { "Broadcom BCM5703 1000Base-SX" },
737 { "Broadcom B5703 1000Base-SX" },
738 { "3Com 3C996 10/100/1000 Server NIC" },
739 { "3Com 3C996 10/100/1000 Server NIC" },
740 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
741 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
742 { "3Com 3C996B Gigabit Server NIC" },
743 { "3Com 3C997 Gigabit Server NIC" },
744 { "3Com 3C997 Gigabit Fiber-SX Server NIC" },
745 { "3Com 3C1000 Gigabit NIC" },
746 { "3Com 3C1000B-T 10/100/1000 PCI" },
747 { "3Com 3C940 Gigabit LOM (21X21)" },
748 { "3Com 3C942 Gigabit LOM (31X31)" },
749 { "3Com 3C998-T Dual Port 10/100/1000 PCI-X Server NIC" },
750 { "3Com 3C998-SX Dual Port 1000-SX PCI-X Server NIC" },
751 { "3Com 3C999-T Quad Port 10/100/1000 PCI-X Server NIC" },
752 { "HP NC6770 Gigabit Server Adapter" },
753 { "NC1020 HP ProLiant Gigabit Server Adapter 32 PCI" },
754 { "HP ProLiant NC 150T PCI 4-port Gigabit Combo Switch Adapter" },
755 { "HP NC7760 Gigabit Server Adapter" },
756 { "HP NC7761 Gigabit Server Adapter" },
757 { "HP NC7770 Gigabit Server Adapter" },
758 { "HP NC7771 Gigabit Server Adapter" },
759 { "HP NC7780 Gigabit Server Adapter" },
760 { "HP NC7781 Gigabit Server Adapter" },
761 { "HP NC7772 Gigabit Server Adapter" },
762 { "HP NC7782 Gigabit Server Adapter" },
763 { "HP NC7783 Gigabit Server Adapter" },
764 { "HP ProLiant NC 320T PCI Express Gigabit Server Adapter" },
765 { "HP ProLiant NC 320i PCI Express Gigabit Server Adapter" },
766 { "HP NC325i Integrated Dual Port PCI Express Gigabit Server Adapter" },
767 { "HP NC324i Integrated Dual Port PCI Express Gigabit Server Adapter" },
768 { "HP NC326i Integrated Dual Port PCI Express Gigabit Server Adapter" },
769 { "Broadcom BCM5704 CIOB-E 1000Base-T" },
770 { "Broadcom BCM5704 1000Base-T" },
771 { "Broadcom BCM5704 1000Base-SX" },
772 { "Broadcom BCM5705 1000Base-T" },
773 { "Broadcom BCM5705M 1000Base-T" },
774 { "Broadcom 570x 10/100 Integrated Controller" },
775 { "Broadcom BCM5901 100Base-TX" },
776 { "Broadcom NetXtreme Gigabit Ethernet for hp" },
777 { "Broadcom BCM5788 NetLink 1000Base-T" },
778 { "Broadcom BCM5789 NetLink 1000Base-T PCI Express" },
779 { "Broadcom BCM5750 1000Base-T PCI" },
780 { "Broadcom BCM5750M 1000Base-T PCI" },
781 { "Broadcom BCM5720 1000Base-T PCI" },
782 { "Broadcom BCM5751 1000Base-T PCI Express" },
783 { "Broadcom BCM5751M 1000Base-T PCI Express" },
784 { "Broadcom BCM5751F 100Base-TX PCI Express" },
785 { "Broadcom BCM5721 1000Base-T PCI Express" },
786 { "Broadcom BCM5753 1000Base-T PCI Express" },
787 { "Broadcom BCM5753M 1000Base-T PCI Express" },
788 { "Broadcom BCM5753F 100Base-TX PCI Express" },
789 { "Broadcom BCM5781 NetLink 1000Base-T PCI Express" },
790 { "Broadcom BCM5752 1000Base-T PCI Express" },
791 { "Broadcom BCM5752M 1000Base-T PCI Express" },
792 { "Broadcom BCM5714 1000Base-T " },
793 { "Broadcom BCM5780 1000Base-T" },
794 { "Broadcom BCM5780S 1000Base-SX" },
795 { "Broadcom BCM5715 1000Base-T " },
796 { "Broadcom BCM4785 10/100/1000 Integrated Controller" },
797 { "Broadcom BCM5903M Gigabit Ethernet " },
798 { "Unknown BCM5788 Gigabit Ethernet " },
799 { 0 }
802 static struct pci_device_id bcm5700_pci_tbl[] __devinitdata = {
803 {0x14e4, 0x1644, 0x14e4, 0x1644, 0, 0, BCM5700A6 },
804 {0x14e4, 0x1644, 0x14e4, 0x2, 0, 0, BCM5700T6 },
805 {0x14e4, 0x1644, 0x14e4, 0x3, 0, 0, BCM5700A9 },
806 {0x14e4, 0x1644, 0x14e4, 0x4, 0, 0, BCM5700T9 },
807 {0x14e4, 0x1644, 0x1028, 0xd1, 0, 0, BCM5700 },
808 {0x14e4, 0x1644, 0x1028, 0x0106, 0, 0, BCM5700 },
809 {0x14e4, 0x1644, 0x1028, 0x0109, 0, 0, BCM5700 },
810 {0x14e4, 0x1644, 0x1028, 0x010a, 0, 0, BCM5700 },
811 {0x14e4, 0x1644, 0x10b7, 0x1000, 0, 0, TC996T },
812 {0x14e4, 0x1644, 0x10b7, 0x1001, 0, 0, TC996ST },
813 {0x14e4, 0x1644, 0x10b7, 0x1002, 0, 0, TC996SSX },
814 {0x14e4, 0x1644, 0x10b7, 0x1003, 0, 0, TC997T },
815 {0x14e4, 0x1644, 0x10b7, 0x1005, 0, 0, TC997SX },
816 {0x14e4, 0x1644, 0x10b7, 0x1008, 0, 0, TC942BR01 },
817 {0x14e4, 0x1644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5700 },
818 {0x14e4, 0x1645, 0x14e4, 1, 0, 0, BCM5701A5 },
819 {0x14e4, 0x1645, 0x14e4, 5, 0, 0, BCM5701T1 },
820 {0x14e4, 0x1645, 0x14e4, 6, 0, 0, BCM5701T8 },
821 {0x14e4, 0x1645, 0x14e4, 7, 0, 0, BCM5701A7 },
822 {0x14e4, 0x1645, 0x14e4, 8, 0, 0, BCM5701A10 },
823 {0x14e4, 0x1645, 0x14e4, 0x8008, 0, 0, BCM5701A12 },
824 {0x14e4, 0x1645, 0x0e11, 0xc1, 0, 0, NC6770 },
825 {0x14e4, 0x1645, 0x0e11, 0x7c, 0, 0, NC7770 },
826 {0x14e4, 0x1645, 0x0e11, 0x85, 0, 0, NC7780 },
827 {0x14e4, 0x1645, 0x1028, 0x0121, 0, 0, BCM5701 },
828 {0x14e4, 0x1645, 0x10b7, 0x1004, 0, 0, TC996SX },
829 {0x14e4, 0x1645, 0x10b7, 0x1006, 0, 0, TC996BT },
830 {0x14e4, 0x1645, 0x10b7, 0x1007, 0, 0, TC1000T },
831 {0x14e4, 0x1645, 0x10b7, 0x1008, 0, 0, TC940BR01 },
832 {0x14e4, 0x1645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5701 },
833 {0x14e4, 0x1646, 0x14e4, 0x8009, 0, 0, BCM5702 },
834 {0x14e4, 0x1646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
835 {0x14e4, 0x16a6, 0x14e4, 0x8009, 0, 0, BCM5702 },
836 {0x14e4, 0x16a6, 0x14e4, 0x000c, 0, 0, BCM5702 },
837 {0x14e4, 0x16a6, 0x0e11, 0xbb, 0, 0, NC7760 },
838 {0x14e4, 0x16a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
839 {0x14e4, 0x16c6, 0x10b7, 0x1100, 0, 0, TC1000BT },
840 {0x14e4, 0x16c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
841 {0x14e4, 0x1647, 0x14e4, 0x0009, 0, 0, BCM5703 },
842 {0x14e4, 0x1647, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
843 {0x14e4, 0x1647, 0x14e4, 0x000b, 0, 0, BCM5703 },
844 {0x14e4, 0x1647, 0x14e4, 0x800a, 0, 0, BCM5703 },
845 {0x14e4, 0x1647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
846 {0x14e4, 0x16a7, 0x14e4, 0x0009, 0, 0, BCM5703 },
847 {0x14e4, 0x16a7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
848 {0x14e4, 0x16a7, 0x14e4, 0x000b, 0, 0, BCM5703 },
849 {0x14e4, 0x16a7, 0x14e4, 0x800a, 0, 0, BCM5703 },
850 {0x14e4, 0x16a7, 0x0e11, 0xca, 0, 0, NC7771 },
851 {0x14e4, 0x16a7, 0x0e11, 0xcb, 0, 0, NC7781 },
852 {0x14e4, 0x16a7, 0x1014, 0x0281, 0, 0, BCM5703ARBUCKLE },
853 {0x14e4, 0x16a7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
854 {0x14e4, 0x16c7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
855 {0x14e4, 0x16c7, 0x0e11, 0xca, 0, 0, NC7771 },
856 {0x14e4, 0x16c7, 0x0e11, 0xcb, 0, 0, NC7781 },
857 {0x14e4, 0x16c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
858 {0x14e4, 0x1648, 0x0e11, 0xcf, 0, 0, NC7772 },
859 {0x14e4, 0x1648, 0x0e11, 0xd0, 0, 0, NC7782 },
860 {0x14e4, 0x1648, 0x0e11, 0xd1, 0, 0, NC7783 },
861 {0x14e4, 0x1648, 0x10b7, 0x2000, 0, 0, TC998T },
862 {0x14e4, 0x1648, 0x10b7, 0x3000, 0, 0, TC999T },
863 {0x14e4, 0x1648, 0x1166, 0x1648, 0, 0, BCM5704CIOBE },
864 {0x14e4, 0x1648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704 },
865 {0x14e4, 0x1649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
866 {0x14e4, 0x16a8, 0x14e4, 0x16a8, 0, 0, BCM5704S },
867 {0x14e4, 0x16a8, 0x10b7, 0x2001, 0, 0, TC998SX },
868 {0x14e4, 0x16a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
869 {0x14e4, 0x1653, 0x0e11, 0x00e3, 0, 0, NC7761 },
870 {0x14e4, 0x1653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
871 {0x14e4, 0x1654, 0x0e11, 0x00e3, 0, 0, NC7761 },
872 {0x14e4, 0x1654, 0x103c, 0x3100, 0, 0, NC1020 },
873 {0x14e4, 0x1654, 0x103c, 0x3226, 0, 0, NC150T },
874 {0x14e4, 0x1654, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
875 {0x14e4, 0x165d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
876 {0x14e4, 0x165e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
877 {0x14e4, 0x166e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705F },
878 {0x14e4, 0x1696, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5782 },
879 {0x14e4, 0x169c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5788 },
880 {0x14e4, 0x169d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5789 },
881 {0x14e4, 0x170d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
882 {0x14e4, 0x170e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
883 {0x14e4, 0x1676, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750 },
884 {0x14e4, 0x167c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750M },
885 {0x14e4, 0x1677, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751 },
886 {0x14e4, 0x167d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751M },
887 {0x14e4, 0x167e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751F },
888 {0x14e4, 0x1658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5720 },
889 {0x14e4, 0x1659, 0x103c, 0x7031, 0, 0, NC320T },
890 {0x14e4, 0x1659, 0x103c, 0x7032, 0, 0, NC320T },
891 {0x14e4, 0x166a, 0x103c, 0x7035, 0, 0, NC325I },
892 {0x14e4, 0x166b, 0x103c, 0x7036, 0, 0, NC325I },
893 {0x14e4, 0x1668, 0x103c, 0x7039, 0, 0, NC324I },
894 {0x14e4, 0x1669, 0x103c, 0x703a, 0, 0, NC324I },
895 {0x14e4, 0x1678, 0x103c, 0x703e, 0, 0, NC326I },
896 {0x14e4, 0x1679, 0x103c, 0x703c, 0, 0, NC326I },
897 {0x14e4, 0x1659, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5721 },
898 {0x14e4, 0x16f7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753 },
899 {0x14e4, 0x16fd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753M },
900 {0x14e4, 0x16fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753F },
901 {0x14e4, 0x16dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5781 },
902 {0x14e4, 0x1600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752 },
903 {0x14e4, 0x1601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752M },
904 {0x14e4, 0x1668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5714 },
905 {0x14e4, 0x166a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780 },
906 {0x14e4, 0x166b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780S },
907 {0x14e4, 0x1678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5715 },
908 {0x14e4, 0x471f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM4785 },
909 {0x14e4, 0x16ff, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5903M },
910 {0x173b, 0x03ed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, UNK5788 },
911 {0,}
914 MODULE_DEVICE_TABLE(pci, bcm5700_pci_tbl);
916 #ifdef BCM_PROC_FS
917 extern int bcm5700_proc_create(void);
918 extern int bcm5700_proc_create_dev(struct net_device *dev);
919 extern int bcm5700_proc_remove_dev(struct net_device *dev);
920 extern int bcm5700_proc_remove_notifier(void);
921 #endif
923 #if (LINUX_VERSION_CODE >= 0x2060a)
924 static struct pci_device_id pci_AMD762id[]={
925 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
926 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
929 #endif
931 static int sbgige = -1;
933 /*******************************************************************************
934 *******************************************************************************
937 int get_csum_flag(LM_UINT32 ChipRevId)
939 return NETIF_F_IP_CSUM;
942 /*******************************************************************************
943 *******************************************************************************
945 This function returns true if the device passed to it is attached to an
946 ICH-ICH4. If the chip is not attached to an ICH, or is attached to an ICH5
947 or newer, it returns false.
949 This function determines which bridge it is attached to by scaning the pci
950 bus looking for bridge chips (hdr_type=1). When a bridge chip is detected,
951 the bridge's subordinate's secondary bus number is compared with this
952 devices bus number. If they match, then the device is attached to this
953 bridge. The bridge's device id is compared to a list of known device ids for
954 ICH-ICH4. Since many older ICH's (ICH2-ICH7) share the same device id, the
955 chip revision must also be checked to determine if the chip is older than an
956 ICH5.
958 To scan the bus, one of two functions is used depending on the kernel
959 version. For 2.4 kernels, the pci_find_device function is used. This
960 function has been depricated in the 2.6 kernel and replaced with the
961 fucntion pci_get_device. The macro walk_pci_bus determines which function to
962 use when the driver is built.
965 #if (LINUX_VERSION_CODE >= 0x2060a)
966 #define walk_pci_bus(d) while ((d = pci_get_device( \
967 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
969 #define unwalk_pci_bus(d) pci_dev_put(d)
971 #else
972 #define walk_pci_bus(d) while ((d = pci_find_device( \
973 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
974 #define unwalk_pci_bus(d)
976 #endif
978 #define ICH5_CHIP_VERSION 0xc0
980 static struct pci_device_id pci_ICHtable[] = {
981 {0x8086, 0x2418}, /* PCI_DEVICE_ID_INTEL_82801AA_8 */
982 {0x8086, 0x2428}, /* PCI_DEVICE_ID_INTEL_82801AB_8 */
983 {0x8086, 0x244e}, /* PCI_DEVICE_ID_INTEL_82801BA_6 */
984 {0x8086, 0x2448}, /* PCI_DEVICE_ID_INTEL_82801BA_11 */
985 {0, 0}
988 int attached_to_ICH4_or_older( struct pci_dev *pdev)
990 struct pci_dev *tmp_pdev = NULL;
991 struct pci_device_id *ich_table;
992 u8 chip_rev;
994 walk_pci_bus (tmp_pdev) {
995 if ((tmp_pdev->hdr_type == 1) &&
996 (tmp_pdev->subordinate != NULL) &&
997 (tmp_pdev->subordinate->secondary == pdev->bus->number)) {
999 ich_table = pci_ICHtable;
1001 while (ich_table->vendor) {
1002 if ((ich_table->vendor == tmp_pdev->vendor) &&
1003 (ich_table->device == tmp_pdev->device)) {
1005 pci_read_config_byte( tmp_pdev,
1006 PCI_REVISION_ID, &chip_rev);
1008 if (chip_rev < ICH5_CHIP_VERSION) {
1009 unwalk_pci_bus( tmp_pdev);
1010 return 1;
1013 ich_table++;
1017 return 0;
1020 static int
1021 __devinit bcm5700_init_board(struct pci_dev *pdev, struct net_device **dev_out, int board_idx)
1023 struct net_device *dev;
1024 PUM_DEVICE_BLOCK pUmDevice;
1025 PLM_DEVICE_BLOCK pDevice;
1026 bool rgmii = FALSE;
1027 si_t *sih = NULL;
1028 int rc;
1030 *dev_out = NULL;
1032 /* dev zeroed in init_etherdev */
1033 #if (LINUX_VERSION_CODE >= 0x20600)
1034 dev = alloc_etherdev(sizeof(*pUmDevice));
1035 #else
1036 dev = init_etherdev(NULL, sizeof(*pUmDevice));
1037 #endif
1038 if (dev == NULL) {
1039 printk(KERN_ERR "%s: unable to alloc new ethernet\n", bcm5700_driver);
1040 return -ENOMEM;
1042 SET_MODULE_OWNER(dev);
1043 #if (LINUX_VERSION_CODE >= 0x20600)
1044 SET_NETDEV_DEV(dev, &pdev->dev);
1045 #endif
1046 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1048 /* enable device (incl. PCI PM wakeup), and bus-mastering */
1049 rc = pci_enable_device(pdev);
1050 if (rc)
1051 goto err_out;
1053 /* init core specific stuff */
1054 if (pdev->device == T3_PCI_DEVICE_ID(T3_PCI_ID_BCM471F)) {
1055 sih = si_kattach(SI_OSH);
1056 hndgige_init(sih, ++sbgige, &rgmii);
1059 rc = pci_request_regions(pdev, bcm5700_driver);
1060 if (rc) {
1061 if (!sih)
1062 goto err_out;
1063 printk(KERN_INFO "bcm5700_init_board: pci_request_regions returned error %d\n"
1064 "This may be because the region is already requested by"
1065 " the SMBus driver. Ignore the PCI error messages.\n", rc);
1068 pci_set_master(pdev);
1070 if (pci_set_dma_mask(pdev, BCM_64BIT_DMA_MASK) == 0) {
1071 pUmDevice->using_dac = 1;
1072 if (pci_set_consistent_dma_mask(pdev, BCM_64BIT_DMA_MASK) != 0) {
1073 printk(KERN_ERR "pci_set_consistent_dma_mask failed\n");
1074 pci_release_regions(pdev);
1075 goto err_out;
1077 } else if (pci_set_dma_mask(pdev, BCM_32BIT_DMA_MASK) == 0) {
1078 pUmDevice->using_dac = 0;
1079 } else {
1080 printk(KERN_ERR "System does not support DMA\n");
1081 pci_release_regions(pdev);
1082 goto err_out;
1085 pUmDevice->dev = dev;
1086 pUmDevice->pdev = pdev;
1087 pUmDevice->mem_list_num = 0;
1088 pUmDevice->next_module = root_tigon3_dev;
1089 pUmDevice->index = board_idx;
1090 pUmDevice->sih = (void *)sih;
1091 root_tigon3_dev = dev;
1093 spin_lock_init(&pUmDevice->global_lock);
1095 spin_lock_init(&pUmDevice->undi_lock);
1097 spin_lock_init(&pUmDevice->phy_lock);
1099 pDevice = &pUmDevice->lm_dev;
1100 pDevice->Flags = 0;
1101 pDevice->FunctNum = PCI_FUNC(pUmDevice->pdev->devfn);
1102 pUmDevice->boardflags = getintvar(NULL, "boardflags");
1103 if (sih) {
1104 if (pUmDevice->boardflags & BFL_ENETROBO)
1105 pDevice->Flags |= ROBO_SWITCH_FLAG;
1106 pDevice->Flags |= rgmii ? RGMII_MODE_FLAG : 0;
1107 if ((sih->chip == BCM4785_CHIP_ID) && (sih->chiprev < 2))
1108 pDevice->Flags |= ONE_DMA_AT_ONCE_FLAG;
1109 pDevice->Flags |= SB_CORE_FLAG;
1110 if (sih->chip == BCM4785_CHIP_ID)
1111 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1114 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1115 if (board_idx < MAX_UNITS) {
1116 bcm5700_validate_param_range(pUmDevice, &mtu[board_idx], "mtu", 1500, 9000, 1500);
1117 dev->mtu = mtu[board_idx];
1119 #endif
1121 if (attached_to_ICH4_or_older(pdev)) {
1122 pDevice->Flags |= UNDI_FIX_FLAG;
1125 #if (LINUX_VERSION_CODE >= 0x2060a)
1126 if (pci_dev_present(pci_AMD762id)) {
1127 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1128 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1130 #else
1131 if (pci_find_device(0x1022, 0x700c, NULL)) {
1132 /* AMD762 writes I/O out of order */
1133 /* Setting bit 1 in 762's register 0x4C still doesn't work */
1134 /* in all cases */
1135 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1136 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1138 #endif
1139 if (LM_GetAdapterInfo(pDevice) != LM_STATUS_SUCCESS) {
1140 rc = -ENODEV;
1141 goto err_out_unmap;
1144 if (pDevice->Flags & ROBO_SWITCH_FLAG) {
1145 robo_info_t *robo;
1147 if ((robo = bcm_robo_attach(sih, pDevice, NULL,
1148 robo_miird, robo_miiwr)) == NULL) {
1149 B57_ERR(("robo_setup: failed to attach robo switch \n"));
1150 goto robo_fail;
1153 if (bcm_robo_enable_device(robo)) {
1154 B57_ERR(("robo_setup: failed to enable robo switch \n"));
1155 goto robo_fail;
1158 /* Configure the switch to do VLAN */
1159 if ((pUmDevice->boardflags & BFL_ENETVLAN) &&
1160 bcm_robo_config_vlan(robo, pDevice->PermanentNodeAddress)) {
1161 B57_ERR(("robo_setup: robo_config_vlan failed\n"));
1162 goto robo_fail;
1165 /* Enable the switch */
1166 if (bcm_robo_enable_switch(robo)) {
1167 B57_ERR(("robo_setup: robo_enable_switch failed\n"));
1168 robo_fail:
1169 bcm_robo_detach(robo);
1170 rc = -ENODEV;
1171 goto err_out_unmap;
1173 pUmDevice->robo = (void *)robo;
1176 if ((pDevice->Flags & JUMBO_CAPABLE_FLAG) == 0) {
1177 if (dev->mtu > 1500) {
1178 dev->mtu = 1500;
1179 printk(KERN_WARNING
1180 "%s-%d: Jumbo mtu sizes not supported, using mtu=1500\n",
1181 bcm5700_driver, pUmDevice->index);
1185 pUmDevice->do_global_lock = 0;
1186 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
1187 /* The 5700 chip works best without interleaved register */
1188 /* accesses on certain machines. */
1189 pUmDevice->do_global_lock = 1;
1192 if ((T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5701) &&
1193 ((pDevice->PciState & T3_PCI_STATE_NOT_PCI_X_BUS) == 0)) {
1195 pUmDevice->rx_buf_align = 0;
1196 } else {
1197 pUmDevice->rx_buf_align = 2;
1199 dev->mem_start = pci_resource_start(pdev, 0);
1200 dev->mem_end = dev->mem_start + sizeof(T3_STD_MEM_MAP);
1201 dev->irq = pdev->irq;
1203 *dev_out = dev;
1205 #ifdef HNDCTF
1206 pUmDevice->osh = osl_attach(pdev, PCI_BUS, FALSE);
1208 pUmDevice->cih = ctf_attach(pUmDevice->osh, dev->name, &b57_msg_level, NULL, NULL);
1210 ctf_dev_register(pUmDevice->cih, dev, FALSE);
1211 ctf_enable(pUmDevice->cih, dev, TRUE);
1212 #endif /* HNDCTF */
1214 return 0;
1216 err_out_unmap:
1217 pci_release_regions(pdev);
1218 bcm5700_freemem(dev);
1220 err_out:
1221 #if (LINUX_VERSION_CODE < 0x020600)
1222 unregister_netdev(dev);
1223 kfree(dev);
1224 #else
1225 free_netdev(dev);
1226 #endif
1227 return rc;
1230 static int __devinit
1231 bcm5700_print_ver(void)
1233 printk(KERN_INFO "Broadcom Gigabit Ethernet Driver %s ",
1234 bcm5700_driver);
1235 #ifdef NICE_SUPPORT
1236 printk("with Broadcom NIC Extension (NICE) ");
1237 #endif
1238 printk("ver. %s %s\n", bcm5700_version, bcm5700_date);
1239 return 0;
1242 static int __devinit
1243 bcm5700_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1245 struct net_device *dev = NULL;
1246 PUM_DEVICE_BLOCK pUmDevice;
1247 PLM_DEVICE_BLOCK pDevice;
1248 int i;
1249 static int board_idx = -1;
1250 static int printed_version = 0;
1251 struct pci_dev *pci_dev;
1253 board_idx++;
1255 if (!printed_version) {
1256 bcm5700_print_ver();
1257 #ifdef BCM_PROC_FS
1258 bcm5700_proc_create();
1259 #endif
1260 printed_version = 1;
1263 i = bcm5700_init_board(pdev, &dev, board_idx);
1264 if (i < 0) {
1265 return i;
1268 if (dev == NULL)
1269 return -ENOMEM;
1271 #ifdef BCM_IOCTL32
1272 if (atomic_read(&bcm5700_load_count) == 0) {
1273 register_ioctl32_conversion(SIOCNICE, bcm5700_ioctl32);
1275 atomic_inc(&bcm5700_load_count);
1276 #endif
1277 dev->open = bcm5700_open;
1278 dev->hard_start_xmit = bcm5700_start_xmit;
1279 dev->stop = bcm5700_close;
1280 dev->get_stats = bcm5700_get_stats;
1281 dev->set_multicast_list = bcm5700_set_rx_mode;
1282 dev->do_ioctl = bcm5700_ioctl;
1283 dev->set_mac_address = &bcm5700_set_mac_addr;
1284 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1285 dev->change_mtu = &bcm5700_change_mtu;
1286 #endif
1287 #if (LINUX_VERSION_CODE >= 0x20400)
1288 dev->tx_timeout = bcm5700_reset;
1289 dev->watchdog_timeo = TX_TIMEOUT;
1290 #endif
1291 #ifdef BCM_VLAN
1292 dev->vlan_rx_register = &bcm5700_vlan_rx_register;
1293 dev->vlan_rx_kill_vid = &bcm5700_vlan_rx_kill_vid;
1294 #endif
1295 #ifdef BCM_NAPI_RXPOLL
1296 dev->poll = bcm5700_poll;
1297 dev->weight = 64;
1298 #endif
1300 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1301 pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1303 dev->base_addr = pci_resource_start(pdev, 0);
1304 dev->irq = pdev->irq;
1305 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
1306 dev->poll_controller = poll_bcm5700;
1307 #endif
1309 #if (LINUX_VERSION_CODE >= 0x20600)
1310 if ((i = register_netdev(dev))) {
1311 printk(KERN_ERR "%s: Cannot register net device\n",
1312 bcm5700_driver);
1313 if (pUmDevice->lm_dev.pMappedMemBase)
1314 iounmap(pUmDevice->lm_dev.pMappedMemBase);
1315 pci_release_regions(pdev);
1316 bcm5700_freemem(dev);
1317 free_netdev(dev);
1318 return i;
1320 #endif
1323 pci_set_drvdata(pdev, dev);
1325 memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1326 pUmDevice->name = board_info[ent->driver_data].name,
1327 printk(KERN_INFO "%s: %s found at mem %lx, IRQ %d, ",
1328 dev->name, pUmDevice->name, dev->base_addr,
1329 dev->irq);
1330 printk("node addr ");
1331 for (i = 0; i < 6; i++) {
1332 printk("%2.2x", dev->dev_addr[i]);
1334 printk("\n");
1336 printk(KERN_INFO "%s: ", dev->name);
1337 if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5400_PHY_ID)
1338 printk("Broadcom BCM5400 Copper ");
1339 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5401_PHY_ID)
1340 printk("Broadcom BCM5401 Copper ");
1341 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5411_PHY_ID)
1342 printk("Broadcom BCM5411 Copper ");
1343 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5461_PHY_ID)
1344 printk("Broadcom BCM5461 Copper ");
1345 else if (((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5701_PHY_ID) &&
1346 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
1347 printk("Broadcom BCM5701 Integrated Copper ");
1349 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5703_PHY_ID) {
1350 printk("Broadcom BCM5703 Integrated ");
1351 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1352 printk("SerDes ");
1353 else
1354 printk("Copper ");
1356 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5704_PHY_ID) {
1357 printk("Broadcom BCM5704 Integrated ");
1358 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1359 printk("SerDes ");
1360 else
1361 printk("Copper ");
1363 else if (pDevice->PhyFlags & PHY_IS_FIBER){
1364 if(( pDevice->PhyId & PHY_ID_MASK ) == PHY_BCM5780_PHY_ID)
1365 printk("Broadcom BCM5780S Integrated Serdes ");
1368 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5705_PHY_ID)
1369 printk("Broadcom BCM5705 Integrated Copper ");
1370 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5750_PHY_ID)
1371 printk("Broadcom BCM5750 Integrated Copper ");
1373 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5714_PHY_ID)
1374 printk("Broadcom BCM5714 Integrated Copper ");
1375 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5780_PHY_ID)
1376 printk("Broadcom BCM5780 Integrated Copper ");
1378 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5752_PHY_ID)
1379 printk("Broadcom BCM5752 Integrated Copper ");
1380 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM8002_PHY_ID)
1381 printk("Broadcom BCM8002 SerDes ");
1382 else if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
1383 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1384 printk("Broadcom BCM5703 Integrated SerDes ");
1386 else if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1387 printk("Broadcom BCM5704 Integrated SerDes ");
1389 else {
1390 printk("Agilent HDMP-1636 SerDes ");
1393 else {
1394 printk("Unknown ");
1396 printk("transceiver found\n");
1398 #if (LINUX_VERSION_CODE >= 0x20400)
1399 if (scatter_gather[board_idx]) {
1400 dev->features |= NETIF_F_SG;
1401 if (pUmDevice->using_dac && !(pDevice->Flags & BCM5788_FLAG))
1402 dev->features |= NETIF_F_HIGHDMA;
1404 if ((pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM) &&
1405 tx_checksum[board_idx]) {
1407 dev->features |= get_csum_flag( pDevice->ChipRevId);
1409 #ifdef BCM_VLAN
1410 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1411 #endif
1412 #ifdef BCM_TSO
1413 /* On 5714/15/80 chips, Jumbo Frames and TSO cannot both be enabled at
1414 the same time. Since only one of these features can be enable at a
1415 time, we'll enable only Jumbo Frames and disable TSO when the user
1416 tries to enable both.
1418 dev->features &= ~NETIF_F_TSO;
1420 if ((pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION) &&
1421 (enable_tso[board_idx])) {
1422 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
1423 (dev->mtu > 1500)) {
1424 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
1425 } else {
1426 dev->features |= NETIF_F_TSO;
1429 #endif
1430 printk(KERN_INFO "%s: Scatter-gather %s, 64-bit DMA %s, Tx Checksum %s, ",
1431 dev->name,
1432 (char *) ((dev->features & NETIF_F_SG) ? "ON" : "OFF"),
1433 (char *) ((dev->features & NETIF_F_HIGHDMA) ? "ON" : "OFF"),
1434 (char *) ((dev->features & get_csum_flag( pDevice->ChipRevId)) ? "ON" : "OFF"));
1435 #endif
1436 if ((pDevice->ChipRevId != T3_CHIP_ID_5700_B0) &&
1437 rx_checksum[board_idx])
1438 printk("Rx Checksum ON");
1439 else
1440 printk("Rx Checksum OFF");
1441 #ifdef BCM_VLAN
1442 printk(", 802.1Q VLAN ON");
1443 #endif
1444 #ifdef BCM_TSO
1445 if (dev->features & NETIF_F_TSO) {
1446 printk(", TSO ON");
1448 else
1449 #endif
1450 #ifdef BCM_NAPI_RXPOLL
1451 printk(", NAPI ON");
1452 #endif
1453 printk("\n");
1455 #ifdef BCM_PROC_FS
1456 bcm5700_proc_create_dev(dev);
1457 #endif
1458 register_reboot_notifier(&bcm5700_reboot_notifier);
1459 #ifdef BCM_TASKLET
1460 tasklet_init(&pUmDevice->tasklet, bcm5700_tasklet,
1461 (unsigned long) pUmDevice);
1462 #endif
1463 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1464 if ((REG_RD(pDevice, PciCfg.DualMacCtrl) &
1465 T3_DUAL_MAC_CH_CTRL_MASK) == 3) {
1467 printk(KERN_WARNING "%s: Device is configured for Hardware Based Teaming which is not supported with this operating system. Please consult the user diagnostic guide to disable Turbo Teaming.\n", dev->name);
1471 #if (LINUX_VERSION_CODE > 0x20605)
1473 if ((pci_dev = pci_get_device(0x1022, 0x700c, NULL)))
1474 #else
1475 if ((pci_dev = pci_find_device(0x1022, 0x700c, NULL)))
1476 #endif
1478 u32 val;
1480 /* Found AMD 762 North bridge */
1481 pci_read_config_dword(pci_dev, 0x4c, &val);
1482 if ((val & 0x02) == 0) {
1483 pci_write_config_dword(pci_dev, 0x4c, val | 0x02);
1484 printk(KERN_INFO "%s: Setting AMD762 Northbridge to enable PCI ordering compliance\n", bcm5700_driver);
1488 #if (LINUX_VERSION_CODE > 0x20605)
1490 pci_dev_put(pci_dev);
1492 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1494 if ((pci_dev = pci_get_device(0x1066, 0x0017, NULL))) {
1495 bcm_msi_chipset_bug = 1;
1497 pci_dev_put(pci_dev);
1498 #endif
1499 #endif
1501 return 0;
1505 static void __devexit
1506 bcm5700_remove_one (struct pci_dev *pdev)
1508 struct net_device *dev = pci_get_drvdata (pdev);
1509 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1511 #ifdef BCM_PROC_FS
1512 bcm5700_proc_remove_dev(dev);
1513 #endif
1514 #ifdef BCM_IOCTL32
1515 atomic_dec(&bcm5700_load_count);
1516 if (atomic_read(&bcm5700_load_count) == 0)
1517 unregister_ioctl32_conversion(SIOCNICE);
1518 #endif
1519 #ifdef HNDCTF
1520 ctf_dev_unregister(pUmDevice->cih, dev);
1521 #endif /* HNDCTF */
1522 unregister_netdev(dev);
1524 if (pUmDevice->lm_dev.pMappedMemBase)
1525 iounmap(pUmDevice->lm_dev.pMappedMemBase);
1527 pci_release_regions(pdev);
1529 #if (LINUX_VERSION_CODE < 0x020600)
1530 kfree(dev);
1531 #else
1532 free_netdev(dev);
1533 #endif
1535 pci_set_drvdata(pdev, NULL);
1539 int b57_test_intr(UM_DEVICE_BLOCK *pUmDevice);
1541 #ifdef BCM_WL_EMULATOR
1542 /* new transmit callback */
1543 static int bcm5700emu_start_xmit(struct sk_buff *skb, struct net_device *dev);
1544 /* keep track of the 2 gige devices */
1545 static PLM_DEVICE_BLOCK pDev1;
1546 static PLM_DEVICE_BLOCK pDev2;
1548 static void
1549 bcm5700emu_open(struct net_device *dev)
1551 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1552 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1553 static int instance = 0;
1554 static char *wlemu_if = NULL;
1555 char *wlemu_mode = NULL;
1556 //int wlemu_idx = 0;
1557 static int rx_enable = 0;
1558 static int tx_enable = 0;
1560 /* which interface is the emulator ? */
1561 if(instance == 0) {
1562 wlemu_if = nvram_get("wlemu_if");
1563 /* do we emulate rx, tx or both */
1564 wlemu_mode = nvram_get("wlemu_mode");
1565 if(wlemu_mode) {
1566 if (!strcmp(wlemu_mode,"rx"))
1568 rx_enable = 1;
1570 else if (!strcmp(wlemu_mode,"tx"))
1573 tx_enable = 1;
1576 else if (!strcmp(wlemu_mode,"rx_tx"))
1579 rx_enable = 1;
1580 tx_enable = 1;
1585 instance++;
1587 /* The context is used for accessing the OSL for emulating devices */
1588 pDevice->wlc = NULL;
1590 /* determines if this device is an emulator */
1591 pDevice->wl_emulate_rx = 0;
1592 pDevice->wl_emulate_tx = 0;
1594 if(wlemu_if && !strcmp(dev->name,wlemu_if))
1596 /* create an emulator context. */
1597 pDevice->wlc = (void *)wlcemu_wlccreate((void *)dev);
1598 B57_INFO(("Using %s for wl emulation \n", dev->name));
1599 if(rx_enable)
1601 B57_INFO(("Enabling wl RX emulation \n"));
1602 pDevice->wl_emulate_rx = 1;
1604 /* re-direct transmit callback to emulator */
1605 if(tx_enable)
1607 pDevice->wl_emulate_tx = 1;
1608 dev->hard_start_xmit = bcm5700emu_start_xmit;
1609 B57_INFO(("Enabling wl TX emulation \n"));
1612 /* for debug access to configured devices only */
1613 if(instance == 1)
1614 pDev1 = pDevice;
1615 else if (instance == 2)
1616 pDev2 = pDevice;
1619 /* Public API to get current emulation info */
1620 int bcm5700emu_get_info(char *buf)
1622 int len = 0;
1623 PLM_DEVICE_BLOCK p;
1625 /* look for an emulating device */
1626 if(pDev1->wlc) {
1627 p = pDev1;
1628 len += sprintf(buf+len,"emulation device : eth0\n");
1630 else if (pDev2->wlc) {
1631 p = pDev2;
1632 len += sprintf(buf+len,"emulation device : eth1\n");
1634 else {
1635 len += sprintf(buf+len,"emulation not activated\n");
1636 return len;
1638 if(p->wl_emulate_rx)
1639 len += sprintf(buf+len,"RX emulation enabled\n");
1640 else
1641 len += sprintf(buf+len,"RX emulation disabled\n");
1642 if(p->wl_emulate_tx)
1643 len += sprintf(buf+len,"TX emulation enabled\n");
1644 else
1645 len += sprintf(buf+len,"TX emulation disabled\n");
1646 return len;
1651 /* Public API to access the bcm5700_start_xmit callback */
1653 int
1654 bcm5700emu_forward_xmit(struct sk_buff *skb, struct net_device *dev)
1656 return bcm5700_start_xmit(skb, dev);
1660 /* hook to kernel txmit callback */
1661 STATIC int
1662 bcm5700emu_start_xmit(struct sk_buff *skb, struct net_device *dev)
1665 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1666 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1667 return wlcemu_start_xmit(skb,pDevice->wlc);
1670 #endif /* BCM_WL_EMULATOR */
1673 bcm5700_open(struct net_device *dev)
1675 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1676 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1677 int rc;
1679 if (pUmDevice->suspended){
1680 return -EAGAIN;
1683 #ifdef BCM_WL_EMULATOR
1684 bcm5700emu_open(dev);
1685 #endif
1687 /* delay for 6 seconds */
1688 pUmDevice->delayed_link_ind = (6 * HZ) / pUmDevice->timer_interval;
1690 #ifdef BCM_INT_COAL
1691 #ifndef BCM_NAPI_RXPOLL
1692 pUmDevice->adaptive_expiry = HZ / pUmDevice->timer_interval;
1693 #endif
1694 #endif
1696 #ifdef INCLUDE_TBI_SUPPORT
1697 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
1698 (pDevice->TbiFlags & TBI_POLLING_FLAGS)) {
1699 pUmDevice->poll_tbi_interval = HZ / pUmDevice->timer_interval;
1700 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1701 pUmDevice->poll_tbi_interval /= 4;
1703 pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1705 #endif
1706 /* set this timer for 2 seconds */
1707 pUmDevice->asf_heartbeat = (2 * HZ) / pUmDevice->timer_interval;
1709 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1712 if ( ( (T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ) &&
1713 (T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5714_A0 ) &&
1714 (T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_AX ) &&
1715 (T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_BX ) ) &&
1716 !bcm_msi_chipset_bug ){
1718 if (disable_msi[pUmDevice->index]==1){
1719 /* do nothing-it's not turned on */
1720 }else{
1721 pDevice->Flags |= USING_MSI_FLAG;
1723 REG_WR(pDevice, Msi.Mode, 2 );
1725 rc = pci_enable_msi(pUmDevice->pdev);
1727 if(rc!=0){
1728 pDevice->Flags &= ~ USING_MSI_FLAG;
1729 REG_WR(pDevice, Msi.Mode, 1 );
1735 #endif
1737 if ((rc= request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt, IRQF_SHARED, dev->name, dev)))
1740 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1742 if(pDevice->Flags & USING_MSI_FLAG) {
1744 pci_disable_msi(pUmDevice->pdev);
1745 pDevice->Flags &= ~USING_MSI_FLAG;
1746 REG_WR(pDevice, Msi.Mode, 1 );
1749 #endif
1750 return rc;
1753 pUmDevice->opened = 1;
1754 if (LM_InitializeAdapter(pDevice) != LM_STATUS_SUCCESS) {
1755 pUmDevice->opened = 0;
1756 free_irq(dev->irq, dev);
1757 bcm5700_freemem(dev);
1758 return -EAGAIN;
1761 bcm5700_set_vlan_mode(pUmDevice);
1762 bcm5700_init_counters(pUmDevice);
1764 if (pDevice->Flags & UNDI_FIX_FLAG) {
1765 printk(KERN_INFO "%s: Using indirect register access\n", dev->name);
1768 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6))
1770 /* Do not use invalid eth addrs: any multicast & all zeros */
1771 if( is_valid_ether_addr(dev->dev_addr) ){
1772 LM_SetMacAddress(pDevice, dev->dev_addr);
1774 else
1776 printk(KERN_INFO "%s: Invalid administered node address\n",dev->name);
1777 memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1781 if (tigon3_debug > 1)
1782 printk(KERN_DEBUG "%s: tigon3_open() irq %d.\n", dev->name, dev->irq);
1784 QQ_InitQueue(&pUmDevice->rx_out_of_buf_q.Container,
1785 MAX_RX_PACKET_DESC_COUNT);
1788 #if (LINUX_VERSION_CODE < 0x020300)
1789 MOD_INC_USE_COUNT;
1790 #endif
1792 atomic_set(&pUmDevice->intr_sem, 0);
1794 LM_EnableInterrupt(pDevice);
1796 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1798 if (pDevice->Flags & USING_MSI_FLAG){
1800 /* interrupt test to verify MSI support on older machines */
1801 if (b57_test_intr(pUmDevice) != 1) {
1803 LM_DisableInterrupt(pDevice);
1804 free_irq(pUmDevice->pdev->irq, dev);
1805 pci_disable_msi(pUmDevice->pdev);
1806 REG_WR(pDevice, Msi.Mode, 1 );
1807 pDevice->Flags &= ~USING_MSI_FLAG;
1809 rc = LM_ResetAdapter(pDevice);
1810 printk(KERN_ALERT " The MSI support in this system is not functional.\n");
1812 if (rc == LM_STATUS_SUCCESS)
1813 rc = 0;
1814 else
1815 rc = -ENODEV;
1817 if(rc == 0){
1818 rc = request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt,
1819 SA_SHIRQ, dev->name, dev);
1822 if(rc){
1823 LM_Halt(pDevice);
1824 bcm5700_freemem(dev);
1825 pUmDevice->opened = 0;
1826 return rc;
1830 pDevice->InitDone = TRUE;
1831 atomic_set(&pUmDevice->intr_sem, 0);
1832 LM_EnableInterrupt(pDevice);
1835 #endif
1837 init_timer(&pUmDevice->timer);
1838 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1839 pUmDevice->timer.data = (unsigned long)dev;
1840 pUmDevice->timer.function = &bcm5700_timer;
1841 add_timer(&pUmDevice->timer);
1843 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
1844 init_timer(&pUmDevice->statstimer);
1845 pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1846 pUmDevice->statstimer.data = (unsigned long)dev;
1847 pUmDevice->statstimer.function = &bcm5700_stats_timer;
1848 add_timer(&pUmDevice->statstimer);
1851 if(pDevice->Flags & USING_MSI_FLAG)
1852 printk(KERN_INFO "%s: Using Message Signaled Interrupt (MSI) \n", dev->name);
1853 else
1854 printk(KERN_INFO "%s: Using PCI INTX interrupt \n", dev->name);
1856 netif_start_queue(dev);
1858 return 0;
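/* Periodic statistics timer (armed for 5705 and later chips): pulls the */
/* hardware statistics block via LM_GetStats() while the link is up and */
/* the device is not suspended, then re-arms itself. */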
1862 STATIC void
1863 bcm5700_stats_timer(unsigned long data)
1865 struct net_device *dev = (struct net_device *)data;
1866 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1867 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1868 unsigned long flags = 0;
1870 if (!pUmDevice->opened)
1871 return;
1873 if (!atomic_read(&pUmDevice->intr_sem) &&
1874 !pUmDevice->suspended &&
1875 (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE)) {
1876 BCM5700_LOCK(pUmDevice, flags);
1877 LM_GetStats(pDevice);
1878 BCM5700_UNLOCK(pUmDevice, flags);
1881 pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1883 add_timer(&pUmDevice->statstimer);
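/* Main housekeeping timer: polls the TBI/fiber link, delivers delayed */
/* link-status indications, forces an interrupt when a status block is */
/* pending with no interrupt in flight, kicks host coalescing to refill */
/* rx buffers when the out-of-buffer queue grows too large, sends ASF */
/* heartbeats and finally re-arms itself. */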
1887 STATIC void
1888 bcm5700_timer(unsigned long data)
1890 struct net_device *dev = (struct net_device *)data;
1891 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1892 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1893 unsigned long flags = 0;
1894 LM_UINT32 value32;
1896 if (!pUmDevice->opened)
1897 return;
1899 /* BCM4785: Flush posted writes from GbE to host memory. */
1900 if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
1901 REG_RD(pDevice, HostCoalesce.Mode);
1903 if (atomic_read(&pUmDevice->intr_sem) || pUmDevice->suspended) {
1904 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1905 add_timer(&pUmDevice->timer);
1906 return;
1909 #ifdef INCLUDE_TBI_SUPPORT
1910 if ((pDevice->TbiFlags & TBI_POLLING_FLAGS) &&
1911 (--pUmDevice->poll_tbi_expiry <= 0)) {
1913 BCM5700_PHY_LOCK(pUmDevice, flags);
1914 value32 = REG_RD(pDevice, MacCtrl.Status);
1915 if (((pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) &&
1916 ((value32 & (MAC_STATUS_LINK_STATE_CHANGED |
1917 MAC_STATUS_CFG_CHANGED)) ||
1918 !(value32 & MAC_STATUS_PCS_SYNCED)))
1920 ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
1921 (value32 & (MAC_STATUS_PCS_SYNCED |
1922 MAC_STATUS_SIGNAL_DETECTED))))
1924 LM_SetupPhy(pDevice);
1926 BCM5700_PHY_UNLOCK(pUmDevice, flags);
1927 pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1930 #endif
1932 if (pUmDevice->delayed_link_ind > 0) {
1933 if (pUmDevice->delayed_link_ind == 1)
1934 MM_IndicateStatus(pDevice, pDevice->LinkStatus);
1935 else
1936 pUmDevice->delayed_link_ind--;
1939 if (pUmDevice->crc_counter_expiry > 0)
1940 pUmDevice->crc_counter_expiry--;
1942 if (!pUmDevice->interrupt) {
1943 if (!(pDevice->Flags & USE_TAGGED_STATUS_FLAG)) {
1944 BCM5700_LOCK(pUmDevice, flags);
1945 if (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) {
1946 /* This will generate an interrupt */
1947 REG_WR(pDevice, Grc.LocalCtrl,
1948 pDevice->GrcLocalCtrl |
1949 GRC_MISC_LOCAL_CTRL_SET_INT);
1951 else {
1952 REG_WR(pDevice, HostCoalesce.Mode,
1953 pDevice->CoalesceMode |
1954 HOST_COALESCE_ENABLE |
1955 HOST_COALESCE_NOW);
1957 if (!(REG_RD(pDevice, DmaWrite.Mode) &
1958 DMA_WRITE_MODE_ENABLE)) {
1959 BCM5700_UNLOCK(pUmDevice, flags);
1960 bcm5700_reset(dev);
1962 else {
1963 BCM5700_UNLOCK(pUmDevice, flags);
1965 if (pUmDevice->tx_queued) {
1966 pUmDevice->tx_queued = 0;
1967 netif_wake_queue(dev);
1970 #if (LINUX_VERSION_CODE < 0x02032b)
1971 if ((QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) !=
1972 pDevice->TxPacketDescCnt) &&
1973 ((jiffies - dev->trans_start) > TX_TIMEOUT)) {
1975 printk(KERN_WARNING "%s: Tx hung\n", dev->name);
1976 bcm5700_reset(dev);
1978 #endif
1980 #ifdef BCM_INT_COAL
1981 #ifndef BCM_NAPI_RXPOLL
1982 if (pUmDevice->adaptive_coalesce) {
1983 pUmDevice->adaptive_expiry--;
1984 if (pUmDevice->adaptive_expiry == 0) {
1985 pUmDevice->adaptive_expiry = HZ /
1986 pUmDevice->timer_interval;
1987 bcm5700_adapt_coalesce(pUmDevice);
1990 #endif
1991 #endif
1992 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container) >
1993 (unsigned int) pUmDevice->rx_buf_repl_panic_thresh) {
1994 /* Generate interrupt and let isr allocate buffers */
1995 REG_WR(pDevice, HostCoalesce.Mode, pDevice->CoalesceMode |
1996 HOST_COALESCE_ENABLE | HOST_COALESCE_NOW);
1999 #ifdef BCM_ASF
2000 if (pDevice->AsfFlags & ASF_ENABLED) {
2001 pUmDevice->asf_heartbeat--;
2002 if (pUmDevice->asf_heartbeat == 0) {
2003 if( (pDevice->Flags & UNDI_FIX_FLAG) ||
2004 (pDevice->Flags & ENABLE_PCIX_FIX_FLAG)) {
2005 MEM_WR_OFFSET(pDevice, T3_CMD_MAILBOX,
2006 T3_CMD_NICDRV_ALIVE2);
2007 MEM_WR_OFFSET(pDevice, T3_CMD_LENGTH_MAILBOX,
2009 MEM_WR_OFFSET(pDevice, T3_CMD_DATA_MAILBOX, 5);
2010 } else {
2011 LM_RegWr(pDevice,
2012 (T3_NIC_MBUF_POOL_ADDR +
2013 T3_CMD_MAILBOX),
2014 T3_CMD_NICDRV_ALIVE2, 1);
2015 LM_RegWr(pDevice,
2016 (T3_NIC_MBUF_POOL_ADDR +
2017 T3_CMD_LENGTH_MAILBOX),4,1);
2018 LM_RegWr(pDevice,
2019 (T3_NIC_MBUF_POOL_ADDR +
2020 T3_CMD_DATA_MAILBOX),5,1);
2023 value32 = REG_RD(pDevice, Grc.RxCpuEvent);
2024 REG_WR(pDevice, Grc.RxCpuEvent, value32 | BIT_14);
2025 pUmDevice->asf_heartbeat = (2 * HZ) /
2026 pUmDevice->timer_interval;
2029 #endif
2031 if (pDevice->PhyFlags & PHY_IS_FIBER){
2032 BCM5700_PHY_LOCK(pUmDevice, flags);
2033 LM_5714_FamFiberCheckLink(pDevice);
2034 BCM5700_PHY_UNLOCK(pUmDevice, flags);
2037 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
2038 add_timer(&pUmDevice->timer);
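/* Reset the driver's software counters: adaptive-coalescing state, the */
/* PHY CRC error count and the optional debug/TSO counters. */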
2041 STATIC int
2042 bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice)
2044 #ifdef BCM_INT_COAL
2045 #ifndef BCM_NAPI_RXPOLL
2046 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2048 pUmDevice->rx_curr_coalesce_frames = pDevice->RxMaxCoalescedFrames;
2049 pUmDevice->rx_curr_coalesce_ticks = pDevice->RxCoalescingTicks;
2050 pUmDevice->tx_curr_coalesce_frames = pDevice->TxMaxCoalescedFrames;
2051 pUmDevice->rx_last_cnt = 0;
2052 pUmDevice->tx_last_cnt = 0;
2053 #endif
2054 #endif
2055 pUmDevice->phy_crc_count = 0;
2056 #if TIGON3_DEBUG
2057 pUmDevice->tx_zc_count = 0;
2058 pUmDevice->tx_chksum_count = 0;
2059 pUmDevice->tx_himem_count = 0;
2060 pUmDevice->rx_good_chksum_count = 0;
2061 pUmDevice->rx_bad_chksum_count = 0;
2062 #endif
2063 #ifdef BCM_TSO
2064 pUmDevice->tso_pkt_count = 0;
2065 #endif
2066 return 0;
2069 #ifdef BCM_INT_COAL
2070 #ifndef BCM_NAPI_RXPOLL
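/* Program a new set of interrupt-coalescing parameters into the */
/* HostCoalesce registers and remember them in the UM device block; */
/* skipped when the global lock is already held. */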
2071 STATIC int
2072 bcm5700_do_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice,
2073 int rx_frames, int rx_ticks, int tx_frames, int rx_frames_intr)
2075 unsigned long flags = 0;
2076 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2078 if (pUmDevice->do_global_lock) {
2079 if (spin_is_locked(&pUmDevice->global_lock))
2080 return 0;
2081 spin_lock_irqsave(&pUmDevice->global_lock, flags);
2083 pUmDevice->rx_curr_coalesce_frames = rx_frames;
2084 pUmDevice->rx_curr_coalesce_ticks = rx_ticks;
2085 pUmDevice->tx_curr_coalesce_frames = tx_frames;
2086 pUmDevice->rx_curr_coalesce_frames_intr = rx_frames_intr;
2087 REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFrames, rx_frames);
2089 REG_WR(pDevice, HostCoalesce.RxCoalescingTicks, rx_ticks);
2091 REG_WR(pDevice, HostCoalesce.TxMaxCoalescedFrames, tx_frames);
2093 REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFramesDuringInt,
2094 rx_frames_intr);
2096 BCM5700_UNLOCK(pUmDevice, flags);
2097 return 0;
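/* Adaptive coalescing: estimate the recent packet rate from the unicast */
/* rx/tx counter deltas and switch between the low, default and high */
/* coalescing profiles accordingly. */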
2100 STATIC int
2101 bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice)
2103 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
2104 uint rx_curr_cnt, tx_curr_cnt, rx_delta, tx_delta, total_delta;
2106 rx_curr_cnt = pDevice->pStatsBlkVirt->ifHCInUcastPkts.Low;
2107 tx_curr_cnt = pDevice->pStatsBlkVirt->ifHCOutUcastPkts.Low;
2108 if ((rx_curr_cnt <= pUmDevice->rx_last_cnt) ||
2109 (tx_curr_cnt < pUmDevice->tx_last_cnt)) {
2111 /* skip if there is counter rollover */
2112 pUmDevice->rx_last_cnt = rx_curr_cnt;
2113 pUmDevice->tx_last_cnt = tx_curr_cnt;
2114 return 0;
2117 rx_delta = rx_curr_cnt - pUmDevice->rx_last_cnt;
2118 tx_delta = tx_curr_cnt - pUmDevice->tx_last_cnt;
2119 total_delta = (((rx_delta + rx_delta) + tx_delta) / 3) << 1;
2121 pUmDevice->rx_last_cnt = rx_curr_cnt;
2122 pUmDevice->tx_last_cnt = tx_curr_cnt;
2124 if (total_delta < ADAPTIVE_LO_PKT_THRESH) {
2125 if (pUmDevice->rx_curr_coalesce_frames !=
2126 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES) {
2128 bcm5700_do_adapt_coalesce(pUmDevice,
2129 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES,
2130 ADAPTIVE_LO_RX_COALESCING_TICKS,
2131 ADAPTIVE_LO_TX_MAX_COALESCED_FRAMES,
2132 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES_DURING_INT);
2135 else if (total_delta < ADAPTIVE_HI_PKT_THRESH) {
2136 if (pUmDevice->rx_curr_coalesce_frames !=
2137 DEFAULT_RX_MAX_COALESCED_FRAMES) {
2139 bcm5700_do_adapt_coalesce(pUmDevice,
2140 DEFAULT_RX_MAX_COALESCED_FRAMES,
2141 DEFAULT_RX_COALESCING_TICKS,
2142 DEFAULT_TX_MAX_COALESCED_FRAMES,
2143 DEFAULT_RX_MAX_COALESCED_FRAMES_DURING_INT);
2146 else {
2147 if (pUmDevice->rx_curr_coalesce_frames !=
2148 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES) {
2150 bcm5700_do_adapt_coalesce(pUmDevice,
2151 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES,
2152 ADAPTIVE_HI_RX_COALESCING_TICKS,
2153 ADAPTIVE_HI_TX_MAX_COALESCED_FRAMES,
2154 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES_DURING_INT);
2157 return 0;
2159 #endif
2160 #endif
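/* Soft-reset the adapter: stop the tx queue, mask interrupts, run */
/* LM_ResetAdapter(), restore the rx mode, VLAN mode and MAC address, */
/* then re-enable interrupts and wake the queue. */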
2162 STATIC void
2163 bcm5700_reset(struct net_device *dev)
2165 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2166 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2167 unsigned long flags;
2169 #ifdef BCM_TSO
2171 if( (dev->features & NETIF_F_TSO) &&
2172 (pUmDevice->tx_full) ) {
2174 dev->features &= ~NETIF_F_TSO;
2176 #endif
2178 netif_stop_queue(dev);
2179 bcm5700_intr_off(pUmDevice);
2180 BCM5700_PHY_LOCK(pUmDevice, flags);
2181 LM_ResetAdapter(pDevice);
2182 pDevice->InitDone = TRUE;
2183 bcm5700_do_rx_mode(dev);
2184 bcm5700_set_vlan_mode(pUmDevice);
2185 bcm5700_init_counters(pUmDevice);
2186 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
2187 LM_SetMacAddress(pDevice, dev->dev_addr);
2189 BCM5700_PHY_UNLOCK(pUmDevice, flags);
2190 atomic_set(&pUmDevice->intr_sem, 1);
2191 bcm5700_intr_on(pUmDevice);
2192 netif_wake_queue(dev);
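/* Decide whether the chip strips VLAN tags on receive, based on the */
/* configured tag mode, the ASF state and any registered VLAN group, */
/* and update the receive mask if it changed. */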
2195 STATIC void
2196 bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice)
2198 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2199 LM_UINT32 ReceiveMask = pDevice->ReceiveMask;
2200 int vlan_tag_mode = pUmDevice->vlan_tag_mode;
2202 if (vlan_tag_mode == VLAN_TAG_MODE_AUTO_STRIP) {
2203 if (pDevice->AsfFlags & ASF_ENABLED) {
2204 vlan_tag_mode = VLAN_TAG_MODE_FORCED_STRIP;
2206 else {
2207 vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
2210 if (vlan_tag_mode == VLAN_TAG_MODE_NORMAL_STRIP) {
2211 ReceiveMask |= LM_KEEP_VLAN_TAG;
2212 #ifdef BCM_VLAN
2213 if (pUmDevice->vlgrp)
2214 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2215 #endif
2216 #ifdef NICE_SUPPORT
2217 if (pUmDevice->nice_rx)
2218 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2219 #endif
2221 else if (vlan_tag_mode == VLAN_TAG_MODE_FORCED_STRIP) {
2222 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2224 if (ReceiveMask != pDevice->ReceiveMask)
2226 LM_SetReceiveMask(pDevice, ReceiveMask);
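/* Sleep until an in-progress NAPI rx poll has completed; a no-op when */
/* NAPI polling is not compiled in. */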
2230 static void
2231 bcm5700_poll_wait(UM_DEVICE_BLOCK *pUmDevice)
2233 #ifdef BCM_NAPI_RXPOLL
2234 while (pUmDevice->lm_dev.RxPoll) {
2235 current->state = TASK_INTERRUPTIBLE;
2236 schedule_timeout(1);
2238 #endif
2242 #ifdef BCM_VLAN
2243 STATIC void
2244 bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
2246 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2248 bcm5700_intr_off(pUmDevice);
2249 bcm5700_poll_wait(pUmDevice);
2250 pUmDevice->vlgrp = vlgrp;
2251 bcm5700_set_vlan_mode(pUmDevice);
2252 bcm5700_intr_on(pUmDevice);
2255 STATIC void
2256 bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
2258 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2260 bcm5700_intr_off(pUmDevice);
2261 bcm5700_poll_wait(pUmDevice);
2262 if (pUmDevice->vlgrp) {
2263 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
2264 vlan_group_set_device(pUmDevice->vlgrp, vid, NULL);
2265 #else
2266 pUmDevice->vlgrp->vlan_devices[vid] = NULL;
2267 #endif
2269 bcm5700_intr_on(pUmDevice);
2271 #endif
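/* hard_start_xmit entry point: grab a free tx packet descriptor, set up */
/* checksum offload, VLAN tag and TSO fields for the skb, and hand it to */
/* LM_SendPacket(). Returns 1 to requeue the skb when no descriptor or */
/* send BD is available. */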
2273 STATIC int
2274 bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev)
2276 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2277 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2278 PLM_PACKET pPacket;
2279 PUM_PACKET pUmPacket;
2280 unsigned long flags = 0;
2281 int frag_no;
2282 #ifdef NICE_SUPPORT
2283 vlan_tag_t *vlan_tag;
2284 #endif
2285 #ifdef BCM_TSO
2286 LM_UINT32 mss = 0 ;
2287 uint16_t ip_tcp_len, tcp_opt_len, tcp_seg_flags;
2288 #endif
2289 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
2290 struct tcphdr *th;
2291 struct iphdr *iph;
2292 #endif
2294 if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) ||
2295 !pDevice->InitDone || pUmDevice->suspended)
2297 dev_kfree_skb(skb);
2298 return 0;
2301 #if (LINUX_VERSION_CODE < 0x02032b)
2302 if (test_and_set_bit(0, &dev->tbusy)) {
2303 return 1;
2305 #endif
2307 if (pUmDevice->do_global_lock && pUmDevice->interrupt) {
2308 netif_stop_queue(dev);
2309 pUmDevice->tx_queued = 1;
2310 if (!pUmDevice->interrupt) {
2311 netif_wake_queue(dev);
2312 pUmDevice->tx_queued = 0;
2314 return 1;
2317 pPacket = (PLM_PACKET)
2318 QQ_PopHead(&pDevice->TxPacketFreeQ.Container);
2319 if (pPacket == 0) {
2320 netif_stop_queue(dev);
2321 pUmDevice->tx_full = 1;
2322 if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container)) {
2323 netif_wake_queue(dev);
2324 pUmDevice->tx_full = 0;
2326 return 1;
2328 pUmPacket = (PUM_PACKET) pPacket;
2329 pUmPacket->skbuff = skb;
2330 pUmDevice->stats.tx_bytes += skb->len;
2332 if (skb->ip_summed == CHECKSUM_HW) {
2333 pPacket->Flags = SND_BD_FLAG_TCP_UDP_CKSUM;
2334 #if TIGON3_DEBUG
2335 pUmDevice->tx_chksum_count++;
2336 #endif
2338 else {
2339 pPacket->Flags = 0;
2341 #if MAX_SKB_FRAGS
2342 frag_no = skb_shinfo(skb)->nr_frags;
2343 #else
2344 frag_no = 0;
2345 #endif
2346 if (atomic_read(&pDevice->SendBdLeft) < (frag_no + 1)) {
2347 netif_stop_queue(dev);
2348 pUmDevice->tx_full = 1;
2349 QQ_PushHead(&pDevice->TxPacketFreeQ.Container, pPacket);
2350 if (atomic_read(&pDevice->SendBdLeft) >= (frag_no + 1)) {
2351 netif_wake_queue(dev);
2352 pUmDevice->tx_full = 0;
2354 return 1;
2357 pPacket->u.Tx.FragCount = frag_no + 1;
2358 #if TIGON3_DEBUG
2359 if (pPacket->u.Tx.FragCount > 1)
2360 pUmDevice->tx_zc_count++;
2361 #endif
2363 #ifdef BCM_VLAN
2364 if (pUmDevice->vlgrp && vlan_tx_tag_present(skb)) {
2365 pPacket->VlanTag = vlan_tx_tag_get(skb);
2366 pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2368 #endif
2369 #ifdef NICE_SUPPORT
2370 vlan_tag = (vlan_tag_t *) &skb->cb[0];
2371 if (vlan_tag->signature == 0x5555) {
2372 pPacket->VlanTag = vlan_tag->tag;
2373 pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2374 vlan_tag->signature = 0;
2376 #endif
2378 #ifdef BCM_TSO
2379 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16)
2380 if ((mss = (LM_UINT32) skb_shinfo(skb)->gso_size) &&
2381 (skb->len > pDevice->TxMtu)) {
2382 #else
2383 if ((mss = (LM_UINT32) skb_shinfo(skb)->tso_size) &&
2384 (skb->len > pDevice->TxMtu)) {
2385 #endif
2387 #if (LINUX_VERSION_CODE >= 0x02060c)
2389 if (skb_header_cloned(skb) &&
2390 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
2392 dev_kfree_skb(skb);
2393 return 0;
2395 #endif
2396 pUmDevice->tso_pkt_count++;
2398 pPacket->Flags |= SND_BD_FLAG_CPU_PRE_DMA |
2399 SND_BD_FLAG_CPU_POST_DMA;
2401 tcp_opt_len = 0;
2402 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
2403 th = (struct tcphdr *)skb_transport_header(skb);
2404 iph = (struct iphdr *)skb_network_header(skb);
2406 ASSERT((iph != NULL) && (th != NULL));
2408 if (th->doff > 5) {
2409 tcp_opt_len = (th->doff - 5) << 2;
2411 ip_tcp_len = (iph->ihl << 2) + sizeof(struct tcphdr);
2412 iph->check = 0;
2414 if ( T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ){
2415 th->check = 0;
2416 pPacket->Flags &= ~SND_BD_FLAG_TCP_UDP_CKSUM;
2418 else {
2419 th->check = ~csum_tcpudp_magic(
2420 iph->saddr, iph->daddr,
2421 0, IPPROTO_TCP, 0);
2424 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
2425 tcp_seg_flags = 0;
2427 if (tcp_opt_len || (iph->ihl > 5)) {
2428 if ( T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId) ){
2429 tcp_seg_flags =
2430 ((iph->ihl - 5) +
2431 (tcp_opt_len >> 2)) << 11;
2433 else {
2434 pPacket->Flags |=
2435 ((iph->ihl - 5) +
2436 (tcp_opt_len >> 2)) << 12;
2439 #else
2440 if (skb->h.th->doff > 5) {
2441 tcp_opt_len = (skb->h.th->doff - 5) << 2;
2443 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
2444 skb->nh.iph->check = 0;
2446 if ( T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ){
2447 skb->h.th->check = 0;
2448 pPacket->Flags &= ~SND_BD_FLAG_TCP_UDP_CKSUM;
2450 else {
2451 skb->h.th->check = ~csum_tcpudp_magic(
2452 skb->nh.iph->saddr, skb->nh.iph->daddr,
2453 0, IPPROTO_TCP, 0);
2456 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
2457 tcp_seg_flags = 0;
2459 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
2460 if ( T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId) ){
2461 tcp_seg_flags =
2462 ((skb->nh.iph->ihl - 5) +
2463 (tcp_opt_len >> 2)) << 11;
2465 else {
2466 pPacket->Flags |=
2467 ((skb->nh.iph->ihl - 5) +
2468 (tcp_opt_len >> 2)) << 12;
2471 #endif
2472 pPacket->u.Tx.MaxSegmentSize = mss | tcp_seg_flags;
2474 else
2476 pPacket->u.Tx.MaxSegmentSize = 0;
2478 #endif
2479 BCM5700_LOCK(pUmDevice, flags);
2480 LM_SendPacket(pDevice, pPacket);
2481 BCM5700_UNLOCK(pUmDevice, flags);
2483 #if (LINUX_VERSION_CODE < 0x02032b)
2484 netif_wake_queue(dev);
2485 #endif
2486 dev->trans_start = jiffies;
2489 return 0;
2492 #ifdef BCM_NAPI_RXPOLL
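/* NAPI poll routine: service received packets up to the budget, */
/* replenish rx buffers, and when done re-enable rx interrupts, taking */
/* care of any status block update that raced with the re-enable. */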
2493 STATIC int
2494 bcm5700_poll(struct net_device *dev, int *budget)
2496 int orig_budget = *budget;
2497 int work_done;
2498 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
2499 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2500 unsigned long flags = 0;
2501 LM_UINT32 tag;
2503 if (orig_budget > dev->quota)
2504 orig_budget = dev->quota;
2506 BCM5700_LOCK(pUmDevice, flags);
2507 /* BCM4785: Flush posted writes from GbE to host memory. */
2508 if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
2509 REG_RD(pDevice, HostCoalesce.Mode);
2510 work_done = LM_ServiceRxPoll(pDevice, orig_budget);
2511 *budget -= work_done;
2512 dev->quota -= work_done;
2514 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2515 replenish_rx_buffers(pUmDevice, 0);
2517 BCM5700_UNLOCK(pUmDevice, flags);
2518 if (work_done) {
2519 MM_IndicateRxPackets(pDevice);
2520 BCM5700_LOCK(pUmDevice, flags);
2521 LM_QueueRxPackets(pDevice);
2522 BCM5700_UNLOCK(pUmDevice, flags);
2524 if ((work_done < orig_budget) || atomic_read(&pUmDevice->intr_sem) ||
2525 pUmDevice->suspended) {
2527 netif_rx_complete(dev);
2528 BCM5700_LOCK(pUmDevice, flags);
2529 REG_WR(pDevice, Grc.Mode, pDevice->GrcMode);
2530 pDevice->RxPoll = FALSE;
2531 if (pDevice->RxPoll) {
2532 BCM5700_UNLOCK(pUmDevice, flags);
2533 return 0;
2535 /* Take care of possible missed rx interrupts */
2536 REG_RD_BACK(pDevice, Grc.Mode); /* flush the register write */
2537 tag = pDevice->pStatusBlkVirt->StatusTag;
2538 if ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2539 (pDevice->pStatusBlkVirt->Idx[0].RcvProdIdx !=
2540 pDevice->RcvRetConIdx)) {
2542 REG_WR(pDevice, HostCoalesce.Mode,
2543 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2544 HOST_COALESCE_NOW);
2546 /* If a new status block is pending in the WDMA state machine */
2547 /* before the register write to enable the rx interrupt, */
2548 /* the new status block may DMA with no interrupt. In this */
2549 /* scenario, the tag read above will be older than the tag in */
2550 /* the pending status block and writing the older tag will */
2551 /* cause an interrupt to be generated. */
2552 else if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2553 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low,
2554 tag << 24);
2555 /* Make sure we service tx in case some tx interrupts */
2556 /* are cleared */
2557 if (atomic_read(&pDevice->SendBdLeft) <
2558 (T3_SEND_RCB_ENTRY_COUNT / 2)) {
2559 REG_WR(pDevice, HostCoalesce.Mode,
2560 pDevice->CoalesceMode |
2561 HOST_COALESCE_ENABLE |
2562 HOST_COALESCE_NOW);
2565 BCM5700_UNLOCK(pUmDevice, flags);
2566 return 0;
2568 return 1;
2570 #endif /* BCM_NAPI_RXPOLL */
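/* Interrupt handler: acknowledge the status block (tagged or untagged */
/* mode), loop on LM_ServiceInterrupts() until no more work is pending, */
/* replenish or schedule replenishment of rx buffers, and wake the tx */
/* queue if transmits were deferred. */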
2572 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
2573 STATIC irqreturn_t
2574 bcm5700_interrupt(int irq, void *dev_instance)
2575 #else
2576 STATIC irqreturn_t
2577 bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
2578 #endif
2580 struct net_device *dev = (struct net_device *)dev_instance;
2581 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2582 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2583 LM_UINT32 oldtag, newtag;
2584 int i, max_intr_loop;
2585 #ifdef BCM_TASKLET
2586 int repl_buf_count;
2587 #endif
2588 unsigned int handled = 1;
2590 if (!pDevice->InitDone) {
2591 handled = 0;
2592 return IRQ_RETVAL(handled);
2595 bcm5700_intr_lock(pUmDevice);
2596 if (atomic_read(&pUmDevice->intr_sem)) {
2597 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2598 bcm5700_intr_unlock(pUmDevice);
2599 handled = 0;
2600 return IRQ_RETVAL(handled);
2603 if (test_and_set_bit(0, (void*)&pUmDevice->interrupt)) {
2604 printk(KERN_ERR "%s: Duplicate entry of the interrupt handler\n",
2605 dev->name);
2606 bcm5700_intr_unlock(pUmDevice);
2607 handled = 0;
2608 return IRQ_RETVAL(handled);
2611 /* BCM4785: Flush posted writes from GbE to host memory. */
2612 if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
2613 REG_RD(pDevice, HostCoalesce.Mode);
2615 if ((pDevice->Flags & USING_MSI_FLAG) ||
2616 (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2617 !(REG_RD(pDevice,PciCfg.PciState) & T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) )
2620 if (pUmDevice->intr_test) {
2621 if (!(REG_RD(pDevice, PciCfg.PciState) &
2622 T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) ||
2623 pDevice->Flags & USING_MSI_FLAG ) {
2624 pUmDevice->intr_test_result = 1;
2626 pUmDevice->intr_test = 0;
2629 #ifdef BCM_NAPI_RXPOLL
2630 max_intr_loop = 1;
2631 #else
2632 max_intr_loop = 50;
2633 #endif
2634 if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2635 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2636 oldtag = pDevice->pStatusBlkVirt->StatusTag;
2638 for (i = 0; ; i++) {
2639 pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2641 LM_ServiceInterrupts(pDevice);
2642 /* BCM4785: Flush GbE posted writes to host memory. */
2643 if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
2644 MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
2645 newtag = pDevice->pStatusBlkVirt->StatusTag;
2646 if ((newtag == oldtag) || (i > max_intr_loop)) {
2647 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, oldtag << 24);
2648 pDevice->LastTag = oldtag;
2649 if (pDevice->Flags & UNDI_FIX_FLAG) {
2650 REG_WR(pDevice, Grc.LocalCtrl,
2651 pDevice->GrcLocalCtrl | 0x2);
2653 break;
2655 oldtag = newtag;
2658 else
2660 i = 0;
2661 do {
2662 uint dummy;
2664 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2665 pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2666 LM_ServiceInterrupts(pDevice);
2667 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 0);
2668 dummy = MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
2669 i++;
2671 while ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) &&
2672 (i < max_intr_loop));
2674 if (pDevice->Flags & UNDI_FIX_FLAG) {
2675 REG_WR(pDevice, Grc.LocalCtrl,
2676 pDevice->GrcLocalCtrl | 0x2);
2680 else
2682 /* not my interrupt */
2683 handled = 0;
2686 #ifdef BCM_TASKLET
2687 repl_buf_count = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
2688 if (((repl_buf_count > pUmDevice->rx_buf_repl_panic_thresh) ||
2689 pDevice->QueueAgain) &&
2690 (!test_and_set_bit(0, &pUmDevice->tasklet_busy))) {
2692 replenish_rx_buffers(pUmDevice, pUmDevice->rx_buf_repl_isr_limit);
2693 clear_bit(0, (void*)&pUmDevice->tasklet_busy);
2695 else if ((repl_buf_count > pUmDevice->rx_buf_repl_thresh) &&
2696 !pUmDevice->tasklet_pending) {
2698 pUmDevice->tasklet_pending = 1;
2699 tasklet_schedule(&pUmDevice->tasklet);
2701 #else
2702 #ifdef BCM_NAPI_RXPOLL
2703 if (!pDevice->RxPoll &&
2704 QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2705 pDevice->RxPoll = 1;
2706 MM_ScheduleRxPoll(pDevice);
2708 #else
2709 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2710 replenish_rx_buffers(pUmDevice, 0);
2713 if (QQ_GetEntryCnt(&pDevice->RxPacketFreeQ.Container) ||
2714 pDevice->QueueAgain) {
2716 LM_QueueRxPackets(pDevice);
2718 #endif
2719 #endif
2721 clear_bit(0, (void*)&pUmDevice->interrupt);
2722 bcm5700_intr_unlock(pUmDevice);
2723 if (pUmDevice->tx_queued) {
2724 pUmDevice->tx_queued = 0;
2725 netif_wake_queue(dev);
2727 return IRQ_RETVAL(handled);
2731 #ifdef BCM_TASKLET
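/* Tasklet that replenishes rx buffers outside hard-interrupt context; */
/* the tasklet_busy bit guards against reentrant invocation. */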
2732 STATIC void
2733 bcm5700_tasklet(unsigned long data)
2735 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)data;
2736 unsigned long flags = 0;
2738 /* RH 7.2 Beta 3 tasklets are reentrant */
2739 if (test_and_set_bit(0, &pUmDevice->tasklet_busy)) {
2740 pUmDevice->tasklet_pending = 0;
2741 return;
2744 pUmDevice->tasklet_pending = 0;
2745 if (pUmDevice->opened && !pUmDevice->suspended) {
2746 BCM5700_LOCK(pUmDevice, flags);
2747 replenish_rx_buffers(pUmDevice, 0);
2748 BCM5700_UNLOCK(pUmDevice, flags);
2751 clear_bit(0, &pUmDevice->tasklet_busy);
2753 #endif
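/* Stop the interface: shut the hardware down, delete the timers, free */
/* the irq (and MSI vector), release DMA memory and, unless this is an */
/* SB-core (BCM4785) device, drop the chip into the D3 power state. */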
2755 STATIC int
2756 bcm5700_close(struct net_device *dev)
2759 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2760 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2762 #if (LINUX_VERSION_CODE < 0x02032b)
2763 dev->start = 0;
2764 #endif
2765 netif_stop_queue(dev);
2766 pUmDevice->opened = 0;
2768 #ifdef BCM_ASF
2769 if( !(pDevice->AsfFlags & ASF_ENABLED) )
2770 #endif
2771 #ifdef BCM_WOL
2772 if( enable_wol[pUmDevice->index] == 0 )
2773 #endif
2774 B57_INFO(("%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name));
2776 if (tigon3_debug > 1)
2777 printk(KERN_DEBUG "%s: Shutting down Tigon3\n",
2778 dev->name);
2780 LM_MulticastClear(pDevice);
2781 bcm5700_shutdown(pUmDevice);
2783 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
2784 del_timer_sync(&pUmDevice->statstimer);
2787 del_timer_sync(&pUmDevice->timer);
2789 free_irq(pUmDevice->pdev->irq, dev);
2791 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
2793 if(pDevice->Flags & USING_MSI_FLAG) {
2794 pci_disable_msi(pUmDevice->pdev);
2795 REG_WR(pDevice, Msi.Mode, 1 );
2796 pDevice->Flags &= ~USING_MSI_FLAG;
2799 #endif
2802 #if (LINUX_VERSION_CODE < 0x020300)
2803 MOD_DEC_USE_COUNT;
2804 #endif
2806 /* BCM4785: Don't go to low-power state because it will power down the smbus block. */
2807 if (!(pDevice->Flags & SB_CORE_FLAG))
2808 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
2811 bcm5700_freemem(dev);
2813 QQ_InitQueue(&pDevice->RxPacketFreeQ.Container,
2814 MAX_RX_PACKET_DESC_COUNT);
2816 return 0;
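/* Free all driver memory tracked in the mem_list arrays: kfree() the */
/* entries recorded with size 0 and pci_free_consistent() the DMA */
/* blocks; NICE ioctl buffers are released via bcm5700_freemem2(). */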
2819 STATIC int
2820 bcm5700_freemem(struct net_device *dev)
2822 int i;
2823 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2824 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2826 for (i = 0; i < pUmDevice->mem_list_num; i++) {
2827 if (pUmDevice->mem_size_list[i] == 0) {
2828 kfree(pUmDevice->mem_list[i]);
2830 else {
2831 pci_free_consistent(pUmDevice->pdev,
2832 (size_t) pUmDevice->mem_size_list[i],
2833 pUmDevice->mem_list[i],
2834 pUmDevice->dma_list[i]);
2838 pDevice->pStatusBlkVirt = 0;
2839 pDevice->pStatsBlkVirt = 0;
2840 pUmDevice->mem_list_num = 0;
2842 #ifdef NICE_SUPPORT
2843 if (!pUmDevice->opened) {
2844 for (i = 0; i < MAX_MEM2; i++) {
2845 if (pUmDevice->mem_size_list2[i]) {
2846 bcm5700_freemem2(pUmDevice, i);
2850 #endif
2851 return 0;
2854 #ifdef NICE_SUPPORT
2855 /* Frees consistent memory allocated through ioctl */
2856 /* The memory to be freed is in mem_list2[index] */
2857 STATIC int
2858 bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index)
2860 #if (LINUX_VERSION_CODE >= 0x020400)
2861 void *ptr;
2862 struct page *pg, *last_pg;
2864 /* Probably won't work on some architectures */
2865 ptr = pUmDevice->mem_list2[index],
2866 pg = virt_to_page(ptr);
2867 last_pg = virt_to_page(ptr + pUmDevice->mem_size_list2[index] - 1);
2868 for (; ; pg++) {
2869 #if (LINUX_VERSION_CODE > 0x020500)
2870 ClearPageReserved(pg);
2871 #else
2872 mem_map_unreserve(pg);
2873 #endif
2874 if (pg == last_pg)
2875 break;
2877 pci_free_consistent(pUmDevice->pdev,
2878 (size_t) pUmDevice->mem_size_list2[index],
2879 pUmDevice->mem_list2[index],
2880 pUmDevice->dma_list2[index]);
2881 pUmDevice->mem_size_list2[index] = 0;
2882 #endif
2883 return 0;
2885 #endif
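/* Return the accumulated CRC error count. On 5700/5701 copper parts it */
/* is read from PHY register 0x14 (rate-limited to one MDIO read every */
/* 5 seconds); on other chips it comes from the statistics block. */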
2887 uint64_t
2888 bcm5700_crc_count(PUM_DEVICE_BLOCK pUmDevice)
2890 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
2891 LM_UINT32 Value32;
2892 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2893 unsigned long flags;
2895 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5700 ||
2896 T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5701) &&
2897 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
2899 if (!pUmDevice->opened || !pDevice->InitDone)
2902 return 0;
2905 /* regulate MDIO access during run time */
2906 if (pUmDevice->crc_counter_expiry > 0)
2907 return pUmDevice->phy_crc_count;
2909 pUmDevice->crc_counter_expiry = (5 * HZ) /
2910 pUmDevice->timer_interval;
2912 BCM5700_PHY_LOCK(pUmDevice, flags);
2913 LM_ReadPhy(pDevice, 0x1e, &Value32);
2914 if ((Value32 & 0x8000) == 0)
2915 LM_WritePhy(pDevice, 0x1e, Value32 | 0x8000);
2916 LM_ReadPhy(pDevice, 0x14, &Value32);
2917 BCM5700_PHY_UNLOCK(pUmDevice, flags);
2918 /* Sometimes data on the MDIO bus can be corrupted */
2919 if (Value32 != 0xffff)
2920 pUmDevice->phy_crc_count += Value32;
2921 return pUmDevice->phy_crc_count;
2923 else if (pStats == 0) {
2924 return 0;
2926 else {
2927 return (MM_GETSTATS64(pStats->dot3StatsFCSErrors));
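/* Sum of all receive error counters: CRC, alignment, undersize, */
/* fragment, oversize and jabber errors. */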
2931 uint64_t
2932 bcm5700_rx_err_count(UM_DEVICE_BLOCK *pUmDevice)
2934 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2935 T3_STATS_BLOCK *pStats = (T3_STATS_BLOCK *) pDevice->pStatsBlkVirt;
2937 if (pStats == 0)
2938 return 0;
2939 return (bcm5700_crc_count(pUmDevice) +
2940 MM_GETSTATS64(pStats->dot3StatsAlignmentErrors) +
2941 MM_GETSTATS64(pStats->etherStatsUndersizePkts) +
2942 MM_GETSTATS64(pStats->etherStatsFragments) +
2943 MM_GETSTATS64(pStats->dot3StatsFramesTooLong) +
2944 MM_GETSTATS64(pStats->etherStatsJabbers));
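/* Fill in the standard net_device_stats structure from the hardware */
/* statistics block; rx/tx byte counts are accumulated in software */
/* because the octet counters are unreliable. */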
2947 STATIC struct net_device_stats *
2948 bcm5700_get_stats(struct net_device *dev)
2950 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2951 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2952 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2953 struct net_device_stats *p_netstats = &pUmDevice->stats;
2955 if (pStats == 0)
2956 return p_netstats;
2958 /* Get stats from LM */
2959 p_netstats->rx_packets =
2960 MM_GETSTATS(pStats->ifHCInUcastPkts) +
2961 MM_GETSTATS(pStats->ifHCInMulticastPkts) +
2962 MM_GETSTATS(pStats->ifHCInBroadcastPkts);
2963 p_netstats->tx_packets =
2964 MM_GETSTATS(pStats->ifHCOutUcastPkts) +
2965 MM_GETSTATS(pStats->ifHCOutMulticastPkts) +
2966 MM_GETSTATS(pStats->ifHCOutBroadcastPkts);
2967 These counters seem to be inaccurate. Use byte count accumulation
2968 instead.
2969 p_netstats->rx_bytes = MM_GETSTATS(pStats->ifHCInOctets);
2970 p_netstats->tx_bytes = MM_GETSTATS(pStats->ifHCOutOctets);
2972 p_netstats->tx_errors =
2973 MM_GETSTATS(pStats->dot3StatsInternalMacTransmitErrors) +
2974 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors) +
2975 MM_GETSTATS(pStats->ifOutDiscards) +
2976 MM_GETSTATS(pStats->ifOutErrors);
2977 p_netstats->multicast = MM_GETSTATS(pStats->ifHCInMulticastPkts);
2978 p_netstats->collisions = MM_GETSTATS(pStats->etherStatsCollisions);
2979 p_netstats->rx_length_errors =
2980 MM_GETSTATS(pStats->dot3StatsFramesTooLong) +
2981 MM_GETSTATS(pStats->etherStatsUndersizePkts);
2982 p_netstats->rx_over_errors = MM_GETSTATS(pStats->nicNoMoreRxBDs);
2983 p_netstats->rx_frame_errors =
2984 MM_GETSTATS(pStats->dot3StatsAlignmentErrors);
2985 p_netstats->rx_crc_errors = (unsigned long)
2986 bcm5700_crc_count(pUmDevice);
2987 p_netstats->rx_errors = (unsigned long)
2988 bcm5700_rx_err_count(pUmDevice);
2990 p_netstats->tx_aborted_errors = MM_GETSTATS(pStats->ifOutDiscards);
2991 p_netstats->tx_carrier_errors =
2992 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors);
2994 return p_netstats;
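/* Quiesce the interface (mask interrupts, stop the queue, wait for any */
/* NAPI poll) and shut the chip down; b57_resume_chip() below undoes */
/* this after a resume. */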
2997 void
2998 b57_suspend_chip(UM_DEVICE_BLOCK *pUmDevice)
3000 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
3002 if (pUmDevice->opened) {
3003 bcm5700_intr_off(pUmDevice);
3004 netif_carrier_off(pUmDevice->dev);
3005 netif_stop_queue(pUmDevice->dev);
3006 #ifdef BCM_TASKLET
3007 tasklet_kill(&pUmDevice->tasklet);
3008 #endif
3009 bcm5700_poll_wait(pUmDevice);
3011 pUmDevice->suspended = 1;
3012 LM_ShutdownChip(pDevice, LM_SUSPEND_RESET);
3015 void
3016 b57_resume_chip(UM_DEVICE_BLOCK *pUmDevice)
3018 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
3020 if (pUmDevice->suspended) {
3021 pUmDevice->suspended = 0;
3022 if (pUmDevice->opened) {
3023 bcm5700_reset(pUmDevice->dev);
3025 else {
3026 LM_ShutdownChip(pDevice, LM_SHUTDOWN_RESET);
3031 /* Returns 0 on failure, 1 on success */
3033 b57_test_intr(UM_DEVICE_BLOCK *pUmDevice)
3035 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
3036 int j;
3038 if (!pUmDevice->opened)
3039 return 0;
3040 pUmDevice->intr_test_result = 0;
3041 pUmDevice->intr_test = 1;
3043 REG_WR(pDevice, HostCoalesce.Mode,
3044 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
3045 HOST_COALESCE_NOW);
3047 for (j = 0; j < 10; j++) {
3048 if (pUmDevice->intr_test_result){
3049 break;
3052 REG_WR(pDevice, HostCoalesce.Mode,
3053 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
3054 HOST_COALESCE_NOW);
3056 MM_Sleep(pDevice, 1);
3059 return pUmDevice->intr_test_result;
3063 #ifdef SIOCETHTOOL
3065 #ifdef ETHTOOL_GSTRINGS
3067 #define ETH_NUM_STATS 30
3068 #define RX_CRC_IDX 5
3069 #define RX_MAC_ERR_IDX 14
3071 struct {
3072 char string[ETH_GSTRING_LEN];
3073 } bcm5700_stats_str_arr[ETH_NUM_STATS] = {
3074 { "rx_unicast_packets" },
3075 { "rx_multicast_packets" },
3076 { "rx_broadcast_packets" },
3077 { "rx_bytes" },
3078 { "rx_fragments" },
3079 { "rx_crc_errors" }, /* this needs to be calculated */
3080 { "rx_align_errors" },
3081 { "rx_xon_frames" },
3082 { "rx_xoff_frames" },
3083 { "rx_long_frames" },
3084 { "rx_short_frames" },
3085 { "rx_jabber" },
3086 { "rx_discards" },
3087 { "rx_errors" },
3088 { "rx_mac_errors" }, /* this needs to be calculated */
3089 { "tx_unicast_packets" },
3090 { "tx_multicast_packets" },
3091 { "tx_broadcast_packets" },
3092 { "tx_bytes" },
3093 { "tx_deferred" },
3094 { "tx_single_collisions" },
3095 { "tx_multi_collisions" },
3096 { "tx_total_collisions" },
3097 { "tx_excess_collisions" },
3098 { "tx_late_collisions" },
3099 { "tx_xon_frames" },
3100 { "tx_xoff_frames" },
3101 { "tx_internal_mac_errors" },
3102 { "tx_carrier_errors" },
3103 { "tx_errors" },
3106 #define STATS_OFFSET(offset_name) ((OFFSETOF(T3_STATS_BLOCK, offset_name)) / sizeof(uint64_t))
3108 #ifdef __BIG_ENDIAN
3109 #define SWAP_DWORD_64(x) (x)
3110 #else
3111 #define SWAP_DWORD_64(x) ((x << 32) | (x >> 32))
3112 #endif
3114 unsigned long bcm5700_stats_offset_arr[ETH_NUM_STATS] = {
3115 STATS_OFFSET(ifHCInUcastPkts),
3116 STATS_OFFSET(ifHCInMulticastPkts),
3117 STATS_OFFSET(ifHCInBroadcastPkts),
3118 STATS_OFFSET(ifHCInOctets),
3119 STATS_OFFSET(etherStatsFragments),
3121 STATS_OFFSET(dot3StatsAlignmentErrors),
3122 STATS_OFFSET(xonPauseFramesReceived),
3123 STATS_OFFSET(xoffPauseFramesReceived),
3124 STATS_OFFSET(dot3StatsFramesTooLong),
3125 STATS_OFFSET(etherStatsUndersizePkts),
3126 STATS_OFFSET(etherStatsJabbers),
3127 STATS_OFFSET(ifInDiscards),
3128 STATS_OFFSET(ifInErrors),
3130 STATS_OFFSET(ifHCOutUcastPkts),
3131 STATS_OFFSET(ifHCOutMulticastPkts),
3132 STATS_OFFSET(ifHCOutBroadcastPkts),
3133 STATS_OFFSET(ifHCOutOctets),
3134 STATS_OFFSET(dot3StatsDeferredTransmissions),
3135 STATS_OFFSET(dot3StatsSingleCollisionFrames),
3136 STATS_OFFSET(dot3StatsMultipleCollisionFrames),
3137 STATS_OFFSET(etherStatsCollisions),
3138 STATS_OFFSET(dot3StatsExcessiveCollisions),
3139 STATS_OFFSET(dot3StatsLateCollisions),
3140 STATS_OFFSET(outXonSent),
3141 STATS_OFFSET(outXoffSent),
3142 STATS_OFFSET(dot3StatsInternalMacTransmitErrors),
3143 STATS_OFFSET(dot3StatsCarrierSenseErrors),
3144 STATS_OFFSET(ifOutErrors),
3147 #endif /* ETHTOOL_GSTRINGS */
3149 #ifdef ETHTOOL_TEST
3150 #define ETH_NUM_TESTS 6
3151 struct {
3152 char string[ETH_GSTRING_LEN];
3153 } bcm5700_tests_str_arr[ETH_NUM_TESTS] = {
3154 { "register test (offline)" },
3155 { "memory test (offline)" },
3156 { "loopback test (offline)" },
3157 { "nvram test (online)" },
3158 { "interrupt test (online)" },
3159 { "link test (online)" },
3162 extern LM_STATUS b57_test_registers(UM_DEVICE_BLOCK *pUmDevice);
3163 extern LM_STATUS b57_test_memory(UM_DEVICE_BLOCK *pUmDevice);
3164 extern LM_STATUS b57_test_nvram(UM_DEVICE_BLOCK *pUmDevice);
3165 extern LM_STATUS b57_test_link(UM_DEVICE_BLOCK *pUmDevice);
3166 extern LM_STATUS b57_test_loopback(UM_DEVICE_BLOCK *pUmDevice, int looptype, int linespeed);
3167 #endif
3169 #ifdef ETHTOOL_GREGS
3170 #if (LINUX_VERSION_CODE >= 0x02040f)
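/* Copy the register block [start, end) into *buf for the ethtool */
/* register dump; reserved ranges, and windows that do not exist on */
/* 5705 and later parts, are zero-filled. */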
3171 static void
3172 bcm5700_get_reg_blk(UM_DEVICE_BLOCK *pUmDevice, u32 **buf, u32 start, u32 end,
3173 int reserved)
3175 u32 offset;
3176 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
3178 if (reserved) {
3179 memset(*buf, 0, end - start);
3180 *buf = *buf + (end - start)/4;
3181 return;
3183 for (offset = start; offset < end; offset+=4, *buf = *buf + 1) {
3184 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
3185 if (((offset >= 0x3400) && (offset < 0x3c00)) ||
3186 ((offset >= 0x5400) && (offset < 0x5800)) ||
3187 ((offset >= 0x6400) && (offset < 0x6800))) {
3188 **buf = 0;
3189 continue;
3192 **buf = REG_RD_OFFSET(pDevice, offset);
3195 #endif
3196 #endif
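/* ethtool ioctl dispatcher: implements the get/set commands supported */
/* by this driver (driver info, link settings, wake-on-LAN, EEPROM and */
/* register dumps, pause parameters, checksum/scatter-gather offload, */
/* ring sizes, statistics and self test). */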
3198 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
3200 struct ethtool_cmd ethcmd;
3201 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
3202 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
3204 if (mm_copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
3205 return -EFAULT;
3207 switch (ethcmd.cmd) {
3208 #ifdef ETHTOOL_GDRVINFO
3209 case ETHTOOL_GDRVINFO: {
3210 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
3212 strcpy(info.driver, bcm5700_driver);
3213 #ifdef INCLUDE_5701_AX_FIX
3214 if(pDevice->ChipRevId == T3_CHIP_ID_5701_A0) {
3215 extern int t3FwReleaseMajor;
3216 extern int t3FwReleaseMinor;
3217 extern int t3FwReleaseFix;
3219 sprintf(info.fw_version, "%i.%i.%i",
3220 t3FwReleaseMajor, t3FwReleaseMinor,
3221 t3FwReleaseFix);
3223 #endif
3224 strcpy(info.fw_version, pDevice->BootCodeVer);
3225 strcpy(info.version, bcm5700_version);
3226 #if (LINUX_VERSION_CODE <= 0x020422)
3227 strcpy(info.bus_info, pUmDevice->pdev->slot_name);
3228 #else
3229 strcpy(info.bus_info, pci_name(pUmDevice->pdev));
3230 #endif
3234 #ifdef ETHTOOL_GEEPROM
3235 BCM_EEDUMP_LEN(&info, pDevice->NvramSize);
3236 #endif
3237 #ifdef ETHTOOL_GREGS
3238 /* dump everything, including holes in the register space */
3239 info.regdump_len = 0x6c00;
3240 #endif
3241 #ifdef ETHTOOL_GSTATS
3242 info.n_stats = ETH_NUM_STATS;
3243 #endif
3244 #ifdef ETHTOOL_TEST
3245 info.testinfo_len = ETH_NUM_TESTS;
3246 #endif
3247 if (mm_copy_to_user(useraddr, &info, sizeof(info)))
3248 return -EFAULT;
3249 return 0;
3251 #endif
3252 case ETHTOOL_GSET: {
3253 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG)||
3254 (pDevice->PhyFlags & PHY_IS_FIBER)) {
3255 ethcmd.supported =
3256 (SUPPORTED_1000baseT_Full |
3257 SUPPORTED_Autoneg);
3258 ethcmd.supported |= SUPPORTED_FIBRE;
3259 ethcmd.port = PORT_FIBRE;
3260 } else {
3261 ethcmd.supported =
3262 (SUPPORTED_10baseT_Half |
3263 SUPPORTED_10baseT_Full |
3264 SUPPORTED_100baseT_Half |
3265 SUPPORTED_100baseT_Full |
3266 SUPPORTED_1000baseT_Half |
3267 SUPPORTED_1000baseT_Full |
3268 SUPPORTED_Autoneg);
3269 ethcmd.supported |= SUPPORTED_TP;
3270 ethcmd.port = PORT_TP;
3273 ethcmd.transceiver = XCVR_INTERNAL;
3274 ethcmd.phy_address = 0;
3276 if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
3277 ethcmd.speed = SPEED_1000;
3278 else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
3279 ethcmd.speed = SPEED_100;
3280 else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
3281 ethcmd.speed = SPEED_10;
3282 else
3283 ethcmd.speed = 0;
3285 if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
3286 ethcmd.duplex = DUPLEX_FULL;
3287 else
3288 ethcmd.duplex = DUPLEX_HALF;
3290 if (pDevice->DisableAutoNeg == FALSE) {
3291 ethcmd.autoneg = AUTONEG_ENABLE;
3292 ethcmd.advertising = ADVERTISED_Autoneg;
3293 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
3294 (pDevice->PhyFlags & PHY_IS_FIBER)) {
3295 ethcmd.advertising |=
3296 ADVERTISED_1000baseT_Full |
3297 ADVERTISED_FIBRE;
3299 else {
3300 ethcmd.advertising |=
3301 ADVERTISED_TP;
3302 if (pDevice->advertising &
3303 PHY_AN_AD_10BASET_HALF) {
3305 ethcmd.advertising |=
3306 ADVERTISED_10baseT_Half;
3308 if (pDevice->advertising &
3309 PHY_AN_AD_10BASET_FULL) {
3311 ethcmd.advertising |=
3312 ADVERTISED_10baseT_Full;
3314 if (pDevice->advertising &
3315 PHY_AN_AD_100BASETX_HALF) {
3317 ethcmd.advertising |=
3318 ADVERTISED_100baseT_Half;
3320 if (pDevice->advertising &
3321 PHY_AN_AD_100BASETX_FULL) {
3323 ethcmd.advertising |=
3324 ADVERTISED_100baseT_Full;
3326 if (pDevice->advertising1000 &
3327 BCM540X_AN_AD_1000BASET_HALF) {
3329 ethcmd.advertising |=
3330 ADVERTISED_1000baseT_Half;
3332 if (pDevice->advertising1000 &
3333 BCM540X_AN_AD_1000BASET_FULL) {
3335 ethcmd.advertising |=
3336 ADVERTISED_1000baseT_Full;
3340 else {
3341 ethcmd.autoneg = AUTONEG_DISABLE;
3342 ethcmd.advertising = 0;
3345 ethcmd.maxtxpkt = pDevice->TxMaxCoalescedFrames;
3346 ethcmd.maxrxpkt = pDevice->RxMaxCoalescedFrames;
3348 if(mm_copy_to_user(useraddr, &ethcmd, sizeof(ethcmd)))
3349 return -EFAULT;
3350 return 0;
3352 case ETHTOOL_SSET: {
3353 unsigned long flags;
3355 if(!capable(CAP_NET_ADMIN))
3356 return -EPERM;
3357 if (ethcmd.autoneg == AUTONEG_ENABLE) {
3358 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3359 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_UNKNOWN;
3360 pDevice->DisableAutoNeg = FALSE;
3362 else {
3363 if (ethcmd.speed == SPEED_1000 &&
3364 pDevice->PhyFlags & PHY_NO_GIGABIT)
3365 return -EINVAL;
3367 if (ethcmd.speed == SPEED_1000 &&
3368 (pDevice->TbiFlags & ENABLE_TBI_FLAG ||
3369 pDevice->PhyFlags & PHY_IS_FIBER ) ) {
3371 pDevice->RequestedLineSpeed =
3372 LM_LINE_SPEED_1000MBPS;
3374 pDevice->RequestedDuplexMode =
3375 LM_DUPLEX_MODE_FULL;
3377 else if (ethcmd.speed == SPEED_100 &&
3378 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3379 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3381 pDevice->RequestedLineSpeed =
3382 LM_LINE_SPEED_100MBPS;
3384 else if (ethcmd.speed == SPEED_10 &&
3385 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3386 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3388 pDevice->RequestedLineSpeed =
3389 LM_LINE_SPEED_10MBPS;
3391 else {
3392 return -EINVAL;
3395 pDevice->DisableAutoNeg = TRUE;
3396 if (ethcmd.duplex == DUPLEX_FULL) {
3397 pDevice->RequestedDuplexMode =
3398 LM_DUPLEX_MODE_FULL;
3400 else {
3401 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3402 !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
3404 pDevice->RequestedDuplexMode =
3405 LM_DUPLEX_MODE_HALF;
3409 if (netif_running(dev)) {
3410 BCM5700_PHY_LOCK(pUmDevice, flags);
3411 LM_SetupPhy(pDevice);
3412 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3414 return 0;
3416 #ifdef ETHTOOL_GWOL
3417 #ifdef BCM_WOL
3418 case ETHTOOL_GWOL: {
3419 struct ethtool_wolinfo wol = {ETHTOOL_GWOL};
3421 if (((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3422 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3423 (pDevice->Flags & DISABLE_D3HOT_FLAG)) {
3424 wol.supported = 0;
3425 wol.wolopts = 0;
3427 else {
3428 wol.supported = WAKE_MAGIC;
3429 if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET)
3431 wol.wolopts = WAKE_MAGIC;
3433 else {
3434 wol.wolopts = 0;
3437 if (mm_copy_to_user(useraddr, &wol, sizeof(wol)))
3438 return -EFAULT;
3439 return 0;
3441 case ETHTOOL_SWOL: {
3442 struct ethtool_wolinfo wol;
3444 if(!capable(CAP_NET_ADMIN))
3445 return -EPERM;
3446 if (mm_copy_from_user(&wol, useraddr, sizeof(wol)))
3447 return -EFAULT;
3448 if ((((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3449 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3450 (pDevice->Flags & DISABLE_D3HOT_FLAG)) &&
3451 wol.wolopts) {
3452 return -EINVAL;
3455 if ((wol.wolopts & ~WAKE_MAGIC) != 0) {
3456 return -EINVAL;
3458 if (wol.wolopts & WAKE_MAGIC) {
3459 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
3460 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
3462 else {
3463 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
3464 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
3466 return 0;
3468 #endif
3469 #endif
3470 #ifdef ETHTOOL_GLINK
3471 case ETHTOOL_GLINK: {
3472 struct ethtool_value edata = {ETHTOOL_GLINK};
3474 /* ifup only waits for 5 seconds for link up */
3475 /* NIC may take more than 5 seconds to establish link */
3476 if ((pUmDevice->delayed_link_ind > 0) &&
3477 delay_link[pUmDevice->index])
3478 return -EOPNOTSUPP;
3480 if (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) {
3481 edata.data = 1;
3483 else {
3484 edata.data = 0;
3486 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3487 return -EFAULT;
3488 return 0;
3490 #endif
3491 #ifdef ETHTOOL_NWAY_RST
3492 case ETHTOOL_NWAY_RST: {
3493 LM_UINT32 phyctrl;
3494 unsigned long flags;
3496 if(!capable(CAP_NET_ADMIN))
3497 return -EPERM;
3498 if (pDevice->DisableAutoNeg) {
3499 return -EINVAL;
3501 if (!netif_running(dev))
3502 return -EAGAIN;
3503 BCM5700_PHY_LOCK(pUmDevice, flags);
3504 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
3505 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
3506 pDevice->DisableAutoNeg = TRUE;
3507 LM_SetupPhy(pDevice);
3509 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3510 pDevice->DisableAutoNeg = FALSE;
3511 LM_SetupPhy(pDevice);
3513 else {
3514 if ((T3_ASIC_REV(pDevice->ChipRevId) ==
3515 T3_ASIC_REV_5703) ||
3516 (T3_ASIC_REV(pDevice->ChipRevId) ==
3517 T3_ASIC_REV_5704) ||
3518 (T3_ASIC_REV(pDevice->ChipRevId) ==
3519 T3_ASIC_REV_5705))
3521 LM_ResetPhy(pDevice);
3522 LM_SetupPhy(pDevice);
3524 pDevice->PhyFlags &= ~PHY_FIBER_FALLBACK;
3525 LM_ReadPhy(pDevice, PHY_CTRL_REG, &phyctrl);
3526 LM_WritePhy(pDevice, PHY_CTRL_REG, phyctrl |
3527 PHY_CTRL_AUTO_NEG_ENABLE |
3528 PHY_CTRL_RESTART_AUTO_NEG);
3530 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3531 return 0;
3533 #endif
3534 #ifdef ETHTOOL_GEEPROM
3535 case ETHTOOL_GEEPROM: {
3536 struct ethtool_eeprom eeprom;
3537 LM_UINT32 *buf = 0;
3538 LM_UINT32 buf1[64/4];
3539 int i, j, offset, len;
3541 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3542 return -EFAULT;
3544 if (eeprom.offset >= pDevice->NvramSize)
3545 return -EFAULT;
3547 /* maximum read length is limited to 0x800 bytes */
3548 /* to read more, call again with a different offset */
3549 if (eeprom.len > 0x800) {
3550 eeprom.len = 0x800;
3551 if (mm_copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
3552 return -EFAULT;
3555 if (eeprom.len > 64) {
3556 buf = kmalloc(eeprom.len, GFP_KERNEL);
3557 if (!buf)
3558 return -ENOMEM;
3560 else {
3561 buf = buf1;
3563 useraddr += offsetof(struct ethtool_eeprom, data);
3565 offset = eeprom.offset;
3566 len = eeprom.len;
3567 if (offset & 3) {
3568 offset &= 0xfffffffc;
3569 len += (offset & 3);
3571 len = (len + 3) & 0xfffffffc;
3572 for (i = 0, j = 0; j < len; i++, j += 4) {
3573 if (LM_NvramRead(pDevice, offset + j, buf + i) !=
3574 LM_STATUS_SUCCESS) {
3575 break;
3578 if (j >= len) {
3579 buf += (eeprom.offset & 3);
3580 i = mm_copy_to_user(useraddr, buf, eeprom.len);
3582 if (eeprom.len > 64) {
3583 kfree(buf);
3585 if ((j < len) || i)
3586 return -EFAULT;
3587 return 0;
3589 case ETHTOOL_SEEPROM: {
3590 struct ethtool_eeprom eeprom;
3591 LM_UINT32 buf[64/4];
3592 int i, offset, len;
3594 if(!capable(CAP_NET_ADMIN))
3595 return -EPERM;
3596 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3597 return -EFAULT;
3599 if ((eeprom.offset & 3) || (eeprom.len & 3) ||
3600 (eeprom.offset >= pDevice->NvramSize)) {
3601 return -EFAULT;
3604 if ((eeprom.offset + eeprom.len) >= pDevice->NvramSize) {
3605 eeprom.len = pDevice->NvramSize - eeprom.offset;
3608 useraddr += offsetof(struct ethtool_eeprom, data);
3610 len = eeprom.len;
3611 offset = eeprom.offset;
3612 for (; len > 0; ) {
3613 if (len < 64)
3614 i = len;
3615 else
3616 i = 64;
3617 if (mm_copy_from_user(&buf, useraddr, i))
3618 return -EFAULT;
3620 bcm5700_intr_off(pUmDevice);
3621 /* Prevent race condition on Grc.Mode register */
3622 bcm5700_poll_wait(pUmDevice);
3624 if (LM_NvramWriteBlock(pDevice, offset, buf, i/4) !=
3625 LM_STATUS_SUCCESS) {
3626 bcm5700_intr_on(pUmDevice);
3627 return -EFAULT;
3629 bcm5700_intr_on(pUmDevice);
3630 len -= i;
3631 offset += i;
3632 useraddr += i;
3634 return 0;
3636 #endif
3637 #ifdef ETHTOOL_GREGS
3638 #if (LINUX_VERSION_CODE >= 0x02040f)
3639 case ETHTOOL_GREGS: {
3640 struct ethtool_regs eregs;
3641 LM_UINT32 *buf, *buf1;
3642 unsigned int i;
3644 if(!capable(CAP_NET_ADMIN))
3645 return -EPERM;
3646 if (pDevice->Flags & UNDI_FIX_FLAG)
3647 return -EOPNOTSUPP;
3648 if (mm_copy_from_user(&eregs, useraddr, sizeof(eregs)))
3649 return -EFAULT;
3650 if (eregs.len > 0x6c00)
3651 eregs.len = 0x6c00;
3652 eregs.version = 0x0;
3653 if (mm_copy_to_user(useraddr, &eregs, sizeof(eregs)))
3654 return -EFAULT;
3655 buf = buf1 = kmalloc(eregs.len, GFP_KERNEL);
3656 if (!buf)
3657 return -ENOMEM;
3658 bcm5700_get_reg_blk(pUmDevice, &buf, 0, 0xb0, 0);
3659 bcm5700_get_reg_blk(pUmDevice, &buf, 0xb0, 0x200, 1);
3660 bcm5700_get_reg_blk(pUmDevice, &buf, 0x200, 0x8f0, 0);
3661 bcm5700_get_reg_blk(pUmDevice, &buf, 0x8f0, 0xc00, 1);
3662 bcm5700_get_reg_blk(pUmDevice, &buf, 0xc00, 0xce0, 0);
3663 bcm5700_get_reg_blk(pUmDevice, &buf, 0xce0, 0x1000, 1);
3664 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1000, 0x1004, 0);
3665 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1004, 0x1400, 1);
3666 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1400, 0x1480, 0);
3667 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1480, 0x1800, 1);
3668 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1800, 0x1848, 0);
3669 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1848, 0x1c00, 1);
3670 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c00, 0x1c04, 0);
3671 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c04, 0x2000, 1);
3672 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2000, 0x225c, 0);
3673 bcm5700_get_reg_blk(pUmDevice, &buf, 0x225c, 0x2400, 1);
3674 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2400, 0x24c4, 0);
3675 bcm5700_get_reg_blk(pUmDevice, &buf, 0x24c4, 0x2800, 1);
3676 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2800, 0x2804, 0);
3677 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2804, 0x2c00, 1);
3678 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c00, 0x2c20, 0);
3679 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c20, 0x3000, 1);
3680 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3000, 0x3014, 0);
3681 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3014, 0x3400, 1);
3682 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3400, 0x3408, 0);
3683 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3408, 0x3800, 1);
3684 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3800, 0x3808, 0);
3685 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3808, 0x3c00, 1);
3686 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3c00, 0x3d00, 0);
3687 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3d00, 0x4000, 1);
3688 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4000, 0x4010, 0);
3689 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4010, 0x4400, 1);
3690 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4400, 0x4458, 0);
3691 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4458, 0x4800, 1);
3692 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4800, 0x4808, 0);
3693 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4808, 0x4c00, 1);
3694 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c00, 0x4c08, 0);
3695 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c08, 0x5000, 1);
3696 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5000, 0x5050, 0);
3697 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5050, 0x5400, 1);
3698 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5400, 0x5450, 0);
3699 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5450, 0x5800, 1);
3700 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5800, 0x5a10, 0);
3701 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5a10, 0x6000, 1);
3702 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6000, 0x600c, 0);
3703 bcm5700_get_reg_blk(pUmDevice, &buf, 0x600c, 0x6400, 1);
3704 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6400, 0x6404, 0);
3705 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6404, 0x6800, 1);
3706 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6800, 0x6848, 0);
3707 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6848, 0x6c00, 1);
3709 i = mm_copy_to_user(useraddr + sizeof(eregs), buf1, eregs.len);
3710 kfree(buf1);
3711 if (i)
3712 return -EFAULT;
3713 return 0;
3715 #endif
3716 #endif
3717 #ifdef ETHTOOL_GPAUSEPARAM
3718 case ETHTOOL_GPAUSEPARAM: {
3719 struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
3721 if (!pDevice->DisableAutoNeg) {
3722 epause.autoneg = (pDevice->FlowControlCap &
3723 LM_FLOW_CONTROL_AUTO_PAUSE) != 0;
3725 else {
3726 epause.autoneg = 0;
3728 epause.rx_pause =
3729 (pDevice->FlowControl &
3730 LM_FLOW_CONTROL_RECEIVE_PAUSE) != 0;
3731 epause.tx_pause =
3732 (pDevice->FlowControl &
3733 LM_FLOW_CONTROL_TRANSMIT_PAUSE) != 0;
3734 if (mm_copy_to_user(useraddr, &epause, sizeof(epause)))
3735 return -EFAULT;
3737 return 0;
3739 case ETHTOOL_SPAUSEPARAM: {
3740 struct ethtool_pauseparam epause;
3741 unsigned long flags;
3743 if(!capable(CAP_NET_ADMIN))
3744 return -EPERM;
3745 if (mm_copy_from_user(&epause, useraddr, sizeof(epause)))
3746 return -EFAULT;
3747 pDevice->FlowControlCap = 0;
3748 if (epause.autoneg && !pDevice->DisableAutoNeg) {
3749 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
3751 if (epause.rx_pause) {
3752 pDevice->FlowControlCap |=
3753 LM_FLOW_CONTROL_RECEIVE_PAUSE;
3755 if (epause.tx_pause) {
3756 pDevice->FlowControlCap |=
3757 LM_FLOW_CONTROL_TRANSMIT_PAUSE;
3759 if (netif_running(dev)) {
3760 BCM5700_PHY_LOCK(pUmDevice, flags);
3761 LM_SetupPhy(pDevice);
3762 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3765 return 0;
3767 #endif
3768 #ifdef ETHTOOL_GRXCSUM
3769 case ETHTOOL_GRXCSUM: {
3770 struct ethtool_value edata = { ETHTOOL_GRXCSUM };
3772 edata.data =
3773 (pDevice->TaskToOffload &
3774 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM) != 0;
3775 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3776 return -EFAULT;
3778 return 0;
3780 case ETHTOOL_SRXCSUM: {
3781 struct ethtool_value edata;
3783 if(!capable(CAP_NET_ADMIN))
3784 return -EPERM;
3785 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3786 return -EFAULT;
3787 if (edata.data) {
3788 if (!(pDevice->TaskOffloadCap &
3789 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3791 return -EINVAL;
3793 pDevice->TaskToOffload |=
3794 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3795 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
3797 else {
3798 pDevice->TaskToOffload &=
3799 ~(LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3800 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM);
3802 return 0;
3804 case ETHTOOL_GTXCSUM: {
3805 struct ethtool_value edata = { ETHTOOL_GTXCSUM };
3807 edata.data =
3808 (dev->features & get_csum_flag( pDevice->ChipRevId)) != 0;
3809 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3810 return -EFAULT;
3812 return 0;
3814 case ETHTOOL_STXCSUM: {
3815 struct ethtool_value edata;
3817 if(!capable(CAP_NET_ADMIN))
3818 return -EPERM;
3819 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3820 return -EFAULT;
3821 if (edata.data) {
3822 if (!(pDevice->TaskOffloadCap &
3823 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3825 return -EINVAL;
3827 dev->features |= get_csum_flag( pDevice->ChipRevId);
3828 pDevice->TaskToOffload |=
3829 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3830 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
3832 else {
3833 dev->features &= ~get_csum_flag( pDevice->ChipRevId);
3834 pDevice->TaskToOffload &=
3835 ~(LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3836 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM);
3838 return 0;
3840 case ETHTOOL_GSG: {
3841 struct ethtool_value edata = { ETHTOOL_GSG };
3843 edata.data =
3844 (dev->features & NETIF_F_SG) != 0;
3845 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3846 return -EFAULT;
3847 return 0;
3849 case ETHTOOL_SSG: {
3850 struct ethtool_value edata;
3852 if(!capable(CAP_NET_ADMIN))
3853 return -EPERM;
3854 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3855 return -EFAULT;
3856 if (edata.data) {
3857 dev->features |= NETIF_F_SG;
3859 else {
3860 dev->features &= ~NETIF_F_SG;
3862 return 0;
3864 #endif
3865 #ifdef ETHTOOL_GRINGPARAM
3866 case ETHTOOL_GRINGPARAM: {
3867 struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
3869 ering.rx_max_pending = T3_STD_RCV_RCB_ENTRY_COUNT - 1;
3870 ering.rx_pending = pDevice->RxStdDescCnt;
3871 ering.rx_mini_max_pending = 0;
3872 ering.rx_mini_pending = 0;
3873 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
3874 ering.rx_jumbo_max_pending = T3_JUMBO_RCV_RCB_ENTRY_COUNT - 1;
3875 ering.rx_jumbo_pending = pDevice->RxJumboDescCnt;
3876 #else
3877 ering.rx_jumbo_max_pending = 0;
3878 ering.rx_jumbo_pending = 0;
3879 #endif
3880 ering.tx_max_pending = MAX_TX_PACKET_DESC_COUNT - 1;
3881 ering.tx_pending = pDevice->TxPacketDescCnt;
3882 if (mm_copy_to_user(useraddr, &ering, sizeof(ering)))
3883 return -EFAULT;
3884 return 0;
3886 #endif
3887 #ifdef ETHTOOL_PHYS_ID
3888 case ETHTOOL_PHYS_ID: {
3889 struct ethtool_value edata;
3891 if(!capable(CAP_NET_ADMIN))
3892 return -EPERM;
3893 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3894 return -EFAULT;
3895 if (LM_BlinkLED(pDevice, edata.data) == LM_STATUS_SUCCESS)
3896 return 0;
3897 return -EINTR;
3899 #endif
3900 #ifdef ETHTOOL_GSTRINGS
3901 case ETHTOOL_GSTRINGS: {
3902 struct ethtool_gstrings egstr = { ETHTOOL_GSTRINGS };
3904 if (mm_copy_from_user(&egstr, useraddr, sizeof(egstr)))
3905 return -EFAULT;
3906 switch(egstr.string_set) {
3907 #ifdef ETHTOOL_GSTATS
3908 case ETH_SS_STATS:
3909 egstr.len = ETH_NUM_STATS;
3910 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3911 return -EFAULT;
3912 if (mm_copy_to_user(useraddr + sizeof(egstr),
3913 bcm5700_stats_str_arr,
3914 sizeof(bcm5700_stats_str_arr)))
3915 return -EFAULT;
3916 return 0;
3917 #endif
3918 #ifdef ETHTOOL_TEST
3919 case ETH_SS_TEST:
3920 egstr.len = ETH_NUM_TESTS;
3921 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3922 return -EFAULT;
3923 if (mm_copy_to_user(useraddr + sizeof(egstr),
3924 bcm5700_tests_str_arr,
3925 sizeof(bcm5700_tests_str_arr)))
3926 return -EFAULT;
3927 return 0;
3928 #endif
3929 default:
3930 return -EOPNOTSUPP;
3933 #endif
3934 #ifdef ETHTOOL_GSTATS
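/*
 * ETHTOOL_GSTATS: copy the NIC statistics out to user space.  Most counters
 * are read (and byte-swapped) straight from the DMA'ed statistics block;
 * entries with no block offset (RX_CRC_IDX, RX_MAC_ERR_IDX) are synthesized
 * by the bcm5700_crc_count()/bcm5700_rx_err_count() helpers instead.
 */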
3935 case ETHTOOL_GSTATS: {
3936 struct ethtool_stats estats = { ETHTOOL_GSTATS };
3937 uint64_t stats[ETH_NUM_STATS];
3938 int i;
3939 uint64_t *pStats =
3940 (uint64_t *) pDevice->pStatsBlkVirt;
3942 estats.n_stats = ETH_NUM_STATS;
3943 if (pStats == 0) {
3944 memset(stats, 0, sizeof(stats));
3946 else {
3948 for (i = 0; i < ETH_NUM_STATS; i++) {
3949 if (bcm5700_stats_offset_arr[i] != 0) {
3950 stats[i] = SWAP_DWORD_64(*(pStats +
3951 bcm5700_stats_offset_arr[i]));
3953 else if (i == RX_CRC_IDX) {
3954 stats[i] =
3955 bcm5700_crc_count(pUmDevice);
3957 else if (i == RX_MAC_ERR_IDX) {
3958 stats[i] =
3959 bcm5700_rx_err_count(pUmDevice);
3963 if (mm_copy_to_user(useraddr, &estats, sizeof(estats))) {
3964 return -EFAULT;
3966 if (mm_copy_to_user(useraddr + sizeof(estats), &stats,
3967 sizeof(stats))) {
3968 return -EFAULT;
3970 return 0;
3972 #endif
3973 #ifdef ETHTOOL_TEST
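/*
 * ETHTOOL_TEST: run the built-in diagnostics.  When ETH_TEST_FL_OFFLINE is
 * requested the chip is quiesced and the register, memory and MAC-loopback
 * tests are run in addition to the NVRAM, interrupt and link checks.  The
 * device is temporarily forced to D0 if it was in a lower power state.
 */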
3974 case ETHTOOL_TEST: {
3975 struct ethtool_test etest;
3976 uint64_t tests[ETH_NUM_TESTS] = {0, 0, 0, 0, 0, 0};
3977 LM_POWER_STATE old_power_level;
3979 printk( KERN_ALERT "Performing ethtool test.\n"
3980 "This test will take a few seconds to complete.\n" );
3982 if (mm_copy_from_user(&etest, useraddr, sizeof(etest)))
3983 return -EFAULT;
3985 etest.len = ETH_NUM_TESTS;
3986 old_power_level = pDevice->PowerLevel;
3987 if (old_power_level != LM_POWER_STATE_D0) {
3988 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
3989 LM_SwitchClocks(pDevice);
3991 MM_Sleep(pDevice, 1000);
3992 if (etest.flags & ETH_TEST_FL_OFFLINE) {
3993 b57_suspend_chip(pUmDevice);
3994 MM_Sleep(pDevice, 1000);
3995 LM_HaltCpu(pDevice,T3_RX_CPU_ID | T3_TX_CPU_ID);
3996 MM_Sleep(pDevice, 1000);
3997 if (b57_test_registers(pUmDevice) == 0) {
3998 etest.flags |= ETH_TEST_FL_FAILED;
3999 tests[0] = 1;
4001 MM_Sleep(pDevice, 1000);
4002 if (b57_test_memory(pUmDevice) == 0) {
4003 etest.flags |= ETH_TEST_FL_FAILED;
4004 tests[1] = 1;
4006 MM_Sleep(pDevice, 1000);
4007 if (b57_test_loopback(pUmDevice, NICE_LOOPBACK_TESTTYPE_MAC, 0) == 0) {
4008 etest.flags |= ETH_TEST_FL_FAILED;
4009 tests[2] = 1;
4011 MM_Sleep(pDevice, 1000);
4012 b57_resume_chip(pUmDevice);
4013 /* wait for link to come up for the link test */
4014 MM_Sleep(pDevice, 4000);
4015 if ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
4016 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
4018 /* wait a little longer for linkup on copper */
4019 MM_Sleep(pDevice, 3000);
4022 if (b57_test_nvram(pUmDevice) == 0) {
4023 etest.flags |= ETH_TEST_FL_FAILED;
4024 tests[3] = 1;
4026 MM_Sleep(pDevice, 1000);
4027 if (b57_test_intr(pUmDevice) == 0) {
4028 etest.flags |= ETH_TEST_FL_FAILED;
4029 tests[4] = 1;
4031 MM_Sleep(pDevice, 1000);
4032 if (b57_test_link(pUmDevice) == 0) {
4033 etest.flags |= ETH_TEST_FL_FAILED;
4034 tests[5] = 1;
4036 MM_Sleep(pDevice, 1000);
4037 if (old_power_level != LM_POWER_STATE_D0) {
4038 LM_SetPowerState(pDevice, old_power_level);
4040 if (mm_copy_to_user(useraddr, &etest, sizeof(etest))) {
4041 return -EFAULT;
4043 if (mm_copy_to_user(useraddr + sizeof(etest), tests,
4044 sizeof(tests))) {
4045 return -EFAULT;
4047 return 0;
4049 #endif
4050 #ifdef ETHTOOL_GTSO
4051 case ETHTOOL_GTSO: {
4052 struct ethtool_value edata = { ETHTOOL_GTSO };
4054 #ifdef BCM_TSO
4055 edata.data =
4056 (dev->features & NETIF_F_TSO) != 0;
4057 #else
4058 edata.data = 0;
4059 #endif
4060 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
4061 return -EFAULT;
4062 return 0;
4064 #endif
4065 #ifdef ETHTOOL_STSO
4066 case ETHTOOL_STSO: {
4067 #ifdef BCM_TSO
4068 struct ethtool_value edata;
4070 if (!capable(CAP_NET_ADMIN))
4071 return -EPERM;
4073 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
4074 return -EFAULT;
4076 if (!(pDevice->TaskToOffload &
4077 LM_TASK_OFFLOAD_TCP_SEGMENTATION)) {
4078 return -EINVAL;
4081 dev->features &= ~NETIF_F_TSO;
4083 if (edata.data) {
4084 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4085 (dev->mtu > 1500)) {
4086 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
4087 return -EINVAL;
4088 } else {
4089 dev->features |= NETIF_F_TSO;
4092 return 0;
4093 #else
4094 return -EINVAL;
4095 #endif
4097 #endif
4100 return -EOPNOTSUPP;
4102 #endif /* #ifdef SIOCETHTOOL */
4104 #if (LINUX_VERSION_CODE >= 0x20400) && (LINUX_VERSION_CODE < 0x20600)
4105 #include <linux/iobuf.h>
4106 #endif
4108 #ifdef BCMDBG
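/* Debug-only dump of driver/PHY state and counters into a bcmstrbuf;
 * reachable through the SIOCGETCDUMP ioctl further down in this file. */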
4109 STATIC void
4110 b57_dump(struct net_device *dev, struct bcmstrbuf *b)
4112 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4113 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4114 struct net_device_stats *st;
4115 char macaddr[32];
4117 bcm_bprintf(b, "b57%d: %s %s version %s\n", pUmDevice->index,
4118 __DATE__, __TIME__, EPI_VERSION_STR);
4120 bcm_bprintf(b, "dev 0x%x pdev 0x%x unit %d msglevel %d flags 0x%x boardflags 0x%x\n",
4121 (uint)dev, (uint)pDevice, pUmDevice->index, b57_msg_level,
4122 pDevice->Flags, pUmDevice->boardflags);
4123 bcm_bprintf(b, "speed/duplex %d/%s promisc 0x%x loopbk %d advertise 0x%x\n",
4124 pDevice->LineSpeed,
4125 (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL) ? "full" : "half",
4126 pDevice->ReceiveMask & LM_PROMISCUOUS_MODE,
4127 pDevice->LoopBackMode,
4128 pDevice->advertising);
4129 bcm_bprintf(b, "allmulti %d qos %d phyaddr %d linkstat %d\n",
4130 pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST, pUmDevice->qos,
4131 pDevice->PhyAddr, pDevice->LinkStatus);
4132 bcm_bprintf(b, "vendor 0x%x device 0x%x rev %d subsys vendor 0x%x subsys id 0x%x\n",
4133 pDevice->PciVendorId, pDevice->PciDeviceId, pDevice->PciRevId,
4134 pDevice->SubsystemVendorId, pDevice->SubsystemId);
4135 bcm_bprintf(b, "MAC addr %s\n", bcm_ether_ntoa((struct ether_addr *)&pDevice->NodeAddress,
4136 macaddr));
4138 if ((st = bcm5700_get_stats(dev)) != NULL) {
4139 bcm_bprintf(b, "txframe %d txbyte %d txerror %d rxframe %d rxbyte %d rxerror %d\n",
4140 st->tx_packets, st->tx_bytes, st->tx_errors,
4141 st->rx_packets, st->rx_bytes, st->rx_errors);
4142 bcm_bprintf(b, "multicast %d collisions %d tx_abort %d tx_carrier %d\n",
4143 st->multicast, st->collisions, st->tx_aborted_errors,
4144 st->tx_carrier_errors);
4145 bcm_bprintf(b, "rx_length %d rx_over %d rx_frame %d rx_crc %d\n",
4146 st->rx_length_errors, st->rx_over_errors, st->rx_frame_errors,
4147 st->rx_crc_errors);
4149 if (pDevice->Flags & ROBO_SWITCH_FLAG)
4150 robo_dump_regs(pUmDevice->robo, b);
4152 bcm_bprintf(b, "\n");
4154 #endif /* BCMDBG */
4156 /* Provide ioctl() calls to examine the MII xcvr state. */
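/*
 * Rough userspace sketch of the classic MII read path handled below (sock_fd
 * and "eth0" are placeholders, not part of the driver):
 *
 *     struct ifreq ifr;
 *     u16 *mii = (u16 *)&ifr.ifr_data;   // same layout the driver casts to
 *     memset(&ifr, 0, sizeof(ifr));
 *     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *     mii[0] = 0xffff;                   // 0xffff = use the current PHY address
 *     mii[1] = 1;                        // MII register number to read
 *     if (ioctl(sock_fd, SIOCGMIIREG, &ifr) == 0)
 *             reg_val = mii[3];          // register contents returned here
 */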
4157 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4159 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4160 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4161 u16 *data = (u16 *)&rq->ifr_data;
4162 u32 value;
4163 unsigned long flags;
4165 switch(cmd) {
4166 #ifdef SIOCGMIIPHY
4167 case SIOCGMIIPHY: /* Get the address of the PHY in use. */
4169 data[0] = pDevice->PhyAddr;
4170 return 0;
4171 #endif
4173 #ifdef SIOCGMIIREG
4174 case SIOCGMIIREG: /* Read the specified MII register. */
4176 uint32 savephyaddr = 0;
4178 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4179 return -EOPNOTSUPP;
4181 /* ifup only waits for 5 seconds for link up */
4182 /* NIC may take more than 5 seconds to establish link */
4183 if ((pUmDevice->delayed_link_ind > 0) &&
4184 delay_link[pUmDevice->index]) {
4185 return -EAGAIN;
4188 BCM5700_PHY_LOCK(pUmDevice, flags);
4189 if (data[0] != 0xffff) {
4190 savephyaddr = pDevice->PhyAddr;
4191 pDevice->PhyAddr = data[0];
4193 LM_ReadPhy(pDevice, data[1] & 0x1f, (LM_UINT32 *)&value);
4194 if (data[0] != 0xffff)
4195 pDevice->PhyAddr = savephyaddr;
4196 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4197 data[3] = value & 0xffff;
4198 return 0;
4200 #endif
4202 case SIOCGETCPHYRD: /* Read the specified MII register. */
4203 case SIOCGETCPHYRD2:
4205 int args[2];
4206 uint32 savephyaddr = 0;
4208 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4209 return -EOPNOTSUPP;
4211 /* ifup only waits for 5 seconds for link up */
4212 /* NIC may take more than 5 seconds to establish link */
4213 if ((pUmDevice->delayed_link_ind > 0) &&
4214 delay_link[pUmDevice->index]) {
4215 return -EAGAIN;
4218 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4219 return -EFAULT;
4221 BCM5700_PHY_LOCK(pUmDevice, flags);
4222 if (cmd == SIOCGETCPHYRD2) {
4223 savephyaddr = pDevice->PhyAddr;
4224 pDevice->PhyAddr = (args[0] >> 16) & 0xffff;
4226 LM_ReadPhy(pDevice, args[0] & 0xffff, (LM_UINT32 *)&value);
4227 if (cmd == SIOCGETCPHYRD2)
4228 pDevice->PhyAddr = savephyaddr;
4229 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4231 args[1] = value & 0xffff;
4232 if (mm_copy_to_user(rq->ifr_data, &args, sizeof(args)))
4233 return -EFAULT;
4235 return 0;
4238 #ifdef SIOCSMIIREG
4239 case SIOCSMIIREG: /* Write the specified MII register */
4241 uint32 savephyaddr = 0;
4243 if (!capable(CAP_NET_ADMIN))
4244 return -EPERM;
4246 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4247 return -EOPNOTSUPP;
4249 BCM5700_PHY_LOCK(pUmDevice, flags);
4250 if (data[0] != 0xffff) {
4251 savephyaddr = pDevice->PhyAddr;
4252 pDevice->PhyAddr = data[0];
4254 LM_WritePhy(pDevice, data[1] & 0x1f, data[2]);
4255 if (data[0] != 0xffff)
4256 pDevice->PhyAddr = savephyaddr;
4257 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4258 data[3] = 0;
4259 return 0;
4261 #endif
4263 case SIOCSETCPHYWR: /* Write the specified MII register */
4264 case SIOCSETCPHYWR2:
4266 int args[2];
4267 uint32 savephyaddr = 0;
4269 if (!capable(CAP_NET_ADMIN))
4270 return -EPERM;
4272 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4273 return -EOPNOTSUPP;
4275 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4276 return -EFAULT;
4278 BCM5700_PHY_LOCK(pUmDevice, flags);
4279 if (cmd == SIOCSETCPHYWR2) {
4280 savephyaddr = pDevice->PhyAddr;
4281 pDevice->PhyAddr = (args[0] >> 16) & 0xffff;
4283 LM_WritePhy(pDevice, args[0] & 0xffff, args[1]);
4284 if (cmd == SIOCSETCPHYWR2)
4285 pDevice->PhyAddr = savephyaddr;
4286 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4287 return 0;
4290 case SIOCGETCROBORD: /* Read the specified ROBO register. */
4292 int args[2];
4293 robo_info_t *robo = (robo_info_t *)pUmDevice->robo;
4295 if (((pDevice->Flags & ROBO_SWITCH_FLAG) == 0) || (robo == NULL))
4296 return -ENXIO;
4298 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4299 return -EFAULT;
4301 if (robo->ops->read_reg(robo, (args[0] >> 16) & 0xffff, args[0] & 0xffff, &value, 2))
4302 return -EIO;
4304 args[1] = value & 0xffff;
4305 if (mm_copy_to_user(rq->ifr_data, &args, sizeof(args)))
4306 return -EFAULT;
4308 return 0;
4311 case SIOCSETCROBOWR: /* Write the specified ROBO register. */
4313 int args[2];
4314 robo_info_t *robo = (robo_info_t *)pUmDevice->robo;
4316 if (!capable(CAP_NET_ADMIN))
4317 return -EPERM;
4319 if (((pDevice->Flags & ROBO_SWITCH_FLAG) == 0) || (robo == NULL))
4320 return -ENXIO;
4322 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4323 return -EFAULT;
4325 if (robo->ops->write_reg(robo, (args[0] >> 16) & 0xffff, args[0] & 0xffff,
4326 &args[1], 2))
4327 return -EIO;
4329 return 0;
4332 case SIOCSETGETVAR:
4334 int ret = 0;
4335 void *buffer = NULL;
4336 bool get = FALSE, set = TRUE;
4337 et_var_t var;
4339 if (set && mm_copy_from_user(&var, rq->ifr_data, sizeof(var)))
4340 return -EFAULT;
4342 /* prepare buffer if any */
4343 if (var.buf) {
4344 if (!var.set)
4345 get = TRUE;
4347 if (!(buffer = (void *) MALLOC(SI_OSH, var.len))) {
4348 B57_ERR(("%s: out of memory, malloced %d bytes\n", __FUNCTION__,
4349 MALLOCED(SI_OSH)));
4350 return -ENOMEM;
4353 if (mm_copy_from_user(buffer, var.buf, var.len)) {
4354 MFREE(SI_OSH, buffer, var.len);
4355 return -EFAULT;
4359 /* do var.cmd */
4360 switch (var.cmd) {
4361 case IOV_ET_ROBO_DEVID:
4363 uint *vecarg = (uint *)buffer;
4364 robo_info_t *robo = (robo_info_t *)pUmDevice->robo;
4366 if (((pDevice->Flags & ROBO_SWITCH_FLAG) == 0) ||
4367 (robo == NULL)) {
4368 ret = -ENXIO;
4369 break;
4372 /* get robo device id */
4373 *vecarg = robo->devid;
4375 if (mm_copy_to_user(var.buf, buffer, var.len)) {
4376 ret = -EFAULT;
4377 break;
4380 break;
4383 default:
4384 ret = -EOPNOTSUPP;
4385 break;
4388 if (buffer)
4389 MFREE(SI_OSH, buffer, var.len);
4391 return ret;
4394 case SIOCSETCSETMSGLEVEL:
4395 if (mm_copy_from_user(&value, rq->ifr_data, sizeof(value)))
4396 return -EFAULT;
4398 b57_msg_level = value;
4399 printf("%s: msglevel set to 0x%x\n", __FUNCTION__, b57_msg_level);
4400 return 0;
4402 case SIOCSETCQOS: /* Set the qos flag */
4403 if (mm_copy_from_user(&value, rq->ifr_data, sizeof(value)))
4404 return -EFAULT;
4406 pUmDevice->qos = value;
4407 B57_INFO(("Qos flag now: %d\n", pUmDevice->qos));
4408 return 0;
4410 case SIOCGETCDUMP:
4412 char *buf;
4414 if ((buf = MALLOC(SI_OSH, 4096)) == NULL) {
4415 B57_ERR(("%s: out of memory, malloced %d bytes\n", __FUNCTION__,
4416 MALLOCED(SI_OSH)));
4417 return (-ENOMEM);
4420 if (b57_msg_level & 0x10000)
4421 bcmdumplog(buf, 4096);
4422 #ifdef BCMDBG
4423 else {
4424 struct bcmstrbuf b;
4425 bcm_binit(&b, buf, 4096);
4426 b57_dump(dev, &b);
4428 #endif /* BCMDBG */
4429 value = mm_copy_to_user(rq->ifr_data, buf, 4096);
4431 MFREE(SI_OSH, buf, 4096);
4433 if (value)
4434 return -EFAULT;
4435 else
4436 return 0;
4439 #ifdef NICE_SUPPORT
4440 case SIOCNICE:
4442 struct nice_req* nrq;
4444 if (!capable(CAP_NET_ADMIN))
4445 return -EPERM;
4447 nrq = (struct nice_req*)&rq->ifr_ifru;
4448 if( nrq->cmd == NICE_CMD_QUERY_SUPPORT ) {
4449 nrq->nrq_magic = NICE_DEVICE_MAGIC;
4450 nrq->nrq_support_rx = 1;
4451 nrq->nrq_support_vlan = 1;
4452 nrq->nrq_support_get_speed = 1;
4453 #ifdef BCM_NAPI_RXPOLL
4454 nrq->nrq_support_rx_napi = 1;
4455 #endif
4456 return 0;
4458 #ifdef BCM_NAPI_RXPOLL
4459 else if( nrq->cmd == NICE_CMD_SET_RX_NAPI )
4460 #else
4461 else if( nrq->cmd == NICE_CMD_SET_RX )
4462 #endif
4464 pUmDevice->nice_rx = nrq->nrq_rx;
4465 pUmDevice->nice_ctx = nrq->nrq_ctx;
4466 bcm5700_set_vlan_mode(pUmDevice);
4467 return 0;
4469 #ifdef BCM_NAPI_RXPOLL
4470 else if( nrq->cmd == NICE_CMD_GET_RX_NAPI )
4471 #else
4472 else if( nrq->cmd == NICE_CMD_GET_RX )
4473 #endif
4475 nrq->nrq_rx = pUmDevice->nice_rx;
4476 nrq->nrq_ctx = pUmDevice->nice_ctx;
4477 return 0;
4479 else if( nrq->cmd == NICE_CMD_GET_SPEED ) {
4480 if(pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE){
4481 nrq->nrq_speed = 0;
4483 else if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS) {
4484 nrq->nrq_speed = SPEED_1000;
4485 } else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS) {
4486 nrq->nrq_speed = SPEED_100;
4487 } else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS) {
4488 nrq->nrq_speed = SPEED_10;
4489 } else {
4490 nrq->nrq_speed = 0;
4492 return 0;
4494 else {
4495 if (!pUmDevice->opened)
4496 return -EINVAL;
4498 switch (nrq->cmd) {
4499 case NICE_CMD_BLINK_LED:
4500 if (LM_BlinkLED(pDevice, nrq->nrq_blink_time) ==
4501 LM_STATUS_SUCCESS) {
4502 return 0;
4504 return -EINTR;
4506 case NICE_CMD_DIAG_SUSPEND:
4507 b57_suspend_chip(pUmDevice);
4508 return 0;
4510 case NICE_CMD_DIAG_RESUME:
4511 b57_resume_chip(pUmDevice);
4512 return 0;
4514 case NICE_CMD_REG_READ:
4515 if (nrq->nrq_offset >= 0x10000) {
4516 nrq->nrq_data = LM_RegRdInd(pDevice,
4517 nrq->nrq_offset);
4519 else {
4520 nrq->nrq_data = LM_RegRd(pDevice,
4521 nrq->nrq_offset);
4523 return 0;
4525 case NICE_CMD_REG_WRITE:
4526 if (nrq->nrq_offset >= 0x10000) {
4527 LM_RegWrInd(pDevice, nrq->nrq_offset,
4528 nrq->nrq_data);
4530 else {
4531 LM_RegWr(pDevice, nrq->nrq_offset,
4532 nrq->nrq_data, FALSE);
4534 return 0;
4536 case NICE_CMD_REG_READ_DIRECT:
4537 case NICE_CMD_REG_WRITE_DIRECT:
4538 if ((nrq->nrq_offset >= 0x10000) ||
4539 (pDevice->Flags & UNDI_FIX_FLAG)) {
4540 return -EINVAL;
4543 if (nrq->cmd == NICE_CMD_REG_READ_DIRECT) {
4544 nrq->nrq_data = REG_RD_OFFSET(pDevice,
4545 nrq->nrq_offset);
4547 else {
4548 REG_WR_OFFSET(pDevice, nrq->nrq_offset,
4549 nrq->nrq_data);
4551 return 0;
4553 case NICE_CMD_MEM_READ:
4554 nrq->nrq_data = LM_MemRdInd(pDevice,
4555 nrq->nrq_offset);
4556 return 0;
4558 case NICE_CMD_MEM_WRITE:
4559 LM_MemWrInd(pDevice, nrq->nrq_offset,
4560 nrq->nrq_data);
4561 return 0;
4563 case NICE_CMD_CFG_READ32:
4564 pci_read_config_dword(pUmDevice->pdev,
4565 nrq->nrq_offset, (u32 *)&nrq->nrq_data);
4566 return 0;
4568 case NICE_CMD_CFG_READ16:
4569 pci_read_config_word(pUmDevice->pdev,
4570 nrq->nrq_offset, (u16 *)&nrq->nrq_data);
4571 return 0;
4573 case NICE_CMD_CFG_READ8:
4574 pci_read_config_byte(pUmDevice->pdev,
4575 nrq->nrq_offset, (u8 *)&nrq->nrq_data);
4576 return 0;
4578 case NICE_CMD_CFG_WRITE32:
4579 pci_write_config_dword(pUmDevice->pdev,
4580 nrq->nrq_offset, (u32)nrq->nrq_data);
4581 return 0;
4583 case NICE_CMD_CFG_WRITE16:
4584 pci_write_config_word(pUmDevice->pdev,
4585 nrq->nrq_offset, (u16)nrq->nrq_data);
4586 return 0;
4588 case NICE_CMD_CFG_WRITE8:
4589 pci_write_config_byte(pUmDevice->pdev,
4590 nrq->nrq_offset, (u8)nrq->nrq_data);
4591 return 0;
4593 case NICE_CMD_RESET:
4594 bcm5700_reset(dev);
4595 return 0;
4597 case NICE_CMD_ENABLE_MAC_LOOPBACK:
4598 if (pDevice->LoopBackMode != 0) {
4599 return -EINVAL;
4602 BCM5700_PHY_LOCK(pUmDevice, flags);
4603 LM_EnableMacLoopBack(pDevice);
4604 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4605 return 0;
4607 case NICE_CMD_DISABLE_MAC_LOOPBACK:
4608 if (pDevice->LoopBackMode !=
4609 LM_MAC_LOOP_BACK_MODE) {
4610 return -EINVAL;
4613 BCM5700_PHY_LOCK(pUmDevice, flags);
4614 LM_DisableMacLoopBack(pDevice);
4615 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4616 return 0;
4618 case NICE_CMD_ENABLE_PHY_LOOPBACK:
4619 if (pDevice->LoopBackMode != 0) {
4620 return -EINVAL;
4623 BCM5700_PHY_LOCK(pUmDevice, flags);
4624 LM_EnablePhyLoopBack(pDevice);
4625 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4626 return 0;
4628 case NICE_CMD_DISABLE_PHY_LOOPBACK:
4629 if (pDevice->LoopBackMode !=
4630 LM_PHY_LOOP_BACK_MODE) {
4631 return -EINVAL;
4634 BCM5700_PHY_LOCK(pUmDevice, flags);
4635 LM_DisablePhyLoopBack(pDevice);
4636 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4637 return 0;
4639 case NICE_CMD_ENABLE_EXT_LOOPBACK:
4640 if (pDevice->LoopBackMode != 0) {
4641 return -EINVAL;
4644 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
4645 if (nrq->nrq_speed != 1000)
4646 return -EINVAL;
4648 else {
4649 if ((nrq->nrq_speed != 1000) &&
4650 (nrq->nrq_speed != 100) &&
4651 (nrq->nrq_speed != 10)) {
4652 return -EINVAL;
4655 BCM5700_PHY_LOCK(pUmDevice, flags);
4656 LM_EnableExtLoopBack(pDevice, nrq->nrq_speed);
4657 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4658 return 0;
4660 case NICE_CMD_DISABLE_EXT_LOOPBACK:
4661 if (pDevice->LoopBackMode !=
4662 LM_EXT_LOOP_BACK_MODE) {
4663 return -EINVAL;
4666 BCM5700_PHY_LOCK(pUmDevice, flags);
4667 LM_DisableExtLoopBack(pDevice);
4668 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4669 return 0;
4671 case NICE_CMD_INTERRUPT_TEST:
4672 nrq->nrq_intr_test_result =
4673 b57_test_intr(pUmDevice);
4674 return 0;
4676 case NICE_CMD_LOOPBACK_TEST:
4677 value = 0;
4678 switch (nrq->nrq_looptype) {
4679 case NICE_LOOPBACK_TESTTYPE_EXT:
4680 if ((nrq->nrq_loopspeed & ~NICE_LOOPBACK_TEST_SPEEDMASK) ||
4681 !(nrq->nrq_loopspeed & NICE_LOOPBACK_TEST_SPEEDMASK))
4682 break;
4683 switch (nrq->nrq_loopspeed) {
4684 case NICE_LOOPBACK_TEST_10MBPS:
4685 value = LM_LINE_SPEED_10MBPS;
4686 break;
4687 case NICE_LOOPBACK_TEST_100MBPS:
4688 value = LM_LINE_SPEED_100MBPS;
4689 break;
4690 case NICE_LOOPBACK_TEST_1000MBPS:
4691 value = LM_LINE_SPEED_1000MBPS;
4692 break;
4694 /* Fall through */
4696 case NICE_LOOPBACK_TESTTYPE_MAC:
4697 case NICE_LOOPBACK_TESTTYPE_PHY:
4698 b57_suspend_chip(pUmDevice);
4699 value = b57_test_loopback(pUmDevice,
4700 nrq->nrq_looptype, value);
4701 b57_resume_chip(pUmDevice);
4702 break;
4705 if (value == 1) {
4706 /* A '1' indicates success */
4707 value = 0;
4708 } else {
4709 value = -EINTR;
4712 return value;
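/* Diagnostics helper: carve out a DMA-consistent buffer, reserve its pages
 * and report its physical address back to the application; the matching
 * NICE_CMD_KFREE_PHYS below releases it again. */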
4714 case NICE_CMD_KMALLOC_PHYS: {
4715 #if (LINUX_VERSION_CODE >= 0x020400)
4716 dma_addr_t mapping;
4717 __u64 cpu_pa;
4718 void *ptr;
4719 int i;
4720 struct page *pg, *last_pg;
4722 for (i = 0; i < MAX_MEM2; i++) {
4723 if (pUmDevice->mem_size_list2[i] == 0)
4724 break;
4726 if (i >= MAX_MEM2)
4727 return -EFAULT;
4728 ptr = pci_alloc_consistent(pUmDevice->pdev,
4729 nrq->nrq_size, &mapping);
4730 if (!ptr) {
4731 return -EFAULT;
4733 pUmDevice->mem_size_list2[i] = nrq->nrq_size;
4734 pUmDevice->mem_list2[i] = ptr;
4735 pUmDevice->dma_list2[i] = mapping;
4737 /* put pci mapping at the beginning of buffer */
4738 *((__u64 *) ptr) = (__u64) mapping;
4740 /* Probably won't work on some architectures */
4741 /* get CPU mapping */
4742 cpu_pa = (__u64) virt_to_phys(ptr);
4743 pUmDevice->cpu_pa_list2[i] = cpu_pa;
4744 nrq->nrq_phys_addr_lo = (__u32) cpu_pa;
4745 nrq->nrq_phys_addr_hi = (__u32) (cpu_pa >> 32);
4747 pg = virt_to_page(ptr);
4748 last_pg = virt_to_page(ptr + nrq->nrq_size - 1);
4749 for (; ; pg++) {
4750 #if (LINUX_VERSION_CODE > 0x020500)
4751 SetPageReserved(pg);
4752 #else
4753 mem_map_reserve(pg);
4754 #endif
4755 if (pg == last_pg)
4756 break;
4758 return 0;
4759 #else
4760 return -EOPNOTSUPP;
4761 #endif
4764 case NICE_CMD_KFREE_PHYS: {
4765 int i;
4766 __u64 cpu_pa;
4768 cpu_pa = (__u64) nrq->nrq_phys_addr_lo +
4769 ((__u64) nrq->nrq_phys_addr_hi << 32);
4770 for (i = 0; i < MAX_MEM2; i++) {
4771 if (pUmDevice->cpu_pa_list2[i] ==
4772 cpu_pa)
4774 break;
4777 if (i >= MAX_MEM2)
4778 return -EFAULT;
4780 bcm5700_freemem2(pUmDevice, i);
4781 return 0;
4784 case NICE_CMD_SET_WRITE_PROTECT:
4785 if (nrq->nrq_write_protect)
4786 pDevice->Flags |= EEPROM_WP_FLAG;
4787 else
4788 pDevice->Flags &= ~EEPROM_WP_FLAG;
4789 return 0;
4790 case NICE_CMD_GET_STATS_BLOCK: {
4791 PT3_STATS_BLOCK pStats =
4792 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4793 if (mm_copy_to_user(nrq->nrq_stats_useraddr,
4794 pStats, nrq->nrq_stats_size)) {
4795 return -EFAULT;
4797 return 0;
4799 case NICE_CMD_CLR_STATS_BLOCK: {
4800 int j;
4801 PT3_STATS_BLOCK pStats =
4802 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4804 memset(pStats, 0, sizeof(T3_STATS_BLOCK));
4805 if (T3_ASIC_REV(pDevice->ChipRevId) ==
4806 T3_ASIC_REV_5705) {
4807 return 0;
4809 for(j = 0x0300; j < 0x0b00; j = j + 4) {
4810 MEM_WR_OFFSET(pDevice, j, 0);
4813 return 0;
4818 return -EOPNOTSUPP;
4820 #endif /* NICE_SUPPORT */
4821 #ifdef SIOCETHTOOL
4822 case SIOCETHTOOL:
4823 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
4824 #endif
4825 default:
4826 return -EOPNOTSUPP;
4828 return -EOPNOTSUPP;
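/* Rebuild the multicast filter and the ALLMULTI/PROMISC receive-mask bits
 * from dev->flags and dev->mc_list.  Unlike bcm5700_set_rx_mode() below,
 * this variant does not take the PHY lock itself; it is called with the
 * lock already held (see bcm5700_change_mtu()). */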
4831 STATIC void bcm5700_do_rx_mode(struct net_device *dev)
4833 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4834 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4835 int i;
4836 struct dev_mc_list *mclist;
4838 LM_MulticastClear(pDevice);
4839 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4840 i++, mclist = mclist->next) {
4841 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4843 if (dev->flags & IFF_ALLMULTI) {
4844 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4845 LM_SetReceiveMask(pDevice,
4846 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4849 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4850 LM_SetReceiveMask(pDevice,
4851 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4853 if (dev->flags & IFF_PROMISC) {
4854 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4855 LM_SetReceiveMask(pDevice,
4856 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4859 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4860 LM_SetReceiveMask(pDevice,
4861 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
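/* Same receive-filter update as bcm5700_do_rx_mode(), but wrapped in the
 * PHY lock; presumably registered as the set_multicast_list handler so the
 * network stack can call it directly. */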
4866 STATIC void bcm5700_set_rx_mode(struct net_device *dev)
4868 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4869 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4870 int i;
4871 struct dev_mc_list *mclist;
4872 unsigned long flags;
4874 BCM5700_PHY_LOCK(pUmDevice, flags);
4876 LM_MulticastClear(pDevice);
4877 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4878 i++, mclist = mclist->next) {
4879 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4881 if (dev->flags & IFF_ALLMULTI) {
4882 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4883 LM_SetReceiveMask(pDevice,
4884 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4887 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4888 LM_SetReceiveMask(pDevice,
4889 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4891 if (dev->flags & IFF_PROMISC) {
4892 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4893 LM_SetReceiveMask(pDevice,
4894 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4897 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4898 LM_SetReceiveMask(pDevice,
4899 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4902 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4906 * Set the hardware MAC address.
4908 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p)
4910 struct sockaddr *addr=p;
4911 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) dev->priv;
4912 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4914 if(is_valid_ether_addr(addr->sa_data)){
4916 memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
4917 if (pUmDevice->opened)
4918 LM_SetMacAddress(pDevice, dev->dev_addr);
4919 return 0;
4921 return -EINVAL;
4924 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
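/* MTU change handler (jumbo-capable builds only): validates the new size,
 * and if the interface is up tears the rings down, resizes the standard and
 * jumbo receive descriptor counts and buffer sizes, then reinitializes the
 * adapter. */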
4925 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu)
4927 int pkt_size = new_mtu + ETHERNET_PACKET_HEADER_SIZE;
4928 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4929 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4930 unsigned long flags;
4931 int reinit = 0;
4933 if ((pkt_size < MIN_ETHERNET_PACKET_SIZE_NO_CRC) ||
4934 (pkt_size > MAX_ETHERNET_JUMBO_PACKET_SIZE_NO_CRC)) {
4936 return -EINVAL;
4938 if ( !(pDevice->Flags & JUMBO_CAPABLE_FLAG) &&
4939 (pkt_size > MAX_ETHERNET_PACKET_SIZE_NO_CRC) ) {
4941 return -EINVAL;
4943 if (pUmDevice->suspended)
4944 return -EAGAIN;
4946 if (pUmDevice->opened && (new_mtu != dev->mtu) &&
4947 (pDevice->Flags & JUMBO_CAPABLE_FLAG)) {
4948 reinit = 1;
4951 BCM5700_PHY_LOCK(pUmDevice, flags);
4952 if (reinit) {
4953 netif_stop_queue(dev);
4954 bcm5700_shutdown(pUmDevice);
4955 bcm5700_freemem(dev);
4958 dev->mtu = new_mtu;
4959 if (pkt_size < MAX_ETHERNET_PACKET_SIZE_NO_CRC) {
4960 pDevice->RxMtu = pDevice->TxMtu =
4961 MAX_ETHERNET_PACKET_SIZE_NO_CRC;
4963 else {
4964 pDevice->RxMtu = pDevice->TxMtu = pkt_size;
4967 if (dev->mtu <= 1514) {
4968 pDevice->RxJumboDescCnt = 0;
4970 else if (pDevice->Flags & JUMBO_CAPABLE_FLAG){
4971 pDevice->RxJumboDescCnt =
4972 rx_jumbo_desc_cnt[pUmDevice->index];
4974 pDevice->RxPacketDescCnt = pDevice->RxJumboDescCnt +
4975 pDevice->RxStdDescCnt;
4977 pDevice->RxJumboBufferSize = (pDevice->RxMtu + 8 /* CRC + VLAN */ +
4978 COMMON_CACHE_LINE_SIZE-1) & ~COMMON_CACHE_LINE_MASK;
4980 #ifdef BCM_TSO
4981 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4982 (dev->mtu > 1514) ) {
4983 if (dev->features & NETIF_F_TSO) {
4984 dev->features &= ~NETIF_F_TSO;
4985 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
4988 #endif
4990 if (reinit) {
4991 LM_InitializeAdapter(pDevice);
4992 bcm5700_do_rx_mode(dev);
4993 bcm5700_set_vlan_mode(pUmDevice);
4994 bcm5700_init_counters(pUmDevice);
4995 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
4996 LM_SetMacAddress(pDevice, dev->dev_addr);
4998 netif_start_queue(dev);
4999 bcm5700_intr_on(pUmDevice);
5001 BCM5700_PHY_UNLOCK(pUmDevice, flags);
5003 return 0;
5005 #endif
5008 #if (LINUX_VERSION_CODE < 0x020300)
5010 bcm5700_probe(struct net_device *dev)
5012 int cards_found = 0;
5013 struct pci_dev *pdev = NULL;
5014 struct pci_device_id *pci_tbl;
5015 u16 ssvid, ssid;
5017 if ( ! pci_present())
5018 return -ENODEV;
5020 pci_tbl = bcm5700_pci_tbl;
5021 while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
5022 int idx;
5024 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &ssvid);
5025 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &ssid);
5026 for (idx = 0; pci_tbl[idx].vendor; idx++) {
5027 if ((pci_tbl[idx].vendor == PCI_ANY_ID ||
5028 pci_tbl[idx].vendor == pdev->vendor) &&
5029 (pci_tbl[idx].device == PCI_ANY_ID ||
5030 pci_tbl[idx].device == pdev->device) &&
5031 (pci_tbl[idx].subvendor == PCI_ANY_ID ||
5032 pci_tbl[idx].subvendor == ssvid) &&
5033 (pci_tbl[idx].subdevice == PCI_ANY_ID ||
5034 pci_tbl[idx].subdevice == ssid))
5037 break;
5040 if (pci_tbl[idx].vendor == 0)
5041 continue;
5044 if (bcm5700_init_one(pdev, &pci_tbl[idx]) == 0)
5045 cards_found++;
5048 return cards_found ? 0 : -ENODEV;
5051 #ifdef MODULE
5052 int init_module(void)
5054 return bcm5700_probe(NULL);
5057 void cleanup_module(void)
5059 struct net_device *next_dev;
5060 PUM_DEVICE_BLOCK pUmDevice;
5062 #ifdef BCM_PROC_FS
5063 bcm5700_proc_remove_notifier();
5064 #endif
5065 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
5066 while (root_tigon3_dev) {
5067 pUmDevice = (PUM_DEVICE_BLOCK)root_tigon3_dev->priv;
5068 #ifdef BCM_PROC_FS
5069 bcm5700_proc_remove_dev(root_tigon3_dev);
5070 #endif
5071 next_dev = pUmDevice->next_module;
5072 unregister_netdev(root_tigon3_dev);
5073 if (pUmDevice->lm_dev.pMappedMemBase)
5074 iounmap(pUmDevice->lm_dev.pMappedMemBase);
5075 #if (LINUX_VERSION_CODE < 0x020600)
5076 kfree(root_tigon3_dev);
5077 #else
5078 free_netdev(root_tigon3_dev);
5079 #endif
5080 root_tigon3_dev = next_dev;
5082 #ifdef BCM_IOCTL32
5083 unregister_ioctl32_conversion(SIOCNICE);
5084 #endif
5087 #endif /* MODULE */
5088 #else /* LINUX_VERSION_CODE < 0x020300 */
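/* 2.4+ driver model: PCI power-management hooks, the pci_driver registration
 * and a reboot notifier replace the legacy probe loop above. */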
5090 #if (LINUX_VERSION_CODE >= 0x020406)
5091 static int bcm5700_suspend (struct pci_dev *pdev, DRV_SUSPEND_STATE_TYPE state)
5092 #else
5093 static void bcm5700_suspend (struct pci_dev *pdev)
5094 #endif
5096 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
5097 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
5098 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
5100 if (!netif_running(dev))
5101 #if (LINUX_VERSION_CODE >= 0x020406)
5102 return 0;
5103 #else
5104 return;
5105 #endif
5107 netif_device_detach (dev);
5108 bcm5700_shutdown(pUmDevice);
5110 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
5112 /* pci_power_off(pdev, -1);*/
5113 #if (LINUX_VERSION_CODE >= 0x020406)
5114 return 0;
5115 #endif
5119 #if (LINUX_VERSION_CODE >= 0x020406)
5120 static int bcm5700_resume(struct pci_dev *pdev)
5121 #else
5122 static void bcm5700_resume(struct pci_dev *pdev)
5123 #endif
5125 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
5126 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
5127 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
5129 if (!netif_running(dev))
5130 #if (LINUX_VERSION_CODE >= 0x020406)
5131 return 0;
5132 #else
5133 return;
5134 #endif
5135 /* pci_power_on(pdev);*/
5136 netif_device_attach(dev);
5137 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
5138 MM_InitializeUmPackets(pDevice);
5139 bcm5700_reset(dev);
5140 #if (LINUX_VERSION_CODE >= 0x020406)
5141 return 0;
5142 #endif
5146 static struct pci_driver bcm5700_pci_driver = {
5147 name: bcm5700_driver,
5148 id_table: bcm5700_pci_tbl,
5149 probe: bcm5700_init_one,
5150 remove: __devexit_p(bcm5700_remove_one),
5151 suspend: bcm5700_suspend,
5152 resume: bcm5700_resume,
5155 static int
5156 bcm5700_notify_reboot(struct notifier_block *this, unsigned long event, void *unused)
5158 switch (event) {
5159 case SYS_HALT:
5160 case SYS_POWER_OFF:
5161 case SYS_RESTART:
5162 break;
5163 default:
5164 return NOTIFY_DONE;
5167 B57_INFO(("bcm5700 reboot notification\n"));
5168 pci_unregister_driver(&bcm5700_pci_driver);
5169 return NOTIFY_DONE;
5172 static int __init bcm5700_init_module (void)
5174 if (msglevel != 0xdeadbeef) {
5175 b57_msg_level = msglevel;
5176 printf("%s: msglevel set to 0x%x\n", __FUNCTION__, b57_msg_level);
5177 } else
5178 b57_msg_level = B57_ERR_VAL;
5180 return pci_module_init(&bcm5700_pci_driver);
5183 static void __exit bcm5700_cleanup_module (void)
5185 #ifdef BCM_PROC_FS
5186 bcm5700_proc_remove_notifier();
5187 #endif
5188 unregister_reboot_notifier(&bcm5700_reboot_notifier);
5189 pci_unregister_driver(&bcm5700_pci_driver);
5192 module_init(bcm5700_init_module);
5193 module_exit(bcm5700_cleanup_module);
5194 #endif
5197 /* Middle Module */
5202 #ifdef BCM_NAPI_RXPOLL
5203 LM_STATUS
5204 MM_ScheduleRxPoll(LM_DEVICE_BLOCK *pDevice)
5206 struct net_device *dev = ((UM_DEVICE_BLOCK *) pDevice)->dev;
5208 if (netif_rx_schedule_prep(dev)) {
5209 __netif_rx_schedule(dev);
5210 return LM_STATUS_SUCCESS;
5212 return LM_STATUS_FAILURE;
5214 #endif
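/* Thin wrappers that let the OS-independent LM core reach PCI configuration
 * space through the Linux pci_read/write_config helpers. */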
5216 LM_STATUS
5217 MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5218 LM_UINT16 *pValue16)
5220 UM_DEVICE_BLOCK *pUmDevice;
5222 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5223 pci_read_config_word(pUmDevice->pdev, Offset, (u16 *) pValue16);
5224 return LM_STATUS_SUCCESS;
5227 LM_STATUS
5228 MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5229 LM_UINT32 *pValue32)
5231 UM_DEVICE_BLOCK *pUmDevice;
5233 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5234 pci_read_config_dword(pUmDevice->pdev, Offset, (u32 *) pValue32);
5235 return LM_STATUS_SUCCESS;
5238 LM_STATUS
5239 MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5240 LM_UINT16 Value16)
5242 UM_DEVICE_BLOCK *pUmDevice;
5244 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5245 pci_write_config_word(pUmDevice->pdev, Offset, Value16);
5246 return LM_STATUS_SUCCESS;
5249 LM_STATUS
5250 MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5251 LM_UINT32 Value32)
5253 UM_DEVICE_BLOCK *pUmDevice;
5255 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5256 pci_write_config_dword(pUmDevice->pdev, Offset, Value32);
5257 return LM_STATUS_SUCCESS;
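/* Allocate a DMA-coherent block shared between host and NIC (typically the
 * status/statistics blocks and descriptor rings).  Every allocation is
 * recorded in mem_list[]/dma_list[] so it can be released on teardown. */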
5260 LM_STATUS
5261 MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
5262 PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy,
5263 LM_BOOL Cached)
5265 PLM_VOID pvirt;
5266 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5267 dma_addr_t mapping;
5269 pvirt = pci_alloc_consistent(pUmDevice->pdev, BlockSize,
5270 &mapping);
5271 if (!pvirt) {
5272 return LM_STATUS_FAILURE;
5274 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
5275 pUmDevice->dma_list[pUmDevice->mem_list_num] = mapping;
5276 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = BlockSize;
5277 memset(pvirt, 0, BlockSize);
5278 *pMemoryBlockVirt = (PLM_VOID) pvirt;
5279 MM_SetAddr(pMemoryBlockPhy, mapping);
5280 return LM_STATUS_SUCCESS;
5283 LM_STATUS
5284 MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
5285 PLM_VOID *pMemoryBlockVirt)
5287 PLM_VOID pvirt;
5288 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5291 /* Maximum in slab.c */
5292 if (BlockSize > 131072) {
5293 goto MM_Alloc_error;
5296 pvirt = kmalloc(BlockSize, GFP_ATOMIC);
5297 if (!pvirt) {
5298 goto MM_Alloc_error;
5300 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
5301 pUmDevice->dma_list[pUmDevice->mem_list_num] = 0;
5302 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = 0;
5303 /* mem_size_list[i] == 0 indicates that the memory should be freed */
5304 /* using kfree */
5305 memset(pvirt, 0, BlockSize);
5306 *pMemoryBlockVirt = pvirt;
5307 return LM_STATUS_SUCCESS;
5309 MM_Alloc_error:
5310 printk(KERN_WARNING "%s: Memory allocation failed - buffer parameters may be set too high\n", pUmDevice->dev->name);
5311 return LM_STATUS_FAILURE;
5314 LM_STATUS
5315 MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
5317 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5319 pDevice->pMappedMemBase = ioremap_nocache(
5320 pci_resource_start(pUmDevice->pdev, 0), sizeof(T3_STD_MEM_MAP));
5321 if (pDevice->pMappedMemBase == 0)
5322 return LM_STATUS_FAILURE;
5324 return LM_STATUS_SUCCESS;
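/* Pre-allocate an skb for every receive packet descriptor and set the
 * buffer-replenish thresholds; on 5700-class chips the thresholds are zeroed
 * so buffers are replenished directly in the ISR, as noted below. */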
5327 LM_STATUS
5328 MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice)
5330 unsigned int i;
5331 struct sk_buff *skb;
5332 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5333 PUM_PACKET pUmPacket;
5334 PLM_PACKET pPacket;
5336 for (i = 0; i < pDevice->RxPacketDescCnt; i++) {
5337 pPacket = QQ_PopHead(&pDevice->RxPacketFreeQ.Container);
5338 pUmPacket = (PUM_PACKET) pPacket;
5339 if (pPacket == 0) {
5340 printk(KERN_DEBUG "Bad RxPacketFreeQ\n");
5342 if (pUmPacket->skbuff == 0) {
5343 #ifdef BCM_WL_EMULATOR
5344 skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2);
5345 #else
5346 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR);
5347 #endif
5348 if (skb == 0) {
5349 pUmPacket->skbuff = 0;
5350 QQ_PushTail(
5351 &pUmDevice->rx_out_of_buf_q.Container,
5352 pPacket);
5353 continue;
5355 pUmPacket->skbuff = skb;
5356 skb->dev = pUmDevice->dev;
5357 #ifndef BCM_WL_EMULATOR
5358 skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
5359 #endif
5361 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5363 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
5364 /* reallocate buffers in the ISR */
5365 pUmDevice->rx_buf_repl_thresh = 0;
5366 pUmDevice->rx_buf_repl_panic_thresh = 0;
5367 pUmDevice->rx_buf_repl_isr_limit = 0;
5369 else {
5370 pUmDevice->rx_buf_repl_thresh = pDevice->RxPacketDescCnt / 8;
5371 pUmDevice->rx_buf_repl_panic_thresh =
5372 pDevice->RxPacketDescCnt * 7 / 8;
5374 /* This limits the time spent in the ISR when the receiver */
5375 /* is in a steady state of being overrun. */
5376 pUmDevice->rx_buf_repl_isr_limit = pDevice->RxPacketDescCnt / 8;
5378 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
5379 if (pDevice->RxJumboDescCnt != 0) {
5380 if (pUmDevice->rx_buf_repl_thresh >=
5381 pDevice->RxJumboDescCnt) {
5383 pUmDevice->rx_buf_repl_thresh =
5384 pUmDevice->rx_buf_repl_panic_thresh =
5385 pDevice->RxJumboDescCnt - 1;
5387 if (pUmDevice->rx_buf_repl_thresh >=
5388 pDevice->RxStdDescCnt) {
5390 pUmDevice->rx_buf_repl_thresh =
5391 pUmDevice->rx_buf_repl_panic_thresh =
5392 pDevice->RxStdDescCnt - 1;
5395 #endif
5397 return LM_STATUS_SUCCESS;
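/* Translate the per-unit module parameters (speed/duplex, flow control,
 * descriptor counts, coalescing, WOL, TSO, ...) into the LM device
 * configuration, clamping each value to its legal range and warning about
 * inconsistent combinations. */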
5400 LM_STATUS
5401 MM_GetConfig(PLM_DEVICE_BLOCK pDevice)
5403 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5404 int index = pUmDevice->index;
5405 struct net_device *dev = pUmDevice->dev;
5407 if (index >= MAX_UNITS)
5408 return LM_STATUS_SUCCESS;
5410 #if LINUX_KERNEL_VERSION < 0x0020609
5412 bcm5700_validate_param_range(pUmDevice, &auto_speed[index], "auto_speed",
5413 0, 1, 1);
5414 if (auto_speed[index] == 0)
5415 pDevice->DisableAutoNeg = TRUE;
5416 else
5417 pDevice->DisableAutoNeg = FALSE;
5419 if (line_speed[index] == 0) {
5420 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5421 pDevice->DisableAutoNeg = FALSE;
5423 else {
5424 bcm5700_validate_param_range(pUmDevice, &full_duplex[index],
5425 "full_duplex", 0, 1, 1);
5426 if (full_duplex[index]) {
5427 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
5429 else {
5430 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_HALF;
5433 if (line_speed[index] == 1000) {
5434 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
5435 if (pDevice->PhyFlags & PHY_NO_GIGABIT) {
5436 pDevice->RequestedLineSpeed =
5437 LM_LINE_SPEED_100MBPS;
5438 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (1000), using 100\n", bcm5700_driver, index);
5440 else {
5441 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
5442 !full_duplex[index]) {
5443 printk(KERN_WARNING "%s-%d: Invalid full_duplex parameter (0) for fiber, using 1\n", bcm5700_driver, index);
5444 pDevice->RequestedDuplexMode =
5445 LM_DUPLEX_MODE_FULL;
5448 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
5449 !auto_speed[index] && !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
5450 printk(KERN_WARNING "%s-%d: Invalid auto_speed parameter (0) for copper, using 1\n", bcm5700_driver, index);
5451 pDevice->DisableAutoNeg = FALSE;
5455 else if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
5456 (pDevice->PhyFlags & PHY_IS_FIBER)){
5457 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5458 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
5459 pDevice->DisableAutoNeg = FALSE;
5460 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using auto\n", bcm5700_driver, index, line_speed[index]);
5462 else if (line_speed[index] == 100) {
5464 pDevice->RequestedLineSpeed = LM_LINE_SPEED_100MBPS;
5466 else if (line_speed[index] == 10) {
5468 pDevice->RequestedLineSpeed = LM_LINE_SPEED_10MBPS;
5470 else {
5471 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5472 pDevice->DisableAutoNeg = FALSE;
5473 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n", bcm5700_driver, index, line_speed[index]);
5478 #endif /* LINUX_KERNEL_VERSION */
5480 /* This is an unmanageable switch NIC and will have link problems if
5481 not set to auto */
5483 if(pDevice->SubsystemVendorId==0x103c && pDevice->SubsystemId==0x3226)
5485 if(pDevice->RequestedLineSpeed != LM_LINE_SPEED_AUTO)
5487 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n",
5488 bcm5700_driver, index, line_speed[index]);
5490 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5491 pDevice->DisableAutoNeg = FALSE;
5494 #if LINUX_KERNEL_VERSION < 0x0020609
5496 pDevice->FlowControlCap = 0;
5497 bcm5700_validate_param_range(pUmDevice, &rx_flow_control[index],
5498 "rx_flow_control", 0, 1, 0);
5499 if (rx_flow_control[index] != 0) {
5500 pDevice->FlowControlCap |= LM_FLOW_CONTROL_RECEIVE_PAUSE;
5502 bcm5700_validate_param_range(pUmDevice, &tx_flow_control[index],
5503 "tx_flow_control", 0, 1, 0);
5504 if (tx_flow_control[index] != 0) {
5505 pDevice->FlowControlCap |= LM_FLOW_CONTROL_TRANSMIT_PAUSE;
5507 bcm5700_validate_param_range(pUmDevice, &auto_flow_control[index],
5508 "auto_flow_control", 0, 1, 0);
5509 if (auto_flow_control[index] != 0) {
5510 if (pDevice->DisableAutoNeg == FALSE) {
5512 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
5513 if ((tx_flow_control[index] == 0) &&
5514 (rx_flow_control[index] == 0)) {
5516 pDevice->FlowControlCap |=
5517 LM_FLOW_CONTROL_TRANSMIT_PAUSE |
5518 LM_FLOW_CONTROL_RECEIVE_PAUSE;
5523 if (dev->mtu > 1500) {
5524 #ifdef BCM_TSO
5525 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
5526 (dev->features & NETIF_F_TSO)) {
5527 dev->features &= ~NETIF_F_TSO;
5528 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
5530 #endif
5531 pDevice->RxMtu = dev->mtu + 14;
5534 if ((T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5700) &&
5535 !(pDevice->Flags & BCM5788_FLAG)) {
5536 pDevice->Flags |= USE_TAGGED_STATUS_FLAG;
5537 pUmDevice->timer_interval = HZ;
5538 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) &&
5539 (pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
5540 pUmDevice->timer_interval = HZ/4;
5543 else {
5544 pUmDevice->timer_interval = HZ/10;
5547 bcm5700_validate_param_range(pUmDevice, &tx_pkt_desc_cnt[index],
5548 "tx_pkt_desc_cnt", 1, MAX_TX_PACKET_DESC_COUNT-1, TX_DESC_CNT);
5549 pDevice->TxPacketDescCnt = tx_pkt_desc_cnt[index];
5550 bcm5700_validate_param_range(pUmDevice, &rx_std_desc_cnt[index],
5551 "rx_std_desc_cnt", 1, T3_STD_RCV_RCB_ENTRY_COUNT-1,
5552 RX_DESC_CNT);
5553 pDevice->RxStdDescCnt = rx_std_desc_cnt[index];
5555 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
5556 bcm5700_validate_param_range(pUmDevice, &rx_jumbo_desc_cnt[index],
5557 "rx_jumbo_desc_cnt", 1, T3_JUMBO_RCV_RCB_ENTRY_COUNT-1,
5558 JBO_DESC_CNT);
5560 if (mtu[index] <= 1514)
5561 pDevice->RxJumboDescCnt = 0;
5562 else if(!T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
5563 pDevice->RxJumboDescCnt = rx_jumbo_desc_cnt[index];
5565 #endif
5567 #ifdef BCM_INT_COAL
5568 bcm5700_validate_param_range(pUmDevice, &adaptive_coalesce[index],
5569 "adaptive_coalesce", 0, 1, 1);
5570 #ifdef BCM_NAPI_RXPOLL
5571 if (adaptive_coalesce[index]) {
5572 printk(KERN_WARNING "%s-%d: adaptive_coalesce not used in NAPI mode\n", bcm5700_driver, index);
5573 adaptive_coalesce[index] = 0;
5576 #endif
5577 pUmDevice->adaptive_coalesce = adaptive_coalesce[index];
5578 if (!pUmDevice->adaptive_coalesce) {
5579 bcm5700_validate_param_range(pUmDevice,
5580 &rx_coalesce_ticks[index], "rx_coalesce_ticks", 0,
5581 MAX_RX_COALESCING_TICKS, RX_COAL_TK);
5582 if ((rx_coalesce_ticks[index] == 0) &&
5583 (rx_max_coalesce_frames[index] == 0)) {
5585 printk(KERN_WARNING "%s-%d: Conflicting rx_coalesce_ticks (0) and rx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5586 bcm5700_driver, index, RX_COAL_TK, RX_COAL_FM);
5588 rx_coalesce_ticks[index] = RX_COAL_TK;
5589 rx_max_coalesce_frames[index] = RX_COAL_FM;
5591 pDevice->RxCoalescingTicks = pUmDevice->rx_curr_coalesce_ticks =
5592 rx_coalesce_ticks[index];
5593 #ifdef BCM_NAPI_RXPOLL
5594 pDevice->RxCoalescingTicksDuringInt = rx_coalesce_ticks[index];
5595 #endif
5597 bcm5700_validate_param_range(pUmDevice,
5598 &rx_max_coalesce_frames[index],
5599 "rx_max_coalesce_frames", 0,
5600 MAX_RX_MAX_COALESCED_FRAMES, RX_COAL_FM);
5602 pDevice->RxMaxCoalescedFrames =
5603 pUmDevice->rx_curr_coalesce_frames =
5604 rx_max_coalesce_frames[index];
5605 #ifdef BCM_NAPI_RXPOLL
5606 pDevice->RxMaxCoalescedFramesDuringInt =
5607 rx_max_coalesce_frames[index];
5608 #endif
5610 bcm5700_validate_param_range(pUmDevice,
5611 &tx_coalesce_ticks[index], "tx_coalesce_ticks", 0,
5612 MAX_TX_COALESCING_TICKS, TX_COAL_TK);
5613 if ((tx_coalesce_ticks[index] == 0) &&
5614 (tx_max_coalesce_frames[index] == 0)) {
5616 printk(KERN_WARNING "%s-%d: Conflicting tx_coalesce_ticks (0) and tx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5617 bcm5700_driver, index, TX_COAL_TK, TX_COAL_FM);
5619 tx_coalesce_ticks[index] = TX_COAL_TK;
5620 tx_max_coalesce_frames[index] = TX_COAL_FM;
5622 pDevice->TxCoalescingTicks = tx_coalesce_ticks[index];
5623 bcm5700_validate_param_range(pUmDevice,
5624 &tx_max_coalesce_frames[index],
5625 "tx_max_coalesce_frames", 0,
5626 MAX_TX_MAX_COALESCED_FRAMES, TX_COAL_FM);
5627 pDevice->TxMaxCoalescedFrames = tx_max_coalesce_frames[index];
5628 pUmDevice->tx_curr_coalesce_frames =
5629 pDevice->TxMaxCoalescedFrames;
5631 bcm5700_validate_param_range(pUmDevice,
5632 &stats_coalesce_ticks[index], "stats_coalesce_ticks",
5633 0, MAX_STATS_COALESCING_TICKS, ST_COAL_TK);
5634 if (adaptive_coalesce[index]) {
5635 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter set with adaptive_coalesce parameter. Using adaptive_coalesce.\n", bcm5700_driver, index);
5636 }else{
5637 if ((stats_coalesce_ticks[index] > 0) &&
5638 (stats_coalesce_ticks[index] < 100)) {
5639 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter (%u), using 100\n", bcm5700_driver, index, (unsigned int) stats_coalesce_ticks[index]);
5640 stats_coalesce_ticks[index] = 100;
5641 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5642 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5646 else {
5647 pUmDevice->rx_curr_coalesce_frames = RX_COAL_FM;
5648 pUmDevice->rx_curr_coalesce_ticks = RX_COAL_TK;
5649 pUmDevice->tx_curr_coalesce_frames = TX_COAL_FM;
5651 #endif
5653 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
5654 unsigned int tmpvar;
5656 tmpvar = pDevice->StatsCoalescingTicks / BCM_TIMER_GRANULARITY;
5659 * If the result is zero, the request is too demanding.
5661 if (tmpvar == 0) {
5662 tmpvar = 1;
5665 pDevice->StatsCoalescingTicks = tmpvar * BCM_TIMER_GRANULARITY;
5667 pUmDevice->statstimer_interval = tmpvar;
5670 #ifdef BCM_WOL
5671 bcm5700_validate_param_range(pUmDevice, &enable_wol[index],
5672 "enable_wol", 0, 1, 0);
5673 if (enable_wol[index]) {
5674 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
5675 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
5677 #endif
5678 #ifdef INCLUDE_TBI_SUPPORT
5679 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
5680 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) ||
5681 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703)) {
5682 /* just poll since we have hardware autoneg. in 5704 */
5683 pDevice->TbiFlags |= TBI_PURE_POLLING_FLAG;
5685 else {
5686 pDevice->TbiFlags |= TBI_POLLING_INTR_FLAG;
5689 #endif
5690 bcm5700_validate_param_range(pUmDevice, &scatter_gather[index],
5691 "scatter_gather", 0, 1, 1);
5692 bcm5700_validate_param_range(pUmDevice, &tx_checksum[index],
5693 "tx_checksum", 0, 1, 1);
5694 bcm5700_validate_param_range(pUmDevice, &rx_checksum[index],
5695 "rx_checksum", 0, 1, 1);
5696 if (!(pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
5697 if (tx_checksum[index] || rx_checksum[index]) {
5699 pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE;
5700 printk(KERN_WARNING "%s-%d: Checksum offload not available on this NIC\n", bcm5700_driver, index);
5703 else {
5704 if (rx_checksum[index]) {
5705 pDevice->TaskToOffload |=
5706 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
5707 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
5709 if (tx_checksum[index]) {
5710 pDevice->TaskToOffload |=
5711 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
5712 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
5713 pDevice->Flags |= NO_TX_PSEUDO_HDR_CSUM_FLAG;
5716 #ifdef BCM_TSO
5717 bcm5700_validate_param_range(pUmDevice, &enable_tso[index],
5718 "enable_tso", 0, 1, 1);
5720 /* Always enable TSO firmware if supported */
5721 /* This way we can turn it on or off on the fly */
5722 if (pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TCP_SEGMENTATION)
5724 pDevice->TaskToOffload |=
5725 LM_TASK_OFFLOAD_TCP_SEGMENTATION;
5727 if (enable_tso[index] &&
5728 !(pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION))
5730 printk(KERN_WARNING "%s-%d: TSO not available on this NIC\n", bcm5700_driver, index);
5732 #endif
5733 #ifdef BCM_ASF
5734 bcm5700_validate_param_range(pUmDevice, &vlan_tag_mode[index],
5735 "vlan_strip_mode", 0, 2, 0);
5736 pUmDevice->vlan_tag_mode = vlan_tag_mode[index];
5737 #else
5738 pUmDevice->vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
5739 #endif
5741 #endif /* LINUX_KERNEL_VERSION */
5743 #ifdef BCM_NIC_SEND_BD
5744 bcm5700_validate_param_range(pUmDevice, &nic_tx_bd[index], "nic_tx_bd",
5745 0, 1, 0);
5746 if (nic_tx_bd[index])
5747 pDevice->Flags |= NIC_SEND_BD_FLAG;
5748 if ((pDevice->Flags & ENABLE_PCIX_FIX_FLAG) ||
5749 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5705)) {
5750 if (pDevice->Flags & NIC_SEND_BD_FLAG) {
5751 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
5752 printk(KERN_WARNING "%s-%d: Nic Send BDs not available on this NIC or not possible on this system\n", bcm5700_driver, index);
5755 #endif
5756 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
5757 bcm5700_validate_param_range(pUmDevice, &disable_msi[pUmDevice->index],
5758 "disable_msi", 0, 1, 0);
5759 #endif
5761 bcm5700_validate_param_range(pUmDevice, &delay_link[index],
5762 "delay_link", 0, 1, 0);
5764 bcm5700_validate_param_range(pUmDevice, &disable_d3hot[index],
5765 "disable_d3hot", 0, 1, 0);
5766 if (disable_d3hot[index]) {
5768 #ifdef BCM_WOL
5769 if (enable_wol[index]) {
5770 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
5771 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
5772 printk(KERN_WARNING "%s-%d: Wake-On-Lan disabled because D3Hot is disabled\n", bcm5700_driver, index);
5774 #endif
5775 pDevice->Flags |= DISABLE_D3HOT_FLAG;
5778 return LM_STATUS_SUCCESS;
5781 /* From include/proto/ethernet.h */
5782 #define ETHER_TYPE_8021Q 0x8100 /* 802.1Q */
5784 /* From include/proto/vlan.h */
5785 #define VLAN_PRI_MASK 7 /* 3 bits of priority */
5786 #define VLAN_PRI_SHIFT 13
5788 /* Replace the priority in a vlan tag */
5789 #define UPD_VLANTAG_PRIO(tag, prio) do { \
5790 tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); \
5791 tag |= prio << VLAN_PRI_SHIFT; \
5792 } while (0)
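/* Drain RxPacketReceivedQ: unmap each buffer, hand good frames to the stack
 * (optionally via the NICE hook, VLAN acceleration or NAPI), and queue a
 * fresh skb -- or park the descriptor on rx_out_of_buf_q when allocation
 * fails. */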
LM_STATUS
MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice)
{
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
	PLM_PACKET pPacket;
	PUM_PACKET pUmPacket;
	struct sk_buff *skb;
	int size;
	int vlan_tag_size = 0;
	uint16 dscp_prio;

	if (pDevice->ReceiveMask & LM_KEEP_VLAN_TAG)
		vlan_tag_size = 4;

	while (1) {
		pPacket = (PLM_PACKET)
			QQ_PopHead(&pDevice->RxPacketReceivedQ.Container);
		if (pPacket == 0)
			break;
		pUmPacket = (PUM_PACKET) pPacket;
#if !defined(NO_PCI_UNMAP)
		pci_unmap_single(pUmDevice->pdev,
			pci_unmap_addr(pUmPacket, map[0]),
			pPacket->u.Rx.RxBufferSize,
			PCI_DMA_FROMDEVICE);
#endif
		if ((pPacket->PacketStatus != LM_STATUS_SUCCESS) ||
			((size = pPacket->PacketSize) >
			(pDevice->RxMtu + vlan_tag_size))) {

			/* reuse skb */
#ifdef BCM_TASKLET
			QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
#else
			QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
#endif
			pUmDevice->rx_misc_errors++;
			continue;
		}
		skb = pUmPacket->skbuff;
		skb_put(skb, size);
		skb->pkt_type = 0;

#ifdef HNDCTF
		if (CTF_ENAB(pUmDevice->cih)) {
			if (ctf_forward(pUmDevice->cih, skb, skb->dev) != BCME_ERROR) {
				pUmDevice->dev->last_rx = jiffies;
				pUmDevice->stats.rx_bytes += skb->len;
				goto drop_rx;
			}

			/* clear skipct flag before sending up */
			PKTCLRSKIPCT(pUmDevice->osh, skb);
		}
#endif /* HNDCTF */

		/* Extract priority from payload and put it in skb->priority */
		dscp_prio = 0;
		if (pUmDevice->qos) {
			uint rc;

			rc = pktsetprio(skb, TRUE);
			if (rc & (PKTPRIO_VDSCP | PKTPRIO_DSCP))
				dscp_prio = rc & VLAN_PRI_MASK;
			if (rc != 0)
				B57_INFO(("pktsetprio returned 0x%x, skb->priority: %d\n",
					rc, skb->priority));
		}
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (size > pDevice->RxMtu) {
			/* Make sure we have a valid VLAN tag */
			if (htons(skb->protocol) != ETHER_TYPE_8021Q) {
				dev_kfree_skb_irq(skb);
				pUmDevice->rx_misc_errors++;
				goto drop_rx;
			}
		}

		pUmDevice->stats.rx_bytes += skb->len;

		if ((pPacket->Flags & RCV_BD_FLAG_TCP_UDP_CHKSUM_FIELD) &&
			(pDevice->TaskToOffload &
			LM_TASK_OFFLOAD_RX_TCP_CHECKSUM)) {
			if (pPacket->u.Rx.TcpUdpChecksum == 0xffff) {

				skb->ip_summed = CHECKSUM_UNNECESSARY;
#if TIGON3_DEBUG
				pUmDevice->rx_good_chksum_count++;
#endif
			}
			else {
				skb->ip_summed = CHECKSUM_NONE;
				pUmDevice->rx_bad_chksum_count++;
			}
		}
		else {
			skb->ip_summed = CHECKSUM_NONE;
		}
#ifdef NICE_SUPPORT
		if( pUmDevice->nice_rx ) {
			vlan_tag_t *vlan_tag;

			vlan_tag = (vlan_tag_t *) &skb->cb[0];
			if (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG) {
				vlan_tag->signature = 0x7777;
				vlan_tag->tag = pPacket->VlanTag;
				/* Override vlan priority with dscp priority */
				if (dscp_prio)
					UPD_VLANTAG_PRIO(vlan_tag->tag, dscp_prio);
			} else {
				vlan_tag->signature = 0;
			}
			pUmDevice->nice_rx(skb, pUmDevice->nice_ctx);
		} else
#endif
		{
#ifdef BCM_VLAN
			if (pUmDevice->vlgrp &&
				(pPacket->Flags & RCV_BD_FLAG_VLAN_TAG)) {
				/* Override vlan priority with dscp priority */
				if (dscp_prio)
					UPD_VLANTAG_PRIO(pPacket->VlanTag, dscp_prio);
#ifdef BCM_NAPI_RXPOLL
				vlan_hwaccel_receive_skb(skb, pUmDevice->vlgrp,
					pPacket->VlanTag);
#else
				vlan_hwaccel_rx(skb, pUmDevice->vlgrp,
					pPacket->VlanTag);
#endif
			} else
#endif
			{
#ifdef BCM_WL_EMULATOR
				if(pDevice->wl_emulate_rx) {
					/* bcmstats("emu recv %d %d"); */
					wlcemu_receive_skb(pDevice->wlc, skb);
					/* bcmstats("emu recv end %d %d"); */
				}
				else
#endif /* BCM_WL_EMULATOR */
				{
#ifdef BCM_NAPI_RXPOLL
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}
		}

		pUmDevice->dev->last_rx = jiffies;

drop_rx:
#ifdef BCM_TASKLET
		pUmPacket->skbuff = 0;
		QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
#else
#ifdef BCM_WL_EMULATOR
		skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2);
#else
		skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR);
#endif /* BCM_WL_EMULATOR */
		if (skb == 0) {
			pUmPacket->skbuff = 0;
			QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
		}
		else {
			pUmPacket->skbuff = skb;
			skb->dev = pUmDevice->dev;
#ifndef BCM_WL_EMULATOR
			skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
#endif
			QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
		}
#endif
	}
	return LM_STATUS_SUCCESS;
}

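/*
 * MM_CoalesceTxBuffer - collapse a fragmented transmit skb into a single
 * contiguous buffer: the existing DMA mappings are released, the skb is
 * duplicated with skb_copy(), and the copy (FragCount = 1) replaces the
 * original.  Fails if the atomic allocation cannot be satisfied.
 */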
LM_STATUS
MM_CoalesceTxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
{
	PUM_PACKET pUmPacket = (PUM_PACKET) pPacket;
	struct sk_buff *skb = pUmPacket->skbuff;
	struct sk_buff *nskb;
#if !defined(NO_PCI_UNMAP)
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;

	pci_unmap_single(pUmDevice->pdev,
		pci_unmap_addr(pUmPacket, map[0]),
		pci_unmap_len(pUmPacket, map_len[0]),
		PCI_DMA_TODEVICE);
#if MAX_SKB_FRAGS
	{
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			pci_unmap_page(pUmDevice->pdev,
				pci_unmap_addr(pUmPacket, map[i + 1]),
				pci_unmap_len(pUmPacket, map_len[i + 1]),
				PCI_DMA_TODEVICE);
		}
	}
#endif
#endif
	if ((nskb = skb_copy(skb, GFP_ATOMIC))) {
		pUmPacket->lm_packet.u.Tx.FragCount = 1;
		dev_kfree_skb(skb);
		pUmPacket->skbuff = nskb;
		return LM_STATUS_SUCCESS;
	}
	dev_kfree_skb(skb);
	pUmPacket->skbuff = 0;
	return LM_STATUS_FAILURE;
}

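/*
 * replenish_rx_buffers - refill the receive free queue.
 *
 * Walks rx_out_of_buf_q, reusing descriptors that still own an skb and
 * allocating fresh skbs for the rest (up to 'max' new buffers when max
 * is non-zero), then asks the LM layer to post them to the hardware.
 */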
/* Returns 1 if not all buffers are allocated */
STATIC int
replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max)
{
	PLM_PACKET pPacket;
	PUM_PACKET pUmPacket;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	struct sk_buff *skb;
	int queue_rx = 0;
	int alloc_cnt = 0;
	int ret = 0;

	while ((pUmPacket = (PUM_PACKET)
		QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container)) != 0) {
		pPacket = (PLM_PACKET) pUmPacket;
		if (pUmPacket->skbuff) {
			/* reuse an old skb */
			QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
			queue_rx = 1;
			continue;
		}
#ifdef BCM_WL_EMULATOR
		if ((skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2)) == 0)
#else
		if ((skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR)) == 0)
#endif /* BCM_WL_EMULATOR */
		{
			QQ_PushHead(&pUmDevice->rx_out_of_buf_q.Container,
				pPacket);
			ret = 1;
			break;
		}
		pUmPacket->skbuff = skb;
		skb->dev = pUmDevice->dev;
#ifndef BCM_WL_EMULATOR
		skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
#endif
		QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
		queue_rx = 1;
		if (max > 0) {
			alloc_cnt++;
			if (alloc_cnt >= max)
				break;
		}
	}
	if (queue_rx || pDevice->QueueAgain) {
		LM_QueueRxPackets(pDevice);
	}
	return ret;
}

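/*
 * MM_IndicateTxPackets - reclaim completed transmit descriptors.
 *
 * Drains TxPacketXmittedQ, unmapping and freeing each transmitted skb,
 * returns the descriptors to TxPacketFreeQ, and restarts the transmit
 * queue once at least half of the descriptors are free again.
 */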
LM_STATUS
MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice)
{
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
	PLM_PACKET pPacket;
	PUM_PACKET pUmPacket;
	struct sk_buff *skb;
#if !defined(NO_PCI_UNMAP) && MAX_SKB_FRAGS
	int i;
#endif

	while (1) {
		pPacket = (PLM_PACKET)
			QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);
		if (pPacket == 0)
			break;
		pUmPacket = (PUM_PACKET) pPacket;
		skb = pUmPacket->skbuff;
#if !defined(NO_PCI_UNMAP)
		pci_unmap_single(pUmDevice->pdev,
			pci_unmap_addr(pUmPacket, map[0]),
			pci_unmap_len(pUmPacket, map_len[0]),
			PCI_DMA_TODEVICE);
#if MAX_SKB_FRAGS
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			pci_unmap_page(pUmDevice->pdev,
				pci_unmap_addr(pUmPacket, map[i + 1]),
				pci_unmap_len(pUmPacket, map_len[i + 1]),
				PCI_DMA_TODEVICE);
		}
#endif
#endif
		dev_kfree_skb_irq(skb);
		pUmPacket->skbuff = 0;
		QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
	}
	if (pUmDevice->tx_full) {
		if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) >=
			(pDevice->TxPacketDescCnt >> 1)) {

			pUmDevice->tx_full = 0;
			netif_wake_queue(pUmDevice->dev);
		}
	}
	return LM_STATUS_SUCCESS;
}

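/*
 * MM_IndicateStatus - report a link state change.
 *
 * Updates the carrier state (unless the device is suspended) and logs
 * the link speed, duplex mode and flow-control settings when the link
 * comes up, or a link-down message when it goes away.
 */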
LM_STATUS
MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice, LM_STATUS Status)
{
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
	struct net_device *dev = pUmDevice->dev;
	LM_FLOW_CONTROL flow_control;
	int speed = 0;

	if (!pUmDevice->opened)
		return LM_STATUS_SUCCESS;

	if (!pUmDevice->suspended) {
		if (Status == LM_STATUS_LINK_DOWN) {
			netif_carrier_off(dev);
		}
		else if (Status == LM_STATUS_LINK_ACTIVE) {
			netif_carrier_on(dev);
		}
	}

	if (pUmDevice->delayed_link_ind > 0) {
		pUmDevice->delayed_link_ind = 0;
		if (Status == LM_STATUS_LINK_DOWN) {
			B57_INFO(("%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name));
		}
		else if (Status == LM_STATUS_LINK_ACTIVE) {
			B57_INFO(("%s: %s NIC Link is UP, ", bcm5700_driver, dev->name));
		}
	}
	else {
		if (Status == LM_STATUS_LINK_DOWN) {
			B57_INFO(("%s: %s NIC Link is Down\n", bcm5700_driver, dev->name));
		}
		else if (Status == LM_STATUS_LINK_ACTIVE) {
			B57_INFO(("%s: %s NIC Link is Up, ", bcm5700_driver, dev->name));
		}
	}

	if (Status == LM_STATUS_LINK_ACTIVE) {
		if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
			speed = 1000;
		else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
			speed = 100;
		else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
			speed = 10;

		B57_INFO(("%d Mbps ", speed));

		if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
			B57_INFO(("full duplex"));
		else
			B57_INFO(("half duplex"));

		flow_control = pDevice->FlowControl &
			(LM_FLOW_CONTROL_RECEIVE_PAUSE |
			LM_FLOW_CONTROL_TRANSMIT_PAUSE);
		if (flow_control) {
			if (flow_control & LM_FLOW_CONTROL_RECEIVE_PAUSE) {
				B57_INFO((", receive "));
				if (flow_control & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
					B57_INFO(("& transmit "));
			}
			else {
				B57_INFO((", transmit "));
			}
			B57_INFO(("flow control ON"));
		}
		B57_INFO(("\n"));
	}
	return LM_STATUS_SUCCESS;
}

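/*
 * MM_UnmapRxDma - release the DMA mapping of a receive buffer, if any.
 */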
void
MM_UnmapRxDma(LM_DEVICE_BLOCK *pDevice, LM_PACKET *pPacket)
{
#if !defined(NO_PCI_UNMAP)
	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
	UM_PACKET *pUmPacket = (UM_PACKET *) pPacket;

	if (!pUmPacket->skbuff)
		return;

	pci_unmap_single(pUmDevice->pdev,
		pci_unmap_addr(pUmPacket, map[0]),
		pPacket->u.Rx.RxBufferSize,
		PCI_DMA_FROMDEVICE);
#endif
}

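/*
 * MM_FreeRxBuffer - free the skb attached to a receive descriptor.
 * The DMA mapping is expected to have been released already.
 */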
LM_STATUS
MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
{
	PUM_PACKET pUmPacket;
	struct sk_buff *skb;

	if (pPacket == 0)
		return LM_STATUS_SUCCESS;
	pUmPacket = (PUM_PACKET) pPacket;
	if ((skb = pUmPacket->skbuff)) {
		/* DMA address already unmapped */
		dev_kfree_skb(skb);
	}
	pUmPacket->skbuff = 0;
	return LM_STATUS_SUCCESS;
}

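/*
 * MM_Sleep - sleep for roughly 'msec' milliseconds.
 * Returns LM_STATUS_FAILURE if the sleep was cut short or a signal is pending.
 */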
LM_STATUS
MM_Sleep(LM_DEVICE_BLOCK *pDevice, LM_UINT32 msec)
{
	current->state = TASK_INTERRUPTIBLE;
	if (schedule_timeout(HZ * msec / 1000) != 0) {
		return LM_STATUS_FAILURE;
	}
	if (signal_pending(current))
		return LM_STATUS_FAILURE;

	return LM_STATUS_SUCCESS;
}

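/*
 * bcm5700_shutdown - stop the adapter.
 *
 * Masks interrupts, drops the carrier, waits for any tasklet/poll
 * activity to finish, halts the LM layer and releases the remaining
 * receive buffers.
 */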
void
bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice)
{
	LM_DEVICE_BLOCK *pDevice = (LM_DEVICE_BLOCK *) pUmDevice;

	bcm5700_intr_off(pUmDevice);
	netif_carrier_off(pUmDevice->dev);
#ifdef BCM_TASKLET
	tasklet_kill(&pUmDevice->tasklet);
#endif
	bcm5700_poll_wait(pUmDevice);

	LM_Halt(pDevice);

	pDevice->InitDone = 0;
	bcm5700_free_remaining_rx_bufs(pUmDevice);
}

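/*
 * bcm5700_free_remaining_rx_bufs - release receive buffers still parked
 * on rx_out_of_buf_q, unmapping and freeing each skb and returning the
 * descriptors to RxPacketFreeQ.
 */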
void
bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice)
{
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
	UM_PACKET *pUmPacket;
	int cnt, i;

	cnt = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
	for (i = 0; i < cnt; i++) {
		if ((pUmPacket =
			QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container))
			!= 0) {

			MM_UnmapRxDma(pDevice, (LM_PACKET *) pUmPacket);
			MM_FreeRxBuffer(pDevice, &pUmPacket->lm_packet);
			QQ_PushTail(&pDevice->RxPacketFreeQ.Container,
				pUmPacket);
		}
	}
}

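/*
 * bcm5700_validate_param_range - check a module parameter against
 * [min, max] and fall back to the default (with a warning) when it is
 * out of range.
 */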
void
bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
	char *param_name, int min, int max, int deflt)
{
	if (((unsigned int) *param < (unsigned int) min) ||
		((unsigned int) *param > (unsigned int) max)) {

		printk(KERN_WARNING "%s-%d: Invalid %s parameter (%u), using %u\n", bcm5700_driver, pUmDevice->index, param_name, (unsigned int) *param, (unsigned int) deflt);
		*param = deflt;
	}
}

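/*
 * bcm5700_find_peer - on a BCM5704, find the other net_device that shares
 * this device's PCI bus and slot; returns 0 if there is none.
 */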
struct net_device *
bcm5700_find_peer(struct net_device *dev)
{
	struct net_device *tmp_dev;
	UM_DEVICE_BLOCK *pUmDevice, *pUmTmp;
	LM_DEVICE_BLOCK *pDevice;

	tmp_dev = 0;
	pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
	pDevice = &pUmDevice->lm_dev;
	if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
		tmp_dev = root_tigon3_dev;
		while (tmp_dev) {
			pUmTmp = (PUM_DEVICE_BLOCK) tmp_dev->priv;
			if ((tmp_dev != dev) &&
				(pUmDevice->pdev->bus->number ==
				pUmTmp->pdev->bus->number) &&
				PCI_SLOT(pUmDevice->pdev->devfn) ==
				PCI_SLOT(pUmTmp->pdev->devfn)) {

				break;
			}
			tmp_dev = pUmTmp->next_module;
		}
	}
	return tmp_dev;
}

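/*
 * MM_FindPeerDev - LM-layer wrapper around bcm5700_find_peer(); returns
 * the peer's LM_DEVICE_BLOCK, or 0 when no peer port is present.
 */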
LM_DEVICE_BLOCK *
MM_FindPeerDev(LM_DEVICE_BLOCK *pDevice)
{
	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
	struct net_device *dev = pUmDevice->dev;
	struct net_device *peer_dev;

	peer_dev = bcm5700_find_peer(dev);
	if (!peer_dev)
		return 0;
	return ((LM_DEVICE_BLOCK *) peer_dev->priv);
}

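/*
 * MM_FindCapability - look up a PCI capability offset for this device.
 */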
int MM_FindCapability(LM_DEVICE_BLOCK *pDevice, int capability)
{
	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
	return (pci_find_capability(pUmDevice->pdev, capability));
}

#if defined(HAVE_POLL_CONTROLLER)||defined(CONFIG_NET_POLL_CONTROLLER)
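/*
 * poll_bcm5700 - netpoll hook: service the interrupt handler with the IRQ
 * disabled (or directly in netdump mode on older Red Hat kernels) so
 * packets can be processed without interrupt delivery.
 */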
STATIC void
poll_bcm5700(struct net_device *dev)
{
	UM_DEVICE_BLOCK *pUmDevice = dev->priv;

#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020605)
	if (netdump_mode) {
		bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
#ifdef BCM_NAPI_RXPOLL
		if (dev->poll_list.prev) {
			int budget = 64;

			bcm5700_poll(dev, &budget);
		}
#endif
	}
	else
#endif
	{
		disable_irq(pUmDevice->pdev->irq);
		bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
		enable_irq(pUmDevice->pdev->irq);
	}
}
#endif