allow coexistence of N build and AC build.
[tomato.git] / release / src-rt-6.x / bcm57xx / linux / b57um.c
blobd185fc48ba9e71669ea4b2a1a6ed0a4f4686b1ff
1 /******************************************************************************/
2 /* */
3 /* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2005 Broadcom */
4 /* Corporation. */
5 /* All rights reserved. */
6 /* */
7 /* This program is free software; you can redistribute it and/or modify */
8 /* it under the terms of the GNU General Public License as published by */
9 /* the Free Software Foundation, located in the file LICENSE. */
10 /* */
11 /******************************************************************************/
13 /* $Id: b57um.c 320789 2012-03-13 04:01:27Z $ */
/* Driver identity strings (name, version, release date) reported by the driver. */
char bcm5700_driver[] = "bcm5700";
char bcm5700_version[] = "8.3.14";
char bcm5700_date[] = "(11/2/05)";
19 #define B57UM
20 #include "mm.h"
22 #include "typedefs.h"
23 #include <epivers.h>
24 #include "osl.h"
25 #include "bcmdefs.h"
26 #include "bcmdevs.h"
27 #include "bcmutils.h"
28 #include "hndsoc.h"
29 #include "siutils.h"
30 #include "hndgige.h"
31 #include "etioctl.h"
32 #include "bcmrobo.h"
34 /* this is needed to get good and stable performances */
35 #define EXTRA_HDR BCMEXTRAHDROOM
37 /* A few user-configurable values. */
39 #define MAX_UNITS 16
40 /* Used to pass the full-duplex flag, etc. */
41 static int line_speed[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
42 static int auto_speed[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
43 static int full_duplex[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
44 static int rx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
45 static int tx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
46 static int auto_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
47 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
48 static int mtu[MAX_UNITS] = {1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500}; /* Jumbo MTU for interfaces. */
49 #endif
50 static int tx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
51 static int rx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
52 static int scatter_gather[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
54 #define TX_DESC_CNT DEFAULT_TX_PACKET_DESC_COUNT
55 static unsigned int tx_pkt_desc_cnt[MAX_UNITS] =
56 {TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
57 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
58 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
59 TX_DESC_CNT};
61 #define RX_DESC_CNT DEFAULT_STD_RCV_DESC_COUNT
62 static unsigned int rx_std_desc_cnt[MAX_UNITS] =
63 {RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
64 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
65 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
66 RX_DESC_CNT };
68 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
69 #define JBO_DESC_CNT DEFAULT_JUMBO_RCV_DESC_COUNT
70 static unsigned int rx_jumbo_desc_cnt[MAX_UNITS] =
71 {JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
72 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
73 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
74 JBO_DESC_CNT };
75 #endif
77 #ifdef BCM_INT_COAL
78 #ifdef BCM_NAPI_RXPOLL
79 static unsigned int adaptive_coalesce[MAX_UNITS] =
80 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
81 #else
82 static unsigned int adaptive_coalesce[MAX_UNITS] =
83 {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
84 #endif
86 #define RX_COAL_TK DEFAULT_RX_COALESCING_TICKS
87 static unsigned int rx_coalesce_ticks[MAX_UNITS] =
88 {RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
89 RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
90 RX_COAL_TK,RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
91 RX_COAL_TK};
93 #define RX_COAL_FM DEFAULT_RX_MAX_COALESCED_FRAMES
94 static unsigned int rx_max_coalesce_frames[MAX_UNITS] =
95 {RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
96 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
97 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
98 RX_COAL_FM};
100 #define TX_COAL_TK DEFAULT_TX_COALESCING_TICKS
101 static unsigned int tx_coalesce_ticks[MAX_UNITS] =
102 {TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
103 TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
104 TX_COAL_TK,TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
105 TX_COAL_TK};
107 #define TX_COAL_FM DEFAULT_TX_MAX_COALESCED_FRAMES
108 static unsigned int tx_max_coalesce_frames[MAX_UNITS] =
109 {TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
110 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
111 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
112 TX_COAL_FM};
114 #define ST_COAL_TK DEFAULT_STATS_COALESCING_TICKS
115 static unsigned int stats_coalesce_ticks[MAX_UNITS] =
116 {ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
117 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
118 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
119 ST_COAL_TK,};
121 #endif
122 #ifdef BCM_WOL
123 static int enable_wol[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
124 #endif
125 #ifdef BCM_TSO
126 static int enable_tso[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
127 #endif
128 #ifdef BCM_NIC_SEND_BD
129 static int nic_tx_bd[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
130 #endif
131 #ifdef BCM_ASF
132 static int vlan_tag_mode[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
133 #endif
134 static int delay_link[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
135 static int disable_d3hot[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
137 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
138 static int disable_msi[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
139 static int bcm_msi_chipset_bug = 0;
140 #endif
142 #define BCM_TIMER_GRANULARITY (1000000 / HZ)
/* Hack to hook the data path to the BCM WL driver */
#ifdef BCM_WL_EMULATOR
#include "bcmnvram.h"
#include "wl_bcm57emu.h"
#ifdef SKB_MANAGER
int skb_old_alloc = 0;
#endif
#endif /* BCM_WL_EMULATOR */

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)
/* Compat: emulate pci_resource_start() on kernels older than 2.3.43
 * (0x02032b).  Pre-2.3.13 kernels expose BARs via base_address[];
 * 2.3.13..2.3.42 via resource[]. */
#if (LINUX_VERSION_CODE < 0x02030d)
#define pci_resource_start(dev, bar)	(dev->base_address[bar] & PCI_BASE_ADDRESS_MEM_MASK)
#elif (LINUX_VERSION_CODE < 0x02032b)
#define pci_resource_start(dev, bar)	(dev->resource[bar] & PCI_BASE_ADDRESS_MEM_MASK)
#endif
163 #if (LINUX_VERSION_CODE < 0x02032b)
164 #define dev_kfree_skb_irq(skb) dev_kfree_skb(skb)
165 #define netif_wake_queue(dev) clear_bit(0, &dev->tbusy); mark_bh(NET_BH)
166 #define netif_stop_queue(dev) set_bit(0, &dev->tbusy)
168 static inline void netif_start_queue(struct net_device *dev)
170 dev->tbusy = 0;
171 dev->interrupt = 0;
172 dev->start = 1;
175 #define netif_queue_stopped(dev) dev->tbusy
176 #define netif_running(dev) dev->start
178 static inline void tasklet_schedule(struct tasklet_struct *tasklet)
180 queue_task(tasklet, &tq_immediate);
181 mark_bh(IMMEDIATE_BH);
184 static inline void tasklet_init(struct tasklet_struct *tasklet,
185 void (*func)(unsigned long),
186 unsigned long data)
188 tasklet->next = NULL;
189 tasklet->sync = 0;
190 tasklet->routine = (void (*)(void *))func;
191 tasklet->data = (void *)data;
194 #define tasklet_kill(tasklet)
196 #endif
#if (LINUX_VERSION_CODE < 0x020300)
/* Compat: pre-2.3 kernels have no PCI hotplug/driver-model support;
 * provide the table type and stub out the registration helpers. */
struct pci_device_id {
	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
	unsigned long driver_data;		/* Data private to the driver */
};

#define PCI_ANY_ID		0

#define pci_set_drvdata(pdev, dev)
#define pci_get_drvdata(pdev) 0

#define pci_enable_device(pdev) 0

#define __devinit	__init
#define __devinitdata	__initdata
#define __devexit

#define SET_MODULE_OWNER(dev)
#define MODULE_DEVICE_TABLE(pci, pci_tbl)

#endif
/* More backward-compat stubs for macros/types added in later kernels. */
#if (LINUX_VERSION_CODE < 0x020411)
#ifndef __devexit_p
#define __devexit_p(x)	x
#endif
#endif

#ifndef MODULE_LICENSE
#define MODULE_LICENSE(license)
#endif

#ifndef IRQ_RETVAL
/* Old ISRs return void; make irqreturn_t/IRQ_RETVAL vanish. */
typedef void irqreturn_t;
#define IRQ_RETVAL(x)
#endif
237 #if (LINUX_VERSION_CODE < 0x02032a)
238 static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
239 dma_addr_t *dma_handle)
241 void *virt_ptr;
243 /* Maximum in slab.c */
244 if (size > 131072)
245 return 0;
247 virt_ptr = kmalloc(size, GFP_KERNEL);
248 *dma_handle = virt_to_bus(virt_ptr);
249 return virt_ptr;
251 #define pci_free_consistent(dev, size, ptr, dma_ptr) kfree(ptr)
253 #endif /*#if (LINUX_VERSION_CODE < 0x02032a) */
#if (LINUX_VERSION_CODE < 0x02040d)

#if (LINUX_VERSION_CODE >= 0x020409) && defined(RED_HAT_LINUX_KERNEL)

#define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
#define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)

#else
/* pci_set_dma_mask is using dma_addr_t */
/* NOTE(review): both masks are intentionally 32-bit here — dma_addr_t
 * cannot express a 64-bit mask on these kernels; confirm before changing. */
#define BCM_32BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
#define BCM_64BIT_DMA_MASK ((dma_addr_t) 0xffffffff)

#endif

#else /* (LINUX_VERSION_CODE < 0x02040d) */

#define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
#define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
#endif
/* Compat: provide pci_set_dma_mask() and related helpers on old kernels. */
#if (LINUX_VERSION_CODE < 0x020329)
#define pci_set_dma_mask(pdev, mask)	(0)
#else
#if (LINUX_VERSION_CODE < 0x020403)
static int
pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;

	dev->dma_mask = mask;

	return 0;
}
#endif
#endif

#if (LINUX_VERSION_CODE < 0x020547)
#define pci_set_consistent_dma_mask(pdev, mask)	(0)
#endif

#if (LINUX_VERSION_CODE < 0x020402)
#define pci_request_regions(pdev, name)	(0)
#define pci_release_regions(pdev)
#endif

#if !defined(spin_is_locked)
#define spin_is_locked(lock)	(test_bit(0,(lock)))
#endif
/* Acquire/release the coarse per-device lock with IRQ state saved, but
 * only when the device is configured for global locking.  Wrapped in
 * do { } while (0) so the macros expand safely inside un-braced
 * if/else bodies. */
#define BCM5700_LOCK(pUmDevice, flags)					\
	do {								\
		if ((pUmDevice)->do_global_lock) {			\
			spin_lock_irqsave(&(pUmDevice)->global_lock, flags); \
		}							\
	} while (0)

#define BCM5700_UNLOCK(pUmDevice, flags)				\
	do {								\
		if ((pUmDevice)->do_global_lock) {			\
			spin_unlock_irqrestore(&(pUmDevice)->global_lock, flags);\
		}							\
	} while (0)
317 inline void
318 bcm5700_intr_lock(PUM_DEVICE_BLOCK pUmDevice)
320 if (pUmDevice->do_global_lock) {
321 spin_lock(&pUmDevice->global_lock);
325 inline void
326 bcm5700_intr_unlock(PUM_DEVICE_BLOCK pUmDevice)
328 if (pUmDevice->do_global_lock) {
329 spin_unlock(&pUmDevice->global_lock);
333 void
334 bcm5700_intr_off(PUM_DEVICE_BLOCK pUmDevice)
336 atomic_inc(&pUmDevice->intr_sem);
337 LM_DisableInterrupt(&pUmDevice->lm_dev);
338 #if (LINUX_VERSION_CODE >= 0x2051c)
339 synchronize_irq(pUmDevice->dev->irq);
340 #else
341 synchronize_irq();
342 #endif
343 LM_DisableInterrupt(&pUmDevice->lm_dev);
346 void
347 bcm5700_intr_on(PUM_DEVICE_BLOCK pUmDevice)
349 if (atomic_dec_and_test(&pUmDevice->intr_sem)) {
350 LM_EnableInterrupt(&pUmDevice->lm_dev);
355 * Broadcom NIC Extension support
356 * -ffan
358 #ifdef NICE_SUPPORT
359 #include "nicext.h"
361 typedef struct {
362 ushort tag;
363 ushort signature;
364 } vlan_tag_t;
366 #endif /* NICE_SUPPORT */
368 int MM_Packet_Desc_Size = sizeof(UM_PACKET);
#if defined(MODULE)
MODULE_AUTHOR("Michael Chan <mchan at broadcom dot com> and Gary Zambrano <zambrano at broadcom dot com>");
MODULE_DESCRIPTION("BCM5700 Driver");
MODULE_LICENSE("GPL");

#if (LINUX_VERSION_CODE < 0x020605)

/* Old-style (pre-2.6.5) module parameter declarations; each array
 * parameter accepts 1..MAX_UNITS comma-separated ints. */
MODULE_PARM(debug, "i");
MODULE_PARM(msglevel, "i");
MODULE_PARM(line_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(auto_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(rx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(tx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(auto_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
MODULE_PARM(mtu, "1-" __MODULE_STRING(MAX_UNITS) "i");
#endif
MODULE_PARM(tx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(rx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(scatter_gather, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(tx_pkt_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(rx_std_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
MODULE_PARM(rx_jumbo_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
#endif
#ifdef BCM_INT_COAL
MODULE_PARM(adaptive_coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(rx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(rx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(tx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(tx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(stats_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
#endif
#ifdef BCM_WOL
MODULE_PARM(enable_wol, "1-" __MODULE_STRING(MAX_UNITS) "i");
#endif
#ifdef BCM_TSO
MODULE_PARM(enable_tso, "1-" __MODULE_STRING(MAX_UNITS) "i");
#endif
#ifdef BCM_NIC_SEND_BD
MODULE_PARM(nic_tx_bd, "1-" __MODULE_STRING(MAX_UNITS) "i");
#endif
#ifdef BCM_ASF
MODULE_PARM(vlan_tag_mode, "1-" __MODULE_STRING(MAX_UNITS) "i");
#endif
MODULE_PARM(delay_link, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(disable_d3hot, "1-" __MODULE_STRING(MAX_UNITS) "i");

#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
MODULE_PARM(disable_msi, "1-" __MODULE_STRING(MAX_UNITS) "i");
#endif

#else /* parms*/

#if (LINUX_VERSION_CODE >= 0x020605) && (LINUX_VERSION_CODE < 0x02060a)
/* 2.6.5..2.6.9: module_param_array() needs a writable count variable. */
static int var;

#define numvar var

#endif

#if (LINUX_VERSION_CODE >= 0x2060a)
/* 2.6.10+: the count argument may be NULL. */
#define numvar NULL

#endif

module_param_array(line_speed, int, numvar, 0);
module_param_array(auto_speed, int, numvar, 0);
module_param_array(full_duplex, int, numvar, 0);
module_param_array(rx_flow_control, int, numvar, 0);
module_param_array(tx_flow_control, int, numvar, 0);
module_param_array(auto_flow_control, int, numvar, 0);
#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
module_param_array(mtu, int, numvar, 0);
#endif
module_param_array(tx_checksum, int, numvar, 0);
module_param_array(rx_checksum, int, numvar, 0);
module_param_array(scatter_gather, int, numvar, 0);
module_param_array(tx_pkt_desc_cnt, int, numvar, 0);
module_param_array(rx_std_desc_cnt, int, numvar, 0);
#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
module_param_array(rx_jumbo_desc_cnt, int, numvar, 0);
#endif
#ifdef BCM_INT_COAL
module_param_array(adaptive_coalesce, int, numvar, 0);
module_param_array(rx_coalesce_ticks, int, numvar, 0);
module_param_array(rx_max_coalesce_frames, int, numvar, 0);
module_param_array(tx_coalesce_ticks, int, numvar, 0);
module_param_array(tx_max_coalesce_frames, int, numvar, 0);
module_param_array(stats_coalesce_ticks, int, numvar, 0);
#endif
#ifdef BCM_WOL
module_param_array(enable_wol, int, numvar, 0);
#endif
#ifdef BCM_TSO
module_param_array(enable_tso, int, numvar, 0);
#endif
#ifdef BCM_NIC_SEND_BD
module_param_array(nic_tx_bd, int, numvar, 0);
#endif
#ifdef BCM_ASF
module_param_array(vlan_tag_mode, int, numvar, 0);
#endif
module_param_array(delay_link, int, numvar, 0);
module_param_array(disable_d3hot, int, numvar, 0);

#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
module_param_array(disable_msi, int, numvar, 0);
#endif

#endif /* params */

#endif
489 #define RUN_AT(x) (jiffies + (x))
491 char kernel_version[] = UTS_RELEASE;
493 #define PCI_SUPPORT_VER2
495 #if !defined(CAP_NET_ADMIN)
496 #define capable(CAP_XXX) (suser())
497 #endif
499 #define tigon3_debug debug
500 #if TIGON3_DEBUG
501 static int tigon3_debug = TIGON3_DEBUG;
502 #else
503 static int tigon3_debug = 0;
504 #endif
505 static int msglevel = 0xdeadbeef;
506 int b57_msg_level;
508 int bcm5700_open(struct net_device *dev);
509 STATIC void bcm5700_timer(unsigned long data);
510 STATIC void bcm5700_stats_timer(unsigned long data);
511 STATIC void bcm5700_reset(struct net_device *dev);
512 STATIC int bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev);
513 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
514 STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance);
515 #else
516 STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
517 #endif
518 #ifdef BCM_TASKLET
519 STATIC void bcm5700_tasklet(unsigned long data);
520 #endif
521 STATIC int bcm5700_close(struct net_device *dev);
522 STATIC struct net_device_stats *bcm5700_get_stats(struct net_device *dev);
523 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
524 STATIC void bcm5700_do_rx_mode(struct net_device *dev);
525 STATIC void bcm5700_set_rx_mode(struct net_device *dev);
526 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p);
527 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
528 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu);
529 #endif
530 #ifdef BCM_NAPI_RXPOLL
531 STATIC int bcm5700_poll(struct net_device *dev, int *budget);
532 #endif
533 STATIC int replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max);
534 STATIC int bcm5700_freemem(struct net_device *dev);
535 #ifdef NICE_SUPPORT
536 STATIC int bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index);
537 #endif
538 #ifdef BCM_INT_COAL
539 #ifndef BCM_NAPI_RXPOLL
540 STATIC int bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice);
541 #endif
542 #endif
543 STATIC void bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice);
544 STATIC int bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice);
545 #ifdef BCM_VLAN
546 STATIC void bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
547 STATIC void bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid);
548 #endif
549 void bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice);
550 void bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice);
551 void bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
552 char *param_name, int min, int max, int deflt);
554 static int bcm5700_notify_reboot(struct notifier_block *this, unsigned long event, void *unused);
555 static struct notifier_block bcm5700_reboot_notifier = {
556 bcm5700_notify_reboot,
557 NULL,
561 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
562 STATIC void poll_bcm5700(struct net_device *dev);
563 #endif
565 /* A list of all installed bcm5700 devices. */
566 static struct net_device *root_tigon3_dev = NULL;
#if defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) ||defined(CONFIG_PPC64)

#ifdef NICE_SUPPORT
#if (LINUX_VERSION_CODE < 0x20500)
extern int register_ioctl32_conversion(unsigned int cmd,
	int (*handler)(unsigned int, unsigned int, unsigned long,
	struct file *));
int unregister_ioctl32_conversion(unsigned int cmd);
#else
#include <linux/ioctl32.h>
#endif

#define BCM_IOCTL32 1

atomic_t bcm5700_load_count = ATOMIC_INIT(0);

/*
 * 32-bit-compat handler for NICE ioctls on 64-bit kernels.  Converts
 * the 32-bit ifreq_nice layout from userland into a native nice_req,
 * finds the named device on the driver's private list, forwards the
 * request to bcm5700_ioctl(), and copies any results back out.
 */
static int
bcm5700_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg,
	struct file *filep)
{
	struct ifreq rq;
	struct net_device *tmp_dev = root_tigon3_dev;
	int ret;
	struct nice_req* nrq;
	struct ifreq_nice32 {
		char ifnr_name[16];
		__u32 cmd;
		__u32 nrq1;
		__u32 nrq2;
		__u32 nrq3;
	} nrq32;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (mm_copy_from_user(&nrq32, (char *) arg, 32))
		return -EFAULT;

	memcpy(rq.ifr_name, nrq32.ifnr_name, 16);

	nrq = (struct nice_req*) &rq.ifr_ifru;
	nrq->cmd = nrq32.cmd;
	if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK) {
		/* Widen the 32-bit user pointer to the native pointer size. */
		nrq->nrq_stats_useraddr = (void *) ((__u64) nrq32.nrq1);
		nrq->nrq_stats_size = nrq32.nrq2;
	}
	else {
		/* nrq1..nrq3 map directly onto three native 32-bit words. */
		memcpy(&nrq->nrq_speed, &nrq32.nrq1, 12);
	}

	while (tmp_dev) {
		if (strcmp(rq.ifr_name, tmp_dev->name) == 0) {
			ret = bcm5700_ioctl(tmp_dev, &rq, cmd);
			if (ret == 0) {
				if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK)
					return ret;

				memcpy(&nrq32.nrq1, &nrq->nrq_speed, 12);
				if (mm_copy_to_user((char *) arg, &nrq32, 32))
					return -EFAULT;
			}
			return ret;
		}
		tmp_dev = ((UM_DEVICE_BLOCK *)(tmp_dev->priv))->next_module;
	}
	return -ENODEV;
}
#endif /* NICE_SUPPORT */
#endif
/* Board identifiers; values index the board_info[] name table below. */
typedef enum {
	BCM5700A6 = 0,
	BCM5700T6,
	BCM5700A9,
	BCM5700T9,
	BCM5700,
	BCM5701A5,
	BCM5701T1,
	BCM5701T8,
	BCM5701A7,
	BCM5701A10,
	BCM5701A12,
	BCM5701,
	BCM5702,
	BCM5703,
	BCM5703A31,
	BCM5703ARBUCKLE,
	TC996T,
	TC996ST,
	TC996SSX,
	TC996SX,
	TC996BT,
	TC997T,
	TC997SX,
	TC1000T,
	TC1000BT,
	TC940BR01,
	TC942BR01,
	TC998T,
	TC998SX,
	TC999T,
	NC6770,
	NC1020,
	NC150T,
	NC7760,
	NC7761,
	NC7770,
	NC7771,
	NC7780,
	NC7781,
	NC7772,
	NC7782,
	NC7783,
	NC320T,
	NC320I,
	NC325I,
	NC324I,
	NC326I,
	BCM5704CIOBE,
	BCM5704,
	BCM5704S,
	BCM5705,
	BCM5705M,
	BCM5705F,
	BCM5901,
	BCM5782,
	BCM5788,
	BCM5789,
	BCM5750,
	BCM5750M,
	BCM5720,
	BCM5751,
	BCM5751M,
	BCM5751F,
	BCM5721,
	BCM5753,
	BCM5753M,
	BCM5753F,
	BCM5781,
	BCM5752,
	BCM5752M,
	BCM5714,
	BCM5780,
	BCM5780S,
	BCM5715,
	BCM4785,
	BCM5903M,
	UNK5788
} board_t;
/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom BCM5700 1000Base-T" },
	{ "Broadcom BCM5700 1000Base-SX" },
	{ "Broadcom BCM5700 1000Base-SX" },
	{ "Broadcom BCM5700 1000Base-T" },
	{ "Broadcom BCM5700" },
	{ "Broadcom BCM5701 1000Base-T" },
	{ "Broadcom BCM5701 1000Base-T" },
	{ "Broadcom BCM5701 1000Base-T" },
	{ "Broadcom BCM5701 1000Base-SX" },
	{ "Broadcom BCM5701 1000Base-T" },
	{ "Broadcom BCM5701 1000Base-T" },
	{ "Broadcom BCM5701" },
	{ "Broadcom BCM5702 1000Base-T" },
	{ "Broadcom BCM5703 1000Base-T" },
	{ "Broadcom BCM5703 1000Base-SX" },
	{ "Broadcom B5703 1000Base-SX" },
	{ "3Com 3C996 10/100/1000 Server NIC" },
	{ "3Com 3C996 10/100/1000 Server NIC" },
	{ "3Com 3C996 Gigabit Fiber-SX Server NIC" },
	{ "3Com 3C996 Gigabit Fiber-SX Server NIC" },
	{ "3Com 3C996B Gigabit Server NIC" },
	{ "3Com 3C997 Gigabit Server NIC" },
	{ "3Com 3C997 Gigabit Fiber-SX Server NIC" },
	{ "3Com 3C1000 Gigabit NIC" },
	{ "3Com 3C1000B-T 10/100/1000 PCI" },
	{ "3Com 3C940 Gigabit LOM (21X21)" },
	{ "3Com 3C942 Gigabit LOM (31X31)" },
	{ "3Com 3C998-T Dual Port 10/100/1000 PCI-X Server NIC" },
	{ "3Com 3C998-SX Dual Port 1000-SX PCI-X Server NIC" },
	{ "3Com 3C999-T Quad Port 10/100/1000 PCI-X Server NIC" },
	{ "HP NC6770 Gigabit Server Adapter" },
	{ "NC1020 HP ProLiant Gigabit Server Adapter 32 PCI" },
	{ "HP ProLiant NC 150T PCI 4-port Gigabit Combo Switch Adapter" },
	{ "HP NC7760 Gigabit Server Adapter" },
	{ "HP NC7761 Gigabit Server Adapter" },
	{ "HP NC7770 Gigabit Server Adapter" },
	{ "HP NC7771 Gigabit Server Adapter" },
	{ "HP NC7780 Gigabit Server Adapter" },
	{ "HP NC7781 Gigabit Server Adapter" },
	{ "HP NC7772 Gigabit Server Adapter" },
	{ "HP NC7782 Gigabit Server Adapter" },
	{ "HP NC7783 Gigabit Server Adapter" },
	{ "HP ProLiant NC 320T PCI Express Gigabit Server Adapter" },
	{ "HP ProLiant NC 320i PCI Express Gigabit Server Adapter" },
	{ "HP NC325i Integrated Dual Port PCI Express Gigabit Server Adapter" },
	{ "HP NC324i Integrated Dual Port PCI Express Gigabit Server Adapter" },
	{ "HP NC326i Integrated Dual Port PCI Express Gigabit Server Adapter" },
	{ "Broadcom BCM5704 CIOB-E 1000Base-T" },
	{ "Broadcom BCM5704 1000Base-T" },
	{ "Broadcom BCM5704 1000Base-SX" },
	{ "Broadcom BCM5705 1000Base-T" },
	{ "Broadcom BCM5705M 1000Base-T" },
	{ "Broadcom 570x 10/100 Integrated Controller" },
	{ "Broadcom BCM5901 100Base-TX" },
	{ "Broadcom NetXtreme Gigabit Ethernet for hp" },
	{ "Broadcom BCM5788 NetLink 1000Base-T" },
	{ "Broadcom BCM5789 NetLink 1000Base-T PCI Express" },
	{ "Broadcom BCM5750 1000Base-T PCI" },
	{ "Broadcom BCM5750M 1000Base-T PCI" },
	{ "Broadcom BCM5720 1000Base-T PCI" },
	{ "Broadcom BCM5751 1000Base-T PCI Express" },
	{ "Broadcom BCM5751M 1000Base-T PCI Express" },
	{ "Broadcom BCM5751F 100Base-TX PCI Express" },
	{ "Broadcom BCM5721 1000Base-T PCI Express" },
	{ "Broadcom BCM5753 1000Base-T PCI Express" },
	{ "Broadcom BCM5753M 1000Base-T PCI Express" },
	{ "Broadcom BCM5753F 100Base-TX PCI Express" },
	{ "Broadcom BCM5781 NetLink 1000Base-T PCI Express" },
	{ "Broadcom BCM5752 1000Base-T PCI Express" },
	{ "Broadcom BCM5752M 1000Base-T PCI Express" },
	{ "Broadcom BCM5714 1000Base-T " },
	{ "Broadcom BCM5780 1000Base-T" },
	{ "Broadcom BCM5780S 1000Base-SX" },
	{ "Broadcom BCM5715 1000Base-T " },
	{ "Broadcom BCM4785 10/100/1000 Integrated Controller" },
	{ "Broadcom BCM5903M Gigabit Ethernet " },
	{ "Unknown BCM5788 Gigabit Ethernet " },
	{ 0 }
};
802 static struct pci_device_id bcm5700_pci_tbl[] __devinitdata = {
803 {0x14e4, 0x1644, 0x14e4, 0x1644, 0, 0, BCM5700A6 },
804 {0x14e4, 0x1644, 0x14e4, 0x2, 0, 0, BCM5700T6 },
805 {0x14e4, 0x1644, 0x14e4, 0x3, 0, 0, BCM5700A9 },
806 {0x14e4, 0x1644, 0x14e4, 0x4, 0, 0, BCM5700T9 },
807 {0x14e4, 0x1644, 0x1028, 0xd1, 0, 0, BCM5700 },
808 {0x14e4, 0x1644, 0x1028, 0x0106, 0, 0, BCM5700 },
809 {0x14e4, 0x1644, 0x1028, 0x0109, 0, 0, BCM5700 },
810 {0x14e4, 0x1644, 0x1028, 0x010a, 0, 0, BCM5700 },
811 {0x14e4, 0x1644, 0x10b7, 0x1000, 0, 0, TC996T },
812 {0x14e4, 0x1644, 0x10b7, 0x1001, 0, 0, TC996ST },
813 {0x14e4, 0x1644, 0x10b7, 0x1002, 0, 0, TC996SSX },
814 {0x14e4, 0x1644, 0x10b7, 0x1003, 0, 0, TC997T },
815 {0x14e4, 0x1644, 0x10b7, 0x1005, 0, 0, TC997SX },
816 {0x14e4, 0x1644, 0x10b7, 0x1008, 0, 0, TC942BR01 },
817 {0x14e4, 0x1644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5700 },
818 {0x14e4, 0x1645, 0x14e4, 1, 0, 0, BCM5701A5 },
819 {0x14e4, 0x1645, 0x14e4, 5, 0, 0, BCM5701T1 },
820 {0x14e4, 0x1645, 0x14e4, 6, 0, 0, BCM5701T8 },
821 {0x14e4, 0x1645, 0x14e4, 7, 0, 0, BCM5701A7 },
822 {0x14e4, 0x1645, 0x14e4, 8, 0, 0, BCM5701A10 },
823 {0x14e4, 0x1645, 0x14e4, 0x8008, 0, 0, BCM5701A12 },
824 {0x14e4, 0x1645, 0x0e11, 0xc1, 0, 0, NC6770 },
825 {0x14e4, 0x1645, 0x0e11, 0x7c, 0, 0, NC7770 },
826 {0x14e4, 0x1645, 0x0e11, 0x85, 0, 0, NC7780 },
827 {0x14e4, 0x1645, 0x1028, 0x0121, 0, 0, BCM5701 },
828 {0x14e4, 0x1645, 0x10b7, 0x1004, 0, 0, TC996SX },
829 {0x14e4, 0x1645, 0x10b7, 0x1006, 0, 0, TC996BT },
830 {0x14e4, 0x1645, 0x10b7, 0x1007, 0, 0, TC1000T },
831 {0x14e4, 0x1645, 0x10b7, 0x1008, 0, 0, TC940BR01 },
832 {0x14e4, 0x1645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5701 },
833 {0x14e4, 0x1646, 0x14e4, 0x8009, 0, 0, BCM5702 },
834 {0x14e4, 0x1646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
835 {0x14e4, 0x16a6, 0x14e4, 0x8009, 0, 0, BCM5702 },
836 {0x14e4, 0x16a6, 0x14e4, 0x000c, 0, 0, BCM5702 },
837 {0x14e4, 0x16a6, 0x0e11, 0xbb, 0, 0, NC7760 },
838 {0x14e4, 0x16a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
839 {0x14e4, 0x16c6, 0x10b7, 0x1100, 0, 0, TC1000BT },
840 {0x14e4, 0x16c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
841 {0x14e4, 0x1647, 0x14e4, 0x0009, 0, 0, BCM5703 },
842 {0x14e4, 0x1647, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
843 {0x14e4, 0x1647, 0x14e4, 0x000b, 0, 0, BCM5703 },
844 {0x14e4, 0x1647, 0x14e4, 0x800a, 0, 0, BCM5703 },
845 {0x14e4, 0x1647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
846 {0x14e4, 0x16a7, 0x14e4, 0x0009, 0, 0, BCM5703 },
847 {0x14e4, 0x16a7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
848 {0x14e4, 0x16a7, 0x14e4, 0x000b, 0, 0, BCM5703 },
849 {0x14e4, 0x16a7, 0x14e4, 0x800a, 0, 0, BCM5703 },
850 {0x14e4, 0x16a7, 0x0e11, 0xca, 0, 0, NC7771 },
851 {0x14e4, 0x16a7, 0x0e11, 0xcb, 0, 0, NC7781 },
852 {0x14e4, 0x16a7, 0x1014, 0x0281, 0, 0, BCM5703ARBUCKLE },
853 {0x14e4, 0x16a7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
854 {0x14e4, 0x16c7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
855 {0x14e4, 0x16c7, 0x0e11, 0xca, 0, 0, NC7771 },
856 {0x14e4, 0x16c7, 0x0e11, 0xcb, 0, 0, NC7781 },
857 {0x14e4, 0x16c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
858 {0x14e4, 0x1648, 0x0e11, 0xcf, 0, 0, NC7772 },
859 {0x14e4, 0x1648, 0x0e11, 0xd0, 0, 0, NC7782 },
860 {0x14e4, 0x1648, 0x0e11, 0xd1, 0, 0, NC7783 },
861 {0x14e4, 0x1648, 0x10b7, 0x2000, 0, 0, TC998T },
862 {0x14e4, 0x1648, 0x10b7, 0x3000, 0, 0, TC999T },
863 {0x14e4, 0x1648, 0x1166, 0x1648, 0, 0, BCM5704CIOBE },
864 {0x14e4, 0x1648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704 },
865 {0x14e4, 0x1649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
866 {0x14e4, 0x16a8, 0x14e4, 0x16a8, 0, 0, BCM5704S },
867 {0x14e4, 0x16a8, 0x10b7, 0x2001, 0, 0, TC998SX },
868 {0x14e4, 0x16a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
869 {0x14e4, 0x1653, 0x0e11, 0x00e3, 0, 0, NC7761 },
870 {0x14e4, 0x1653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
871 {0x14e4, 0x1654, 0x0e11, 0x00e3, 0, 0, NC7761 },
872 {0x14e4, 0x1654, 0x103c, 0x3100, 0, 0, NC1020 },
873 {0x14e4, 0x1654, 0x103c, 0x3226, 0, 0, NC150T },
874 {0x14e4, 0x1654, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
875 {0x14e4, 0x165d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
876 {0x14e4, 0x165e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
877 {0x14e4, 0x166e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705F },
878 {0x14e4, 0x1696, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5782 },
879 {0x14e4, 0x169c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5788 },
880 {0x14e4, 0x169d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5789 },
881 {0x14e4, 0x170d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
882 {0x14e4, 0x170e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
883 {0x14e4, 0x1676, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750 },
884 {0x14e4, 0x167c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750M },
885 {0x14e4, 0x1677, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751 },
886 {0x14e4, 0x167d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751M },
887 {0x14e4, 0x167e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751F },
888 {0x14e4, 0x1658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5720 },
889 {0x14e4, 0x1659, 0x103c, 0x7031, 0, 0, NC320T },
890 {0x14e4, 0x1659, 0x103c, 0x7032, 0, 0, NC320T },
891 {0x14e4, 0x166a, 0x103c, 0x7035, 0, 0, NC325I },
892 {0x14e4, 0x166b, 0x103c, 0x7036, 0, 0, NC325I },
893 {0x14e4, 0x1668, 0x103c, 0x7039, 0, 0, NC324I },
894 {0x14e4, 0x1669, 0x103c, 0x703a, 0, 0, NC324I },
895 {0x14e4, 0x1678, 0x103c, 0x703e, 0, 0, NC326I },
896 {0x14e4, 0x1679, 0x103c, 0x703c, 0, 0, NC326I },
897 {0x14e4, 0x1659, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5721 },
898 {0x14e4, 0x16f7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753 },
899 {0x14e4, 0x16fd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753M },
900 {0x14e4, 0x16fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753F },
901 {0x14e4, 0x16dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5781 },
902 {0x14e4, 0x1600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752 },
903 {0x14e4, 0x1601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752M },
904 {0x14e4, 0x1668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5714 },
905 {0x14e4, 0x166a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780 },
906 {0x14e4, 0x166b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780S },
907 {0x14e4, 0x1678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5715 },
908 {0x14e4, 0x471f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM4785 },
909 {0x14e4, 0x16ff, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5903M },
910 {0x173b, 0x03ed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, UNK5788 },
911 {0,}
914 MODULE_DEVICE_TABLE(pci, bcm5700_pci_tbl);
916 #ifdef BCM_PROC_FS
917 extern int bcm5700_proc_create(void);
918 extern int bcm5700_proc_create_dev(struct net_device *dev);
919 extern int bcm5700_proc_remove_dev(struct net_device *dev);
920 extern int bcm5700_proc_remove_notifier(void);
921 #endif
/* PCI ID table used with pci_dev_present() (2.6.10+) to detect an AMD-762
 * northbridge; its presence forces posted-write flushing and disables NIC
 * send BDs in bcm5700_init_board(). */
923 #if (LINUX_VERSION_CODE >= 0x2060a)
924 static struct pci_device_id pci_AMD762id[]={
925 	{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
926 	  PCI_DEVICE_ID_AMD_FE_GATE_700C) },
929 #endif
931 static int sbgige = -1;
933 /*******************************************************************************
934 *******************************************************************************
/* Return the netdev feature flag to advertise for TX checksum offload.
 * Only IPv4 checksum offload (NETIF_F_IP_CSUM) is claimed; ChipRevId is
 * currently unused, kept so per-chip capabilities can be added later. */
937 int get_csum_flag(LM_UINT32 ChipRevId)
939 	return NETIF_F_IP_CSUM;
942 /*******************************************************************************
943 *******************************************************************************
945 This function returns true if the device passed to it is attached to an
946 ICH-ICH4. If the chip is not attached to an ICH, or is attached to an ICH5
947 or newer, it returns false.
949 This function determines which bridge it is attached to by scanning the pci
950 bus looking for bridge chips (hdr_type=1). When a bridge chip is detected,
951 the bridge's subordinate's secondary bus number is compared with this
952 device's bus number. If they match, then the device is attached to this
953 bridge. The bridge's device id is compared to a list of known device ids for
954 ICH-ICH4. Since many older ICH's (ICH2-ICH7) share the same device id, the
955 chip revision must also be checked to determine if the chip is older than an
956 ICH5.
958 To scan the bus, one of two functions is used depending on the kernel
959 version. For 2.4 kernels, the pci_find_device function is used. This
960 function has been deprecated in the 2.6 kernel and replaced with the
961 function pci_get_device. The macro walk_pci_bus determines which function to
962 use when the driver is built.
/* Kernel-version-neutral PCI bus walk: 2.6.10+ uses pci_get_device (takes a
 * reference, so unwalk_pci_bus must drop it with pci_dev_put); 2.4 uses
 * pci_find_device, which holds no reference, so unwalk_pci_bus is a no-op. */
965 #if (LINUX_VERSION_CODE >= 0x2060a)
966 #define walk_pci_bus(d)		while ((d = pci_get_device( \
967 					PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
969 #define unwalk_pci_bus(d)	pci_dev_put(d)
971 #else
972 #define walk_pci_bus(d)	while ((d = pci_find_device( \
973 					PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
974 #define unwalk_pci_bus(d)
976 #endif
/* Intel ICH LPC bridge device IDs shared by pre-ICH5 parts; a revision ID
 * below ICH5_CHIP_VERSION (0xc0) identifies an ICH-ICH4 era bridge.
 * Table is zero-terminated for the scan loop in attached_to_ICH4_or_older(). */
978 #define ICH5_CHIP_VERSION	0xc0
980 static struct pci_device_id pci_ICHtable[] = {
981 	{0x8086, 0x2418}, /* PCI_DEVICE_ID_INTEL_82801AA_8  */
982 	{0x8086, 0x2428}, /* PCI_DEVICE_ID_INTEL_82801AB_8  */
983 	{0x8086, 0x244e}, /* PCI_DEVICE_ID_INTEL_82801BA_6  */
984 	{0x8086, 0x2448}, /* PCI_DEVICE_ID_INTEL_82801BA_11 */
985 	{0, 0}
/* Return 1 if pdev sits behind an Intel ICH-ICH4 bridge (revision below
 * ICH5_CHIP_VERSION), else 0. Walks every PCI device looking for a bridge
 * (hdr_type == 1) whose subordinate secondary bus matches pdev's bus, then
 * matches the bridge against pci_ICHtable and checks its revision byte.
 * Caller uses a hit to set UNDI_FIX_FLAG (indirect register access). */
988 int attached_to_ICH4_or_older( struct pci_dev *pdev)
990 	struct pci_dev *tmp_pdev = NULL;
991 	struct pci_device_id *ich_table;
992 	u8 chip_rev;
994 	walk_pci_bus (tmp_pdev) {
995 		/* hdr_type 1 == PCI-to-PCI bridge */
996 		if ((tmp_pdev->hdr_type == 1) &&
997 		   (tmp_pdev->subordinate != NULL) &&
998 		   (tmp_pdev->subordinate->secondary == pdev->bus->number)) {
999 			ich_table = pci_ICHtable;
1001 			while (ich_table->vendor) {
1002 				if ((ich_table->vendor == tmp_pdev->vendor) &&
1003 				    (ich_table->device == tmp_pdev->device)) {
1005 					pci_read_config_byte( tmp_pdev,
1006 						PCI_REVISION_ID, &chip_rev);
1008 					if (chip_rev < ICH5_CHIP_VERSION) {
1009 						/* drop the walk reference before returning (2.6 only) */
1010 						unwalk_pci_bus( tmp_pdev);
1011 						return 1;
1013 				ich_table++;
1017 	return 0;
/* Allocate and partially initialize the net_device / UM_DEVICE_BLOCK for one
 * adapter: enables the PCI device, claims regions, picks a DMA mask, seeds
 * the LM device flags (robo switch, RGMII, posted-write and UNDI quirks),
 * reads adapter info, attaches an optional robo switch, and fills in the
 * netdev memory/IRQ fields. On success *dev_out is set and 0 returned;
 * on failure a negative errno is returned and all acquired resources are
 * released via the goto cleanup ladder at the bottom.
 * NOTE(review): the allocated dev is also linked onto the global
 * root_tigon3_dev list before all failure points — confirm the error path
 * unlinks it elsewhere. */
1020 static int
1021 __devinit bcm5700_init_board(struct pci_dev *pdev, struct net_device **dev_out, int board_idx)
1023 	struct net_device *dev;
1024 	PUM_DEVICE_BLOCK pUmDevice;
1025 	PLM_DEVICE_BLOCK pDevice;
1026 	bool rgmii = FALSE;
1027 	si_t *sih = NULL;
1028 	int rc;
1030 	*dev_out = NULL;
1032 	/* dev zeroed in init_etherdev */
1033 #if (LINUX_VERSION_CODE >= 0x20600)
1034 	dev = alloc_etherdev(sizeof(*pUmDevice));
1035 #else
1036 	dev = init_etherdev(NULL, sizeof(*pUmDevice));
1037 #endif
1038 	if (dev == NULL) {
1039 		printk(KERN_ERR "%s: unable to alloc new ethernet\n", bcm5700_driver);
1040 		return -ENOMEM;
1042 	SET_MODULE_OWNER(dev);
1043 #if (LINUX_VERSION_CODE >= 0x20600)
1044 	SET_NETDEV_DEV(dev, &pdev->dev);
1045 #endif
1046 	pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1048 	/* enable device (incl. PCI PM wakeup), and bus-mastering */
1049 	rc = pci_enable_device(pdev);
1050 	if (rc)
1051 		goto err_out;
1053 	/* init core specific stuff (BCM4785 SoC-internal GigE core) */
1054 	if (pdev->device == T3_PCI_DEVICE_ID(T3_PCI_ID_BCM471F)) {
1055 		sih = si_kattach(SI_OSH);
1056 		hndgige_init(sih, ++sbgige, &rgmii);
1059 	rc = pci_request_regions(pdev, bcm5700_driver);
1060 	if (rc) {
1061 		if (!sih)
1062 			goto err_out;
1063 		/* on the SoC core the region clash with the SMBus driver is benign */
1064 		printk(KERN_INFO "bcm5700_init_board: pci_request_regions returned error %d\n"
1065 			"This may be because the region is already requested by"
1066 			" the SMBus driver. Ignore the PCI error messages.\n", rc);
1068 	pci_set_master(pdev);
1070 	/* prefer 64-bit DMA; fall back to 32-bit; fail otherwise */
1071 	if (pci_set_dma_mask(pdev, BCM_64BIT_DMA_MASK) == 0) {
1072 		pUmDevice->using_dac = 1;
1073 		if (pci_set_consistent_dma_mask(pdev, BCM_64BIT_DMA_MASK) != 0) {
1074 			printk(KERN_ERR "pci_set_consistent_dma_mask failed\n");
1075 			pci_release_regions(pdev);
1076 			goto err_out;
1077 	} else if (pci_set_dma_mask(pdev, BCM_32BIT_DMA_MASK) == 0) {
1078 		pUmDevice->using_dac = 0;
1079 	} else {
1080 		printk(KERN_ERR "System does not support DMA\n");
1081 		pci_release_regions(pdev);
1082 		goto err_out;
1085 	pUmDevice->dev = dev;
1086 	pUmDevice->pdev = pdev;
1087 	pUmDevice->mem_list_num = 0;
1088 	pUmDevice->next_module = root_tigon3_dev;
1089 	pUmDevice->index = board_idx;
1090 	pUmDevice->sih = (void *)sih;
1091 	root_tigon3_dev = dev;
1093 	spin_lock_init(&pUmDevice->global_lock);
1095 	spin_lock_init(&pUmDevice->undi_lock);
1097 	spin_lock_init(&pUmDevice->phy_lock);
1099 	pDevice = &pUmDevice->lm_dev;
1100 	pDevice->Flags = 0;
1101 	pDevice->FunctNum = PCI_FUNC(pUmDevice->pdev->devfn);
1102 	pUmDevice->boardflags = getintvar(NULL, "boardflags");
1103 	if (sih) {
1104 		if (pUmDevice->boardflags & BFL_ENETROBO)
1105 			pDevice->Flags |= ROBO_SWITCH_FLAG;
1106 		pDevice->Flags |= rgmii ? RGMII_MODE_FLAG : 0;
1107 		/* early BCM4785 revs cannot run simultaneous DMA in both directions */
1108 		if ((sih->chip == BCM4785_CHIP_ID) && (sih->chiprev < 2))
1109 			pDevice->Flags |= ONE_DMA_AT_ONCE_FLAG;
1110 		pDevice->Flags |= SB_CORE_FLAG;
1111 		if (sih->chip == BCM4785_CHIP_ID)
1112 			pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1114 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1115 	/* clamp the per-unit module parameter into [1500, 9000] */
1116 	if (board_idx < MAX_UNITS) {
1117 		bcm5700_validate_param_range(pUmDevice, &mtu[board_idx], "mtu", 1500, 9000, 1500);
1118 		dev->mtu = mtu[board_idx];
1119 #endif
1121 	if (attached_to_ICH4_or_older(pdev)) {
1122 		pDevice->Flags |= UNDI_FIX_FLAG;
1125 #if (LINUX_VERSION_CODE >= 0x2060a)
1126 	if (pci_dev_present(pci_AMD762id)) {
1127 		pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1128 		pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1130 #else
1131 	if (pci_find_device(0x1022, 0x700c, NULL)) {
1132 		/* AMD762 writes I/O out of order */
1133 		/* Setting bit 1 in 762's register 0x4C still doesn't work */
1134 		/* in all cases */
1135 		pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1136 		pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1138 #endif
1139 	if (LM_GetAdapterInfo(pDevice) != LM_STATUS_SUCCESS) {
1140 		rc = -ENODEV;
1141 		goto err_out_unmap;
1144 	/* attach/configure the integrated robo switch when board flags say so */
1145 	if (pDevice->Flags & ROBO_SWITCH_FLAG) {
1146 		robo_info_t	*robo;
1147 		if ((robo = bcm_robo_attach(sih, pDevice, NULL,
1148 		                            robo_miird, robo_miiwr)) == NULL) {
1149 			B57_ERR(("robo_setup: failed to attach robo switch \n"));
1150 			goto robo_fail;
1153 		if (bcm_robo_enable_device(robo)) {
1154 			B57_ERR(("robo_setup: failed to enable robo switch \n"));
1155 			goto robo_fail;
1158 		/* Configure the switch to do VLAN */
1159 		if ((pUmDevice->boardflags & BFL_ENETVLAN) &&
1160 		    bcm_robo_config_vlan(robo, pDevice->PermanentNodeAddress)) {
1161 			B57_ERR(("robo_setup: robo_config_vlan failed\n"));
1162 			goto robo_fail;
1165 		/* Enable the switch */
1166 		if (bcm_robo_enable_switch(robo)) {
1167 			B57_ERR(("robo_setup: robo_enable_switch failed\n"));
1168 robo_fail:
1169 			bcm_robo_detach(robo);
1170 			rc = -ENODEV;
1171 			goto err_out_unmap;
1173 		pUmDevice->robo = (void *)robo;
1176 	if ((pDevice->Flags & JUMBO_CAPABLE_FLAG) == 0) {
1177 		if (dev->mtu > 1500) {
1178 			dev->mtu = 1500;
1179 			printk(KERN_WARNING
1180 				"%s-%d: Jumbo mtu sizes not supported, using mtu=1500\n",
1181 				bcm5700_driver, pUmDevice->index);
1185 	pUmDevice->do_global_lock = 0;
1186 	if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
1187 		/* The 5700 chip works best without interleaved register */
1188 		/* accesses on certain machines. */
1189 		pUmDevice->do_global_lock = 1;
1192 	/* 5701 on a real PCI-X bus can take unaligned rx buffers */
1193 	if ((T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5701) &&
1194 		((pDevice->PciState & T3_PCI_STATE_NOT_PCI_X_BUS) == 0)) {
1195 		pUmDevice->rx_buf_align = 0;
1196 	} else {
1197 		pUmDevice->rx_buf_align = 2;
1199 	dev->mem_start = pci_resource_start(pdev, 0);
1200 	dev->mem_end = dev->mem_start + sizeof(T3_STD_MEM_MAP);
1201 	dev->irq = pdev->irq;
1203 	*dev_out = dev;
1205 #ifdef HNDCTF
1206 	pUmDevice->osh = osl_attach(pdev, PCI_BUS, FALSE);
1208 	pUmDevice->cih = ctf_attach(pUmDevice->osh, dev->name, &b57_msg_level, NULL, NULL);
1210 	ctf_dev_register(pUmDevice->cih, dev, FALSE);
1211 	ctf_enable(pUmDevice->cih, dev, TRUE, NULL);
1212 #endif /* HNDCTF */
1214 	return 0;
1216 err_out_unmap:
1217 	pci_release_regions(pdev);
1218 	bcm5700_freemem(dev);
1220 err_out:
1221 #if (LINUX_VERSION_CODE < 0x020600)
1222 	unregister_netdev(dev);
1223 	kfree(dev);
1224 #else
1225 	free_netdev(dev);
1226 #endif
1227 	return rc;
/* Print the driver banner (name, optional NICE extension tag, version and
 * date) once at first probe. Always returns 0. */
1230 static int __devinit
1231 bcm5700_print_ver(void)
1233 	printk(KERN_INFO "Broadcom Gigabit Ethernet Driver %s ",
1234 		bcm5700_driver);
1235 #ifdef NICE_SUPPORT
1236 	printk("with Broadcom NIC Extension (NICE) ");
1237 #endif
1238 	printk("ver. %s %s\n", bcm5700_version, bcm5700_date);
1239 	return 0;
/* PCI probe entry point: initializes the board via bcm5700_init_board(),
 * wires up the (pre-net_device_ops era) netdev callbacks, registers the
 * netdev, prints the detected transceiver, configures offload features
 * (SG / highmem DMA / TX csum / VLAN / TSO), and applies two chipset
 * workarounds: AMD-762 PCI ordering and the 0x1066:0x0017 MSI quirk.
 * Returns 0 on success or a negative errno from board init / registration. */
1242 static int __devinit
1243 bcm5700_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1245 	struct net_device *dev = NULL;
1246 	PUM_DEVICE_BLOCK pUmDevice;
1247 	PLM_DEVICE_BLOCK pDevice;
1248 	int i;
1249 	static int board_idx = -1;
1250 	static int printed_version = 0;
1251 	struct pci_dev *pci_dev;
1253 	board_idx++;
1255 	if (!printed_version) {
1256 		bcm5700_print_ver();
1257 #ifdef BCM_PROC_FS
1258 		bcm5700_proc_create();
1259 #endif
1260 		printed_version = 1;
1263 	i = bcm5700_init_board(pdev, &dev, board_idx);
1264 	if (i < 0) {
1265 		return i;
1268 	if (dev == NULL)
1269 		return -ENOMEM;
1271 #ifdef BCM_IOCTL32
1272 	/* register the 32-bit ioctl translation once, for the first device */
1273 	if (atomic_read(&bcm5700_load_count) == 0) {
1274 		register_ioctl32_conversion(SIOCNICE, bcm5700_ioctl32);
1275 	atomic_inc(&bcm5700_load_count);
1276 #endif
1277 	dev->open = bcm5700_open;
1278 	dev->hard_start_xmit = bcm5700_start_xmit;
1279 	dev->stop = bcm5700_close;
1280 	dev->get_stats = bcm5700_get_stats;
1281 	dev->set_multicast_list = bcm5700_set_rx_mode;
1282 	dev->do_ioctl = bcm5700_ioctl;
1283 	dev->set_mac_address = &bcm5700_set_mac_addr;
1284 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1285 	dev->change_mtu = &bcm5700_change_mtu;
1286 #endif
1287 #if (LINUX_VERSION_CODE >= 0x20400)
1288 	dev->tx_timeout = bcm5700_reset;
1289 	dev->watchdog_timeo = TX_TIMEOUT;
1290 #endif
1291 #ifdef BCM_VLAN
1292 	dev->vlan_rx_register = &bcm5700_vlan_rx_register;
1293 	dev->vlan_rx_kill_vid = &bcm5700_vlan_rx_kill_vid;
1294 #endif
1295 #ifdef BCM_NAPI_RXPOLL
1296 	dev->poll = bcm5700_poll;
1297 	dev->weight = 64;
1298 #endif
1300 	pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1301 	pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1303 	dev->base_addr = pci_resource_start(pdev, 0);
1304 	dev->irq = pdev->irq;
1305 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
1306 	dev->poll_controller = poll_bcm5700;
1307 #endif
1309 #if (LINUX_VERSION_CODE >= 0x20600)
1310 	if ((i = register_netdev(dev))) {
1311 		printk(KERN_ERR "%s: Cannot register net device\n",
1312 			bcm5700_driver);
1313 		if (pUmDevice->lm_dev.pMappedMemBase)
1314 			iounmap(pUmDevice->lm_dev.pMappedMemBase);
1315 		pci_release_regions(pdev);
1316 		bcm5700_freemem(dev);
1317 		free_netdev(dev);
1318 		return i;
1320 #endif
1323 	pci_set_drvdata(pdev, dev);
1325 	memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1326 	/* NOTE(review): trailing comma (not semicolon) chains this assignment
1327 	 * into the following printk via the comma operator — works, but looks
1328 	 * accidental; confirm before reformatting. */
1329 	pUmDevice->name = board_info[ent->driver_data].name,
1330 	printk(KERN_INFO "%s: %s found at mem %lx, IRQ %d, ",
1331 		dev->name, pUmDevice->name, dev->base_addr,
1332 		dev->irq);
1333 	printk("node addr ");
1334 	for (i = 0; i < 6; i++) {
1335 		printk("%2.2x", dev->dev_addr[i]);
1336 	printk("\n");
1338 	/* human-readable identification of the attached PHY/transceiver */
1339 	printk(KERN_INFO "%s: ", dev->name);
1340 	if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5400_PHY_ID)
1341 		printk("Broadcom BCM5400 Copper ");
1342 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5401_PHY_ID)
1343 		printk("Broadcom BCM5401 Copper ");
1344 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5411_PHY_ID)
1345 		printk("Broadcom BCM5411 Copper ");
1346 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5461_PHY_ID)
1347 		printk("Broadcom BCM5461 Copper ");
1348 	else if (((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5701_PHY_ID) &&
1349 		!(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
1350 		printk("Broadcom BCM5701 Integrated Copper ");
1351 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5703_PHY_ID) {
1352 		printk("Broadcom BCM5703 Integrated ");
1353 		if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1354 			printk("SerDes ");
1355 		else
1356 			printk("Copper ");
1357 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5704_PHY_ID) {
1358 		printk("Broadcom BCM5704 Integrated ");
1359 		if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1360 			printk("SerDes ");
1361 		else
1362 			printk("Copper ");
1363 	else if (pDevice->PhyFlags & PHY_IS_FIBER){
1364 		if(( pDevice->PhyId & PHY_ID_MASK ) == PHY_BCM5780_PHY_ID)
1365 			printk("Broadcom BCM5780S Integrated Serdes ");
1368 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5705_PHY_ID)
1369 		printk("Broadcom BCM5705 Integrated Copper ");
1370 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5750_PHY_ID)
1371 		printk("Broadcom BCM5750 Integrated Copper ");
1373 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5714_PHY_ID)
1374 		printk("Broadcom BCM5714 Integrated Copper ");
1375 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5780_PHY_ID)
1376 		printk("Broadcom BCM5780 Integrated Copper ");
1378 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5752_PHY_ID)
1379 		printk("Broadcom BCM5752 Integrated Copper ");
1380 	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM8002_PHY_ID)
1381 		printk("Broadcom BCM8002 SerDes ");
1382 	else if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
1383 		if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1384 			printk("Broadcom BCM5703 Integrated SerDes ");
1386 		else if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1387 			printk("Broadcom BCM5704 Integrated SerDes ");
1389 		else {
1390 			printk("Agilent HDMP-1636 SerDes ");
1393 	else {
1394 		printk("Unknown ");
1396 	printk("transceiver found\n");
1398 #if (LINUX_VERSION_CODE >= 0x20400)
1399 	if (scatter_gather[board_idx]) {
1400 		dev->features |= NETIF_F_SG;
1401 		/* BCM5788 cannot do 64-bit DMA for highmem buffers */
1402 		if (pUmDevice->using_dac && !(pDevice->Flags & BCM5788_FLAG))
1403 			dev->features |= NETIF_F_HIGHDMA;
1404 	if ((pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM) &&
1405 		tx_checksum[board_idx]) {
1407 		dev->features |= get_csum_flag( pDevice->ChipRevId);
1409 #ifdef BCM_VLAN
1410 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1411 #endif
1412 #ifdef BCM_TSO
1413 	/* On 5714/15/80 chips, Jumbo Frames and TSO cannot both be enabled at
1414 	   the same time. Since only one of these features can be enabled at a
1415 	   time, we'll enable only Jumbo Frames and disable TSO when the user
1416 	   tries to enable both.
1418 	dev->features &= ~NETIF_F_TSO;
1420 	if ((pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION) &&
1421 	    (enable_tso[board_idx])) {
1422 		if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
1423 		   (dev->mtu > 1500)) {
1424 			printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
1425 		} else {
1426 			dev->features |= NETIF_F_TSO;
1429 #endif
1430 	printk(KERN_INFO "%s: Scatter-gather %s, 64-bit DMA %s, Tx Checksum %s, ",
1431 		dev->name,
1432 		(char *) ((dev->features & NETIF_F_SG) ? "ON" : "OFF"),
1433 		(char *) ((dev->features & NETIF_F_HIGHDMA) ? "ON" : "OFF"),
1434 		(char *) ((dev->features & get_csum_flag( pDevice->ChipRevId)) ? "ON" : "OFF"));
1435 #endif
1436 	if ((pDevice->ChipRevId != T3_CHIP_ID_5700_B0) &&
1437 		rx_checksum[board_idx])
1438 		printk("Rx Checksum ON");
1439 	else
1440 		printk("Rx Checksum OFF");
1441 #ifdef BCM_VLAN
1442 	printk(", 802.1Q VLAN ON");
1443 #endif
1444 #ifdef BCM_TSO
1445 	if (dev->features & NETIF_F_TSO) {
1446 		printk(", TSO ON");
1448 	else
1449 #endif
1450 #ifdef BCM_NAPI_RXPOLL
1451 	printk(", NAPI ON");
1452 #endif
1453 	printk("\n");
1455 #ifdef BCM_PROC_FS
1456 	bcm5700_proc_create_dev(dev);
1457 #endif
1458 	register_reboot_notifier(&bcm5700_reboot_notifier);
1459 #ifdef BCM_TASKLET
1460 	tasklet_init(&pUmDevice->tasklet, bcm5700_tasklet,
1461 		(unsigned long) pUmDevice);
1462 #endif
1463 	if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1464 		if ((REG_RD(pDevice, PciCfg.DualMacCtrl) &
1465 			T3_DUAL_MAC_CH_CTRL_MASK) == 3) {
1467 			printk(KERN_WARNING "%s: Device is configured for Hardware Based Teaming which is not supported with this operating system. Please consult the user diagnostic guide to disable Turbo Teaming.\n", dev->name);
1471 #if (LINUX_VERSION_CODE > 0x20605)
1473 	if ((pci_dev = pci_get_device(0x1022, 0x700c, NULL)))
1474 #else
1475 	if ((pci_dev = pci_find_device(0x1022, 0x700c, NULL)))
1476 #endif
1478 		u32 val;
1480 		/* Found AMD 762 North bridge */
1481 		pci_read_config_dword(pci_dev, 0x4c, &val);
1482 		if ((val & 0x02) == 0) {
1483 			pci_write_config_dword(pci_dev, 0x4c, val | 0x02);
1484 			printk(KERN_INFO "%s: Setting AMD762 Northbridge to enable PCI ordering compliance\n", bcm5700_driver);
1488 #if (LINUX_VERSION_CODE > 0x20605)
1490 	/* pci_dev_put(NULL) is a no-op, so unconditional drop is safe */
1491 	pci_dev_put(pci_dev);
1492 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1494 	if ((pci_dev = pci_get_device(0x1066, 0x0017, NULL))) {
1495 		bcm_msi_chipset_bug = 1;
1497 	pci_dev_put(pci_dev);
1498 #endif
1499 #endif
1501 	return 0;
/* PCI remove entry point: tears down everything bcm5700_init_one() set up —
 * proc entry, 32-bit ioctl translation (on last device), CTF registration,
 * the netdev itself, the MMIO mapping, PCI regions, and finally the
 * net_device memory and drvdata pointer. */
1505 static void __devexit
1506 bcm5700_remove_one (struct pci_dev *pdev)
1508 	struct net_device *dev = pci_get_drvdata (pdev);
1509 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1511 #ifdef BCM_PROC_FS
1512 	bcm5700_proc_remove_dev(dev);
1513 #endif
1514 #ifdef BCM_IOCTL32
1515 	atomic_dec(&bcm5700_load_count);
1516 	if (atomic_read(&bcm5700_load_count) == 0)
1517 		unregister_ioctl32_conversion(SIOCNICE);
1518 #endif
1519 #ifdef HNDCTF
1520 	ctf_dev_unregister(pUmDevice->cih, dev);
1521 #endif /* HNDCTF */
1522 	unregister_netdev(dev);
1524 	if (pUmDevice->lm_dev.pMappedMemBase)
1525 		iounmap(pUmDevice->lm_dev.pMappedMemBase);
1527 	pci_release_regions(pdev);
1529 #if (LINUX_VERSION_CODE < 0x020600)
1530 	kfree(dev);
1531 #else
1532 	free_netdev(dev);
1533 #endif
1535 	pci_set_drvdata(pdev, NULL);
1539 int b57_test_intr(UM_DEVICE_BLOCK *pUmDevice);
1541 #ifdef BCM_WL_EMULATOR
1542 /* new transmit callback */
1543 static int bcm5700emu_start_xmit(struct sk_buff *skb, struct net_device *dev);
1544 /* keep track of the 2 gige devices */
1545 static PLM_DEVICE_BLOCK pDev1;
1546 static PLM_DEVICE_BLOCK pDev2;
/* WL-emulator hookup for one interface, called from bcm5700_open().
 * On the first call, reads nvram vars "wlemu_if" (which netdev emulates) and
 * "wlemu_mode" ("rx" / "tx" / "rx_tx"); if this dev matches wlemu_if, creates
 * an emulator context and redirects hard_start_xmit for TX emulation.
 * The function-static instance counter also records the first two devices in
 * pDev1/pDev2 for debug access via bcm5700emu_get_info(). */
1548 static void
1549 bcm5700emu_open(struct net_device *dev)
1551 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1552 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1553 	static int instance = 0;
1554 	static char *wlemu_if = NULL;
1555 	char *wlemu_mode = NULL;
1556 	//int wlemu_idx = 0;
1557 	static int rx_enable = 0;
1558 	static int tx_enable = 0;
1560 	/* which interface is the emulator ? (nvram read only once) */
1561 	if(instance == 0) {
1562 		wlemu_if = nvram_get("wlemu_if");
1563 		/* do we emulate rx, tx or both  */
1564 		wlemu_mode = nvram_get("wlemu_mode");
1565 		if(wlemu_mode) {
1566 			if (!strcmp(wlemu_mode,"rx"))
1568 				rx_enable = 1;
1570 			else if (!strcmp(wlemu_mode,"tx"))
1573 				tx_enable = 1;
1576 			else if (!strcmp(wlemu_mode,"rx_tx"))
1579 				rx_enable = 1;
1580 				tx_enable = 1;
1585 	instance++;
1587 	/* The context is used for accessing the OSL for emulating devices */
1588 	pDevice->wlc = NULL;
1590 	/* determines if this device is an emulator */
1591 	pDevice->wl_emulate_rx = 0;
1592 	pDevice->wl_emulate_tx = 0;
1594 	if(wlemu_if && !strcmp(dev->name,wlemu_if))
1596 		/* create an emulator context. */
1597 		pDevice->wlc = (void *)wlcemu_wlccreate((void *)dev);
1598 		B57_INFO(("Using %s for wl emulation \n", dev->name));
1599 		if(rx_enable)
1601 			B57_INFO(("Enabling wl RX emulation \n"));
1602 			pDevice->wl_emulate_rx = 1;
1604 		/* re-direct transmit callback to emulator */
1605 		if(tx_enable)
1607 			pDevice->wl_emulate_tx = 1;
1608 			dev->hard_start_xmit = bcm5700emu_start_xmit;
1609 			B57_INFO(("Enabling wl TX emulation \n"));
1612 	/* for debug access to configured devices only */
1613 	if(instance == 1)
1614 		pDev1 = pDevice;
1615 	else if (instance == 2)
1616 		pDev2 = pDevice;
1619 /* Public API to get current emulation info */
1620 int bcm5700emu_get_info(char *buf)
1622 int len = 0;
1623 PLM_DEVICE_BLOCK p;
1625 /* look for an emulating device */
1626 if(pDev1->wlc) {
1627 p = pDev1;
1628 len += sprintf(buf+len,"emulation device : eth0\n");
1630 else if (pDev2->wlc) {
1631 p = pDev2;
1632 len += sprintf(buf+len,"emulation device : eth1\n");
1634 else {
1635 len += sprintf(buf+len,"emulation not activated\n");
1636 return len;
1638 if(p->wl_emulate_rx)
1639 len += sprintf(buf+len,"RX emulation enabled\n");
1640 else
1641 len += sprintf(buf+len,"RX emulation disabled\n");
1642 if(p->wl_emulate_tx)
1643 len += sprintf(buf+len,"TX emulation enabled\n");
1644 else
1645 len += sprintf(buf+len,"TX emulation disabled\n");
1646 return len;
1651 /* Public API to access the bcm5700_start_xmit callback */
1653 int
1654 bcm5700emu_forward_xmit(struct sk_buff *skb, struct net_device *dev)
1656 return bcm5700_start_xmit(skb, dev);
1660 /* hook to kernel txmit callback */
1661 STATIC int
1662 bcm5700emu_start_xmit(struct sk_buff *skb, struct net_device *dev)
1665 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1666 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1667 return wlcemu_start_xmit(skb,pDevice->wlc);
1670 #endif /* BCM_WL_EMULATOR */
/* netdev open: sets up timers/counters, optionally enables MSI (skipping
 * chips/chipsets with known MSI bugs), requests the IRQ, initializes the
 * adapter through the LM layer, applies a user-administered MAC address if
 * valid, verifies MSI delivery actually works (falling back to INTx if the
 * test interrupt fails), starts the watchdog and stats timers, and wakes the
 * TX queue. Returns 0 or a negative errno; on failure all partially acquired
 * resources (IRQ, MSI, driver memory) are released. */
1673 bcm5700_open(struct net_device *dev)
1675 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1676 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1677 	int rc;
1679 	if (pUmDevice->suspended){
1680 		return -EAGAIN;
1683 #ifdef BCM_WL_EMULATOR
1684 	bcm5700emu_open(dev);
1685 #endif
1687 	/* delay for 6 seconds */
1688 	pUmDevice->delayed_link_ind = (6 * HZ) / pUmDevice->timer_interval;
1690 #ifdef BCM_INT_COAL
1691 #ifndef BCM_NAPI_RXPOLL
1692 	pUmDevice->adaptive_expiry = HZ / pUmDevice->timer_interval;
1693 #endif
1694 #endif
1696 #ifdef INCLUDE_TBI_SUPPORT
1697 	/* TBI (fiber) links are polled from the watchdog timer */
1698 	if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
1699 		(pDevice->TbiFlags & TBI_POLLING_FLAGS)) {
1700 		pUmDevice->poll_tbi_interval = HZ / pUmDevice->timer_interval;
1701 		if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1702 			pUmDevice->poll_tbi_interval /= 4;
1703 		pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1705 #endif
1706 	/* set this timer for 2 seconds */
1707 	pUmDevice->asf_heartbeat = (2 * HZ) / pUmDevice->timer_interval;
1709 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1712 	/* MSI only on 575X+ silicon, excluding revisions with known MSI errata,
1713 	 * and only when no buggy host chipset was detected at probe time */
1714 	if ( ( (T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ) &&
1715 		(T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5714_A0 ) &&
1716 		(T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_AX ) &&
1717 		(T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_BX ) ) &&
1718 		!bcm_msi_chipset_bug ){
1718 		if (disable_msi[pUmDevice->index]==1){
1719 			/* do nothing-it's not turned on */
1720 		}else{
1721 			pDevice->Flags |= USING_MSI_FLAG;
1723 			REG_WR(pDevice, Msi.Mode,  2 );
1725 			rc = pci_enable_msi(pUmDevice->pdev);
1727 			if(rc!=0){
1728 				pDevice->Flags &= ~ USING_MSI_FLAG;
1729 				REG_WR(pDevice, Msi.Mode,  1 );
1735 #endif
1737 	if ((rc= request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt, IRQF_SHARED, dev->name, dev)))
1740 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1742 		if(pDevice->Flags & USING_MSI_FLAG) {
1744 			pci_disable_msi(pUmDevice->pdev);
1745 			pDevice->Flags &= ~USING_MSI_FLAG;
1746 			REG_WR(pDevice, Msi.Mode,  1 );
1749 #endif
1750 		return rc;
1753 	pUmDevice->opened = 1;
1754 	if (LM_InitializeAdapter(pDevice) != LM_STATUS_SUCCESS) {
1755 		pUmDevice->opened = 0;
1756 		free_irq(dev->irq, dev);
1757 		bcm5700_freemem(dev);
1758 		return -EAGAIN;
1761 	bcm5700_set_vlan_mode(pUmDevice);
1762 	bcm5700_init_counters(pUmDevice);
1764 	if (pDevice->Flags & UNDI_FIX_FLAG) {
1765 		printk(KERN_INFO "%s: Using indirect register access\n", dev->name);
1768 	/* honor a MAC address administered before open, if it is valid */
1769 	if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6))
1770 		/* Do not use invalid eth addrs: any multicast & all zeros */
1771 		if( is_valid_ether_addr(dev->dev_addr) ){
1772 			LM_SetMacAddress(pDevice, dev->dev_addr);
1774 		else
1776 			printk(KERN_INFO "%s: Invalid administered node address\n",dev->name);
1777 			memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1781 	if (tigon3_debug > 1)
1782 		printk(KERN_DEBUG "%s: tigon3_open() irq %d.\n", dev->name, dev->irq);
1784 	QQ_InitQueue(&pUmDevice->rx_out_of_buf_q.Container,
1785 		MAX_RX_PACKET_DESC_COUNT);
1788 #if (LINUX_VERSION_CODE < 0x020300)
1789 	MOD_INC_USE_COUNT;
1790 #endif
1792 	atomic_set(&pUmDevice->intr_sem, 0);
1794 	LM_EnableInterrupt(pDevice);
1796 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1798 	if (pDevice->Flags & USING_MSI_FLAG){
1800 		/* int test to check support on older machines */
1801 		if (b57_test_intr(pUmDevice) != 1) {
1803 			/* MSI did not deliver: tear it down, reset, retry with INTx */
1804 			LM_DisableInterrupt(pDevice);
1805 			free_irq(pUmDevice->pdev->irq, dev);
1806 			pci_disable_msi(pUmDevice->pdev);
1807 			REG_WR(pDevice, Msi.Mode,  1 );
1808 			pDevice->Flags &= ~USING_MSI_FLAG;
1809 			rc = LM_ResetAdapter(pDevice);
1810 			printk(KERN_ALERT " The MSI support in this system is not functional.\n");
1812 			if (rc == LM_STATUS_SUCCESS)
1813 				rc = 0;
1814 			else
1815 				rc = -ENODEV;
1817 			if(rc == 0){
1818 				rc = request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt,
1819 					SA_SHIRQ, dev->name, dev);
1822 			if(rc){
1823 				LM_Halt(pDevice);
1824 				bcm5700_freemem(dev);
1825 				pUmDevice->opened = 0;
1826 				return rc;
1830 			pDevice->InitDone = TRUE;
1831 			atomic_set(&pUmDevice->intr_sem, 0);
1832 			LM_EnableInterrupt(pDevice);
1835 #endif
1837 	init_timer(&pUmDevice->timer);
1838 	pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1839 	pUmDevice->timer.data = (unsigned long)dev;
1840 	pUmDevice->timer.function = &bcm5700_timer;
1841 	add_timer(&pUmDevice->timer);
1843 	/* 5705+ chips need a separate periodic stats fetch */
1844 	if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
1845 		init_timer(&pUmDevice->statstimer);
1846 		pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1847 		pUmDevice->statstimer.data = (unsigned long)dev;
1848 		pUmDevice->statstimer.function = &bcm5700_stats_timer;
1849 		add_timer(&pUmDevice->statstimer);
1851 	if(pDevice->Flags & USING_MSI_FLAG)
1852 		printk(KERN_INFO "%s: Using Message Signaled Interrupt (MSI)  \n", dev->name);
1853 	else
1854 		printk(KERN_INFO "%s: Using PCI INTX interrupt \n", dev->name);
1856 	netif_start_queue(dev);
1858 	return 0;
/* Periodic stats timer (5705+ chips): fetches hardware statistics via
 * LM_GetStats under the global lock while the link is up and interrupts are
 * not masked, then re-arms itself. data is the net_device pointer. */
1862 STATIC void
1863 bcm5700_stats_timer(unsigned long data)
1865 	struct net_device *dev = (struct net_device *)data;
1866 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1867 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1868 	unsigned long flags = 0;
1870 	if (!pUmDevice->opened)
1871 		return;
1873 	if (!atomic_read(&pUmDevice->intr_sem) &&
1874 	    !pUmDevice->suspended  &&
1875 	   (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE)) {
1876 		BCM5700_LOCK(pUmDevice, flags);
1877 		LM_GetStats(pDevice);
1878 		BCM5700_UNLOCK(pUmDevice, flags);
1881 	pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1883 	add_timer(&pUmDevice->statstimer);
/* Main watchdog timer, re-armed every timer_interval jiffies. Handles:
 * BCM4785 posted-write flushing; deferral while suspended or interrupts
 * masked; optional TBI (fiber) link polling; delayed link-up indication;
 * forcing an interrupt when a status-block update went unserviced (and a
 * full reset if the write-DMA engine died); a 2.4-era TX hang check; adaptive
 * interrupt coalescing; kicking the ISR when the out-of-buffer rx queue grows
 * too large; the ASF heartbeat to firmware; and 5714-family fiber link
 * checks. data is the net_device pointer. */
1887 STATIC void
1888 bcm5700_timer(unsigned long data)
1890 	struct net_device *dev = (struct net_device *)data;
1891 	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1892 	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1893 	unsigned long flags = 0;
1894 	LM_UINT32 value32;
1896 	if (!pUmDevice->opened)
1897 		return;
1899 	/* BCM4785: Flush posted writes from GbE to host memory. */
1900 	if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
1901 		REG_RD(pDevice, HostCoalesce.Mode);
1903 	if (atomic_read(&pUmDevice->intr_sem) || pUmDevice->suspended) {
1904 		pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1905 		add_timer(&pUmDevice->timer);
1906 		return;
1909 #ifdef INCLUDE_TBI_SUPPORT
1910 	if ((pDevice->TbiFlags & TBI_POLLING_FLAGS) &&
1911 	    (--pUmDevice->poll_tbi_expiry <= 0)) {
1913 		BCM5700_PHY_LOCK(pUmDevice, flags);
1914 		value32 = REG_RD(pDevice, MacCtrl.Status);
1915 		/* re-run PHY setup if the link state and PCS sync disagree */
1916 		if (((pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) &&
1917 			((value32 & (MAC_STATUS_LINK_STATE_CHANGED |
1918 				MAC_STATUS_CFG_CHANGED)) ||
1919 			!(value32 & MAC_STATUS_PCS_SYNCED)))
1920 			||
1921 			((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
1922 			(value32 & (MAC_STATUS_PCS_SYNCED |
1923 				MAC_STATUS_SIGNAL_DETECTED))))
1924 			LM_SetupPhy(pDevice);
1926 		BCM5700_PHY_UNLOCK(pUmDevice, flags);
1927 		pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1930 #endif
1932 	/* report link status only after the configured settle delay */
1933 	if (pUmDevice->delayed_link_ind > 0) {
1934 		if (pUmDevice->delayed_link_ind == 1)
1935 			MM_IndicateStatus(pDevice, pDevice->LinkStatus);
1936 		else
1937 			pUmDevice->delayed_link_ind--;
1939 	if (pUmDevice->crc_counter_expiry > 0)
1940 		pUmDevice->crc_counter_expiry--;
1942 	if (!pUmDevice->interrupt) {
1943 		if (!(pDevice->Flags & USE_TAGGED_STATUS_FLAG)) {
1944 			BCM5700_LOCK(pUmDevice, flags);
1945 			if (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) {
1946 				/* This will generate an interrupt */
1947 				REG_WR(pDevice, Grc.LocalCtrl,
1948 					pDevice->GrcLocalCtrl |
1949 					GRC_MISC_LOCAL_CTRL_SET_INT);
1951 			else {
1952 				REG_WR(pDevice, HostCoalesce.Mode,
1953 					pDevice->CoalesceMode |
1954 					HOST_COALESCE_ENABLE |
1955 					HOST_COALESCE_NOW);
1957 			/* a stopped write-DMA engine means the chip wedged: reset */
1958 			if (!(REG_RD(pDevice, DmaWrite.Mode) &
1959 				DMA_WRITE_MODE_ENABLE)) {
1960 				BCM5700_UNLOCK(pUmDevice, flags);
1961 				bcm5700_reset(dev);
1962 			else {
1963 				BCM5700_UNLOCK(pUmDevice, flags);
1965 			if (pUmDevice->tx_queued) {
1966 				pUmDevice->tx_queued = 0;
1967 				netif_wake_queue(dev);
1970 #if (LINUX_VERSION_CODE < 0x02032b)
1971 		if ((QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) !=
1972 			pDevice->TxPacketDescCnt) &&
1973 			((jiffies - dev->trans_start) > TX_TIMEOUT)) {
1975 			printk(KERN_WARNING "%s: Tx hung\n", dev->name);
1976 			bcm5700_reset(dev);
1978 #endif
1980 #ifdef BCM_INT_COAL
1981 #ifndef BCM_NAPI_RXPOLL
1982 	if (pUmDevice->adaptive_coalesce) {
1983 		pUmDevice->adaptive_expiry--;
1984 		if (pUmDevice->adaptive_expiry == 0) {
1985 			pUmDevice->adaptive_expiry = HZ /
1986 				pUmDevice->timer_interval;
1987 			bcm5700_adapt_coalesce(pUmDevice);
1990 #endif
1991 #endif
1992 	if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container) >
1993 		(unsigned int) pUmDevice->rx_buf_repl_panic_thresh) {
1994 		/* Generate interrupt and let isr allocate buffers */
1995 		REG_WR(pDevice, HostCoalesce.Mode, pDevice->CoalesceMode |
1996 			HOST_COALESCE_ENABLE | HOST_COALESCE_NOW);
1999 #ifdef BCM_ASF
2000 	if (pDevice->AsfFlags & ASF_ENABLED) {
2001 		pUmDevice->asf_heartbeat--;
2002 		if (pUmDevice->asf_heartbeat == 0) {
2003 			if( (pDevice->Flags & UNDI_FIX_FLAG) ||
2004 				(pDevice->Flags & ENABLE_PCIX_FIX_FLAG)) {
2005 				MEM_WR_OFFSET(pDevice, T3_CMD_MAILBOX,
2006 					T3_CMD_NICDRV_ALIVE2);
2007 				MEM_WR_OFFSET(pDevice, T3_CMD_LENGTH_MAILBOX,
2009 				MEM_WR_OFFSET(pDevice, T3_CMD_DATA_MAILBOX, 5);
2010 			} else {
2011 				LM_RegWr(pDevice,
2012 					(T3_NIC_MBUF_POOL_ADDR +
2013 						T3_CMD_MAILBOX),
2014 					T3_CMD_NICDRV_ALIVE2, 1);
2015 				LM_RegWr(pDevice,
2016 					(T3_NIC_MBUF_POOL_ADDR +
2017 						T3_CMD_LENGTH_MAILBOX),4,1);
2018 				LM_RegWr(pDevice,
2019 					(T3_NIC_MBUF_POOL_ADDR +
2020 						T3_CMD_DATA_MAILBOX),5,1);
2023 			value32 = REG_RD(pDevice, Grc.RxCpuEvent);
2024 			REG_WR(pDevice, Grc.RxCpuEvent, value32 | BIT_14);
2025 			pUmDevice->asf_heartbeat = (2 * HZ) /
2026 				pUmDevice->timer_interval;
2029 #endif
2031 	if (pDevice->PhyFlags & PHY_IS_FIBER){
2032 		BCM5700_PHY_LOCK(pUmDevice, flags);
2033 		LM_5714_FamFiberCheckLink(pDevice);
2034 		BCM5700_PHY_UNLOCK(pUmDevice, flags);
2037 	pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
2038 	add_timer(&pUmDevice->timer);
/* Reset the driver's soft counters for a (re)opened device: adaptive
 * coalescing baselines (when built in), the PHY CRC error count, and the
 * debug/TSO packet counters. Always returns 0. */
2041 STATIC int
2042 bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice)
2044 #ifdef BCM_INT_COAL
2045 #ifndef BCM_NAPI_RXPOLL
2046 	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2048 	pUmDevice->rx_curr_coalesce_frames = pDevice->RxMaxCoalescedFrames;
2049 	pUmDevice->rx_curr_coalesce_ticks = pDevice->RxCoalescingTicks;
2050 	pUmDevice->tx_curr_coalesce_frames = pDevice->TxMaxCoalescedFrames;
2051 	pUmDevice->rx_last_cnt = 0;
2052 	pUmDevice->tx_last_cnt = 0;
2053 #endif
2054 #endif
2055 	pUmDevice->phy_crc_count = 0;
2056 #if TIGON3_DEBUG
2057 	pUmDevice->tx_zc_count = 0;
2058 	pUmDevice->tx_chksum_count = 0;
2059 	pUmDevice->tx_himem_count = 0;
2060 	pUmDevice->rx_good_chksum_count = 0;
2061 	pUmDevice->rx_bad_chksum_count = 0;
2062 #endif
2063 #ifdef BCM_TSO
2064 	pUmDevice->tso_pkt_count = 0;
2065 #endif
2066 	return 0;
2069 #ifdef BCM_INT_COAL
2070 #ifndef BCM_NAPI_RXPOLL
/* Apply a new interrupt-coalescing profile: record the values in the
 * per-device state and program them into the HostCoalesce registers.
 * Called from the timer-driven adaptive coalescing logic.
 * Always returns 0 (also when it bails out early because the global
 * lock is busy).
 */
STATIC int
bcm5700_do_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice,
	int rx_frames, int rx_ticks, int tx_frames, int rx_frames_intr)
	unsigned long flags = 0;
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;

	if (pUmDevice->do_global_lock) {
		/* NOTE(review): spin_is_locked() followed by
		 * spin_lock_irqsave() is inherently racy; this is only a
		 * best-effort way to skip the update rather than spin while
		 * the ISR holds the lock — confirm that is the intent. */
		if (spin_is_locked(&pUmDevice->global_lock))
			return 0;
		spin_lock_irqsave(&pUmDevice->global_lock, flags);
	pUmDevice->rx_curr_coalesce_frames = rx_frames;
	pUmDevice->rx_curr_coalesce_ticks = rx_ticks;
	pUmDevice->tx_curr_coalesce_frames = tx_frames;
	pUmDevice->rx_curr_coalesce_frames_intr = rx_frames_intr;
	/* Push the new profile into the hardware coalescing engine. */
	REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFrames, rx_frames);
	REG_WR(pDevice, HostCoalesce.RxCoalescingTicks, rx_ticks);
	REG_WR(pDevice, HostCoalesce.TxMaxCoalescedFrames, tx_frames);
	REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFramesDuringInt,
		rx_frames_intr);
	BCM5700_UNLOCK(pUmDevice, flags);
	return 0;
/* Timer-driven adaptive interrupt coalescing.  Samples the hardware
 * unicast rx/tx packet counters, derives a weighted packet-rate delta
 * since the previous sample, and selects one of three coalescing
 * profiles (LO / DEFAULT / HI) accordingly.  The profile is only
 * reprogrammed when it actually changes.  Always returns 0.
 */
STATIC int
bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice)
	PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
	uint rx_curr_cnt, tx_curr_cnt, rx_delta, tx_delta, total_delta;

	rx_curr_cnt = pDevice->pStatsBlkVirt->ifHCInUcastPkts.Low;
	tx_curr_cnt = pDevice->pStatsBlkVirt->ifHCOutUcastPkts.Low;
	/* NOTE(review): rx uses '<=' but tx uses '<' — rx treats a
	 * zero-packet interval like a rollover and skips the update;
	 * confirm the asymmetry is intentional. */
	if ((rx_curr_cnt <= pUmDevice->rx_last_cnt) ||
		(tx_curr_cnt < pUmDevice->tx_last_cnt)) {

		/* skip if there is counter rollover */
		pUmDevice->rx_last_cnt = rx_curr_cnt;
		pUmDevice->tx_last_cnt = tx_curr_cnt;
		return 0;

	rx_delta = rx_curr_cnt - pUmDevice->rx_last_cnt;
	tx_delta = tx_curr_cnt - pUmDevice->tx_last_cnt;
	/* Weighted mix: (2*rx + tx)/3, then doubled — rx traffic counts
	 * twice as much as tx when picking the profile. */
	total_delta = (((rx_delta + rx_delta) + tx_delta) / 3) << 1;

	pUmDevice->rx_last_cnt = rx_curr_cnt;
	pUmDevice->tx_last_cnt = tx_curr_cnt;

	if (total_delta < ADAPTIVE_LO_PKT_THRESH) {
		if (pUmDevice->rx_curr_coalesce_frames !=
			ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES) {

			bcm5700_do_adapt_coalesce(pUmDevice,
				ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES,
				ADAPTIVE_LO_RX_COALESCING_TICKS,
				ADAPTIVE_LO_TX_MAX_COALESCED_FRAMES,
				ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES_DURING_INT);
	else if (total_delta < ADAPTIVE_HI_PKT_THRESH) {
		if (pUmDevice->rx_curr_coalesce_frames !=
			DEFAULT_RX_MAX_COALESCED_FRAMES) {

			bcm5700_do_adapt_coalesce(pUmDevice,
				DEFAULT_RX_MAX_COALESCED_FRAMES,
				DEFAULT_RX_COALESCING_TICKS,
				DEFAULT_TX_MAX_COALESCED_FRAMES,
				DEFAULT_RX_MAX_COALESCED_FRAMES_DURING_INT);
	else {
		if (pUmDevice->rx_curr_coalesce_frames !=
			ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES) {

			bcm5700_do_adapt_coalesce(pUmDevice,
				ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES,
				ADAPTIVE_HI_RX_COALESCING_TICKS,
				ADAPTIVE_HI_TX_MAX_COALESCED_FRAMES,
				ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES_DURING_INT);
	return 0;
2159 #endif
2160 #endif
/* Full software-driven reset of the adapter: quiesce the tx queue and
 * interrupts, reset and reprogram the chip (rx mode, VLAN stripping,
 * counters, MAC address) under the PHY lock, then re-enable interrupts
 * and restart the queue.
 */
STATIC void
bcm5700_reset(struct net_device *dev)
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	unsigned long flags;

#ifdef BCM_TSO
	/* If the reset happened while the tx ring was full, turn TSO off
	 * permanently — presumably a workaround for a TSO-related tx hang;
	 * TODO confirm the underlying erratum. */
	if( (dev->features & NETIF_F_TSO) &&
		(pUmDevice->tx_full) ) {

		dev->features &= ~NETIF_F_TSO;
#endif

	netif_stop_queue(dev);
	bcm5700_intr_off(pUmDevice);
	BCM5700_PHY_LOCK(pUmDevice, flags);
	LM_ResetAdapter(pDevice);
	pDevice->InitDone = TRUE;
	bcm5700_do_rx_mode(dev);
	bcm5700_set_vlan_mode(pUmDevice);
	bcm5700_init_counters(pUmDevice);
	/* Restore a user-set MAC address that differs from the NVRAM one. */
	if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
		LM_SetMacAddress(pDevice, dev->dev_addr);
	BCM5700_PHY_UNLOCK(pUmDevice, flags);
	atomic_set(&pUmDevice->intr_sem, 1);
	bcm5700_intr_on(pUmDevice);
	netif_wake_queue(dev);
/* Decide whether the chip should strip VLAN tags on receive and update
 * the receive mask accordingly.  AUTO mode resolves to FORCED_STRIP
 * when ASF management is enabled (the firmware needs tagless frames),
 * otherwise to NORMAL_STRIP where the tag is kept unless a VLAN group
 * (or NICE rx hook) will consume it.
 */
STATIC void
bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice)
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
	LM_UINT32 ReceiveMask = pDevice->ReceiveMask;
	int vlan_tag_mode = pUmDevice->vlan_tag_mode;

	if (vlan_tag_mode == VLAN_TAG_MODE_AUTO_STRIP) {
		if (pDevice->AsfFlags & ASF_ENABLED) {
			vlan_tag_mode = VLAN_TAG_MODE_FORCED_STRIP;
		else {
			vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
	if (vlan_tag_mode == VLAN_TAG_MODE_NORMAL_STRIP) {
		/* Keep the tag in the frame by default ... */
		ReceiveMask |= LM_KEEP_VLAN_TAG;
#ifdef BCM_VLAN
		/* ... unless the kernel VLAN layer is attached. */
		if (pUmDevice->vlgrp)
			ReceiveMask &= ~LM_KEEP_VLAN_TAG;
#endif
#ifdef NICE_SUPPORT
		if (pUmDevice->nice_rx)
			ReceiveMask &= ~LM_KEEP_VLAN_TAG;
#endif
	else if (vlan_tag_mode == VLAN_TAG_MODE_FORCED_STRIP) {
		ReceiveMask &= ~LM_KEEP_VLAN_TAG;
	/* Only touch the hardware when the mask actually changed. */
	if (ReceiveMask != pDevice->ReceiveMask)
		LM_SetReceiveMask(pDevice, ReceiveMask);
/* Sleep (one tick at a time) until any in-flight NAPI rx poll has
 * finished.  Callers disable interrupts first, so RxPoll can only go
 * from set to clear.  No-op when NAPI rx polling is compiled out.
 */
static void
bcm5700_poll_wait(UM_DEVICE_BLOCK *pUmDevice)
#ifdef BCM_NAPI_RXPOLL
	while (pUmDevice->lm_dev.RxPoll) {
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);
#endif
2242 #ifdef BCM_VLAN
/* net_device VLAN hook: attach (or detach, vlgrp == NULL) the kernel
 * VLAN group.  Interrupts are masked and pending rx polls drained so
 * the tag-strip mode can be switched safely.
 */
STATIC void
bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;

	bcm5700_intr_off(pUmDevice);
	bcm5700_poll_wait(pUmDevice);
	pUmDevice->vlgrp = vlgrp;
	/* Re-evaluate hardware VLAN tag stripping for the new group. */
	bcm5700_set_vlan_mode(pUmDevice);
	bcm5700_intr_on(pUmDevice);
/* net_device VLAN hook: remove one VLAN id from the attached group.
 * Interrupts are masked and pending rx polls drained so the rx path
 * cannot look up the entry while it is being cleared.
 */
STATIC void
bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;

	bcm5700_intr_off(pUmDevice);
	bcm5700_poll_wait(pUmDevice);
	if (pUmDevice->vlgrp) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
		vlan_group_set_device(pUmDevice->vlgrp, vid, NULL);
#else
		/* Older kernels expose the table directly. */
		pUmDevice->vlgrp->vlan_devices[vid] = NULL;
#endif
	bcm5700_intr_on(pUmDevice);
2271 #endif
/* Transmit one skb.  Drops the frame when the link is down, the chip is
 * uninitialized, or the device is suspended.  Returns 0 when the skb
 * was consumed (sent or freed) and 1 to ask the stack to requeue it
 * (no tx descriptor/BD available, or lock contention with the ISR).
 * Handles hardware checksum offload, scatter-gather fragments, VLAN tag
 * insertion (kernel VLAN and NICE), and TSO header fixups.
 */
STATIC int
_bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev)
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	PLM_PACKET pPacket;
	PUM_PACKET pUmPacket;
	unsigned long flags = 0;
	int frag_no;
#ifdef NICE_SUPPORT
	vlan_tag_t *vlan_tag;
#endif
#ifdef BCM_TSO
	LM_UINT32 mss = 0 ;
	uint16_t ip_tcp_len, tcp_opt_len, tcp_seg_flags;
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
	struct tcphdr *th;
	struct iphdr *iph;
#endif

	/* Cannot transmit: silently drop and report success to the stack. */
	if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) ||
		!pDevice->InitDone || pUmDevice->suspended)
		dev_kfree_skb(skb);
		return 0;
#if (LINUX_VERSION_CODE < 0x02032b)
	if (test_and_set_bit(0, &dev->tbusy)) {
		return 1;
#endif

	/* Avoid contending with the ISR when global locking is in effect;
	 * the ISR will wake the queue via tx_queued when it exits. */
	if (pUmDevice->do_global_lock && pUmDevice->interrupt) {
		netif_stop_queue(dev);
		pUmDevice->tx_queued = 1;
		if (!pUmDevice->interrupt) {
			netif_wake_queue(dev);
			pUmDevice->tx_queued = 0;
		return 1;

	pPacket = (PLM_PACKET)
		QQ_PopHead(&pDevice->TxPacketFreeQ.Container);
	if (pPacket == 0) {
		/* No free packet descriptor: stop the queue, but re-check to
		 * close the race with a concurrent completion. */
		netif_stop_queue(dev);
		pUmDevice->tx_full = 1;
		if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container)) {
			netif_wake_queue(dev);
			pUmDevice->tx_full = 0;
		return 1;
	pUmPacket = (PUM_PACKET) pPacket;
	pUmPacket->skbuff = skb;
	pUmDevice->stats.tx_bytes += skb->len;

	if (skb->ip_summed == CHECKSUM_HW) {
		pPacket->Flags = SND_BD_FLAG_TCP_UDP_CKSUM;
#if TIGON3_DEBUG
		pUmDevice->tx_chksum_count++;
#endif
	else {
		pPacket->Flags = 0;
#if MAX_SKB_FRAGS
	frag_no = skb_shinfo(skb)->nr_frags;
#else
	frag_no = 0;
#endif
	/* Need one send BD for the header plus one per page fragment. */
	if (atomic_read(&pDevice->SendBdLeft) < (frag_no + 1)) {
		netif_stop_queue(dev);
		pUmDevice->tx_full = 1;
		QQ_PushHead(&pDevice->TxPacketFreeQ.Container, pPacket);
		if (atomic_read(&pDevice->SendBdLeft) >= (frag_no + 1)) {
			netif_wake_queue(dev);
			pUmDevice->tx_full = 0;
		return 1;

	pPacket->u.Tx.FragCount = frag_no + 1;
#if TIGON3_DEBUG
	if (pPacket->u.Tx.FragCount > 1)
		pUmDevice->tx_zc_count++;
#endif

#ifdef BCM_VLAN
	if (pUmDevice->vlgrp && vlan_tx_tag_present(skb)) {
		pPacket->VlanTag = vlan_tx_tag_get(skb);
		pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
#endif
#ifdef NICE_SUPPORT
	/* NICE passes a VLAN tag in skb->cb[], flagged by 0x5555. */
	vlan_tag = (vlan_tag_t *) &skb->cb[0];
	if (vlan_tag->signature == 0x5555) {
		pPacket->VlanTag = vlan_tag->tag;
		pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
		vlan_tag->signature = 0;
#endif

#ifdef BCM_TSO
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16)
	if ((mss = (LM_UINT32) skb_shinfo(skb)->gso_size) &&
		(skb->len > pDevice->TxMtu)) {
#else
	if ((mss = (LM_UINT32) skb_shinfo(skb)->tso_size) &&
		(skb->len > pDevice->TxMtu)) {
#endif

#if (LINUX_VERSION_CODE >= 0x02060c)
		/* Headers are modified below, so unshare a cloned header. */
		if (skb_header_cloned(skb) &&
			pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {

			dev_kfree_skb(skb);
			return 0;
#endif
		pUmDevice->tso_pkt_count++;

		pPacket->Flags |= SND_BD_FLAG_CPU_PRE_DMA |
			SND_BD_FLAG_CPU_POST_DMA;

		tcp_opt_len = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
		th = (struct tcphdr *)skb_transport_header(skb);
		iph = (struct iphdr *)skb_network_header(skb);

		ASSERT((iph != NULL) && (th != NULL));

		if (th->doff > 5) {
			tcp_opt_len = (th->doff - 5) << 2;
		ip_tcp_len = (iph->ihl << 2) + sizeof(struct tcphdr);
		iph->check = 0;

		/* 575X+ hardware computes the TCP pseudo-header checksum
		 * itself; older chips need it pre-seeded. */
		if ( T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ){
			th->check = 0;
			pPacket->Flags &= ~SND_BD_FLAG_TCP_UDP_CKSUM;
		else {
			th->check = ~csum_tcpudp_magic(
					iph->saddr, iph->daddr,
					0, IPPROTO_TCP, 0);
		/* tot_len of each emitted segment, not of the super-frame. */
		iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		tcp_seg_flags = 0;

		/* Encode extra IP/TCP header length for the chip; the bit
		 * position differs between 5705+ and older ASICs. */
		if (tcp_opt_len || (iph->ihl > 5)) {
			if ( T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId) ){
				tcp_seg_flags =
					((iph->ihl - 5) +
					(tcp_opt_len >> 2)) << 11;
			else {
				pPacket->Flags |=
					((iph->ihl - 5) +
					(tcp_opt_len >> 2)) << 12;
#else
		/* Pre-2.6.22 header accessors (skb->h / skb->nh). */
		if (skb->h.th->doff > 5) {
			tcp_opt_len = (skb->h.th->doff - 5) << 2;
		ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
		skb->nh.iph->check = 0;

		if ( T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ){
			skb->h.th->check = 0;
			pPacket->Flags &= ~SND_BD_FLAG_TCP_UDP_CKSUM;
		else {
			skb->h.th->check = ~csum_tcpudp_magic(
					skb->nh.iph->saddr, skb->nh.iph->daddr,
					0, IPPROTO_TCP, 0);

		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		tcp_seg_flags = 0;

		if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
			if ( T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId) ){
				tcp_seg_flags =
					((skb->nh.iph->ihl - 5) +
					(tcp_opt_len >> 2)) << 11;
			else {
				pPacket->Flags |=
					((skb->nh.iph->ihl - 5) +
					(tcp_opt_len >> 2)) << 12;
#endif
		pPacket->u.Tx.MaxSegmentSize = mss | tcp_seg_flags;
	else
		pPacket->u.Tx.MaxSegmentSize = 0;
#endif
	BCM5700_LOCK(pUmDevice, flags);
	LM_SendPacket(pDevice, pPacket);
	BCM5700_UNLOCK(pUmDevice, flags);

#if (LINUX_VERSION_CODE < 0x02032b)
	netif_wake_queue(dev);
#endif
	dev->trans_start = jiffies;

	return 0;
/* net_device hard_start_xmit entry point: unchains possibly-chained
 * skbs (Broadcom GLUE macro) and submits each one individually.
 */
STATIC int
bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev)
	void *n;

	FOREACH_CHAINED_PKT(skb, n) {
		_bcm5700_start_xmit(skb, dev);
2502 #ifdef BCM_NAPI_RXPOLL
/* NAPI poll callback.  Services up to min(*budget, dev->quota) rx
 * packets, replenishes rx buffers, and indicates the received packets
 * to the stack.  Returns 1 to stay on the poll list (budget exhausted)
 * or 0 after completing the poll and re-enabling rx interrupts, taking
 * care to catch status-block updates that raced with the re-enable.
 */
STATIC int
bcm5700_poll(struct net_device *dev, int *budget)
	int orig_budget = *budget;
	int work_done;
	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
	unsigned long flags = 0;
	LM_UINT32 tag;

	if (orig_budget > dev->quota)
		orig_budget = dev->quota;

	BCM5700_LOCK(pUmDevice, flags);
	/* BCM4785: Flush posted writes from GbE to host memory. */
	if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
		REG_RD(pDevice, HostCoalesce.Mode);
	work_done = LM_ServiceRxPoll(pDevice, orig_budget);
	*budget -= work_done;
	dev->quota -= work_done;

	if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
		replenish_rx_buffers(pUmDevice, 0);
	BCM5700_UNLOCK(pUmDevice, flags);
	if (work_done) {
		/* Hand the serviced packets to the stack outside the lock. */
		MM_IndicateRxPackets(pDevice);
		BCM5700_LOCK(pUmDevice, flags);
		LM_QueueRxPackets(pDevice);
		BCM5700_UNLOCK(pUmDevice, flags);
	if ((work_done < orig_budget) || atomic_read(&pUmDevice->intr_sem) ||
		pUmDevice->suspended) {

		netif_rx_complete(dev);
		BCM5700_LOCK(pUmDevice, flags);
		REG_WR(pDevice, Grc.Mode, pDevice->GrcMode);
		pDevice->RxPoll = FALSE;
		/* NOTE(review): RxPoll was just set FALSE under the lock, so
		 * this branch appears unreachable — confirm whether another
		 * path was meant to be tested here. */
		if (pDevice->RxPoll) {
			BCM5700_UNLOCK(pUmDevice, flags);
			return 0;
		/* Take care of possible missed rx interrupts */
		REG_RD_BACK(pDevice, Grc.Mode); /* flush the register write */
		tag = pDevice->pStatusBlkVirt->StatusTag;
		if ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
			(pDevice->pStatusBlkVirt->Idx[0].RcvProdIdx !=
			pDevice->RcvRetConIdx)) {

			/* Work arrived while interrupts were off: force one. */
			REG_WR(pDevice, HostCoalesce.Mode,
				pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
				HOST_COALESCE_NOW);
		/* If a new status block is pending in the WDMA state machine */
		/* before the register write to enable the rx interrupt, */
		/* the new status block may DMA with no interrupt. In this */
		/* scenario, the tag read above will be older than the tag in */
		/* the pending status block and writing the older tag will */
		/* cause interrupt to be generated. */
		else if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
			MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low,
				tag << 24);
			/* Make sure we service tx in case some tx interrupts */
			/* are cleared */
			if (atomic_read(&pDevice->SendBdLeft) <
				(T3_SEND_RCB_ENTRY_COUNT / 2)) {
				REG_WR(pDevice, HostCoalesce.Mode,
					pDevice->CoalesceMode |
					HOST_COALESCE_ENABLE |
					HOST_COALESCE_NOW);
		BCM5700_UNLOCK(pUmDevice, flags);
		return 0;
	return 1;
2580 #endif /* BCM_NAPI_RXPOLL */
/* Top-level interrupt handler.  Guards against spurious/duplicate
 * entry, determines interrupt ownership (MSI is always ours; INTx is
 * checked via the status block and PCI state register), then services
 * events either with tagged-status handshaking or the classic
 * mailbox ack/unack sequence.  Finally schedules rx-buffer
 * replenishment (tasklet / NAPI / inline) and wakes a tx queue that
 * was stopped due to lock contention.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
STATIC irqreturn_t
bcm5700_interrupt(int irq, void *dev_instance)
#else
STATIC irqreturn_t
bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
#endif
	struct net_device *dev = (struct net_device *)dev_instance;
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	LM_UINT32 oldtag, newtag;
	int i, max_intr_loop;
#ifdef BCM_TASKLET
	int repl_buf_count;
#endif
	unsigned int handled = 1;

	if (!pDevice->InitDone) {
		handled = 0;
		return IRQ_RETVAL(handled);

	bcm5700_intr_lock(pUmDevice);
	/* Interrupts are logically disabled (intr_sem held): mask via the
	 * mailbox and report not-handled. */
	if (atomic_read(&pUmDevice->intr_sem)) {
		MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
		bcm5700_intr_unlock(pUmDevice);
		handled = 0;
		return IRQ_RETVAL(handled);

	if (test_and_set_bit(0, (void*)&pUmDevice->interrupt)) {
		printk(KERN_ERR "%s: Duplicate entry of the interrupt handler\n",
			dev->name);
		bcm5700_intr_unlock(pUmDevice);
		handled = 0;
		return IRQ_RETVAL(handled);

	/* BCM4785: Flush posted writes from GbE to host memory. */
	if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
		REG_RD(pDevice, HostCoalesce.Mode);

	/* Ownership test: MSI is never shared; for INTx check the status
	 * block and the chip's interrupt-active bit. */
	if ((pDevice->Flags & USING_MSI_FLAG) ||
		(pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
		!(REG_RD(pDevice,PciCfg.PciState) & T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) )

		/* One-shot interrupt self-test support (see b57_test_intr). */
		if (pUmDevice->intr_test) {
			if (!(REG_RD(pDevice, PciCfg.PciState) &
				T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) ||
				pDevice->Flags & USING_MSI_FLAG ) {
				pUmDevice->intr_test_result = 1;
			pUmDevice->intr_test = 0;

#ifdef BCM_NAPI_RXPOLL
		max_intr_loop = 1;
#else
		max_intr_loop = 50;
#endif
		if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
			/* Tagged mode: loop until the status tag stops
			 * changing, then ack with the last-seen tag. */
			MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
			oldtag = pDevice->pStatusBlkVirt->StatusTag;

			for (i = 0; ; i++) {
				pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;

				LM_ServiceInterrupts(pDevice);
				/* BCM4785: Flush GbE posted writes to host memory. */
				if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
					MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
				newtag = pDevice->pStatusBlkVirt->StatusTag;
				if ((newtag == oldtag) || (i > max_intr_loop)) {
					MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, oldtag << 24);
					pDevice->LastTag = oldtag;
					if (pDevice->Flags & UNDI_FIX_FLAG) {
						REG_WR(pDevice, Grc.LocalCtrl,
							pDevice->GrcLocalCtrl | 0x2);
					break;
				oldtag = newtag;

		else
			/* Classic mode: mask, service, unmask, and repeat
			 * while the status block keeps updating. */
			i = 0;
			do {
				uint dummy;

				MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
				pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
				LM_ServiceInterrupts(pDevice);
				MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 0);
				dummy = MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
				i++;
			while ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) &&
				(i < max_intr_loop));

			if (pDevice->Flags & UNDI_FIX_FLAG) {
				REG_WR(pDevice, Grc.LocalCtrl,
					pDevice->GrcLocalCtrl | 0x2);

	else
		/* not my interrupt */
		handled = 0;

#ifdef BCM_TASKLET
	/* Replenish rx buffers inline if critically low, otherwise defer
	 * to the tasklet. */
	repl_buf_count = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
	if (((repl_buf_count > pUmDevice->rx_buf_repl_panic_thresh) ||
		pDevice->QueueAgain) &&
		(!test_and_set_bit(0, &pUmDevice->tasklet_busy))) {

		replenish_rx_buffers(pUmDevice, pUmDevice->rx_buf_repl_isr_limit);
		clear_bit(0, (void*)&pUmDevice->tasklet_busy);
	else if ((repl_buf_count > pUmDevice->rx_buf_repl_thresh) &&
		!pUmDevice->tasklet_pending) {

		pUmDevice->tasklet_pending = 1;
		tasklet_schedule(&pUmDevice->tasklet);
#else
#ifdef BCM_NAPI_RXPOLL
	if (!pDevice->RxPoll &&
		QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
		pDevice->RxPoll = 1;
		MM_ScheduleRxPoll(pDevice);
#else
	if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
		replenish_rx_buffers(pUmDevice, 0);

	if (QQ_GetEntryCnt(&pDevice->RxPacketFreeQ.Container) ||
		pDevice->QueueAgain) {

		LM_QueueRxPackets(pDevice);
#endif
#endif

	clear_bit(0, (void*)&pUmDevice->interrupt);
	bcm5700_intr_unlock(pUmDevice);
	/* Restart a tx queue that start_xmit parked while we were active. */
	if (pUmDevice->tx_queued) {
		pUmDevice->tx_queued = 0;
		netif_wake_queue(dev);
	return IRQ_RETVAL(handled);
2741 #ifdef BCM_TASKLET
/* Deferred rx-buffer replenishment.  Guarded by tasklet_busy because
 * some kernels (RH 7.2 beta 3) could re-enter tasklets; skips work
 * when the device is closed or suspended.
 */
STATIC void
bcm5700_tasklet(unsigned long data)
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)data;
	unsigned long flags = 0;

	/* RH 7.2 Beta 3 tasklets are reentrant */
	if (test_and_set_bit(0, &pUmDevice->tasklet_busy)) {
		pUmDevice->tasklet_pending = 0;
		return;

	pUmDevice->tasklet_pending = 0;
	if (pUmDevice->opened && !pUmDevice->suspended) {
		BCM5700_LOCK(pUmDevice, flags);
		replenish_rx_buffers(pUmDevice, 0);
		BCM5700_UNLOCK(pUmDevice, flags);

	clear_bit(0, &pUmDevice->tasklet_busy);
2763 #endif
/* net_device close (ifdown): stop the queue and timers, shut the chip
 * down, release the IRQ (and MSI vector), drop to D3 power state when
 * safe, and free the DMA/queue memory.  Always returns 0.
 */
STATIC int
bcm5700_close(struct net_device *dev)
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;

#if (LINUX_VERSION_CODE < 0x02032b)
	dev->start = 0;
#endif
	netif_stop_queue(dev);
	pUmDevice->opened = 0;

	/* Only log link-down when neither ASF nor WOL keeps the link up;
	 * note the stacked ifdefs make both conditions guard the single
	 * B57_INFO statement below. */
#ifdef BCM_ASF
	if( !(pDevice->AsfFlags & ASF_ENABLED) )
#endif
#ifdef BCM_WOL
	if( enable_wol[pUmDevice->index] == 0 )
#endif
	B57_INFO(("%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name));

	if (tigon3_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down Tigon3\n",
			dev->name);

	LM_MulticastClear(pDevice);
	bcm5700_shutdown(pUmDevice);

	/* 5705+ chips use a separate software statistics timer. */
	if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
		del_timer_sync(&pUmDevice->statstimer);
	del_timer_sync(&pUmDevice->timer);
	free_irq(pUmDevice->pdev->irq, dev);

#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
	if(pDevice->Flags & USING_MSI_FLAG) {
		pci_disable_msi(pUmDevice->pdev);
		REG_WR(pDevice, Msi.Mode, 1 );
		pDevice->Flags &= ~USING_MSI_FLAG;
#endif

#if (LINUX_VERSION_CODE < 0x020300)
	MOD_DEC_USE_COUNT;
#endif

	/* BCM4785: Don't go to low-power state because it will power down the smbus block. */
	if (!(pDevice->Flags & SB_CORE_FLAG))
		LM_SetPowerState(pDevice, LM_POWER_STATE_D3);

	bcm5700_freemem(dev);

	QQ_InitQueue(&pDevice->RxPacketFreeQ.Container,
		MAX_RX_PACKET_DESC_COUNT);

	return 0;
/* Free all memory tracked in the device's allocation lists.  Entries
 * with a recorded size were allocated with pci_alloc_consistent (DMA
 * memory); a size of 0 marks a plain kfree allocation.  Also clears
 * the status/stats block pointers and, when the device is closed,
 * releases ioctl-allocated NICE memory.  Always returns 0.
 */
STATIC int
bcm5700_freemem(struct net_device *dev)
	int i;
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;

	for (i = 0; i < pUmDevice->mem_list_num; i++) {
		if (pUmDevice->mem_size_list[i] == 0) {
			/* size 0 => regular kernel allocation */
			kfree(pUmDevice->mem_list[i]);
		else {
			pci_free_consistent(pUmDevice->pdev,
				(size_t) pUmDevice->mem_size_list[i],
				pUmDevice->mem_list[i],
				pUmDevice->dma_list[i]);

	/* The blocks above included the status and stats blocks. */
	pDevice->pStatusBlkVirt = 0;
	pDevice->pStatsBlkVirt = 0;
	pUmDevice->mem_list_num = 0;

#ifdef NICE_SUPPORT
	if (!pUmDevice->opened) {
		for (i = 0; i < MAX_MEM2; i++) {
			if (pUmDevice->mem_size_list2[i]) {
				bcm5700_freemem2(pUmDevice, i);
#endif
	return 0;
2864 #ifdef NICE_SUPPORT
2865 /* Frees consistent memory allocated through ioctl */
2866 /* The memory to be freed is in mem_list2[index] */
/* Free one consistent-memory region that was allocated through the
 * NICE ioctl interface (mem_list2[index]): un-reserve every page the
 * region spans (they were reserved so they could be mmap'ed), then
 * release the DMA memory and clear the size slot.  Always returns 0.
 */
STATIC int
bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index)
#if (LINUX_VERSION_CODE >= 0x020400)
	void *ptr;
	struct page *pg, *last_pg;

	/* Probably won't work on some architectures */
	ptr = pUmDevice->mem_list2[index],
	pg = virt_to_page(ptr);
	last_pg = virt_to_page(ptr + pUmDevice->mem_size_list2[index] - 1);
	for (; ; pg++) {
#if (LINUX_VERSION_CODE > 0x020500)
		ClearPageReserved(pg);
#else
		mem_map_unreserve(pg);
#endif
		if (pg == last_pg)
			break;
	pci_free_consistent(pUmDevice->pdev,
		(size_t) pUmDevice->mem_size_list2[index],
		pUmDevice->mem_list2[index],
		pUmDevice->dma_list2[index]);
	pUmDevice->mem_size_list2[index] = 0;
#endif
	return 0;
2895 #endif
/* Return the accumulated rx CRC error count.  On copper 5700/5701 the
 * hardware stats block does not carry this, so it is read from a PHY
 * shadow register (0x14, enabled via bit 15 of register 0x1e) and
 * accumulated in software, rate-limited to one MDIO read per ~5s via
 * crc_counter_expiry.  All other chips use the stats block directly.
 */
uint64_t
bcm5700_crc_count(PUM_DEVICE_BLOCK pUmDevice)
	PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
	LM_UINT32 Value32;
	PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
	unsigned long flags;

	if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5700 ||
		T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5701) &&
		!(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {

		if (!pUmDevice->opened || !pDevice->InitDone)

			return 0;

		/* regulate MDIO access during run time */
		if (pUmDevice->crc_counter_expiry > 0)
			return pUmDevice->phy_crc_count;

		pUmDevice->crc_counter_expiry = (5 * HZ) /
			pUmDevice->timer_interval;

		BCM5700_PHY_LOCK(pUmDevice, flags);
		/* Bit 15 of reg 0x1e enables the CRC counter register. */
		LM_ReadPhy(pDevice, 0x1e, &Value32);
		if ((Value32 & 0x8000) == 0)
			LM_WritePhy(pDevice, 0x1e, Value32 | 0x8000);
		LM_ReadPhy(pDevice, 0x14, &Value32);
		BCM5700_PHY_UNLOCK(pUmDevice, flags);
		/* Sometimes data on the MDIO bus can be corrupted */
		if (Value32 != 0xffff)
			pUmDevice->phy_crc_count += Value32;
		return pUmDevice->phy_crc_count;
	else if (pStats == 0) {
		return 0;
	else {
		return (MM_GETSTATS64(pStats->dot3StatsFCSErrors));
/* Aggregate rx error count: CRC errors (possibly PHY-sourced, see
 * bcm5700_crc_count) plus alignment, undersize, fragment, oversize
 * and jabber counters from the hardware statistics block.  Returns 0
 * when the stats block is not mapped yet.
 */
uint64_t
bcm5700_rx_err_count(UM_DEVICE_BLOCK *pUmDevice)
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
	T3_STATS_BLOCK *pStats = (T3_STATS_BLOCK *) pDevice->pStatsBlkVirt;

	if (pStats == 0)
		return 0;
	return (bcm5700_crc_count(pUmDevice) +
		MM_GETSTATS64(pStats->dot3StatsAlignmentErrors) +
		MM_GETSTATS64(pStats->etherStatsUndersizePkts) +
		MM_GETSTATS64(pStats->etherStatsFragments) +
		MM_GETSTATS64(pStats->dot3StatsFramesTooLong) +
		MM_GETSTATS64(pStats->etherStatsJabbers));
/* net_device get_stats: translate the chip's hardware statistics block
 * into the kernel's net_device_stats.  rx_bytes/tx_bytes are NOT taken
 * from the stats block (see comment below); they are accumulated in
 * the rx/tx paths.  Returns the cached structure unchanged when the
 * stats block is not mapped.
 */
STATIC struct net_device_stats *
bcm5700_get_stats(struct net_device *dev)
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
	struct net_device_stats *p_netstats = &pUmDevice->stats;

	if (pStats == 0)
		return p_netstats;

	/* Get stats from LM */
	p_netstats->rx_packets =
		MM_GETSTATS(pStats->ifHCInUcastPkts) +
		MM_GETSTATS(pStats->ifHCInMulticastPkts) +
		MM_GETSTATS(pStats->ifHCInBroadcastPkts);
	p_netstats->tx_packets =
		MM_GETSTATS(pStats->ifHCOutUcastPkts) +
		MM_GETSTATS(pStats->ifHCOutMulticastPkts) +
		MM_GETSTATS(pStats->ifHCOutBroadcastPkts);
	/* These octet counters seem to be inaccurate; byte counts are
	accumulated per-skb in the rx/tx paths instead.
	p_netstats->rx_bytes = MM_GETSTATS(pStats->ifHCInOctets);
	p_netstats->tx_bytes = MM_GETSTATS(pStats->ifHCOutOctets);
	p_netstats->tx_errors =
		MM_GETSTATS(pStats->dot3StatsInternalMacTransmitErrors) +
		MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors) +
		MM_GETSTATS(pStats->ifOutDiscards) +
		MM_GETSTATS(pStats->ifOutErrors);
	p_netstats->multicast = MM_GETSTATS(pStats->ifHCInMulticastPkts);
	p_netstats->collisions = MM_GETSTATS(pStats->etherStatsCollisions);
	p_netstats->rx_length_errors =
		MM_GETSTATS(pStats->dot3StatsFramesTooLong) +
		MM_GETSTATS(pStats->etherStatsUndersizePkts);
	p_netstats->rx_over_errors = MM_GETSTATS(pStats->nicNoMoreRxBDs);
	p_netstats->rx_frame_errors =
		MM_GETSTATS(pStats->dot3StatsAlignmentErrors);
	p_netstats->rx_crc_errors = (unsigned long)
		bcm5700_crc_count(pUmDevice);
	p_netstats->rx_errors = (unsigned long)
		bcm5700_rx_err_count(pUmDevice);

	p_netstats->tx_aborted_errors = MM_GETSTATS(pStats->ifOutDiscards);
	p_netstats->tx_carrier_errors =
		MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors);

	return p_netstats;
/* Suspend the adapter (exported for the BCM4785 platform glue): if the
 * interface is open, quiesce interrupts, carrier, tx queue, tasklet
 * and any in-flight NAPI poll, then mark suspended and halt the chip
 * with a suspend-style reset.
 */
void
b57_suspend_chip(UM_DEVICE_BLOCK *pUmDevice)
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;

	if (pUmDevice->opened) {
		bcm5700_intr_off(pUmDevice);
		netif_carrier_off(pUmDevice->dev);
		netif_stop_queue(pUmDevice->dev);
#ifdef BCM_TASKLET
		tasklet_kill(&pUmDevice->tasklet);
#endif
		bcm5700_poll_wait(pUmDevice);
	pUmDevice->suspended = 1;
	LM_ShutdownChip(pDevice, LM_SUSPEND_RESET);
/* Resume the adapter after b57_suspend_chip: a full reset/reinit when
 * the interface is open, otherwise just a shutdown-style reset to
 * leave the chip in a known quiescent state.
 */
void
b57_resume_chip(UM_DEVICE_BLOCK *pUmDevice)
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;

	if (pUmDevice->suspended) {
		pUmDevice->suspended = 0;
		if (pUmDevice->opened) {
			bcm5700_reset(pUmDevice->dev);
		else {
			LM_ShutdownChip(pDevice, LM_SHUTDOWN_RESET);
/* Interrupt self-test: arm intr_test and force interrupts by writing
 * HOST_COALESCE_NOW, polling up to 10 times (1 ms apart) for the ISR
 * to set intr_test_result (see bcm5700_interrupt).
 * Returns 0 on failure, 1 on success. */
b57_test_intr(UM_DEVICE_BLOCK *pUmDevice)
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
	int j;

	if (!pUmDevice->opened)
		return 0;
	pUmDevice->intr_test_result = 0;
	pUmDevice->intr_test = 1;

	/* Kick the coalescing engine to generate an interrupt now. */
	REG_WR(pDevice, HostCoalesce.Mode,
		pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
		HOST_COALESCE_NOW);

	for (j = 0; j < 10; j++) {
		if (pUmDevice->intr_test_result){
			break;

		REG_WR(pDevice, HostCoalesce.Mode,
			pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
			HOST_COALESCE_NOW);

		MM_Sleep(pDevice, 1);

	return pUmDevice->intr_test_result;
3073 #ifdef SIOCETHTOOL
3075 #ifdef ETHTOOL_GSTRINGS
/* Number of ethtool statistics exported, and the indices of the two
 * entries (rx_crc_errors, rx_mac_errors) that are computed at runtime
 * rather than read straight from the hardware stats block. */
#define ETH_NUM_STATS 30
#define RX_CRC_IDX 5
#define RX_MAC_ERR_IDX 14

/* ethtool GSTRINGS table: one name per exported statistic, in the same
 * order as bcm5700_stats_offset_arr below. */
struct {
	char string[ETH_GSTRING_LEN];
} bcm5700_stats_str_arr[ETH_NUM_STATS] = {
	{ "rx_unicast_packets" },
	{ "rx_multicast_packets" },
	{ "rx_broadcast_packets" },
	{ "rx_bytes" },
	{ "rx_fragments" },
	{ "rx_crc_errors" },	/* this needs to be calculated */
	{ "rx_align_errors" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "rx_long_frames" },
	{ "rx_short_frames" },
	{ "rx_jabber" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_mac_errors" },	/* this needs to be calculated */
	{ "tx_unicast_packets" },
	{ "tx_multicast_packets" },
	{ "tx_broadcast_packets" },
	{ "tx_bytes" },
	{ "tx_deferred" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_total_collisions" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "tx_internal_mac_errors" },
	{ "tx_carrier_errors" },
	{ "tx_errors" },

/* Index (in uint64_t units) of a named counter inside T3_STATS_BLOCK. */
#define STATS_OFFSET(offset_name) ((OFFSETOF(T3_STATS_BLOCK, offset_name)) / sizeof(uint64_t))

/* Stats-block counters are stored big-endian; swap 32-bit halves on
 * little-endian hosts.  NOTE(review): assumes x is a 64-bit value —
 * shifting a 32-bit operand by 32 would be undefined. */
#ifdef __BIG_ENDIAN
#define SWAP_DWORD_64(x) (x)
#else
#define SWAP_DWORD_64(x) ((x << 32) | (x >> 32))
#endif

/* Stats-block offset for each exported statistic; the entries at
 * RX_CRC_IDX and RX_MAC_ERR_IDX are placeholders, those two values are
 * computed at runtime (see the string table above). */
unsigned long bcm5700_stats_offset_arr[ETH_NUM_STATS] = {
	STATS_OFFSET(ifHCInUcastPkts),
	STATS_OFFSET(ifHCInMulticastPkts),
	STATS_OFFSET(ifHCInBroadcastPkts),
	STATS_OFFSET(ifHCInOctets),
	STATS_OFFSET(etherStatsFragments),
	STATS_OFFSET(dot3StatsAlignmentErrors),
	STATS_OFFSET(xonPauseFramesReceived),
	STATS_OFFSET(xoffPauseFramesReceived),
	STATS_OFFSET(dot3StatsFramesTooLong),
	STATS_OFFSET(etherStatsUndersizePkts),
	STATS_OFFSET(etherStatsJabbers),
	STATS_OFFSET(ifInDiscards),
	STATS_OFFSET(ifInErrors),
	STATS_OFFSET(ifHCOutUcastPkts),
	STATS_OFFSET(ifHCOutMulticastPkts),
	STATS_OFFSET(ifHCOutBroadcastPkts),
	STATS_OFFSET(ifHCOutOctets),
	STATS_OFFSET(dot3StatsDeferredTransmissions),
	STATS_OFFSET(dot3StatsSingleCollisionFrames),
	STATS_OFFSET(dot3StatsMultipleCollisionFrames),
	STATS_OFFSET(etherStatsCollisions),
	STATS_OFFSET(dot3StatsExcessiveCollisions),
	STATS_OFFSET(dot3StatsLateCollisions),
	STATS_OFFSET(outXonSent),
	STATS_OFFSET(outXoffSent),
	STATS_OFFSET(dot3StatsInternalMacTransmitErrors),
	STATS_OFFSET(dot3StatsCarrierSenseErrors),
	STATS_OFFSET(ifOutErrors),

#endif /* ETHTOOL_GSTRINGS */

#ifdef ETHTOOL_TEST
/* ethtool self-test names; implementations are external (below). */
#define ETH_NUM_TESTS 6
struct {
	char string[ETH_GSTRING_LEN];
} bcm5700_tests_str_arr[ETH_NUM_TESTS] = {
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "nvram test (online)" },
	{ "interrupt test (online)" },
	{ "link test (online)" },

extern LM_STATUS b57_test_registers(UM_DEVICE_BLOCK *pUmDevice);
extern LM_STATUS b57_test_memory(UM_DEVICE_BLOCK *pUmDevice);
extern LM_STATUS b57_test_nvram(UM_DEVICE_BLOCK *pUmDevice);
extern LM_STATUS b57_test_link(UM_DEVICE_BLOCK *pUmDevice);
extern LM_STATUS b57_test_loopback(UM_DEVICE_BLOCK *pUmDevice, int looptype, int linespeed);
#endif
3179 #ifdef ETHTOOL_GREGS
3180 #if (LINUX_VERSION_CODE >= 0x02040f)
/* Copy the register range [start, end) into *buf for an ethtool
 * register dump, advancing *buf past the range.  Reserved ranges are
 * zero-filled without touching the hardware; on 5705+ chips three
 * sub-ranges that must not be read are also returned as zero.
 */
static void
bcm5700_get_reg_blk(UM_DEVICE_BLOCK *pUmDevice, u32 **buf, u32 start, u32 end,
		int reserved)
	u32 offset;
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;

	if (reserved) {
		memset(*buf, 0, end - start);
		*buf = *buf + (end - start)/4;
		return;
	for (offset = start; offset < end; offset+=4, *buf = *buf + 1) {
		if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
			/* These windows are unreadable on 5705+; report 0. */
			if (((offset >= 0x3400) && (offset < 0x3c00)) ||
				((offset >= 0x5400) && (offset < 0x5800)) ||
				((offset >= 0x6400) && (offset < 0x6800))) {
				**buf = 0;
				continue;
		**buf = REG_RD_OFFSET(pDevice, offset);
3205 #endif
3206 #endif
/*
 * SIOCETHTOOL dispatcher.  Copies the ethtool command header from user
 * space ('useraddr'), then services the sub-command against the LM
 * (lower-module) device state.  Privileged (state-changing) commands
 * check CAP_NET_ADMIN first.  Returns 0 on success or a negative errno.
 * NOTE(review): this excerpt lost blank/closing-brace lines during
 * extraction (gaps in the embedded line numbers), so brace nesting
 * below appears unbalanced relative to the real file.
 */
3208 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
3210 struct ethtool_cmd ethcmd;
3211 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
3212 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
3214 if (mm_copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
3215 return -EFAULT;
3217 switch (ethcmd.cmd) {
3218 #ifdef ETHTOOL_GDRVINFO
/* Report driver name/version, firmware version and PCI bus address. */
3219 case ETHTOOL_GDRVINFO: {
3220 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
3222 strcpy(info.driver, bcm5700_driver);
3223 #ifdef INCLUDE_5701_AX_FIX
/* 5701 A0 runs patched on-chip firmware; report its version instead.
 * NOTE(review): original lines 3228/3232 were dropped from this
 * excerpt, so the if/else structure here is not fully visible. */
3224 if(pDevice->ChipRevId == T3_CHIP_ID_5701_A0) {
3225 extern int t3FwReleaseMajor;
3226 extern int t3FwReleaseMinor;
3227 extern int t3FwReleaseFix;
3229 sprintf(info.fw_version, "%i.%i.%i",
3230 t3FwReleaseMajor, t3FwReleaseMinor,
3231 t3FwReleaseFix);
3233 #endif
3234 strcpy(info.fw_version, pDevice->BootCodeVer);
3235 strcpy(info.version, bcm5700_version);
3236 #if (LINUX_VERSION_CODE <= 0x020422)
3237 strcpy(info.bus_info, pUmDevice->pdev->slot_name);
3238 #else
3239 strcpy(info.bus_info, pci_name(pUmDevice->pdev));
3240 #endif
3244 #ifdef ETHTOOL_GEEPROM
3245 BCM_EEDUMP_LEN(&info, pDevice->NvramSize);
3246 #endif
3247 #ifdef ETHTOOL_GREGS
3248 /* dump everything, including holes in the register space */
3249 info.regdump_len = 0x6c00;
3250 #endif
3251 #ifdef ETHTOOL_GSTATS
3252 info.n_stats = ETH_NUM_STATS;
3253 #endif
3254 #ifdef ETHTOOL_TEST
3255 info.testinfo_len = ETH_NUM_TESTS;
3256 #endif
3257 if (mm_copy_to_user(useraddr, &info, sizeof(info)))
3258 return -EFAULT;
3259 return 0;
3261 #endif
/* Report supported media (fiber/TBI vs. copper), current speed/duplex
 * and the currently advertised autoneg modes. */
3262 case ETHTOOL_GSET: {
3263 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG)||
3264 (pDevice->PhyFlags & PHY_IS_FIBER)) {
3265 ethcmd.supported =
3266 (SUPPORTED_1000baseT_Full |
3267 SUPPORTED_Autoneg);
3268 ethcmd.supported |= SUPPORTED_FIBRE;
3269 ethcmd.port = PORT_FIBRE;
3270 } else {
3271 ethcmd.supported =
3272 (SUPPORTED_10baseT_Half |
3273 SUPPORTED_10baseT_Full |
3274 SUPPORTED_100baseT_Half |
3275 SUPPORTED_100baseT_Full |
3276 SUPPORTED_1000baseT_Half |
3277 SUPPORTED_1000baseT_Full |
3278 SUPPORTED_Autoneg);
3279 ethcmd.supported |= SUPPORTED_TP;
3280 ethcmd.port = PORT_TP;
3283 ethcmd.transceiver = XCVR_INTERNAL;
3284 ethcmd.phy_address = 0;
3286 if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
3287 ethcmd.speed = SPEED_1000;
3288 else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
3289 ethcmd.speed = SPEED_100;
3290 else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
3291 ethcmd.speed = SPEED_10;
3292 else
3293 ethcmd.speed = 0;
3295 if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
3296 ethcmd.duplex = DUPLEX_FULL;
3297 else
3298 ethcmd.duplex = DUPLEX_HALF;
/* Translate the PHY advertisement registers into ethtool ADVERTISED_*
 * bits; advertising1000 holds the 1000BASE-T control bits separately. */
3300 if (pDevice->DisableAutoNeg == FALSE) {
3301 ethcmd.autoneg = AUTONEG_ENABLE;
3302 ethcmd.advertising = ADVERTISED_Autoneg;
3303 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
3304 (pDevice->PhyFlags & PHY_IS_FIBER)) {
3305 ethcmd.advertising |=
3306 ADVERTISED_1000baseT_Full |
3307 ADVERTISED_FIBRE;
3309 else {
3310 ethcmd.advertising |=
3311 ADVERTISED_TP;
3312 if (pDevice->advertising &
3313 PHY_AN_AD_10BASET_HALF) {
3315 ethcmd.advertising |=
3316 ADVERTISED_10baseT_Half;
3318 if (pDevice->advertising &
3319 PHY_AN_AD_10BASET_FULL) {
3321 ethcmd.advertising |=
3322 ADVERTISED_10baseT_Full;
3324 if (pDevice->advertising &
3325 PHY_AN_AD_100BASETX_HALF) {
3327 ethcmd.advertising |=
3328 ADVERTISED_100baseT_Half;
3330 if (pDevice->advertising &
3331 PHY_AN_AD_100BASETX_FULL) {
3333 ethcmd.advertising |=
3334 ADVERTISED_100baseT_Full;
3336 if (pDevice->advertising1000 &
3337 BCM540X_AN_AD_1000BASET_HALF) {
3339 ethcmd.advertising |=
3340 ADVERTISED_1000baseT_Half;
3342 if (pDevice->advertising1000 &
3343 BCM540X_AN_AD_1000BASET_FULL) {
3345 ethcmd.advertising |=
3346 ADVERTISED_1000baseT_Full;
3350 else {
3351 ethcmd.autoneg = AUTONEG_DISABLE;
3352 ethcmd.advertising = 0;
3355 ethcmd.maxtxpkt = pDevice->TxMaxCoalescedFrames;
3356 ethcmd.maxrxpkt = pDevice->RxMaxCoalescedFrames;
3358 if(mm_copy_to_user(useraddr, &ethcmd, sizeof(ethcmd)))
3359 return -EFAULT;
3360 return 0;
/* Apply user-requested autoneg/speed/duplex settings (CAP_NET_ADMIN
 * only), then re-run PHY setup if the interface is up. */
3362 case ETHTOOL_SSET: {
3363 unsigned long flags;
3365 if(!capable(CAP_NET_ADMIN))
3366 return -EPERM;
3367 if (ethcmd.autoneg == AUTONEG_ENABLE) {
3368 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3369 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_UNKNOWN;
3370 pDevice->DisableAutoNeg = FALSE;
3372 else {
/* Forced-speed path: reject combinations the hardware cannot do
 * (gigabit on PHY_NO_GIGABIT parts; 10/100 on fiber/TBI links). */
3373 if (ethcmd.speed == SPEED_1000 &&
3374 pDevice->PhyFlags & PHY_NO_GIGABIT)
3375 return -EINVAL;
3377 if (ethcmd.speed == SPEED_1000 &&
3378 (pDevice->TbiFlags & ENABLE_TBI_FLAG ||
3379 pDevice->PhyFlags & PHY_IS_FIBER ) ) {
3381 pDevice->RequestedLineSpeed =
3382 LM_LINE_SPEED_1000MBPS;
3384 pDevice->RequestedDuplexMode =
3385 LM_DUPLEX_MODE_FULL;
3387 else if (ethcmd.speed == SPEED_100 &&
3388 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3389 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3391 pDevice->RequestedLineSpeed =
3392 LM_LINE_SPEED_100MBPS;
3394 else if (ethcmd.speed == SPEED_10 &&
3395 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3396 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3398 pDevice->RequestedLineSpeed =
3399 LM_LINE_SPEED_10MBPS;
3401 else {
3402 return -EINVAL;
3405 pDevice->DisableAutoNeg = TRUE;
3406 if (ethcmd.duplex == DUPLEX_FULL) {
3407 pDevice->RequestedDuplexMode =
3408 LM_DUPLEX_MODE_FULL;
3410 else {
/* Half duplex is only meaningful on copper links. */
3411 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3412 !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
3414 pDevice->RequestedDuplexMode =
3415 LM_DUPLEX_MODE_HALF;
3419 if (netif_running(dev)) {
3420 BCM5700_PHY_LOCK(pUmDevice, flags);
3421 LM_SetupPhy(pDevice);
3422 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3424 return 0;
3426 #ifdef ETHTOOL_GWOL
3427 #ifdef BCM_WOL
/* Wake-on-LAN query: only magic-packet wake is reported, and only when
 * the hardware/flags combination supports it. */
3428 case ETHTOOL_GWOL: {
3429 struct ethtool_wolinfo wol = {ETHTOOL_GWOL};
3431 if (((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3432 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3433 (pDevice->Flags & DISABLE_D3HOT_FLAG)) {
3434 wol.supported = 0;
3435 wol.wolopts = 0;
3437 else {
3438 wol.supported = WAKE_MAGIC;
3439 if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET)
3441 wol.wolopts = WAKE_MAGIC;
3443 else {
3444 wol.wolopts = 0;
3447 if (mm_copy_to_user(useraddr, &wol, sizeof(wol)))
3448 return -EFAULT;
3449 return 0;
/* Wake-on-LAN set: admin-only; rejects any option other than
 * WAKE_MAGIC and rejects WoL entirely on incapable hardware. */
3451 case ETHTOOL_SWOL: {
3452 struct ethtool_wolinfo wol;
3454 if(!capable(CAP_NET_ADMIN))
3455 return -EPERM;
3456 if (mm_copy_from_user(&wol, useraddr, sizeof(wol)))
3457 return -EFAULT;
3458 if ((((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3459 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3460 (pDevice->Flags & DISABLE_D3HOT_FLAG)) &&
3461 wol.wolopts) {
3462 return -EINVAL;
3465 if ((wol.wolopts & ~WAKE_MAGIC) != 0) {
3466 return -EINVAL;
3468 if (wol.wolopts & WAKE_MAGIC) {
3469 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
3470 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
3472 else {
3473 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
3474 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
3476 return 0;
3478 #endif
3479 #endif
3480 #ifdef ETHTOOL_GLINK
/* Link-state query (ethtool "Link detected" line). */
3481 case ETHTOOL_GLINK: {
3482 struct ethtool_value edata = {ETHTOOL_GLINK};
3484 /* ifup only waits for 5 seconds for link up */
3485 /* NIC may take more than 5 seconds to establish link */
3486 if ((pUmDevice->delayed_link_ind > 0) &&
3487 delay_link[pUmDevice->index])
3488 return -EOPNOTSUPP;
3490 if (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) {
3491 edata.data = 1;
3493 else {
3494 edata.data = 0;
3496 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3497 return -EFAULT;
3498 return 0;
3500 #endif
3501 #ifdef ETHTOOL_NWAY_RST
/* Restart autonegotiation.  TBI links are bounced through a forced
 * 1000 Mb/s setup and back to auto; copper PHYs on 5703/5704/5705 get
 * a full PHY reset, others just restart autoneg via the control reg. */
3502 case ETHTOOL_NWAY_RST: {
3503 LM_UINT32 phyctrl;
3504 unsigned long flags;
3506 if(!capable(CAP_NET_ADMIN))
3507 return -EPERM;
3508 if (pDevice->DisableAutoNeg) {
3509 return -EINVAL;
3511 if (!netif_running(dev))
3512 return -EAGAIN;
3513 BCM5700_PHY_LOCK(pUmDevice, flags);
3514 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
3515 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
3516 pDevice->DisableAutoNeg = TRUE;
3517 LM_SetupPhy(pDevice);
3519 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3520 pDevice->DisableAutoNeg = FALSE;
3521 LM_SetupPhy(pDevice);
3523 else {
3524 if ((T3_ASIC_REV(pDevice->ChipRevId) ==
3525 T3_ASIC_REV_5703) ||
3526 (T3_ASIC_REV(pDevice->ChipRevId) ==
3527 T3_ASIC_REV_5704) ||
3528 (T3_ASIC_REV(pDevice->ChipRevId) ==
3529 T3_ASIC_REV_5705))
3531 LM_ResetPhy(pDevice);
3532 LM_SetupPhy(pDevice);
3534 pDevice->PhyFlags &= ~PHY_FIBER_FALLBACK;
3535 LM_ReadPhy(pDevice, PHY_CTRL_REG, &phyctrl);
3536 LM_WritePhy(pDevice, PHY_CTRL_REG, phyctrl |
3537 PHY_CTRL_AUTO_NEG_ENABLE |
3538 PHY_CTRL_RESTART_AUTO_NEG);
3540 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3541 return 0;
3543 #endif
3544 #ifdef ETHTOOL_GEEPROM
/* NVRAM read: at most 0x800 bytes per call; reads are widened to
 * 32-bit-aligned words, then the caller's exact byte range is copied
 * back to user space. */
3545 case ETHTOOL_GEEPROM: {
3546 struct ethtool_eeprom eeprom;
3547 LM_UINT32 *buf = 0;
3548 LM_UINT32 buf1[64/4];
3549 int i, j, offset, len;
3551 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3552 return -EFAULT;
3554 if (eeprom.offset >= pDevice->NvramSize)
3555 return -EFAULT;
3557 /* maximum data limited */
3558 /* to read more, call again with a different offset */
3559 if (eeprom.len > 0x800) {
3560 eeprom.len = 0x800;
3561 if (mm_copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
3562 return -EFAULT;
/* Small requests reuse the 64-byte stack buffer; larger ones kmalloc. */
3565 if (eeprom.len > 64) {
3566 buf = kmalloc(eeprom.len, GFP_KERNEL);
3567 if (!buf)
3568 return -ENOMEM;
3570 else {
3571 buf = buf1;
3573 useraddr += offsetof(struct ethtool_eeprom, data);
/* Round the range down/out to 4-byte word boundaries for LM_NvramRead. */
3575 offset = eeprom.offset;
3576 len = eeprom.len;
3577 if (offset & 3) {
3578 offset &= 0xfffffffc;
3579 len += (offset & 3);
3581 len = (len + 3) & 0xfffffffc;
3582 for (i = 0, j = 0; j < len; i++, j += 4) {
3583 if (LM_NvramRead(pDevice, offset + j, buf + i) !=
3584 LM_STATUS_SUCCESS) {
3585 break;
3588 if (j >= len) {
3589 buf += (eeprom.offset & 3);
3590 i = mm_copy_to_user(useraddr, buf, eeprom.len);
3592 if (eeprom.len > 64) {
3593 kfree(buf);
3595 if ((j < len) || i)
3596 return -EFAULT;
3597 return 0;
/* NVRAM write: admin-only, requires word-aligned offset/length; writes
 * in 64-byte chunks with device interrupts masked around each chunk. */
3599 case ETHTOOL_SEEPROM: {
3600 struct ethtool_eeprom eeprom;
3601 LM_UINT32 buf[64/4];
3602 int i, offset, len;
3604 if(!capable(CAP_NET_ADMIN))
3605 return -EPERM;
3606 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3607 return -EFAULT;
3609 if ((eeprom.offset & 3) || (eeprom.len & 3) ||
3610 (eeprom.offset >= pDevice->NvramSize)) {
3611 return -EFAULT;
3614 if ((eeprom.offset + eeprom.len) >= pDevice->NvramSize) {
3615 eeprom.len = pDevice->NvramSize - eeprom.offset;
3618 useraddr += offsetof(struct ethtool_eeprom, data);
3620 len = eeprom.len;
3621 offset = eeprom.offset;
3622 for (; len > 0; ) {
3623 if (len < 64)
3624 i = len;
3625 else
3626 i = 64;
3627 if (mm_copy_from_user(&buf, useraddr, i))
3628 return -EFAULT;
3630 bcm5700_intr_off(pUmDevice);
3631 /* Prevent race condition on Grc.Mode register */
3632 bcm5700_poll_wait(pUmDevice);
3634 if (LM_NvramWriteBlock(pDevice, offset, buf, i/4) !=
3635 LM_STATUS_SUCCESS) {
3636 bcm5700_intr_on(pUmDevice);
3637 return -EFAULT;
3639 bcm5700_intr_on(pUmDevice);
3640 len -= i;
3641 offset += i;
3642 useraddr += i;
3644 return 0;
3646 #endif
3647 #ifdef ETHTOOL_GREGS
3648 #if (LINUX_VERSION_CODE >= 0x02040f)
/* Register dump (ethtool -d): walk the 0x0-0x6c00 register space block
 * by block via bcm5700_get_reg_blk(); reserved windows are zero-filled
 * so the output is a contiguous eregs.len-byte image. */
3649 case ETHTOOL_GREGS: {
3650 struct ethtool_regs eregs;
3651 LM_UINT32 *buf, *buf1;
3652 unsigned int i;
3654 if(!capable(CAP_NET_ADMIN))
3655 return -EPERM;
3656 if (pDevice->Flags & UNDI_FIX_FLAG)
3657 return -EOPNOTSUPP;
3658 if (mm_copy_from_user(&eregs, useraddr, sizeof(eregs)))
3659 return -EFAULT;
3660 if (eregs.len > 0x6c00)
3661 eregs.len = 0x6c00;
3662 eregs.version = 0x0;
3663 if (mm_copy_to_user(useraddr, &eregs, sizeof(eregs)))
3664 return -EFAULT;
3665 buf = buf1 = kmalloc(eregs.len, GFP_KERNEL);
3666 if (!buf)
3667 return -ENOMEM;
3668 bcm5700_get_reg_blk(pUmDevice, &buf, 0, 0xb0, 0);
3669 bcm5700_get_reg_blk(pUmDevice, &buf, 0xb0, 0x200, 1);
3670 bcm5700_get_reg_blk(pUmDevice, &buf, 0x200, 0x8f0, 0);
3671 bcm5700_get_reg_blk(pUmDevice, &buf, 0x8f0, 0xc00, 1);
3672 bcm5700_get_reg_blk(pUmDevice, &buf, 0xc00, 0xce0, 0);
3673 bcm5700_get_reg_blk(pUmDevice, &buf, 0xce0, 0x1000, 1);
3674 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1000, 0x1004, 0);
3675 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1004, 0x1400, 1);
3676 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1400, 0x1480, 0);
3677 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1480, 0x1800, 1);
3678 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1800, 0x1848, 0);
3679 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1848, 0x1c00, 1);
3680 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c00, 0x1c04, 0);
3681 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c04, 0x2000, 1);
3682 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2000, 0x225c, 0);
3683 bcm5700_get_reg_blk(pUmDevice, &buf, 0x225c, 0x2400, 1);
3684 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2400, 0x24c4, 0);
3685 bcm5700_get_reg_blk(pUmDevice, &buf, 0x24c4, 0x2800, 1);
3686 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2800, 0x2804, 0);
3687 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2804, 0x2c00, 1);
3688 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c00, 0x2c20, 0);
3689 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c20, 0x3000, 1);
3690 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3000, 0x3014, 0);
3691 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3014, 0x3400, 1);
3692 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3400, 0x3408, 0);
3693 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3408, 0x3800, 1);
3694 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3800, 0x3808, 0);
3695 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3808, 0x3c00, 1);
3696 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3c00, 0x3d00, 0);
3697 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3d00, 0x4000, 1);
3698 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4000, 0x4010, 0);
3699 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4010, 0x4400, 1);
3700 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4400, 0x4458, 0);
3701 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4458, 0x4800, 1);
3702 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4800, 0x4808, 0);
3703 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4808, 0x4c00, 1);
3704 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c00, 0x4c08, 0);
3705 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c08, 0x5000, 1);
3706 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5000, 0x5050, 0);
3707 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5050, 0x5400, 1);
3708 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5400, 0x5450, 0);
3709 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5450, 0x5800, 1);
3710 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5800, 0x5a10, 0);
3711 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5a10, 0x6000, 1);
3712 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6000, 0x600c, 0);
3713 bcm5700_get_reg_blk(pUmDevice, &buf, 0x600c, 0x6400, 1);
3714 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6400, 0x6404, 0);
3715 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6404, 0x6800, 1);
3716 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6800, 0x6848, 0);
3717 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6848, 0x6c00, 1);
3719 i = mm_copy_to_user(useraddr + sizeof(eregs), buf1, eregs.len);
3720 kfree(buf1);
3721 if (i)
3722 return -EFAULT;
3723 return 0;
3725 #endif
3726 #endif
3727 #ifdef ETHTOOL_GPAUSEPARAM
/* Report current flow-control (pause) configuration. */
3728 case ETHTOOL_GPAUSEPARAM: {
3729 struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
3731 if (!pDevice->DisableAutoNeg) {
3732 epause.autoneg = (pDevice->FlowControlCap &
3733 LM_FLOW_CONTROL_AUTO_PAUSE) != 0;
3735 else {
3736 epause.autoneg = 0;
3738 epause.rx_pause =
3739 (pDevice->FlowControl &
3740 LM_FLOW_CONTROL_RECEIVE_PAUSE) != 0;
3741 epause.tx_pause =
3742 (pDevice->FlowControl &
3743 LM_FLOW_CONTROL_TRANSMIT_PAUSE) != 0;
3744 if (mm_copy_to_user(useraddr, &epause, sizeof(epause)))
3745 return -EFAULT;
3747 return 0;
/* Set flow-control capabilities (admin-only) and re-run PHY setup. */
3749 case ETHTOOL_SPAUSEPARAM: {
3750 struct ethtool_pauseparam epause;
3751 unsigned long flags;
3753 if(!capable(CAP_NET_ADMIN))
3754 return -EPERM;
3755 if (mm_copy_from_user(&epause, useraddr, sizeof(epause)))
3756 return -EFAULT;
3757 pDevice->FlowControlCap = 0;
3758 if (epause.autoneg && !pDevice->DisableAutoNeg) {
3759 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
3761 if (epause.rx_pause) {
3762 pDevice->FlowControlCap |=
3763 LM_FLOW_CONTROL_RECEIVE_PAUSE;
3765 if (epause.tx_pause) {
3766 pDevice->FlowControlCap |=
3767 LM_FLOW_CONTROL_TRANSMIT_PAUSE;
3769 if (netif_running(dev)) {
3770 BCM5700_PHY_LOCK(pUmDevice, flags);
3771 LM_SetupPhy(pDevice);
3772 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3775 return 0;
3777 #endif
3778 #ifdef ETHTOOL_GRXCSUM
/* RX checksum offload query/toggle. */
3779 case ETHTOOL_GRXCSUM: {
3780 struct ethtool_value edata = { ETHTOOL_GRXCSUM };
3782 edata.data =
3783 (pDevice->TaskToOffload &
3784 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM) != 0;
3785 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3786 return -EFAULT;
3788 return 0;
3790 case ETHTOOL_SRXCSUM: {
3791 struct ethtool_value edata;
3793 if(!capable(CAP_NET_ADMIN))
3794 return -EPERM;
3795 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3796 return -EFAULT;
3797 if (edata.data) {
3798 if (!(pDevice->TaskOffloadCap &
3799 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3801 return -EINVAL;
3803 pDevice->TaskToOffload |=
3804 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3805 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
3807 else {
3808 pDevice->TaskToOffload &=
3809 ~(LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3810 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM);
3812 return 0;
/* TX checksum offload query/toggle — mirrored into dev->features. */
3814 case ETHTOOL_GTXCSUM: {
3815 struct ethtool_value edata = { ETHTOOL_GTXCSUM };
3817 edata.data =
3818 (dev->features & get_csum_flag( pDevice->ChipRevId)) != 0;
3819 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3820 return -EFAULT;
3822 return 0;
3824 case ETHTOOL_STXCSUM: {
3825 struct ethtool_value edata;
3827 if(!capable(CAP_NET_ADMIN))
3828 return -EPERM;
3829 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3830 return -EFAULT;
3831 if (edata.data) {
3832 if (!(pDevice->TaskOffloadCap &
3833 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3835 return -EINVAL;
3837 dev->features |= get_csum_flag( pDevice->ChipRevId);
3838 pDevice->TaskToOffload |=
3839 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3840 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
3842 else {
3843 dev->features &= ~get_csum_flag( pDevice->ChipRevId);
3844 pDevice->TaskToOffload &=
3845 ~(LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3846 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM);
3848 return 0;
/* Scatter-gather feature query/toggle (NETIF_F_SG). */
3850 case ETHTOOL_GSG: {
3851 struct ethtool_value edata = { ETHTOOL_GSG };
3853 edata.data =
3854 (dev->features & NETIF_F_SG) != 0;
3855 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3856 return -EFAULT;
3857 return 0;
3859 case ETHTOOL_SSG: {
3860 struct ethtool_value edata;
3862 if(!capable(CAP_NET_ADMIN))
3863 return -EPERM;
3864 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3865 return -EFAULT;
3866 if (edata.data) {
3867 dev->features |= NETIF_F_SG;
3869 else {
3870 dev->features &= ~NETIF_F_SG;
3872 return 0;
3874 #endif
3875 #ifdef ETHTOOL_GRINGPARAM
/* Report RX/TX descriptor ring sizes and limits (ethtool -g). */
3876 case ETHTOOL_GRINGPARAM: {
3877 struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
3879 ering.rx_max_pending = T3_STD_RCV_RCB_ENTRY_COUNT - 1;
3880 ering.rx_pending = pDevice->RxStdDescCnt;
3881 ering.rx_mini_max_pending = 0;
3882 ering.rx_mini_pending = 0;
3883 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
3884 ering.rx_jumbo_max_pending = T3_JUMBO_RCV_RCB_ENTRY_COUNT - 1;
3885 ering.rx_jumbo_pending = pDevice->RxJumboDescCnt;
3886 #else
3887 ering.rx_jumbo_max_pending = 0;
3888 ering.rx_jumbo_pending = 0;
3889 #endif
3890 ering.tx_max_pending = MAX_TX_PACKET_DESC_COUNT - 1;
3891 ering.tx_pending = pDevice->TxPacketDescCnt;
3892 if (mm_copy_to_user(useraddr, &ering, sizeof(ering)))
3893 return -EFAULT;
3894 return 0;
3896 #endif
3897 #ifdef ETHTOOL_PHYS_ID
/* Blink the adapter LED for edata.data seconds (ethtool -p). */
3898 case ETHTOOL_PHYS_ID: {
3899 struct ethtool_value edata;
3901 if(!capable(CAP_NET_ADMIN))
3902 return -EPERM;
3903 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3904 return -EFAULT;
3905 if (LM_BlinkLED(pDevice, edata.data) == LM_STATUS_SUCCESS)
3906 return 0;
3907 return -EINTR;
3909 #endif
3910 #ifdef ETHTOOL_GSTRINGS
/* Return the statistics or self-test name tables. */
3911 case ETHTOOL_GSTRINGS: {
3912 struct ethtool_gstrings egstr = { ETHTOOL_GSTRINGS };
3914 if (mm_copy_from_user(&egstr, useraddr, sizeof(egstr)))
3915 return -EFAULT;
3916 switch(egstr.string_set) {
3917 #ifdef ETHTOOL_GSTATS
3918 case ETH_SS_STATS:
3919 egstr.len = ETH_NUM_STATS;
3920 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3921 return -EFAULT;
3922 if (mm_copy_to_user(useraddr + sizeof(egstr),
3923 bcm5700_stats_str_arr,
3924 sizeof(bcm5700_stats_str_arr)))
3925 return -EFAULT;
3926 return 0;
3927 #endif
3928 #ifdef ETHTOOL_TEST
3929 case ETH_SS_TEST:
3930 egstr.len = ETH_NUM_TESTS;
3931 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3932 return -EFAULT;
3933 if (mm_copy_to_user(useraddr + sizeof(egstr),
3934 bcm5700_tests_str_arr,
3935 sizeof(bcm5700_tests_str_arr)))
3936 return -EFAULT;
3937 return 0;
3938 #endif
3939 default:
3940 return -EOPNOTSUPP;
3943 #endif
3944 #ifdef ETHTOOL_GSTATS
/* Gather the hardware statistics block (ethtool -S); entries with a
 * zero offset are synthesized from software CRC/MAC error counters. */
3945 case ETHTOOL_GSTATS: {
3946 struct ethtool_stats estats = { ETHTOOL_GSTATS };
3947 uint64_t stats[ETH_NUM_STATS];
3948 int i;
3949 uint64_t *pStats =
3950 (uint64_t *) pDevice->pStatsBlkVirt;
3952 estats.n_stats = ETH_NUM_STATS;
3953 if (pStats == 0) {
3954 memset(stats, 0, sizeof(stats));
3956 else {
3958 for (i = 0; i < ETH_NUM_STATS; i++) {
3959 if (bcm5700_stats_offset_arr[i] != 0) {
3960 stats[i] = SWAP_DWORD_64(*(pStats +
3961 bcm5700_stats_offset_arr[i]));
3963 else if (i == RX_CRC_IDX) {
3964 stats[i] =
3965 bcm5700_crc_count(pUmDevice);
3967 else if (i == RX_MAC_ERR_IDX) {
3968 stats[i] =
3969 bcm5700_rx_err_count(pUmDevice);
3973 if (mm_copy_to_user(useraddr, &estats, sizeof(estats))) {
3974 return -EFAULT;
3976 if (mm_copy_to_user(useraddr + sizeof(estats), &stats,
3977 sizeof(stats))) {
3978 return -EFAULT;
3980 return 0;
3982 #endif
3983 #ifdef ETHTOOL_TEST
/* Run the self-test suite (ethtool -t).  Offline tests suspend the
 * chip and halt its CPUs first; the device is resumed afterwards and
 * the per-test pass/fail flags are copied back to user space. */
3984 case ETHTOOL_TEST: {
3985 struct ethtool_test etest;
3986 uint64_t tests[ETH_NUM_TESTS] = {0, 0, 0, 0, 0, 0};
3987 LM_POWER_STATE old_power_level;
3989 printk( KERN_ALERT "Performing ethtool test.\n"
3990 "This test will take a few seconds to complete.\n" );
3992 if (mm_copy_from_user(&etest, useraddr, sizeof(etest)))
3993 return -EFAULT;
3995 etest.len = ETH_NUM_TESTS;
3996 old_power_level = pDevice->PowerLevel;
3997 if (old_power_level != LM_POWER_STATE_D0) {
3998 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
3999 LM_SwitchClocks(pDevice);
4001 MM_Sleep(pDevice, 1000);
4002 if (etest.flags & ETH_TEST_FL_OFFLINE) {
4003 b57_suspend_chip(pUmDevice);
4004 MM_Sleep(pDevice, 1000);
4005 LM_HaltCpu(pDevice,T3_RX_CPU_ID | T3_TX_CPU_ID);
4006 MM_Sleep(pDevice, 1000);
4007 if (b57_test_registers(pUmDevice) == 0) {
4008 etest.flags |= ETH_TEST_FL_FAILED;
4009 tests[0] = 1;
4011 MM_Sleep(pDevice, 1000);
4012 if (b57_test_memory(pUmDevice) == 0) {
4013 etest.flags |= ETH_TEST_FL_FAILED;
4014 tests[1] = 1;
4016 MM_Sleep(pDevice, 1000);
4017 if (b57_test_loopback(pUmDevice, NICE_LOOPBACK_TESTTYPE_MAC, 0) == 0) {
4018 etest.flags |= ETH_TEST_FL_FAILED;
4019 tests[2] = 1;
4021 MM_Sleep(pDevice, 1000);
4022 b57_resume_chip(pUmDevice);
4023 /* wait for link to come up for the link test */
4024 MM_Sleep(pDevice, 4000);
4025 if ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
4026 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
4028 /* wait a little longer for linkup on copper */
4029 MM_Sleep(pDevice, 3000);
4032 if (b57_test_nvram(pUmDevice) == 0) {
4033 etest.flags |= ETH_TEST_FL_FAILED;
4034 tests[3] = 1;
4036 MM_Sleep(pDevice, 1000);
4037 if (b57_test_intr(pUmDevice) == 0) {
4038 etest.flags |= ETH_TEST_FL_FAILED;
4039 tests[4] = 1;
4041 MM_Sleep(pDevice, 1000);
4042 if (b57_test_link(pUmDevice) == 0) {
4043 etest.flags |= ETH_TEST_FL_FAILED;
4044 tests[5] = 1;
4046 MM_Sleep(pDevice, 1000);
4047 if (old_power_level != LM_POWER_STATE_D0) {
4048 LM_SetPowerState(pDevice, old_power_level);
4050 if (mm_copy_to_user(useraddr, &etest, sizeof(etest))) {
4051 return -EFAULT;
4053 if (mm_copy_to_user(useraddr + sizeof(etest), tests,
4054 sizeof(tests))) {
4055 return -EFAULT;
4057 return 0;
4059 #endif
4060 #ifdef ETHTOOL_GTSO
/* TCP segmentation offload query/toggle (only when built with BCM_TSO). */
4061 case ETHTOOL_GTSO: {
4062 struct ethtool_value edata = { ETHTOOL_GTSO };
4064 #ifdef BCM_TSO
4065 edata.data =
4066 (dev->features & NETIF_F_TSO) != 0;
4067 #else
4068 edata.data = 0;
4069 #endif
4070 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
4071 return -EFAULT;
4072 return 0;
4074 #endif
4075 #ifdef ETHTOOL_STSO
4076 case ETHTOOL_STSO: {
4077 #ifdef BCM_TSO
4078 struct ethtool_value edata;
4080 if (!capable(CAP_NET_ADMIN))
4081 return -EPERM;
4083 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
4084 return -EFAULT;
4086 if (!(pDevice->TaskToOffload &
4087 LM_TASK_OFFLOAD_TCP_SEGMENTATION)) {
4088 return -EINVAL;
4091 dev->features &= ~NETIF_F_TSO;
4093 if (edata.data) {
/* 5714-family hardware cannot do TSO and jumbo frames together. */
4094 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4095 (dev->mtu > 1500)) {
4096 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
4097 return -EINVAL;
4098 } else {
4099 dev->features |= NETIF_F_TSO;
4102 return 0;
4103 #else
4104 return -EINVAL;
4105 #endif
4107 #endif
4110 return -EOPNOTSUPP;
4112 #endif /* #ifdef SIOCETHTOOL */
4114 #if (LINUX_VERSION_CODE >= 0x20400) && (LINUX_VERSION_CODE < 0x20600)
4115 #include <linux/iobuf.h>
4116 #endif
4118 #ifdef BCMDBG
/* Debug-only (BCMDBG builds) state dump: prints driver build info, link
 * settings, PCI identifiers, MAC address, interface statistics and —
 * when a ROBO switch is attached — its registers into the supplied
 * bcmstrbuf string buffer. */
4119 STATIC void
4120 b57_dump(struct net_device *dev, struct bcmstrbuf *b)
4122 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4123 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4124 struct net_device_stats *st;
4125 char macaddr[32];
4127 bcm_bprintf(b, "b57%d: %s %s version %s\n", pUmDevice->index,
4128 __DATE__, __TIME__, EPI_VERSION_STR);
4130 bcm_bprintf(b, "dev 0x%x pdev 0x%x unit %d msglevel %d flags 0x%x boardflags 0x%x\n",
4131 (uint)dev, (uint)pDevice, pUmDevice->index, b57_msg_level,
4132 pDevice->Flags, pUmDevice->boardflags);
4133 bcm_bprintf(b, "speed/duplex %d/%s promisc 0x%x loopbk %d advertise 0x%x\n",
4134 pDevice->LineSpeed,
4135 (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL) ? "full" : "half",
4136 pDevice->ReceiveMask & LM_PROMISCUOUS_MODE,
4137 pDevice->LoopBackMode,
4138 pDevice->advertising);
4139 bcm_bprintf(b, "allmulti %d qos %d phyaddr %d linkstat %d\n",
4140 pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST, pUmDevice->qos,
4141 pDevice->PhyAddr, pDevice->LinkStatus);
4142 bcm_bprintf(b, "vendor 0x%x device 0x%x rev %d subsys vendor 0x%x subsys id 0x%x\n",
4143 pDevice->PciVendorId, pDevice->PciDeviceId, pDevice->PciRevId,
4144 pDevice->SubsystemVendorId, pDevice->SubsystemId);
4145 bcm_bprintf(b, "MAC addr %s\n", bcm_ether_ntoa((struct ether_addr *)&pDevice->NodeAddress,
4146 macaddr));
/* Interface counters, if the stats block is available. */
4148 if ((st = bcm5700_get_stats(dev)) != NULL) {
4149 bcm_bprintf(b, "txframe %d txbyte %d txerror %d rxframe %d rxbyte %d rxerror %d\n",
4150 st->tx_packets, st->tx_bytes, st->tx_errors,
4151 st->rx_packets, st->rx_bytes, st->rx_errors);
4152 bcm_bprintf(b, "multicast %d collisions %d tx_abort %d tx_carrier %d\n",
4153 st->multicast, st->collisions, st->tx_aborted_errors,
4154 st->tx_carrier_errors);
4155 bcm_bprintf(b, "rx_length %d rx_over %d rx_frame %d rx_crc %d\n",
4156 st->rx_length_errors, st->rx_over_errors, st->rx_frame_errors,
4157 st->rx_crc_errors);
/* Attached ROBO switch registers, when present. */
4159 if (pDevice->Flags & ROBO_SWITCH_FLAG)
4160 robo_dump_regs(pUmDevice->robo, b);
4162 bcm_bprintf(b, "\n");
4164 #endif /* BCMDBG */
4166 /* Provide ioctl() calls to examine the MII xcvr state. */
4167 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4169 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4170 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4171 u16 *data = (u16 *)&rq->ifr_data;
4172 u32 value;
4173 unsigned long flags;
4175 switch(cmd) {
4176 #ifdef SIOCGMIIPHY
4177 case SIOCGMIIPHY: /* Get the address of the PHY in use. */
4179 data[0] = pDevice->PhyAddr;
4180 return 0;
4181 #endif
4183 #ifdef SIOCGMIIREG
4184 case SIOCGMIIREG: /* Read the specified MII register. */
4186 uint32 savephyaddr = 0;
4188 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4189 return -EOPNOTSUPP;
4191 /* ifup only waits for 5 seconds for link up */
4192 /* NIC may take more than 5 seconds to establish link */
4193 if ((pUmDevice->delayed_link_ind > 0) &&
4194 delay_link[pUmDevice->index]) {
4195 return -EAGAIN;
4198 BCM5700_PHY_LOCK(pUmDevice, flags);
4199 if (data[0] != 0xffff) {
4200 savephyaddr = pDevice->PhyAddr;
4201 pDevice->PhyAddr = data[0];
4203 LM_ReadPhy(pDevice, data[1] & 0x1f, (LM_UINT32 *)&value);
4204 if (data[0] != 0xffff)
4205 pDevice->PhyAddr = savephyaddr;
4206 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4207 data[3] = value & 0xffff;
4208 return 0;
4210 #endif
4212 case SIOCGETCPHYRD: /* Read the specified MII register. */
4213 case SIOCGETCPHYRD2:
4215 int args[2];
4216 uint32 savephyaddr = 0;
4218 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4219 return -EOPNOTSUPP;
4221 /* ifup only waits for 5 seconds for link up */
4222 /* NIC may take more than 5 seconds to establish link */
4223 if ((pUmDevice->delayed_link_ind > 0) &&
4224 delay_link[pUmDevice->index]) {
4225 return -EAGAIN;
4228 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4229 return -EFAULT;
4231 BCM5700_PHY_LOCK(pUmDevice, flags);
4232 if (cmd == SIOCGETCPHYRD2) {
4233 savephyaddr = pDevice->PhyAddr;
4234 pDevice->PhyAddr = (args[0] >> 16) & 0xffff;
4236 LM_ReadPhy(pDevice, args[0] & 0xffff, (LM_UINT32 *)&value);
4237 if (cmd == SIOCGETCPHYRD2)
4238 pDevice->PhyAddr = savephyaddr;
4239 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4241 args[1] = value & 0xffff;
4242 if (mm_copy_to_user(rq->ifr_data, &args, sizeof(args)))
4243 return -EFAULT;
4245 return 0;
4248 #ifdef SIOCSMIIREG
4249 case SIOCSMIIREG: /* Write the specified MII register */
4251 uint32 savephyaddr = 0;
4253 if (!capable(CAP_NET_ADMIN))
4254 return -EPERM;
4256 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4257 return -EOPNOTSUPP;
4259 BCM5700_PHY_LOCK(pUmDevice, flags);
4260 if (data[0] != 0xffff) {
4261 savephyaddr = pDevice->PhyAddr;
4262 pDevice->PhyAddr = data[0];
4264 LM_WritePhy(pDevice, data[1] & 0x1f, data[2]);
4266 /* Invalidate current robo page */
4267 if ((pDevice->Flags & ROBO_SWITCH_FLAG) && pUmDevice->robo &&
4268 (pDevice->PhyAddr == 0x1e) && ((data[1] & 0x1f) == 0x10))
4269 ((robo_info_t *)pUmDevice->robo)->page = (data[2] >> 8);
4271 if (data[0] != 0xffff)
4272 pDevice->PhyAddr = savephyaddr;
4273 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4274 data[3] = 0;
4275 return 0;
4277 #endif
4279 case SIOCSETCPHYWR: /* Write the specified MII register */
4280 case SIOCSETCPHYWR2:
4282 int args[2];
4283 uint32 savephyaddr = 0;
4285 if (!capable(CAP_NET_ADMIN))
4286 return -EPERM;
4288 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4289 return -EOPNOTSUPP;
4291 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4292 return -EFAULT;
4294 BCM5700_PHY_LOCK(pUmDevice, flags);
4295 if (cmd == SIOCSETCPHYWR2) {
4296 savephyaddr = pDevice->PhyAddr;
4297 pDevice->PhyAddr = (args[0] >> 16) & 0xffff;
4299 LM_WritePhy(pDevice, args[0] & 0xffff, args[1]);
4301 /* Invalidate current robo page */
4302 if ((pDevice->Flags & ROBO_SWITCH_FLAG) && pUmDevice->robo &&
4303 (pDevice->PhyAddr == 0x1e) && ((args[0] & 0xffff) == 0x10))
4304 ((robo_info_t *)pUmDevice->robo)->page = ((uint16)args[1] >> 8);
4306 if (cmd == SIOCSETCPHYWR2)
4307 pDevice->PhyAddr = savephyaddr;
4308 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4309 return 0;
4312 case SIOCGETCROBORD: /* Read the specified ROBO register. */
4314 int args[2];
4315 robo_info_t *robo = (robo_info_t *)pUmDevice->robo;
4317 if (((pDevice->Flags & ROBO_SWITCH_FLAG) == 0) || (robo == NULL))
4318 return -ENXIO;
4320 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4321 return -EFAULT;
4323 if (robo->ops->read_reg(robo, (args[0] >> 16) & 0xffff, args[0] & 0xffff, &value, 2))
4324 return -EIO;
4326 args[1] = value & 0xffff;
4327 if (mm_copy_to_user(rq->ifr_data, &args, sizeof(args)))
4328 return -EFAULT;
4330 return 0;
4333 case SIOCSETCROBOWR: /* Write the specified ROBO register. */
4335 int args[2];
4336 robo_info_t *robo = (robo_info_t *)pUmDevice->robo;
4338 if (!capable(CAP_NET_ADMIN))
4339 return -EPERM;
4341 if (((pDevice->Flags & ROBO_SWITCH_FLAG) == 0) || (robo == NULL))
4342 return -ENXIO;
4344 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4345 return -EFAULT;
4347 if (robo->ops->write_reg(robo, (args[0] >> 16) & 0xffff, args[0] & 0xffff,
4348 &args[1], 2))
4349 return -EIO;
4351 return 0;
4354 case SIOCSETGETVAR:
4356 int ret = 0;
4357 void *buffer = NULL;
4358 bool get = FALSE, set = TRUE;
4359 et_var_t var;
4361 if (set && mm_copy_from_user(&var, rq->ifr_data, sizeof(var)))
4362 return -EFAULT;
4364 /* prepare buffer if any */
4365 if (var.buf) {
4366 if (!var.set)
4367 get = TRUE;
4369 if (!(buffer = (void *) MALLOC(SI_OSH, var.len))) {
4370 B57_ERR(("%s: out of memory, malloced %d bytes\n", __FUNCTION__,
4371 MALLOCED(SI_OSH)));
4372 return -ENOMEM;
4375 if (mm_copy_from_user(buffer, var.buf, var.len)) {
4376 MFREE(SI_OSH, buffer, var.len);
4377 return -EFAULT;
4381 /* do var.cmd */
4382 switch (var.cmd) {
4383 case IOV_ET_ROBO_DEVID:
4385 uint *vecarg = (uint *)buffer;
4386 robo_info_t *robo = (robo_info_t *)pUmDevice->robo;
4388 if (((pDevice->Flags & ROBO_SWITCH_FLAG) == 0) ||
4389 (robo == NULL)) {
4390 ret = -ENXIO;
4391 break;
4394 /* get robo device id */
4395 *vecarg = robo->devid;
4397 if (mm_copy_to_user(var.buf, buffer, var.len)) {
4398 ret = -EFAULT;
4399 break;
4402 break;
4405 default:
4406 ret = -EOPNOTSUPP;
4407 break;
4410 if (buffer)
4411 MFREE(SI_OSH, buffer, var.len);
4413 return ret;
4416 case SIOCSETCSETMSGLEVEL:
4417 if (mm_copy_from_user(&value, rq->ifr_data, sizeof(value)))
4418 return -EFAULT;
4420 b57_msg_level = value;
4421 printf("%s: msglevel set to 0x%x\n", __FUNCTION__, b57_msg_level);
4422 return 0;
4424 case SIOCSETCQOS: /* Set the qos flag */
4425 if (mm_copy_from_user(&value, rq->ifr_data, sizeof(value)))
4426 return -EFAULT;
4428 pUmDevice->qos = value;
4429 B57_INFO(("Qos flag now: %d\n", pUmDevice->qos));
4430 return 0;
4432 case SIOCGETCDUMP:
4434 char *buf;
4436 if ((buf = MALLOC(SI_OSH, 4096)) == NULL) {
4437 B57_ERR(("%s: out of memory, malloced %d bytes\n", __FUNCTION__,
4438 MALLOCED(SI_OSH)));
4439 return (-ENOMEM);
4442 if (b57_msg_level & 0x10000)
4443 bcmdumplog(buf, 4096);
4444 #ifdef BCMDBG
4445 else {
4446 struct bcmstrbuf b;
4447 bcm_binit(&b, buf, 4096);
4448 b57_dump(dev, &b);
4450 #endif /* BCMDBG */
4451 value = mm_copy_to_user(rq->ifr_data, buf, 4096);
4453 MFREE(SI_OSH, buf, 4096);
4455 if (value)
4456 return -EFAULT;
4457 else
4458 return 0;
4461 #ifdef NICE_SUPPORT
4462 case SIOCNICE:
4464 struct nice_req* nrq;
4466 if (!capable(CAP_NET_ADMIN))
4467 return -EPERM;
4469 nrq = (struct nice_req*)&rq->ifr_ifru;
4470 if( nrq->cmd == NICE_CMD_QUERY_SUPPORT ) {
4471 nrq->nrq_magic = NICE_DEVICE_MAGIC;
4472 nrq->nrq_support_rx = 1;
4473 nrq->nrq_support_vlan = 1;
4474 nrq->nrq_support_get_speed = 1;
4475 #ifdef BCM_NAPI_RXPOLL
4476 nrq->nrq_support_rx_napi = 1;
4477 #endif
4478 return 0;
4480 #ifdef BCM_NAPI_RXPOLL
4481 else if( nrq->cmd == NICE_CMD_SET_RX_NAPI )
4482 #else
4483 else if( nrq->cmd == NICE_CMD_SET_RX )
4484 #endif
4486 pUmDevice->nice_rx = nrq->nrq_rx;
4487 pUmDevice->nice_ctx = nrq->nrq_ctx;
4488 bcm5700_set_vlan_mode(pUmDevice);
4489 return 0;
4491 #ifdef BCM_NAPI_RXPOLL
4492 else if( nrq->cmd == NICE_CMD_GET_RX_NAPI )
4493 #else
4494 else if( nrq->cmd == NICE_CMD_GET_RX )
4495 #endif
4497 nrq->nrq_rx = pUmDevice->nice_rx;
4498 nrq->nrq_ctx = pUmDevice->nice_ctx;
4499 return 0;
4501 else if( nrq->cmd == NICE_CMD_GET_SPEED ) {
4502 if(pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE){
4503 nrq->nrq_speed = 0;
4505 else if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS) {
4506 nrq->nrq_speed = SPEED_1000;
4507 } else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS) {
4508 nrq->nrq_speed = SPEED_100;
4509 } else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS) {
4510 nrq->nrq_speed = SPEED_100;
4511 } else {
4512 nrq->nrq_speed = 0;
4514 return 0;
4516 else {
4517 if (!pUmDevice->opened)
4518 return -EINVAL;
4520 switch (nrq->cmd) {
4521 case NICE_CMD_BLINK_LED:
4522 if (LM_BlinkLED(pDevice, nrq->nrq_blink_time) ==
4523 LM_STATUS_SUCCESS) {
4524 return 0;
4526 return -EINTR;
4528 case NICE_CMD_DIAG_SUSPEND:
4529 b57_suspend_chip(pUmDevice);
4530 return 0;
4532 case NICE_CMD_DIAG_RESUME:
4533 b57_resume_chip(pUmDevice);
4534 return 0;
4536 case NICE_CMD_REG_READ:
4537 if (nrq->nrq_offset >= 0x10000) {
4538 nrq->nrq_data = LM_RegRdInd(pDevice,
4539 nrq->nrq_offset);
4541 else {
4542 nrq->nrq_data = LM_RegRd(pDevice,
4543 nrq->nrq_offset);
4545 return 0;
4547 case NICE_CMD_REG_WRITE:
4548 if (nrq->nrq_offset >= 0x10000) {
4549 LM_RegWrInd(pDevice, nrq->nrq_offset,
4550 nrq->nrq_data);
4552 else {
4553 LM_RegWr(pDevice, nrq->nrq_offset,
4554 nrq->nrq_data, FALSE);
4556 return 0;
4558 case NICE_CMD_REG_READ_DIRECT:
4559 case NICE_CMD_REG_WRITE_DIRECT:
4560 if ((nrq->nrq_offset >= 0x10000) ||
4561 (pDevice->Flags & UNDI_FIX_FLAG)) {
4562 return -EINVAL;
4565 if (nrq->cmd == NICE_CMD_REG_READ_DIRECT) {
4566 nrq->nrq_data = REG_RD_OFFSET(pDevice,
4567 nrq->nrq_offset);
4569 else {
4570 REG_WR_OFFSET(pDevice, nrq->nrq_offset,
4571 nrq->nrq_data);
4573 return 0;
4575 case NICE_CMD_MEM_READ:
4576 nrq->nrq_data = LM_MemRdInd(pDevice,
4577 nrq->nrq_offset);
4578 return 0;
4580 case NICE_CMD_MEM_WRITE:
4581 LM_MemWrInd(pDevice, nrq->nrq_offset,
4582 nrq->nrq_data);
4583 return 0;
4585 case NICE_CMD_CFG_READ32:
4586 pci_read_config_dword(pUmDevice->pdev,
4587 nrq->nrq_offset, (u32 *)&nrq->nrq_data);
4588 return 0;
4590 case NICE_CMD_CFG_READ16:
4591 pci_read_config_word(pUmDevice->pdev,
4592 nrq->nrq_offset, (u16 *)&nrq->nrq_data);
4593 return 0;
4595 case NICE_CMD_CFG_READ8:
4596 pci_read_config_byte(pUmDevice->pdev,
4597 nrq->nrq_offset, (u8 *)&nrq->nrq_data);
4598 return 0;
4600 case NICE_CMD_CFG_WRITE32:
4601 pci_write_config_dword(pUmDevice->pdev,
4602 nrq->nrq_offset, (u32)nrq->nrq_data);
4603 return 0;
4605 case NICE_CMD_CFG_WRITE16:
4606 pci_write_config_word(pUmDevice->pdev,
4607 nrq->nrq_offset, (u16)nrq->nrq_data);
4608 return 0;
4610 case NICE_CMD_CFG_WRITE8:
4611 pci_write_config_byte(pUmDevice->pdev,
4612 nrq->nrq_offset, (u8)nrq->nrq_data);
4613 return 0;
4615 case NICE_CMD_RESET:
4616 bcm5700_reset(dev);
4617 return 0;
4619 case NICE_CMD_ENABLE_MAC_LOOPBACK:
4620 if (pDevice->LoopBackMode != 0) {
4621 return -EINVAL;
4624 BCM5700_PHY_LOCK(pUmDevice, flags);
4625 LM_EnableMacLoopBack(pDevice);
4626 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4627 return 0;
4629 case NICE_CMD_DISABLE_MAC_LOOPBACK:
4630 if (pDevice->LoopBackMode !=
4631 LM_MAC_LOOP_BACK_MODE) {
4632 return -EINVAL;
4635 BCM5700_PHY_LOCK(pUmDevice, flags);
4636 LM_DisableMacLoopBack(pDevice);
4637 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4638 return 0;
4640 case NICE_CMD_ENABLE_PHY_LOOPBACK:
4641 if (pDevice->LoopBackMode != 0) {
4642 return -EINVAL;
4645 BCM5700_PHY_LOCK(pUmDevice, flags);
4646 LM_EnablePhyLoopBack(pDevice);
4647 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4648 return 0;
4650 case NICE_CMD_DISABLE_PHY_LOOPBACK:
4651 if (pDevice->LoopBackMode !=
4652 LM_PHY_LOOP_BACK_MODE) {
4653 return -EINVAL;
4656 BCM5700_PHY_LOCK(pUmDevice, flags);
4657 LM_DisablePhyLoopBack(pDevice);
4658 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4659 return 0;
4661 case NICE_CMD_ENABLE_EXT_LOOPBACK:
4662 if (pDevice->LoopBackMode != 0) {
4663 return -EINVAL;
4666 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
4667 if (nrq->nrq_speed != 1000)
4668 return -EINVAL;
4670 else {
4671 if ((nrq->nrq_speed != 1000) &&
4672 (nrq->nrq_speed != 100) &&
4673 (nrq->nrq_speed != 10)) {
4674 return -EINVAL;
4677 BCM5700_PHY_LOCK(pUmDevice, flags);
4678 LM_EnableExtLoopBack(pDevice, nrq->nrq_speed);
4679 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4680 return 0;
4682 case NICE_CMD_DISABLE_EXT_LOOPBACK:
4683 if (pDevice->LoopBackMode !=
4684 LM_EXT_LOOP_BACK_MODE) {
4685 return -EINVAL;
4688 BCM5700_PHY_LOCK(pUmDevice, flags);
4689 LM_DisableExtLoopBack(pDevice);
4690 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4691 return 0;
4693 case NICE_CMD_INTERRUPT_TEST:
4694 nrq->nrq_intr_test_result =
4695 b57_test_intr(pUmDevice);
4696 return 0;
4698 case NICE_CMD_LOOPBACK_TEST:
4699 value = 0;
4700 switch (nrq->nrq_looptype) {
4701 case NICE_LOOPBACK_TESTTYPE_EXT:
4702 if ((nrq->nrq_loopspeed & ~NICE_LOOPBACK_TEST_SPEEDMASK) ||
4703 !(nrq->nrq_loopspeed & NICE_LOOPBACK_TEST_SPEEDMASK))
4704 break;
4705 switch (nrq->nrq_loopspeed) {
4706 case NICE_LOOPBACK_TEST_10MBPS:
4707 value = LM_LINE_SPEED_10MBPS;
4708 break;
4709 case NICE_LOOPBACK_TEST_100MBPS:
4710 value = LM_LINE_SPEED_100MBPS;
4711 break;
4712 case NICE_LOOPBACK_TEST_1000MBPS:
4713 value = LM_LINE_SPEED_1000MBPS;
4714 break;
4716 /* Fall through */
4718 case NICE_LOOPBACK_TESTTYPE_MAC:
4719 case NICE_LOOPBACK_TESTTYPE_PHY:
4720 b57_suspend_chip(pUmDevice);
4721 value = b57_test_loopback(pUmDevice,
4722 nrq->nrq_looptype, value);
4723 b57_resume_chip(pUmDevice);
4724 break;
4727 if (value == 1) {
4728 /* A '1' indicates success */
4729 value = 0;
4730 } else {
4731 value = -EINTR;
4734 return value;
4736 case NICE_CMD_KMALLOC_PHYS: {
4737 #if (LINUX_VERSION_CODE >= 0x020400)
4738 dma_addr_t mapping;
4739 __u64 cpu_pa;
4740 void *ptr;
4741 int i;
4742 struct page *pg, *last_pg;
4744 for (i = 0; i < MAX_MEM2; i++) {
4745 if (pUmDevice->mem_size_list2[i] == 0)
4746 break;
4748 if (i >= MAX_MEM2)
4749 return -EFAULT;
4750 ptr = pci_alloc_consistent(pUmDevice->pdev,
4751 nrq->nrq_size, &mapping);
4752 if (!ptr) {
4753 return -EFAULT;
4755 pUmDevice->mem_size_list2[i] = nrq->nrq_size;
4756 pUmDevice->mem_list2[i] = ptr;
4757 pUmDevice->dma_list2[i] = mapping;
4759 /* put pci mapping at the beginning of buffer */
4760 *((__u64 *) ptr) = (__u64) mapping;
4762 /* Probably won't work on some architectures */
4763 /* get CPU mapping */
4764 cpu_pa = (__u64) virt_to_phys(ptr);
4765 pUmDevice->cpu_pa_list2[i] = cpu_pa;
4766 nrq->nrq_phys_addr_lo = (__u32) cpu_pa;
4767 nrq->nrq_phys_addr_hi = (__u32) (cpu_pa >> 32);
4769 pg = virt_to_page(ptr);
4770 last_pg = virt_to_page(ptr + nrq->nrq_size - 1);
4771 for (; ; pg++) {
4772 #if (LINUX_VERSION_CODE > 0x020500)
4773 SetPageReserved(pg);
4774 #else
4775 mem_map_reserve(pg);
4776 #endif
4777 if (pg == last_pg)
4778 break;
4780 return 0;
4781 #else
4782 return -EOPNOTSUPP;
4783 #endif
4786 case NICE_CMD_KFREE_PHYS: {
4787 int i;
4788 __u64 cpu_pa;
4790 cpu_pa = (__u64) nrq->nrq_phys_addr_lo +
4791 ((__u64) nrq->nrq_phys_addr_hi << 32);
4792 for (i = 0; i < MAX_MEM2; i++) {
4793 if (pUmDevice->cpu_pa_list2[i] ==
4794 cpu_pa)
4796 break;
4799 if (i >= MAX_MEM2)
4800 return -EFAULT;
4802 bcm5700_freemem2(pUmDevice, i);
4803 return 0;
4806 case NICE_CMD_SET_WRITE_PROTECT:
4807 if (nrq->nrq_write_protect)
4808 pDevice->Flags |= EEPROM_WP_FLAG;
4809 else
4810 pDevice->Flags &= ~EEPROM_WP_FLAG;
4811 return 0;
4812 case NICE_CMD_GET_STATS_BLOCK: {
4813 PT3_STATS_BLOCK pStats =
4814 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4815 if (mm_copy_to_user(nrq->nrq_stats_useraddr,
4816 pStats, nrq->nrq_stats_size)) {
4817 return -EFAULT;
4819 return 0;
4821 case NICE_CMD_CLR_STATS_BLOCK: {
4822 int j;
4823 PT3_STATS_BLOCK pStats =
4824 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4826 memset(pStats, 0, sizeof(T3_STATS_BLOCK));
4827 if (T3_ASIC_REV(pDevice->ChipRevId) ==
4828 T3_ASIC_REV_5705) {
4829 return 0;
4831 for(j = 0x0300; j < 0x0b00; j = j + 4) {
4832 MEM_WR_OFFSET(pDevice, j, 0);
4835 return 0;
4840 return -EOPNOTSUPP;
4842 #endif /* NICE_SUPPORT */
4843 #ifdef SIOCETHTOOL
4844 case SIOCETHTOOL:
4845 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
4846 #endif
4847 default:
4848 return -EOPNOTSUPP;
4850 return -EOPNOTSUPP;
4853 STATIC void bcm5700_do_rx_mode(struct net_device *dev)
4855 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4856 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4857 int i;
4858 struct dev_mc_list *mclist;
4860 LM_MulticastClear(pDevice);
4861 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4862 i++, mclist = mclist->next) {
4863 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4865 if (dev->flags & IFF_ALLMULTI) {
4866 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4867 LM_SetReceiveMask(pDevice,
4868 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4871 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4872 LM_SetReceiveMask(pDevice,
4873 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4875 if (dev->flags & IFF_PROMISC) {
4876 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4877 LM_SetReceiveMask(pDevice,
4878 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4881 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4882 LM_SetReceiveMask(pDevice,
4883 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4888 STATIC void bcm5700_set_rx_mode(struct net_device *dev)
4890 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4891 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4892 int i;
4893 struct dev_mc_list *mclist;
4894 unsigned long flags;
4896 BCM5700_PHY_LOCK(pUmDevice, flags);
4898 LM_MulticastClear(pDevice);
4899 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4900 i++, mclist = mclist->next) {
4901 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4903 if (dev->flags & IFF_ALLMULTI) {
4904 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4905 LM_SetReceiveMask(pDevice,
4906 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4909 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4910 LM_SetReceiveMask(pDevice,
4911 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4913 if (dev->flags & IFF_PROMISC) {
4914 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4915 LM_SetReceiveMask(pDevice,
4916 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4919 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4920 LM_SetReceiveMask(pDevice,
4921 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4924 BCM5700_PHY_UNLOCK(pUmDevice, flags);
/* Set the hardware MAC address. */
4930 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p)
4932 struct sockaddr *addr=p;
4933 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) dev->priv;
4934 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4936 if(is_valid_ether_addr(addr->sa_data)){
4938 memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
4939 if (pUmDevice->opened)
4940 LM_SetMacAddress(pDevice, dev->dev_addr);
4941 return 0;
4943 return -EINVAL;
4946 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
/* net_device change_mtu hook (jumbo-capable build only).
 * Validates the requested MTU, updates the LM-layer Rx/Tx MTUs and the
 * jumbo descriptor accounting, and — when the interface is open on a
 * jumbo-capable chip — tears the adapter down and reinitializes it so
 * the new buffer sizes take effect.
 * Returns 0 on success, -EINVAL for out-of-range MTU, -EAGAIN while
 * the device is suspended.
 */
STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu)
{
	int pkt_size = new_mtu + ETHERNET_PACKET_HEADER_SIZE;
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
	unsigned long flags;
	int reinit = 0;

	/* Absolute frame-size limits (no CRC). */
	if ((pkt_size < MIN_ETHERNET_PACKET_SIZE_NO_CRC) ||
		(pkt_size > MAX_ETHERNET_JUMBO_PACKET_SIZE_NO_CRC)) {
		return -EINVAL;
	}
	/* Non-jumbo-capable chips are limited to standard frames. */
	if ( !(pDevice->Flags & JUMBO_CAPABLE_FLAG) &&
		(pkt_size > MAX_ETHERNET_PACKET_SIZE_NO_CRC) ) {
		return -EINVAL;
	}
	if (pUmDevice->suspended)
		return -EAGAIN;

	/* A live, jumbo-capable interface must be fully reinitialized
	 * for the new buffer geometry to take effect. */
	if (pUmDevice->opened && (new_mtu != dev->mtu) &&
		(pDevice->Flags & JUMBO_CAPABLE_FLAG)) {
		reinit = 1;
	}

	BCM5700_PHY_LOCK(pUmDevice, flags);
	if (reinit) {
		netif_stop_queue(dev);
		bcm5700_shutdown(pUmDevice);
		bcm5700_freemem(dev);
	}

	dev->mtu = new_mtu;
	/* The LM layer never uses an MTU below the standard frame size. */
	if (pkt_size < MAX_ETHERNET_PACKET_SIZE_NO_CRC) {
		pDevice->RxMtu = pDevice->TxMtu =
			MAX_ETHERNET_PACKET_SIZE_NO_CRC;
	}
	else {
		pDevice->RxMtu = pDevice->TxMtu = pkt_size;
	}

	/* Jumbo descriptors are only needed above the 1514-byte MTU. */
	if (dev->mtu <= 1514) {
		pDevice->RxJumboDescCnt = 0;
	}
	else if (pDevice->Flags & JUMBO_CAPABLE_FLAG){
		pDevice->RxJumboDescCnt =
			rx_jumbo_desc_cnt[pUmDevice->index];
	}
	pDevice->RxPacketDescCnt = pDevice->RxJumboDescCnt +
		pDevice->RxStdDescCnt;

	/* Round the jumbo buffer up to a cache-line multiple;
	 * the +8 leaves room for CRC + VLAN tag. */
	pDevice->RxJumboBufferSize = (pDevice->RxMtu + 8 /* CRC + VLAN */ +
		COMMON_CACHE_LINE_SIZE-1) & ~COMMON_CACHE_LINE_MASK;

#ifdef BCM_TSO
	/* 5714-family hardware cannot do TSO with jumbo frames. */
	if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
		(dev->mtu > 1514) ) {
		if (dev->features & NETIF_F_TSO) {
			dev->features &= ~NETIF_F_TSO;
			printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
		}
	}
#endif

	if (reinit) {
		/* Bring the adapter back up with the new geometry. */
		LM_InitializeAdapter(pDevice);
		bcm5700_do_rx_mode(dev);
		bcm5700_set_vlan_mode(pUmDevice);
		bcm5700_init_counters(pUmDevice);
		/* Re-apply a user-set MAC address lost by the reset. */
		if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
			LM_SetMacAddress(pDevice, dev->dev_addr);
		}
		netif_start_queue(dev);
		bcm5700_intr_on(pUmDevice);
	}
	BCM5700_PHY_UNLOCK(pUmDevice, flags);

	return 0;
}
5027 #endif
5030 #if (LINUX_VERSION_CODE < 0x020300)
/* Legacy (pre-2.3 kernel) probe: walk every Ethernet-class PCI device,
 * match it against bcm5700_pci_tbl (including subsystem IDs), and call
 * bcm5700_init_one() for each match.  The dev argument is unused.
 * Returns 0 if at least one card was initialized, -ENODEV otherwise.
 */
bcm5700_probe(struct net_device *dev)
{
	int cards_found = 0;
	struct pci_dev *pdev = NULL;
	struct pci_device_id *pci_tbl;
	u16 ssvid, ssid;

	if ( ! pci_present())
		return -ENODEV;

	pci_tbl = bcm5700_pci_tbl;
	/* pci_find_class(..., pdev) iterates from the previous hit. */
	while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
		int idx;

		pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &ssvid);
		pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &ssid);
		/* The table is terminated by a zero vendor entry. */
		for (idx = 0; pci_tbl[idx].vendor; idx++) {
			if ((pci_tbl[idx].vendor == PCI_ANY_ID ||
				pci_tbl[idx].vendor == pdev->vendor) &&
				(pci_tbl[idx].device == PCI_ANY_ID ||
				pci_tbl[idx].device == pdev->device) &&
				(pci_tbl[idx].subvendor == PCI_ANY_ID ||
				pci_tbl[idx].subvendor == ssvid) &&
				(pci_tbl[idx].subdevice == PCI_ANY_ID ||
				pci_tbl[idx].subdevice == ssid))
				break;
		}
		/* Hit the terminator: this device is not ours. */
		if (pci_tbl[idx].vendor == 0)
			continue;

		if (bcm5700_init_one(pdev, &pci_tbl[idx]) == 0)
			cards_found++;
	}
	return cards_found ? 0 : -ENODEV;
}
5073 #ifdef MODULE
/* Legacy module entry point (pre-2.3 kernels): scan the PCI bus. */
int init_module(void)
{
	return bcm5700_probe(NULL);
}
5079 void cleanup_module(void)
5081 struct net_device *next_dev;
5082 PUM_DEVICE_BLOCK pUmDevice;
5084 #ifdef BCM_PROC_FS
5085 bcm5700_proc_remove_notifier();
5086 #endif
5087 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
5088 while (root_tigon3_dev) {
5089 pUmDevice = (PUM_DEVICE_BLOCK)root_tigon3_dev->priv;
5090 #ifdef BCM_PROC_FS
5091 bcm5700_proc_remove_dev(root_tigon3_dev);
5092 #endif
5093 next_dev = pUmDevice->next_module;
5094 unregister_netdev(root_tigon3_dev);
5095 if (pUmDevice->lm_dev.pMappedMemBase)
5096 iounmap(pUmDevice->lm_dev.pMappedMemBase);
5097 #if (LINUX_VERSION_CODE < 0x020600)
5098 kfree(root_tigon3_dev);
5099 #else
5100 free_netdev(root_tigon3_dev);
5101 #endif
5102 root_tigon3_dev = next_dev;
5104 #ifdef BCM_IOCTL32
5105 unregister_ioctl32_conversion(SIOCNICE);
5106 #endif
5109 #endif /* MODULE */
5110 #else /* LINUX_VERSION_CODE < 0x020300 */
5112 #if (LINUX_VERSION_CODE >= 0x020406)
5113 static int bcm5700_suspend (struct pci_dev *pdev, DRV_SUSPEND_STATE_TYPE state)
5114 #else
5115 static void bcm5700_suspend (struct pci_dev *pdev)
5116 #endif
5118 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
5119 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
5120 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
5122 if (!netif_running(dev))
5123 #if (LINUX_VERSION_CODE >= 0x020406)
5124 return 0;
5125 #else
5126 return;
5127 #endif
5129 netif_device_detach (dev);
5130 bcm5700_shutdown(pUmDevice);
5132 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
5134 /* pci_power_off(pdev, -1);*/
5135 #if (LINUX_VERSION_CODE >= 0x020406)
5136 return 0;
5137 #endif
5141 #if (LINUX_VERSION_CODE >= 0x020406)
5142 static int bcm5700_resume(struct pci_dev *pdev)
5143 #else
5144 static void bcm5700_resume(struct pci_dev *pdev)
5145 #endif
5147 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
5148 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
5149 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
5151 if (!netif_running(dev))
5152 #if (LINUX_VERSION_CODE >= 0x020406)
5153 return 0;
5154 #else
5155 return;
5156 #endif
5157 /* pci_power_on(pdev);*/
5158 netif_device_attach(dev);
5159 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
5160 MM_InitializeUmPackets(pDevice);
5161 bcm5700_reset(dev);
5162 #if (LINUX_VERSION_CODE >= 0x020406)
5163 return 0;
5164 #endif
/* PCI driver registration table.  GNU-style `field:` initializers are
 * kept for compatibility with the old compilers these kernels target. */
static struct pci_driver bcm5700_pci_driver = {
	name:		bcm5700_driver,
	id_table:	bcm5700_pci_tbl,
	probe:		bcm5700_init_one,
	remove:		__devexit_p(bcm5700_remove_one),
	suspend:	bcm5700_suspend,
	resume:		bcm5700_resume,
};
5177 static int
5178 bcm5700_notify_reboot(struct notifier_block *this, unsigned long event, void *unused)
5180 switch (event) {
5181 case SYS_HALT:
5182 case SYS_POWER_OFF:
5183 case SYS_RESTART:
5184 break;
5185 default:
5186 return NOTIFY_DONE;
5189 B57_INFO(("bcm5700 reboot notification\n"));
5190 pci_unregister_driver(&bcm5700_pci_driver);
5191 return NOTIFY_DONE;
5194 static int __init bcm5700_init_module (void)
5196 if (msglevel != 0xdeadbeef) {
5197 b57_msg_level = msglevel;
5198 printf("%s: msglevel set to 0x%x\n", __FUNCTION__, b57_msg_level);
5199 } else
5200 b57_msg_level = B57_ERR_VAL;
5202 return pci_module_init(&bcm5700_pci_driver);
/* Module exit point (2.3+ kernels): drop the /proc notifier (when
 * built in), the reboot notifier, and the PCI driver registration. */
static void __exit bcm5700_cleanup_module (void)
{
#ifdef BCM_PROC_FS
	bcm5700_proc_remove_notifier();
#endif
	unregister_reboot_notifier(&bcm5700_reboot_notifier);
	pci_unregister_driver(&bcm5700_pci_driver);
}
/* Register the module entry/exit points with the 2.3+ driver model. */
module_init(bcm5700_init_module);
module_exit(bcm5700_cleanup_module);
5216 #endif
/* Middle Module */
#ifdef BCM_NAPI_RXPOLL
/* Ask the stack to schedule our NAPI receive poll.  Fails (and returns
 * LM_STATUS_FAILURE) when the poll is already scheduled.
 */
LM_STATUS
MM_ScheduleRxPoll(LM_DEVICE_BLOCK *pDevice)
{
	struct net_device *dev = ((UM_DEVICE_BLOCK *) pDevice)->dev;

	if (!netif_rx_schedule_prep(dev))
		return LM_STATUS_FAILURE;

	__netif_rx_schedule(dev);
	return LM_STATUS_SUCCESS;
}
#endif
5238 LM_STATUS
5239 MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5240 LM_UINT16 *pValue16)
5242 UM_DEVICE_BLOCK *pUmDevice;
5244 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5245 pci_read_config_word(pUmDevice->pdev, Offset, (u16 *) pValue16);
5246 return LM_STATUS_SUCCESS;
5249 LM_STATUS
5250 MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5251 LM_UINT32 *pValue32)
5253 UM_DEVICE_BLOCK *pUmDevice;
5255 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5256 pci_read_config_dword(pUmDevice->pdev, Offset, (u32 *) pValue32);
5257 return LM_STATUS_SUCCESS;
5260 LM_STATUS
5261 MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5262 LM_UINT16 Value16)
5264 UM_DEVICE_BLOCK *pUmDevice;
5266 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5267 pci_write_config_word(pUmDevice->pdev, Offset, Value16);
5268 return LM_STATUS_SUCCESS;
5271 LM_STATUS
5272 MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5273 LM_UINT32 Value32)
5275 UM_DEVICE_BLOCK *pUmDevice;
5277 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5278 pci_write_config_dword(pUmDevice->pdev, Offset, Value32);
5279 return LM_STATUS_SUCCESS;
5282 LM_STATUS
5283 MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
5284 PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy,
5285 LM_BOOL Cached)
5287 PLM_VOID pvirt;
5288 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5289 dma_addr_t mapping;
5291 pvirt = pci_alloc_consistent(pUmDevice->pdev, BlockSize,
5292 &mapping);
5293 if (!pvirt) {
5294 return LM_STATUS_FAILURE;
5296 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
5297 pUmDevice->dma_list[pUmDevice->mem_list_num] = mapping;
5298 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = BlockSize;
5299 memset(pvirt, 0, BlockSize);
5300 *pMemoryBlockVirt = (PLM_VOID) pvirt;
5301 MM_SetAddr(pMemoryBlockPhy, mapping);
5302 return LM_STATUS_SUCCESS;
5305 LM_STATUS
5306 MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
5307 PLM_VOID *pMemoryBlockVirt)
5309 PLM_VOID pvirt;
5310 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5313 /* Maximum in slab.c */
5314 if (BlockSize > 131072) {
5315 goto MM_Alloc_error;
5318 pvirt = kmalloc(BlockSize, GFP_ATOMIC);
5319 if (!pvirt) {
5320 goto MM_Alloc_error;
5322 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
5323 pUmDevice->dma_list[pUmDevice->mem_list_num] = 0;
5324 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = 0;
5325 /* mem_size_list[i] == 0 indicates that the memory should be freed */
5326 /* using kfree */
5327 memset(pvirt, 0, BlockSize);
5328 *pMemoryBlockVirt = pvirt;
5329 return LM_STATUS_SUCCESS;
5331 MM_Alloc_error:
5332 printk(KERN_WARNING "%s: Memory allocation failed - buffer parameters may be set too high\n", pUmDevice->dev->name);
5333 return LM_STATUS_FAILURE;
5336 LM_STATUS
5337 MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
5339 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5341 pDevice->pMappedMemBase = ioremap_nocache(
5342 pci_resource_start(pUmDevice->pdev, 0), sizeof(T3_STD_MEM_MAP));
5343 if (pDevice->pMappedMemBase == 0)
5344 return LM_STATUS_FAILURE;
5346 return LM_STATUS_SUCCESS;
5349 LM_STATUS
5350 MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice)
5352 unsigned int i;
5353 struct sk_buff *skb;
5354 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5355 PUM_PACKET pUmPacket;
5356 PLM_PACKET pPacket;
5358 for (i = 0; i < pDevice->RxPacketDescCnt; i++) {
5359 pPacket = QQ_PopHead(&pDevice->RxPacketFreeQ.Container);
5360 pUmPacket = (PUM_PACKET) pPacket;
5361 if (pPacket == 0) {
5362 printk(KERN_DEBUG "Bad RxPacketFreeQ\n");
5364 if (pUmPacket->skbuff == 0) {
5365 #ifdef BCM_WL_EMULATOR
5366 skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2);
5367 #else
5368 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR);
5369 #endif
5370 if (skb == 0) {
5371 pUmPacket->skbuff = 0;
5372 QQ_PushTail(
5373 &pUmDevice->rx_out_of_buf_q.Container,
5374 pPacket);
5375 continue;
5377 pUmPacket->skbuff = skb;
5378 skb->dev = pUmDevice->dev;
5379 #ifndef BCM_WL_EMULATOR
5380 skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
5381 #endif
5383 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5385 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
5386 /* reallocate buffers in the ISR */
5387 pUmDevice->rx_buf_repl_thresh = 0;
5388 pUmDevice->rx_buf_repl_panic_thresh = 0;
5389 pUmDevice->rx_buf_repl_isr_limit = 0;
5391 else {
5392 pUmDevice->rx_buf_repl_thresh = pDevice->RxPacketDescCnt / 8;
5393 pUmDevice->rx_buf_repl_panic_thresh =
5394 pDevice->RxPacketDescCnt * 7 / 8;
5396 /* This limits the time spent in the ISR when the receiver */
5397 /* is in a steady state of being overrun. */
5398 pUmDevice->rx_buf_repl_isr_limit = pDevice->RxPacketDescCnt / 8;
5400 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
5401 if (pDevice->RxJumboDescCnt != 0) {
5402 if (pUmDevice->rx_buf_repl_thresh >=
5403 pDevice->RxJumboDescCnt) {
5405 pUmDevice->rx_buf_repl_thresh =
5406 pUmDevice->rx_buf_repl_panic_thresh =
5407 pDevice->RxJumboDescCnt - 1;
5409 if (pUmDevice->rx_buf_repl_thresh >=
5410 pDevice->RxStdDescCnt) {
5412 pUmDevice->rx_buf_repl_thresh =
5413 pUmDevice->rx_buf_repl_panic_thresh =
5414 pDevice->RxStdDescCnt - 1;
5417 #endif
5419 return LM_STATUS_SUCCESS;
5422 LM_STATUS
5423 MM_GetConfig(PLM_DEVICE_BLOCK pDevice)
5425 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5426 int index = pUmDevice->index;
5427 struct net_device *dev = pUmDevice->dev;
5429 if (index >= MAX_UNITS)
5430 return LM_STATUS_SUCCESS;
5432 #if LINUX_KERNEL_VERSION < 0x0020609
5434 bcm5700_validate_param_range(pUmDevice, &auto_speed[index], "auto_speed",
5435 0, 1, 1);
5436 if (auto_speed[index] == 0)
5437 pDevice->DisableAutoNeg = TRUE;
5438 else
5439 pDevice->DisableAutoNeg = FALSE;
5441 if (line_speed[index] == 0) {
5442 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5443 pDevice->DisableAutoNeg = FALSE;
5445 else {
5446 bcm5700_validate_param_range(pUmDevice, &full_duplex[index],
5447 "full_duplex", 0, 1, 1);
5448 if (full_duplex[index]) {
5449 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
5451 else {
5452 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_HALF;
5455 if (line_speed[index] == 1000) {
5456 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
5457 if (pDevice->PhyFlags & PHY_NO_GIGABIT) {
5458 pDevice->RequestedLineSpeed =
5459 LM_LINE_SPEED_100MBPS;
5460 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (1000), using 100\n", bcm5700_driver, index);
5462 else {
5463 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
5464 !full_duplex[index]) {
5465 printk(KERN_WARNING "%s-%d: Invalid full_duplex parameter (0) for fiber, using 1\n", bcm5700_driver, index);
5466 pDevice->RequestedDuplexMode =
5467 LM_DUPLEX_MODE_FULL;
5470 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
5471 !auto_speed[index] && !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
5472 printk(KERN_WARNING "%s-%d: Invalid auto_speed parameter (0) for copper, using 1\n", bcm5700_driver, index);
5473 pDevice->DisableAutoNeg = FALSE;
5477 else if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
5478 (pDevice->PhyFlags & PHY_IS_FIBER)){
5479 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5480 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
5481 pDevice->DisableAutoNeg = FALSE;
5482 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using auto\n", bcm5700_driver, index, line_speed[index]);
5484 else if (line_speed[index] == 100) {
5486 pDevice->RequestedLineSpeed = LM_LINE_SPEED_100MBPS;
5488 else if (line_speed[index] == 10) {
5490 pDevice->RequestedLineSpeed = LM_LINE_SPEED_10MBPS;
5492 else {
5493 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5494 pDevice->DisableAutoNeg = FALSE;
5495 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n", bcm5700_driver, index, line_speed[index]);
5500 #endif /* LINUX_KERNEL_VERSION */
5502 /* This is an unmanageable switch nic and will have link problems if
5503 not set to auto
5505 if(pDevice->SubsystemVendorId==0x103c && pDevice->SubsystemId==0x3226)
5507 if(pDevice->RequestedLineSpeed != LM_LINE_SPEED_AUTO)
5509 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n",
5510 bcm5700_driver, index, line_speed[index]);
5512 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5513 pDevice->DisableAutoNeg = FALSE;
5516 #if LINUX_KERNEL_VERSION < 0x0020609
5518 pDevice->FlowControlCap = 0;
5519 bcm5700_validate_param_range(pUmDevice, &rx_flow_control[index],
5520 "rx_flow_control", 0, 1, 0);
5521 if (rx_flow_control[index] != 0) {
5522 pDevice->FlowControlCap |= LM_FLOW_CONTROL_RECEIVE_PAUSE;
5524 bcm5700_validate_param_range(pUmDevice, &tx_flow_control[index],
5525 "tx_flow_control", 0, 1, 0);
5526 if (tx_flow_control[index] != 0) {
5527 pDevice->FlowControlCap |= LM_FLOW_CONTROL_TRANSMIT_PAUSE;
5529 bcm5700_validate_param_range(pUmDevice, &auto_flow_control[index],
5530 "auto_flow_control", 0, 1, 0);
5531 if (auto_flow_control[index] != 0) {
5532 if (pDevice->DisableAutoNeg == FALSE) {
5534 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
5535 if ((tx_flow_control[index] == 0) &&
5536 (rx_flow_control[index] == 0)) {
5538 pDevice->FlowControlCap |=
5539 LM_FLOW_CONTROL_TRANSMIT_PAUSE |
5540 LM_FLOW_CONTROL_RECEIVE_PAUSE;
5545 if (dev->mtu > 1500) {
5546 #ifdef BCM_TSO
5547 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
5548 (dev->features & NETIF_F_TSO)) {
5549 dev->features &= ~NETIF_F_TSO;
5550 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
5552 #endif
5553 pDevice->RxMtu = dev->mtu + 14;
5556 if ((T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5700) &&
5557 !(pDevice->Flags & BCM5788_FLAG)) {
5558 pDevice->Flags |= USE_TAGGED_STATUS_FLAG;
5559 pUmDevice->timer_interval = HZ;
5560 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) &&
5561 (pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
5562 pUmDevice->timer_interval = HZ/4;
5565 else {
5566 pUmDevice->timer_interval = HZ/10;
5569 bcm5700_validate_param_range(pUmDevice, &tx_pkt_desc_cnt[index],
5570 "tx_pkt_desc_cnt", 1, MAX_TX_PACKET_DESC_COUNT-1, TX_DESC_CNT);
5571 pDevice->TxPacketDescCnt = tx_pkt_desc_cnt[index];
5572 bcm5700_validate_param_range(pUmDevice, &rx_std_desc_cnt[index],
5573 "rx_std_desc_cnt", 1, T3_STD_RCV_RCB_ENTRY_COUNT-1,
5574 RX_DESC_CNT);
5575 pDevice->RxStdDescCnt = rx_std_desc_cnt[index];
5577 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
5578 bcm5700_validate_param_range(pUmDevice, &rx_jumbo_desc_cnt[index],
5579 "rx_jumbo_desc_cnt", 1, T3_JUMBO_RCV_RCB_ENTRY_COUNT-1,
5580 JBO_DESC_CNT);
5582 if (mtu[index] <= 1514)
5583 pDevice->RxJumboDescCnt = 0;
5584 else if(!T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
5585 pDevice->RxJumboDescCnt = rx_jumbo_desc_cnt[index];
5587 #endif
5589 #ifdef BCM_INT_COAL
5590 bcm5700_validate_param_range(pUmDevice, &adaptive_coalesce[index],
5591 "adaptive_coalesce", 0, 1, 1);
5592 #ifdef BCM_NAPI_RXPOLL
5593 if (adaptive_coalesce[index]) {
5594 printk(KERN_WARNING "%s-%d: adaptive_coalesce not used in NAPI mode\n", bcm5700_driver, index);
5595 adaptive_coalesce[index] = 0;
5598 #endif
5599 pUmDevice->adaptive_coalesce = adaptive_coalesce[index];
5600 if (!pUmDevice->adaptive_coalesce) {
5601 bcm5700_validate_param_range(pUmDevice,
5602 &rx_coalesce_ticks[index], "rx_coalesce_ticks", 0,
5603 MAX_RX_COALESCING_TICKS, RX_COAL_TK);
5604 if ((rx_coalesce_ticks[index] == 0) &&
5605 (rx_max_coalesce_frames[index] == 0)) {
5607 printk(KERN_WARNING "%s-%d: Conflicting rx_coalesce_ticks (0) and rx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5608 bcm5700_driver, index, RX_COAL_TK, RX_COAL_FM);
5610 rx_coalesce_ticks[index] = RX_COAL_TK;
5611 rx_max_coalesce_frames[index] = RX_COAL_FM;
5613 pDevice->RxCoalescingTicks = pUmDevice->rx_curr_coalesce_ticks =
5614 rx_coalesce_ticks[index];
5615 #ifdef BCM_NAPI_RXPOLL
5616 pDevice->RxCoalescingTicksDuringInt = rx_coalesce_ticks[index];
5617 #endif
5619 bcm5700_validate_param_range(pUmDevice,
5620 &rx_max_coalesce_frames[index],
5621 "rx_max_coalesce_frames", 0,
5622 MAX_RX_MAX_COALESCED_FRAMES, RX_COAL_FM);
5624 pDevice->RxMaxCoalescedFrames =
5625 pUmDevice->rx_curr_coalesce_frames =
5626 rx_max_coalesce_frames[index];
5627 #ifdef BCM_NAPI_RXPOLL
5628 pDevice->RxMaxCoalescedFramesDuringInt =
5629 rx_max_coalesce_frames[index];
5630 #endif
5632 bcm5700_validate_param_range(pUmDevice,
5633 &tx_coalesce_ticks[index], "tx_coalesce_ticks", 0,
5634 MAX_TX_COALESCING_TICKS, TX_COAL_TK);
5635 if ((tx_coalesce_ticks[index] == 0) &&
5636 (tx_max_coalesce_frames[index] == 0)) {
5638 printk(KERN_WARNING "%s-%d: Conflicting tx_coalesce_ticks (0) and tx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5639 bcm5700_driver, index, TX_COAL_TK, TX_COAL_FM);
5641 tx_coalesce_ticks[index] = TX_COAL_TK;
5642 tx_max_coalesce_frames[index] = TX_COAL_FM;
5644 pDevice->TxCoalescingTicks = tx_coalesce_ticks[index];
5645 bcm5700_validate_param_range(pUmDevice,
5646 &tx_max_coalesce_frames[index],
5647 "tx_max_coalesce_frames", 0,
5648 MAX_TX_MAX_COALESCED_FRAMES, TX_COAL_FM);
5649 pDevice->TxMaxCoalescedFrames = tx_max_coalesce_frames[index];
5650 pUmDevice->tx_curr_coalesce_frames =
5651 pDevice->TxMaxCoalescedFrames;
5653 bcm5700_validate_param_range(pUmDevice,
5654 &stats_coalesce_ticks[index], "stats_coalesce_ticks",
5655 0, MAX_STATS_COALESCING_TICKS, ST_COAL_TK);
5656 if (adaptive_coalesce[index]) {
5657 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter set with with adaptive_coalesce parameter. Using adaptive_coalesce.\n", bcm5700_driver, index);
5658 }else{
5659 if ((stats_coalesce_ticks[index] > 0) &&
5660 (stats_coalesce_ticks[index] < 100)) {
5661 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter (%u), using 100\n", bcm5700_driver, index, (unsigned int) stats_coalesce_ticks[index]);
5662 stats_coalesce_ticks[index] = 100;
5663 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5664 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5668 else {
5669 pUmDevice->rx_curr_coalesce_frames = RX_COAL_FM;
5670 pUmDevice->rx_curr_coalesce_ticks = RX_COAL_TK;
5671 pUmDevice->tx_curr_coalesce_frames = TX_COAL_FM;
5673 #endif
5675 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
5676 unsigned int tmpvar;
5678 tmpvar = pDevice->StatsCoalescingTicks / BCM_TIMER_GRANULARITY;
5681 * If the result is zero, the request is too demanding.
5683 if (tmpvar == 0) {
5684 tmpvar = 1;
5687 pDevice->StatsCoalescingTicks = tmpvar * BCM_TIMER_GRANULARITY;
5689 pUmDevice->statstimer_interval = tmpvar;
5692 #ifdef BCM_WOL
5693 bcm5700_validate_param_range(pUmDevice, &enable_wol[index],
5694 "enable_wol", 0, 1, 0);
5695 if (enable_wol[index]) {
5696 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
5697 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
5699 #endif
5700 #ifdef INCLUDE_TBI_SUPPORT
5701 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
5702 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) ||
5703 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703)) {
5704 /* just poll since we have hardware autoneg. in 5704 */
5705 pDevice->TbiFlags |= TBI_PURE_POLLING_FLAG;
5707 else {
5708 pDevice->TbiFlags |= TBI_POLLING_INTR_FLAG;
5711 #endif
5712 bcm5700_validate_param_range(pUmDevice, &scatter_gather[index],
5713 "scatter_gather", 0, 1, 1);
5714 bcm5700_validate_param_range(pUmDevice, &tx_checksum[index],
5715 "tx_checksum", 0, 1, 1);
5716 bcm5700_validate_param_range(pUmDevice, &rx_checksum[index],
5717 "rx_checksum", 0, 1, 1);
5718 if (!(pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
5719 if (tx_checksum[index] || rx_checksum[index]) {
5721 pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE;
5722 printk(KERN_WARNING "%s-%d: Checksum offload not available on this NIC\n", bcm5700_driver, index);
5725 else {
5726 if (rx_checksum[index]) {
5727 pDevice->TaskToOffload |=
5728 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
5729 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
5731 if (tx_checksum[index]) {
5732 pDevice->TaskToOffload |=
5733 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
5734 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
5735 pDevice->Flags |= NO_TX_PSEUDO_HDR_CSUM_FLAG;
5738 #ifdef BCM_TSO
5739 bcm5700_validate_param_range(pUmDevice, &enable_tso[index],
5740 "enable_tso", 0, 1, 1);
5742 /* Always enable TSO firmware if supported */
5743 /* This way we can turn it on or off on the fly */
5744 if (pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TCP_SEGMENTATION)
5746 pDevice->TaskToOffload |=
5747 LM_TASK_OFFLOAD_TCP_SEGMENTATION;
5749 if (enable_tso[index] &&
5750 !(pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION))
5752 printk(KERN_WARNING "%s-%d: TSO not available on this NIC\n", bcm5700_driver, index);
5754 #endif
5755 #ifdef BCM_ASF
5756 bcm5700_validate_param_range(pUmDevice, &vlan_tag_mode[index],
5757 "vlan_strip_mode", 0, 2, 0);
5758 pUmDevice->vlan_tag_mode = vlan_tag_mode[index];
5759 #else
5760 pUmDevice->vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
5761 #endif
5763 #endif /* LINUX_KERNEL_VERSION */
5765 #ifdef BCM_NIC_SEND_BD
5766 bcm5700_validate_param_range(pUmDevice, &nic_tx_bd[index], "nic_tx_bd",
5767 0, 1, 0);
5768 if (nic_tx_bd[index])
5769 pDevice->Flags |= NIC_SEND_BD_FLAG;
5770 if ((pDevice->Flags & ENABLE_PCIX_FIX_FLAG) ||
5771 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5705)) {
5772 if (pDevice->Flags & NIC_SEND_BD_FLAG) {
5773 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
5774 printk(KERN_WARNING "%s-%d: Nic Send BDs not available on this NIC or not possible on this system\n", bcm5700_driver, index);
5777 #endif
5778 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
5779 bcm5700_validate_param_range(pUmDevice, &disable_msi[pUmDevice->index],
5780 "disable_msi", 0, 1, 0);
5781 #endif
5783 bcm5700_validate_param_range(pUmDevice, &delay_link[index],
5784 "delay_link", 0, 1, 0);
5786 bcm5700_validate_param_range(pUmDevice, &disable_d3hot[index],
5787 "disable_d3hot", 0, 1, 0);
5788 if (disable_d3hot[index]) {
5790 #ifdef BCM_WOL
5791 if (enable_wol[index]) {
5792 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
5793 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
5794 printk(KERN_WARNING "%s-%d: Wake-On-Lan disabled because D3Hot is disabled\n", bcm5700_driver, index);
5796 #endif
5797 pDevice->Flags |= DISABLE_D3HOT_FLAG;
5800 return LM_STATUS_SUCCESS;
/* From include/proto/ethernet.h */
#define ETHER_TYPE_8021Q	0x8100		/* 802.1Q */

/* From include/proto/vlan.h */
#define VLAN_PRI_MASK	7	/* 3 bits of priority */
#define VLAN_PRI_SHIFT	13

/* Replace the 3-bit priority field in a 802.1Q VLAN tag.
 * Arguments are fully parenthesized: without the parentheses an
 * expression argument such as "a | b" would bind incorrectly against
 * the "<<" operator and corrupt the tag.
 */
#define UPD_VLANTAG_PRIO(tag, prio) do { \
	(tag) &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); \
	(tag) |= (prio) << VLAN_PRI_SHIFT; \
} while (0)
/* Drain the hardware's received-packet queue and hand each frame to the
 * network stack.
 *
 * For every packet popped from RxPacketReceivedQ: unmap its DMA buffer,
 * validate status/size, optionally compute QoS priority and checksum
 * status, then deliver via NICE hook / VLAN acceleration / emulator /
 * netif_rx, and finally allocate a replacement skb for the descriptor
 * (the "drop_rx" tail runs for delivered AND dropped packets).
 *
 * Returns LM_STATUS_SUCCESS unconditionally.
 */
LM_STATUS
MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice)
{
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
	PLM_PACKET pPacket;
	PUM_PACKET pUmPacket;
	struct sk_buff *skb;
	int size;
	int vlan_tag_size = 0;
	uint16 dscp_prio;

	/* If the chip leaves the VLAN tag in the frame, a good frame may be
	 * up to 4 bytes longer than RxMtu. */
	if (pDevice->ReceiveMask & LM_KEEP_VLAN_TAG)
		vlan_tag_size = 4;

	while (1) {
		pPacket = (PLM_PACKET)
			QQ_PopHead(&pDevice->RxPacketReceivedQ.Container);
		if (pPacket == 0)
			break;
		pUmPacket = (PUM_PACKET) pPacket;
#if !defined(NO_PCI_UNMAP)
		/* Buffer is done with DMA; make it CPU-visible. */
		pci_unmap_single(pUmDevice->pdev,
				pci_unmap_addr(pUmPacket, map[0]),
				pPacket->u.Rx.RxBufferSize,
				PCI_DMA_FROMDEVICE);
#endif
		if ((pPacket->PacketStatus != LM_STATUS_SUCCESS) ||
			((size = pPacket->PacketSize) >
			(pDevice->RxMtu + vlan_tag_size))) {

			/* Bad or oversized frame: reuse the skb by requeuing
			 * the descriptor without freeing the buffer. */
#ifdef BCM_TASKLET
			QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
#else
			QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
#endif
			pUmDevice->rx_misc_errors++;
			continue;
		}
		skb = pUmPacket->skbuff;
		skb_put(skb, size);
		skb->pkt_type = 0;

#ifdef HNDCTF
		/* Cut-through forwarding: if CTF takes the frame, skip the
		 * normal stack delivery but still account and refill. */
		if (CTF_ENAB(pUmDevice->cih)) {
			if (ctf_forward(pUmDevice->cih, skb, skb->dev) != BCME_ERROR) {
				pUmDevice->dev->last_rx = jiffies;
				pUmDevice->stats.rx_bytes += skb->len;
				goto drop_rx;
			}

			/* clear skipct flag before sending up */
			PKTCLRSKIPCT(pUmDevice->osh, skb);
		}
#endif /* HNDCTF */

		/* Extract priority from payload and put it in skb->priority */
		dscp_prio = 0;
		if (pUmDevice->qos) {
			uint rc;

			rc = pktsetprio(skb, TRUE);
			if (rc & (PKTPRIO_VDSCP | PKTPRIO_DSCP))
				dscp_prio = rc & VLAN_PRI_MASK;
			if (rc != 0)
				B57_INFO(("pktsetprio returned 0x%x, skb->priority: %d\n",
					rc, skb->priority));
		}
		skb->protocol = eth_type_trans(skb, skb->dev);

		/* A frame longer than RxMtu is only acceptable when it
		 * carries a VLAN tag (see vlan_tag_size above). */
		if (size > pDevice->RxMtu) {
			/* Make sure we have a valid VLAN tag */
			if (htons(skb->protocol) != ETHER_TYPE_8021Q) {
				dev_kfree_skb_irq(skb);
				pUmDevice->rx_misc_errors++;
				goto drop_rx;
			}
		}

		pUmDevice->stats.rx_bytes += skb->len;

		/* Propagate hardware RX checksum result when offload is on;
		 * 0xffff means the TCP/UDP checksum verified good. */
		if ((pPacket->Flags & RCV_BD_FLAG_TCP_UDP_CHKSUM_FIELD) &&
			(pDevice->TaskToOffload &
				LM_TASK_OFFLOAD_RX_TCP_CHECKSUM)) {
			if (pPacket->u.Rx.TcpUdpChecksum == 0xffff) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
#if TIGON3_DEBUG
				pUmDevice->rx_good_chksum_count++;
#endif
			}
			else {
				skb->ip_summed = CHECKSUM_NONE;
				pUmDevice->rx_bad_chksum_count++;
			}
		}
		else {
			skb->ip_summed = CHECKSUM_NONE;
		}

#ifdef NICE_SUPPORT
		/* NICE intercept: stash the VLAN tag in skb->cb and hand the
		 * frame to the registered receive hook instead of the stack. */
		if( pUmDevice->nice_rx ) {
			vlan_tag_t *vlan_tag;

			vlan_tag = (vlan_tag_t *) &skb->cb[0];
			if (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG) {
				vlan_tag->signature = 0x7777;
				vlan_tag->tag = pPacket->VlanTag;
				/* Override vlan priority with dscp priority */
				if (dscp_prio)
					UPD_VLANTAG_PRIO(vlan_tag->tag, dscp_prio);
			} else {
				vlan_tag->signature = 0;
			}
			pUmDevice->nice_rx(skb, pUmDevice->nice_ctx);
		} else
#endif
		{
#ifdef BCM_VLAN
			if (pUmDevice->vlgrp &&
				(pPacket->Flags & RCV_BD_FLAG_VLAN_TAG)) {
				/* Override vlan priority with dscp priority */
				if (dscp_prio)
					UPD_VLANTAG_PRIO(pPacket->VlanTag, dscp_prio);
#ifdef BCM_NAPI_RXPOLL
				vlan_hwaccel_receive_skb(skb, pUmDevice->vlgrp,
					pPacket->VlanTag);
#else
				vlan_hwaccel_rx(skb, pUmDevice->vlgrp,
					pPacket->VlanTag);
#endif
			} else
#endif
			{
#ifdef BCM_WL_EMULATOR
				if(pDevice->wl_emulate_rx) {
					/* bcmstats("emu recv %d %d"); */
					wlcemu_receive_skb(pDevice->wlc, skb);
					/* bcmstats("emu recv end %d %d"); */
				}
				else
#endif /* BCM_WL_EMULATOR */
				{
#ifdef BCM_NAPI_RXPOLL
					netif_receive_skb(skb);
#else
					netif_rx(skb);
#endif
				}
			}
		}

		pUmDevice->dev->last_rx = jiffies;

		/* Refill path: the descriptor needs a fresh skb whether the
		 * old one was delivered upward or dropped. */
drop_rx:
#ifdef BCM_TASKLET
		/* Defer allocation to the tasklet. */
		pUmPacket->skbuff = 0;
		QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
#else
#ifdef BCM_WL_EMULATOR
		skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2);
#else
		skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR);
#endif /* BCM_WL_EMULATOR */
		if (skb == 0) {
			/* Out of memory: park descriptor for later refill. */
			pUmPacket->skbuff = 0;
			QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
		}
		else {
			pUmPacket->skbuff = skb;
			skb->dev = pUmDevice->dev;
#ifndef BCM_WL_EMULATOR
			/* Keep headroom but preserve the required alignment. */
			skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
#endif
			QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
		}
#endif
	}
	return LM_STATUS_SUCCESS;
}
/* Coalesce a fragmented TX skb into a single linear buffer.
 *
 * Unmaps the packet's existing DMA mappings (head plus any page
 * fragments), makes a linear copy with skb_copy(), frees the original
 * skb and attaches the copy with FragCount forced to 1.
 *
 * Returns LM_STATUS_SUCCESS on success; on allocation failure the
 * original skb is freed, pUmPacket->skbuff is cleared, and
 * LM_STATUS_FAILURE is returned (caller must treat the packet as lost).
 */
LM_STATUS
MM_CoalesceTxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
{
	PUM_PACKET pUmPacket = (PUM_PACKET) pPacket;
	struct sk_buff *skb = pUmPacket->skbuff;
	struct sk_buff *nskb;
#if !defined(NO_PCI_UNMAP)
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;

	/* Head fragment. */
	pci_unmap_single(pUmDevice->pdev,
			pci_unmap_addr(pUmPacket, map[0]),
			pci_unmap_len(pUmPacket, map_len[0]),
			PCI_DMA_TODEVICE);
#if MAX_SKB_FRAGS
	{
		int i;

		/* Page fragments; mappings are stored at index i + 1. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			pci_unmap_page(pUmDevice->pdev,
				pci_unmap_addr(pUmPacket, map[i + 1]),
				pci_unmap_len(pUmPacket, map_len[i + 1]),
				PCI_DMA_TODEVICE);
		}
	}
#endif
#endif
	if ((nskb = skb_copy(skb, GFP_ATOMIC))) {
		pUmPacket->lm_packet.u.Tx.FragCount = 1;
		dev_kfree_skb(skb);
		pUmPacket->skbuff = nskb;
		return LM_STATUS_SUCCESS;
	}
	dev_kfree_skb(skb);
	pUmPacket->skbuff = 0;
	return LM_STATUS_FAILURE;
}
/* Returns 1 if not all buffers are allocated.
 *
 * Refill RX descriptors that ran out of skbs: for each packet in
 * rx_out_of_buf_q, reuse its skb if one is still attached, otherwise
 * allocate a fresh one. Stops early after allocating 'max' new skbs
 * (max <= 0 means no limit) or on allocation failure. If anything was
 * queued (or the LM layer flagged QueueAgain), repost buffers to the
 * hardware via LM_QueueRxPackets().
 */
STATIC int
replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max)
{
	PLM_PACKET pPacket;
	PUM_PACKET pUmPacket;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	struct sk_buff *skb;
	int queue_rx = 0;
	int alloc_cnt = 0;
	int ret = 0;

	while ((pUmPacket = (PUM_PACKET)
		QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container)) != 0) {
		pPacket = (PLM_PACKET) pUmPacket;
		if (pUmPacket->skbuff) {
			/* reuse an old skb */
			QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
			queue_rx = 1;
			continue;
		}
#ifdef BCM_WL_EMULATOR
		if ((skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2)) == 0)
#else
		if ((skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR)) == 0)
#endif /* BCM_WL_EMULATOR */
		{
			/* Allocation failed: put the descriptor back at the
			 * head so a later pass retries it first. */
			QQ_PushHead(&pUmDevice->rx_out_of_buf_q.Container,
				pPacket);
			ret = 1;
			break;
		}
		pUmPacket->skbuff = skb;
		skb->dev = pUmDevice->dev;
#ifndef BCM_WL_EMULATOR
		/* Keep headroom but preserve the required alignment. */
		skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
#endif
		QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
		queue_rx = 1;
		if (max > 0) {
			alloc_cnt++;
			if (alloc_cnt >= max)
				break;
		}
	}
	if (queue_rx || pDevice->QueueAgain) {
		LM_QueueRxPackets(pDevice);
	}
	return ret;
}
/* Reclaim completed TX packets.
 *
 * Pops every packet from TxPacketXmittedQ, unmaps its DMA buffers
 * (head plus page fragments), frees the skb and returns the descriptor
 * to TxPacketFreeQ. If the queue had been stopped for lack of
 * descriptors (tx_full), wake it once at least half the descriptors
 * are free again. Returns LM_STATUS_SUCCESS unconditionally.
 */
LM_STATUS
MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice)
{
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
	PLM_PACKET pPacket;
	PUM_PACKET pUmPacket;
	struct sk_buff *skb;
#if !defined(NO_PCI_UNMAP) && MAX_SKB_FRAGS
	int i;
#endif

	while (1) {
		pPacket = (PLM_PACKET)
			QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);
		if (pPacket == 0)
			break;
		pUmPacket = (PUM_PACKET) pPacket;
		skb = pUmPacket->skbuff;
#if !defined(NO_PCI_UNMAP)
		pci_unmap_single(pUmDevice->pdev,
				pci_unmap_addr(pUmPacket, map[0]),
				pci_unmap_len(pUmPacket, map_len[0]),
				PCI_DMA_TODEVICE);
#if MAX_SKB_FRAGS
		/* Fragment mappings are stored at index i + 1. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			pci_unmap_page(pUmDevice->pdev,
				pci_unmap_addr(pUmPacket, map[i + 1]),
				pci_unmap_len(pUmPacket, map_len[i + 1]),
				PCI_DMA_TODEVICE);
		}
#endif
#endif
		dev_kfree_skb_irq(skb);
		pUmPacket->skbuff = 0;
		QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
	}
	/* Restart a stalled queue with hysteresis: wait until half the
	 * descriptors are free to avoid rapid stop/wake cycles. */
	if (pUmDevice->tx_full) {
		if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) >=
			(pDevice->TxPacketDescCnt >> 1)) {
			pUmDevice->tx_full = 0;
			netif_wake_queue(pUmDevice->dev);
		}
	}
	return LM_STATUS_SUCCESS;
}
/* Report a link status change to the kernel and the system log.
 *
 * Updates netif carrier state (unless the device is suspended) and
 * prints a one-line message. The B57_INFO calls below deliberately
 * build one log line in fragments, so their order matters.
 * Returns LM_STATUS_SUCCESS unconditionally.
 */
LM_STATUS
MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice, LM_STATUS Status)
{
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
	struct net_device *dev = pUmDevice->dev;
	LM_FLOW_CONTROL flow_control;
	int speed = 0;

	if (!pUmDevice->opened)
		return LM_STATUS_SUCCESS;

	if (!pUmDevice->suspended) {
		if (Status == LM_STATUS_LINK_DOWN) {
			netif_carrier_off(dev);
		}
		else if (Status == LM_STATUS_LINK_ACTIVE) {
			netif_carrier_on(dev);
		}
	}

	/* Delayed indications use slightly different capitalization
	 * ("DOWN"/"UP") than immediate ones ("Down"/"Up"). */
	if (pUmDevice->delayed_link_ind > 0) {
		pUmDevice->delayed_link_ind = 0;
		if (Status == LM_STATUS_LINK_DOWN) {
			B57_INFO(("%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name));
		}
		else if (Status == LM_STATUS_LINK_ACTIVE) {
			B57_INFO(("%s: %s NIC Link is UP, ", bcm5700_driver, dev->name));
		}
	}
	else {
		if (Status == LM_STATUS_LINK_DOWN) {
			B57_INFO(("%s: %s NIC Link is Down\n", bcm5700_driver, dev->name));
		}
		else if (Status == LM_STATUS_LINK_ACTIVE) {
			B57_INFO(("%s: %s NIC Link is Up, ", bcm5700_driver, dev->name));
		}
	}

	/* Link up: append speed, duplex and flow-control details. */
	if (Status == LM_STATUS_LINK_ACTIVE) {
		if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
			speed = 1000;
		else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
			speed = 100;
		else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
			speed = 10;

		B57_INFO(("%d Mbps ", speed));

		if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
			B57_INFO(("full duplex"));
		else
			B57_INFO(("half duplex"));

		flow_control = pDevice->FlowControl &
			(LM_FLOW_CONTROL_RECEIVE_PAUSE |
			LM_FLOW_CONTROL_TRANSMIT_PAUSE);
		if (flow_control) {
			if (flow_control & LM_FLOW_CONTROL_RECEIVE_PAUSE) {
				B57_INFO((", receive "));
				if (flow_control & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
					B57_INFO(("& transmit "));
			}
			else {
				B57_INFO((", transmit "));
			}
			B57_INFO(("flow control ON"));
		}
		B57_INFO(("\n"));
	}
	return LM_STATUS_SUCCESS;
}
/* Unmap the DMA mapping of an RX packet's buffer, if it has one.
 * No-op when the packet has no skb attached or when the platform does
 * not require unmapping (NO_PCI_UNMAP).
 */
void
MM_UnmapRxDma(LM_DEVICE_BLOCK *pDevice, LM_PACKET *pPacket)
{
#if !defined(NO_PCI_UNMAP)
	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
	UM_PACKET *pUmPacket = (UM_PACKET *) pPacket;

	if (!pUmPacket->skbuff)
		return;

	pci_unmap_single(pUmDevice->pdev,
			pci_unmap_addr(pUmPacket, map[0]),
			pPacket->u.Rx.RxBufferSize,
			PCI_DMA_FROMDEVICE);
#endif
}
6218 LM_STATUS
6219 MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
6221 PUM_PACKET pUmPacket;
6222 struct sk_buff *skb;
6224 if (pPacket == 0)
6225 return LM_STATUS_SUCCESS;
6226 pUmPacket = (PUM_PACKET) pPacket;
6227 if ((skb = pUmPacket->skbuff)) {
6228 /* DMA address already unmapped */
6229 dev_kfree_skb(skb);
6231 pUmPacket->skbuff = 0;
6232 return LM_STATUS_SUCCESS;
/* Sleep for approximately 'msec' milliseconds (interruptible).
 *
 * Returns LM_STATUS_FAILURE if the sleep was cut short (schedule_timeout
 * returned a nonzero remainder) or a signal is pending; otherwise
 * LM_STATUS_SUCCESS. NOTE(review): HZ * msec may overflow for very
 * large msec values — callers appear to pass small delays; confirm.
 */
LM_STATUS
MM_Sleep(LM_DEVICE_BLOCK *pDevice, LM_UINT32 msec)
{
	current->state = TASK_INTERRUPTIBLE;
	if (schedule_timeout(HZ * msec / 1000) != 0) {
		return LM_STATUS_FAILURE;
	}
	if (signal_pending(current))
		return LM_STATUS_FAILURE;

	return LM_STATUS_SUCCESS;
}
/* Quiesce the device: disable interrupts, mark the link down, kill any
 * pending tasklet, wait for in-flight poll/interrupt handlers to drain,
 * halt the hardware, and release leftover RX buffers. The ordering is
 * deliberate — interrupts must be off before waiting and halting.
 */
void
bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice)
{
	LM_DEVICE_BLOCK *pDevice = (LM_DEVICE_BLOCK *) pUmDevice;

	bcm5700_intr_off(pUmDevice);
	netif_carrier_off(pUmDevice->dev);
#ifdef BCM_TASKLET
	tasklet_kill(&pUmDevice->tasklet);
#endif
	bcm5700_poll_wait(pUmDevice);

	LM_Halt(pDevice);

	pDevice->InitDone = 0;
	bcm5700_free_remaining_rx_bufs(pUmDevice);
}
6266 void
6267 bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice)
6269 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
6270 UM_PACKET *pUmPacket;
6271 int cnt, i;
6273 cnt = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
6274 for (i = 0; i < cnt; i++) {
6275 if ((pUmPacket =
6276 QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container))
6277 != 0) {
6279 MM_UnmapRxDma(pDevice, (LM_PACKET *) pUmPacket);
6280 MM_FreeRxBuffer(pDevice, &pUmPacket->lm_packet);
6281 QQ_PushTail(&pDevice->RxPacketFreeQ.Container,
6282 pUmPacket);
6287 void
6288 bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
6289 char *param_name, int min, int max, int deflt)
6291 if (((unsigned int) *param < (unsigned int) min) ||
6292 ((unsigned int) *param > (unsigned int) max)) {
6294 printk(KERN_WARNING "%s-%d: Invalid %s parameter (%u), using %u\n", bcm5700_driver, pUmDevice->index, param_name, (unsigned int) *param, (unsigned int) deflt);
6295 *param = deflt;
/* Find the peer net_device of a BCM5704 port.
 *
 * The 5704 is a dual-port chip; the two ports appear as separate PCI
 * functions in the same bus/slot. Walks the driver's global device
 * list (root_tigon3_dev / next_module) looking for a different device
 * in the same bus and slot. Returns NULL for non-5704 chips or when no
 * peer is found.
 */
struct net_device *
bcm5700_find_peer(struct net_device *dev)
{
	struct net_device *tmp_dev;
	UM_DEVICE_BLOCK *pUmDevice, *pUmTmp;
	LM_DEVICE_BLOCK *pDevice;

	tmp_dev = 0;
	pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
	pDevice = &pUmDevice->lm_dev;
	if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
		tmp_dev = root_tigon3_dev;
		while (tmp_dev) {
			pUmTmp = (PUM_DEVICE_BLOCK) tmp_dev->priv;
			/* Peer = different device, same PCI bus and slot. */
			if ((tmp_dev != dev) &&
				(pUmDevice->pdev->bus->number ==
				pUmTmp->pdev->bus->number) &&
				PCI_SLOT(pUmDevice->pdev->devfn) ==
				PCI_SLOT(pUmTmp->pdev->devfn)) {
				break;
			}
			tmp_dev = pUmTmp->next_module;
		}
	}
	return tmp_dev;
}
6327 LM_DEVICE_BLOCK *
6328 MM_FindPeerDev(LM_DEVICE_BLOCK *pDevice)
6330 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
6331 struct net_device *dev = pUmDevice->dev;
6332 struct net_device *peer_dev;
6334 peer_dev = bcm5700_find_peer(dev);
6335 if (!peer_dev)
6336 return 0;
6337 return ((LM_DEVICE_BLOCK *) peer_dev->priv);
6340 int MM_FindCapability(LM_DEVICE_BLOCK *pDevice, int capability)
6342 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
6343 return (pci_find_capability(pUmDevice->pdev, capability));
#if defined(HAVE_POLL_CONTROLLER)||defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll/netconsole hook: service the NIC without relying on normal
 * interrupt delivery. On old Red Hat kernels in netdump mode the IRQ
 * handler (and NAPI poll, if pending) is invoked directly; otherwise
 * the IRQ is masked around a manual call to the interrupt handler.
 */
STATIC void
poll_bcm5700(struct net_device *dev)
{
	UM_DEVICE_BLOCK *pUmDevice = dev->priv;

#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020605)
	if (netdump_mode) {
		bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
#ifdef BCM_NAPI_RXPOLL
		/* Run the NAPI poll only if the device is on a poll list. */
		if (dev->poll_list.prev) {
			int budget = 64;

			bcm5700_poll(dev, &budget);
		}
#endif
	}
	else
#endif
	{
		disable_irq(pUmDevice->pdev->irq);
		bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
		enable_irq(pUmDevice->pdev->irq);
	}
}
#endif