[tomato.git] / release / src / bcm57xx / linux / b57um.c
1 /******************************************************************************/
2 /* */
3 /* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2005 Broadcom */
4 /* Corporation. */
5 /* All rights reserved. */
6 /* */
7 /* This program is free software; you can redistribute it and/or modify */
8 /* it under the terms of the GNU General Public License as published by */
9 /* the Free Software Foundation, located in the file LICENSE. */
10 /* */
11 /******************************************************************************/
13 /* $Id: b57um.c,v 1.25 2007/04/04 00:19:14 Exp $ */
15 char bcm5700_driver[] = "bcm5700";
16 char bcm5700_version[] = "8.3.14";
17 char bcm5700_date[] = "(11/2/05)";
19 #define B57UM
20 #include "mm.h"
22 #include "typedefs.h"
23 #include <epivers.h>
24 #include "osl.h"
25 #include "bcmdefs.h"
26 #include "bcmdevs.h"
27 #include "bcmutils.h"
28 #include "sbconfig.h"
29 #include "sbutils.h"
30 #include "hndgige.h"
31 #include "etioctl.h"
32 #include "bcmrobo.h"
 34 /* this is needed to get good and stable performance */
35 #define EXTRA_HDR BCMEXTRAHDROOM
37 /* A few user-configurable values. */
39 #define MAX_UNITS 16
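/* Each per-adapter option below is an array indexed 0..MAX_UNITS-1 (one entry per
   NIC). Values can typically be supplied as comma-separated lists at module load
   time, e.g. line_speed=1000,100 (illustrative example). */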
40 /* Used to pass the full-duplex flag, etc. */
41 static int line_speed[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
42 static int auto_speed[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
43 static int full_duplex[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
44 static int rx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
45 static int tx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
46 static int auto_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
47 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
48 static int mtu[MAX_UNITS] = {1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500}; /* Jumbo MTU for interfaces. */
49 #endif
50 static int tx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
51 static int rx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
52 static int scatter_gather[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
54 #define TX_DESC_CNT DEFAULT_TX_PACKET_DESC_COUNT
55 static unsigned int tx_pkt_desc_cnt[MAX_UNITS] =
56 {TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
57 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
58 TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
59 TX_DESC_CNT};
61 #define RX_DESC_CNT DEFAULT_STD_RCV_DESC_COUNT
62 static unsigned int rx_std_desc_cnt[MAX_UNITS] =
63 {RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
64 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
65 RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
66 RX_DESC_CNT };
68 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
69 #define JBO_DESC_CNT DEFAULT_JUMBO_RCV_DESC_COUNT
70 static unsigned int rx_jumbo_desc_cnt[MAX_UNITS] =
71 {JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
72 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
73 JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
74 JBO_DESC_CNT };
75 #endif
77 #ifdef BCM_INT_COAL
78 #ifdef BCM_NAPI_RXPOLL
79 static unsigned int adaptive_coalesce[MAX_UNITS] =
80 {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
81 #else
82 static unsigned int adaptive_coalesce[MAX_UNITS] =
83 {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
84 #endif
86 #define RX_COAL_TK DEFAULT_RX_COALESCING_TICKS
87 static unsigned int rx_coalesce_ticks[MAX_UNITS] =
88 {RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
89 RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
90 RX_COAL_TK,RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
91 RX_COAL_TK};
93 #define RX_COAL_FM DEFAULT_RX_MAX_COALESCED_FRAMES
94 static unsigned int rx_max_coalesce_frames[MAX_UNITS] =
95 {RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
96 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
97 RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
98 RX_COAL_FM};
100 #define TX_COAL_TK DEFAULT_TX_COALESCING_TICKS
101 static unsigned int tx_coalesce_ticks[MAX_UNITS] =
102 {TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
103 TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
104 TX_COAL_TK,TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
105 TX_COAL_TK};
107 #define TX_COAL_FM DEFAULT_TX_MAX_COALESCED_FRAMES
108 static unsigned int tx_max_coalesce_frames[MAX_UNITS] =
109 {TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
110 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
111 TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
112 TX_COAL_FM};
114 #define ST_COAL_TK DEFAULT_STATS_COALESCING_TICKS
115 static unsigned int stats_coalesce_ticks[MAX_UNITS] =
116 {ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
117 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
118 ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
119 ST_COAL_TK,};
121 #endif
122 #ifdef BCM_WOL
123 static int enable_wol[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
124 #endif
125 #ifdef BCM_TSO
126 static int enable_tso[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
127 #endif
128 #ifdef BCM_NIC_SEND_BD
129 static int nic_tx_bd[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
130 #endif
131 #ifdef BCM_ASF
132 static int vlan_tag_mode[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
133 #endif
134 static int delay_link[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
135 static int disable_d3hot[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
137 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
138 static int disable_msi[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
139 static int bcm_msi_chipset_bug = 0;
140 #endif
142 #define BCM_TIMER_GRANULARITY (1000000 / HZ)
 144 /* Hack to hook the data path to the BCM WL driver */
145 #ifdef BCM_WL_EMULATOR
146 #include "bcmnvram.h"
147 #include "wl_bcm57emu.h"
148 #ifdef SKB_MANAGER
149 int skb_old_alloc = 0;
150 #endif
151 #endif /* BCM_WL_EMULATOR */
153 /* Operational parameters that usually are not changed. */
154 /* Time in jiffies before concluding the transmitter is hung. */
155 #define TX_TIMEOUT (2*HZ)
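/* Compatibility shims for older (pre-2.4) kernels: provide pci_resource_start(),
   the netif_*_queue() helpers and a minimal tasklet API where the kernel lacks them. */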
157 #if (LINUX_VERSION_CODE < 0x02030d)
158 #define pci_resource_start(dev, bar) (dev->base_address[bar] & PCI_BASE_ADDRESS_MEM_MASK)
159 #elif (LINUX_VERSION_CODE < 0x02032b)
160 #define pci_resource_start(dev, bar) (dev->resource[bar] & PCI_BASE_ADDRESS_MEM_MASK)
161 #endif
163 #if (LINUX_VERSION_CODE < 0x02032b)
164 #define dev_kfree_skb_irq(skb) dev_kfree_skb(skb)
165 #define netif_wake_queue(dev) clear_bit(0, &dev->tbusy); mark_bh(NET_BH)
166 #define netif_stop_queue(dev) set_bit(0, &dev->tbusy)
168 static inline void netif_start_queue(struct net_device *dev)
170 dev->tbusy = 0;
171 dev->interrupt = 0;
172 dev->start = 1;
175 #define netif_queue_stopped(dev) dev->tbusy
176 #define netif_running(dev) dev->start
178 static inline void tasklet_schedule(struct tasklet_struct *tasklet)
180 queue_task(tasklet, &tq_immediate);
181 mark_bh(IMMEDIATE_BH);
184 static inline void tasklet_init(struct tasklet_struct *tasklet,
185 void (*func)(unsigned long),
186 unsigned long data)
188 tasklet->next = NULL;
189 tasklet->sync = 0;
190 tasklet->routine = (void (*)(void *))func;
191 tasklet->data = (void *)data;
194 #define tasklet_kill(tasklet)
196 #endif
198 #if (LINUX_VERSION_CODE < 0x020300)
199 struct pci_device_id {
200 unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */
201 unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */
202 unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */
203 unsigned long driver_data; /* Data private to the driver */
206 #define PCI_ANY_ID 0
208 #define pci_set_drvdata(pdev, dev)
209 #define pci_get_drvdata(pdev) 0
211 #define pci_enable_device(pdev) 0
213 #define __devinit __init
214 #define __devinitdata __initdata
215 #define __devexit
217 #define SET_MODULE_OWNER(dev)
218 #define MODULE_DEVICE_TABLE(pci, pci_tbl)
220 #endif
222 #if (LINUX_VERSION_CODE < 0x020411)
223 #ifndef __devexit_p
224 #define __devexit_p(x) x
225 #endif
226 #endif
228 #ifndef MODULE_LICENSE
229 #define MODULE_LICENSE(license)
230 #endif
232 #ifndef IRQ_RETVAL
233 typedef void irqreturn_t;
234 #define IRQ_RETVAL(x)
235 #endif
237 #if (LINUX_VERSION_CODE < 0x02032a)
238 static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
239 dma_addr_t *dma_handle)
241 void *virt_ptr;
243 /* Maximum in slab.c */
244 if (size > 131072)
245 return 0;
247 virt_ptr = kmalloc(size, GFP_KERNEL);
248 *dma_handle = virt_to_bus(virt_ptr);
249 return virt_ptr;
251 #define pci_free_consistent(dev, size, ptr, dma_ptr) kfree(ptr)
253 #endif /*#if (LINUX_VERSION_CODE < 0x02032a) */
256 #if (LINUX_VERSION_CODE < 0x02040d)
258 #if (LINUX_VERSION_CODE >= 0x020409) && defined(RED_HAT_LINUX_KERNEL)
260 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
261 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
263 #else
 264 /* pci_set_dma_mask uses dma_addr_t */
266 #define BCM_32BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
267 #define BCM_64BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
269 #endif
271 #else /* (LINUX_VERSION_CODE < 0x02040d) */
273 #define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
274 #define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
275 #endif
277 #if (LINUX_VERSION_CODE < 0x020329)
278 #define pci_set_dma_mask(pdev, mask) (0)
279 #else
280 #if (LINUX_VERSION_CODE < 0x020403)
282 pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
284 if(! pci_dma_supported(dev, mask))
285 return -EIO;
287 dev->dma_mask = mask;
289 return 0;
291 #endif
292 #endif
294 #if (LINUX_VERSION_CODE < 0x020547)
295 #define pci_set_consistent_dma_mask(pdev, mask) (0)
296 #endif
298 #if (LINUX_VERSION_CODE < 0x020402)
299 #define pci_request_regions(pdev, name) (0)
300 #define pci_release_regions(pdev)
301 #endif
303 #if !defined(spin_is_locked)
304 #define spin_is_locked(lock) (test_bit(0,(lock)))
305 #endif
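/* The global lock is taken only when do_global_lock is set; bcm5700_init_board()
   sets it for BCM5700-class chips that need serialized register access. */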
307 #define BCM5700_LOCK(pUmDevice, flags) \
308 if ((pUmDevice)->do_global_lock) { \
309 spin_lock_irqsave(&(pUmDevice)->global_lock, flags); \
312 #define BCM5700_UNLOCK(pUmDevice, flags) \
313 if ((pUmDevice)->do_global_lock) { \
314 spin_unlock_irqrestore(&(pUmDevice)->global_lock, flags);\
317 inline void
318 bcm5700_intr_lock(PUM_DEVICE_BLOCK pUmDevice)
320 if (pUmDevice->do_global_lock) {
321 spin_lock(&pUmDevice->global_lock);
325 inline void
326 bcm5700_intr_unlock(PUM_DEVICE_BLOCK pUmDevice)
328 if (pUmDevice->do_global_lock) {
329 spin_unlock(&pUmDevice->global_lock);
333 void
334 bcm5700_intr_off(PUM_DEVICE_BLOCK pUmDevice)
336 atomic_inc(&pUmDevice->intr_sem);
337 LM_DisableInterrupt(&pUmDevice->lm_dev);
338 #if (LINUX_VERSION_CODE >= 0x2051c)
339 synchronize_irq(pUmDevice->dev->irq);
340 #else
341 synchronize_irq();
342 #endif
343 LM_DisableInterrupt(&pUmDevice->lm_dev);
346 void
347 bcm5700_intr_on(PUM_DEVICE_BLOCK pUmDevice)
349 if (atomic_dec_and_test(&pUmDevice->intr_sem)) {
350 LM_EnableInterrupt(&pUmDevice->lm_dev);
355 * Broadcom NIC Extension support
356 * -ffan
358 #ifdef NICE_SUPPORT
359 #include "nicext.h"
361 typedef struct {
362 ushort tag;
363 ushort signature;
364 } vlan_tag_t;
366 #endif /* NICE_SUPPORT */
368 int MM_Packet_Desc_Size = sizeof(UM_PACKET);
370 #if defined(MODULE)
371 MODULE_AUTHOR("Michael Chan <mchan at broadcom dot com> and Gary Zambrano <zambrano at broadcom dot com>");
372 MODULE_DESCRIPTION("BCM5700 Driver");
373 MODULE_LICENSE("GPL");
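/* Module parameters: kernels older than 2.6.5 use MODULE_PARM(), newer kernels use
   module_param_array(); in both cases each option is a per-unit array. */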
375 #if (LINUX_VERSION_CODE < 0x020605)
377 MODULE_PARM(debug, "i");
378 MODULE_PARM(msglevel, "i");
379 MODULE_PARM(line_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
380 MODULE_PARM(auto_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
381 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
382 MODULE_PARM(rx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
383 MODULE_PARM(tx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
384 MODULE_PARM(auto_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
385 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
386 MODULE_PARM(mtu, "1-" __MODULE_STRING(MAX_UNITS) "i");
387 #endif
388 MODULE_PARM(tx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
389 MODULE_PARM(rx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
390 MODULE_PARM(scatter_gather, "1-" __MODULE_STRING(MAX_UNITS) "i");
391 MODULE_PARM(tx_pkt_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
392 MODULE_PARM(rx_std_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
393 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
394 MODULE_PARM(rx_jumbo_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
395 #endif
396 #ifdef BCM_INT_COAL
397 MODULE_PARM(adaptive_coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
398 MODULE_PARM(rx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
399 MODULE_PARM(rx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
400 MODULE_PARM(tx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
401 MODULE_PARM(tx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
402 MODULE_PARM(stats_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
403 #endif
404 #ifdef BCM_WOL
405 MODULE_PARM(enable_wol, "1-" __MODULE_STRING(MAX_UNITS) "i");
406 #endif
407 #ifdef BCM_TSO
408 MODULE_PARM(enable_tso, "1-" __MODULE_STRING(MAX_UNITS) "i");
409 #endif
410 #ifdef BCM_NIC_SEND_BD
411 MODULE_PARM(nic_tx_bd, "1-" __MODULE_STRING(MAX_UNITS) "i");
412 #endif
413 #ifdef BCM_ASF
414 MODULE_PARM(vlan_tag_mode, "1-" __MODULE_STRING(MAX_UNITS) "i");
415 #endif
416 MODULE_PARM(delay_link, "1-" __MODULE_STRING(MAX_UNITS) "i");
417 MODULE_PARM(disable_d3hot, "1-" __MODULE_STRING(MAX_UNITS) "i");
419 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
420 MODULE_PARM(disable_msi, "1-" __MODULE_STRING(MAX_UNITS) "i");
421 #endif
423 #else /* parms*/
425 #if (LINUX_VERSION_CODE >= 0x020605) && (LINUX_VERSION_CODE < 0x02060a)
427 static int var;
429 #define numvar var
431 #endif
433 #if (LINUX_VERSION_CODE >= 0x2060a)
435 #define numvar NULL
437 #endif
439 module_param_array(line_speed, int, numvar, 0);
440 module_param_array(auto_speed, int, numvar, 0);
441 module_param_array(full_duplex, int, numvar, 0);
442 module_param_array(rx_flow_control, int, numvar, 0);
443 module_param_array(tx_flow_control, int, numvar, 0);
444 module_param_array(auto_flow_control, int, numvar, 0);
445 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
446 module_param_array(mtu, int, numvar, 0);
447 #endif
448 module_param_array(tx_checksum, int, numvar, 0);
449 module_param_array(rx_checksum, int, numvar, 0);
450 module_param_array(scatter_gather, int, numvar, 0);
451 module_param_array(tx_pkt_desc_cnt, int, numvar, 0);
452 module_param_array(rx_std_desc_cnt, int, numvar, 0);
453 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
454 module_param_array(rx_jumbo_desc_cnt, int, numvar, 0);
455 #endif
456 #ifdef BCM_INT_COAL
457 module_param_array(adaptive_coalesce, int, numvar, 0);
458 module_param_array(rx_coalesce_ticks, int, numvar, 0);
459 module_param_array(rx_max_coalesce_frames, int, numvar, 0);
460 module_param_array(tx_coalesce_ticks, int, numvar, 0);
461 module_param_array(tx_max_coalesce_frames, int, numvar, 0);
462 module_param_array(stats_coalesce_ticks, int, numvar, 0);
463 #endif
464 #ifdef BCM_WOL
465 module_param_array(enable_wol, int, numvar, 0);
466 #endif
467 #ifdef BCM_TSO
468 module_param_array(enable_tso, int, numvar, 0);
469 #endif
470 #ifdef BCM_NIC_SEND_BD
471 module_param_array(nic_tx_bd, int, numvar, 0);
472 #endif
473 #ifdef BCM_ASF
474 module_param_array(vlan_tag_mode, int, numvar, 0);
475 #endif
476 module_param_array(delay_link, int, numvar, 0);
477 module_param_array(disable_d3hot, int, numvar, 0);
479 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
480 module_param_array(disable_msi, int, numvar, 0);
481 #endif
484 #endif /* params */
487 #endif
489 #define RUN_AT(x) (jiffies + (x))
491 char kernel_version[] = UTS_RELEASE;
493 #define PCI_SUPPORT_VER2
495 #if !defined(CAP_NET_ADMIN)
496 #define capable(CAP_XXX) (suser())
497 #endif
499 #define tigon3_debug debug
500 #if TIGON3_DEBUG
501 static int tigon3_debug = TIGON3_DEBUG;
502 #else
503 static int tigon3_debug = 0;
504 #endif
505 static int msglevel = 0xdeadbeef;
506 int b57_msg_level;
508 int bcm5700_open(struct net_device *dev);
509 STATIC void bcm5700_timer(unsigned long data);
510 STATIC void bcm5700_stats_timer(unsigned long data);
511 STATIC void bcm5700_reset(struct net_device *dev);
512 STATIC int bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev);
513 STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
514 #ifdef BCM_TASKLET
515 STATIC void bcm5700_tasklet(unsigned long data);
516 #endif
517 STATIC int bcm5700_close(struct net_device *dev);
518 STATIC struct net_device_stats *bcm5700_get_stats(struct net_device *dev);
519 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
520 STATIC void bcm5700_do_rx_mode(struct net_device *dev);
521 STATIC void bcm5700_set_rx_mode(struct net_device *dev);
522 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p);
523 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
524 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu);
525 #endif
526 #ifdef BCM_NAPI_RXPOLL
527 STATIC int bcm5700_poll(struct net_device *dev, int *budget);
528 #endif
529 STATIC int replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max);
530 STATIC int bcm5700_freemem(struct net_device *dev);
531 #ifdef NICE_SUPPORT
532 STATIC int bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index);
533 #endif
534 #ifdef BCM_INT_COAL
535 #ifndef BCM_NAPI_RXPOLL
536 STATIC int bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice);
537 #endif
538 #endif
539 STATIC void bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice);
540 STATIC int bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice);
541 #ifdef BCM_VLAN
542 STATIC void bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
543 STATIC void bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid);
544 #endif
545 void bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice);
546 void bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice);
547 void bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
548 char *param_name, int min, int max, int deflt);
550 static int bcm5700_notify_reboot(struct notifier_block *this, unsigned long event, void *unused);
551 static struct notifier_block bcm5700_reboot_notifier = {
552 bcm5700_notify_reboot,
553 NULL,
557 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
558 STATIC void poll_bcm5700(struct net_device *dev);
559 #endif
561 /* A list of all installed bcm5700 devices. */
562 static struct net_device *root_tigon3_dev = NULL;
564 #if defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) ||defined(CONFIG_PPC64)
566 #ifdef NICE_SUPPORT
567 #if (LINUX_VERSION_CODE < 0x20500)
568 extern int register_ioctl32_conversion(unsigned int cmd,
569 int (*handler)(unsigned int, unsigned int, unsigned long,
570 struct file *));
571 int unregister_ioctl32_conversion(unsigned int cmd);
572 #else
573 #include <linux/ioctl32.h>
574 #endif
576 #define BCM_IOCTL32 1
578 atomic_t bcm5700_load_count = ATOMIC_INIT(0);
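/* 32-bit ioctl compatibility handler for SIOCNICE on 64-bit kernels: copy the packed
   32-bit request from user space, rebuild a native struct ifreq, and forward it to
   bcm5700_ioctl() on the interface whose name matches. */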
580 static int
581 bcm5700_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg,
582 struct file *filep)
584 struct ifreq rq;
585 struct net_device *tmp_dev = root_tigon3_dev;
586 int ret;
587 struct nice_req* nrq;
588 struct ifreq_nice32 {
589 char ifnr_name[16];
590 __u32 cmd;
591 __u32 nrq1;
592 __u32 nrq2;
593 __u32 nrq3;
594 } nrq32;
596 if (!capable(CAP_NET_ADMIN))
597 return -EPERM;
599 if (mm_copy_from_user(&nrq32, (char *) arg, 32))
600 return -EFAULT;
602 memcpy(rq.ifr_name, nrq32.ifnr_name, 16);
604 nrq = (struct nice_req*) &rq.ifr_ifru;
605 nrq->cmd = nrq32.cmd;
606 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK) {
607 nrq->nrq_stats_useraddr = (void *) ((__u64) nrq32.nrq1);
608 nrq->nrq_stats_size = nrq32.nrq2;
610 else {
611 memcpy(&nrq->nrq_speed, &nrq32.nrq1, 12);
613 while (tmp_dev) {
614 if (strcmp(rq.ifr_name, tmp_dev->name) == 0) {
615 ret = bcm5700_ioctl(tmp_dev, &rq, cmd);
616 if (ret == 0) {
617 if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK)
618 return ret;
620 memcpy(&nrq32.nrq1, &nrq->nrq_speed, 12);
621 if (mm_copy_to_user((char *) arg, &nrq32, 32))
622 return -EFAULT;
624 return ret;
626 tmp_dev = ((UM_DEVICE_BLOCK *)(tmp_dev->priv))->next_module;
628 return -ENODEV;
630 #endif /* NICE_SUPPORT */
631 #endif
633 typedef enum {
634 BCM5700A6 = 0,
635 BCM5700T6,
636 BCM5700A9,
637 BCM5700T9,
638 BCM5700,
639 BCM5701A5,
640 BCM5701T1,
641 BCM5701T8,
642 BCM5701A7,
643 BCM5701A10,
644 BCM5701A12,
645 BCM5701,
646 BCM5702,
647 BCM5703,
648 BCM5703A31,
649 BCM5703ARBUCKLE,
650 TC996T,
651 TC996ST,
652 TC996SSX,
653 TC996SX,
654 TC996BT,
655 TC997T,
656 TC997SX,
657 TC1000T,
658 TC1000BT,
659 TC940BR01,
660 TC942BR01,
661 TC998T,
662 TC998SX,
663 TC999T,
664 NC6770,
665 NC1020,
666 NC150T,
667 NC7760,
668 NC7761,
669 NC7770,
670 NC7771,
671 NC7780,
672 NC7781,
673 NC7772,
674 NC7782,
675 NC7783,
676 NC320T,
677 NC320I,
678 NC325I,
679 NC324I,
680 NC326I,
681 BCM5704CIOBE,
682 BCM5704,
683 BCM5704S,
684 BCM5705,
685 BCM5705M,
686 BCM5705F,
687 BCM5901,
688 BCM5782,
689 BCM5788,
690 BCM5789,
691 BCM5750,
692 BCM5750M,
693 BCM5720,
694 BCM5751,
695 BCM5751M,
696 BCM5751F,
697 BCM5721,
698 BCM5753,
699 BCM5753M,
700 BCM5753F,
701 BCM5781,
702 BCM5752,
703 BCM5752M,
704 BCM5714,
705 BCM5780,
706 BCM5780S,
707 BCM5715,
708 BCM4785,
709 BCM5903M,
710 UNK5788
711 } board_t;
714 /* indexed by board_t, above */
715 static struct {
716 char *name;
717 } board_info[] __devinitdata = {
718 { "Broadcom BCM5700 1000Base-T" },
719 { "Broadcom BCM5700 1000Base-SX" },
720 { "Broadcom BCM5700 1000Base-SX" },
721 { "Broadcom BCM5700 1000Base-T" },
722 { "Broadcom BCM5700" },
723 { "Broadcom BCM5701 1000Base-T" },
724 { "Broadcom BCM5701 1000Base-T" },
725 { "Broadcom BCM5701 1000Base-T" },
726 { "Broadcom BCM5701 1000Base-SX" },
727 { "Broadcom BCM5701 1000Base-T" },
728 { "Broadcom BCM5701 1000Base-T" },
729 { "Broadcom BCM5701" },
730 { "Broadcom BCM5702 1000Base-T" },
731 { "Broadcom BCM5703 1000Base-T" },
732 { "Broadcom BCM5703 1000Base-SX" },
733 { "Broadcom B5703 1000Base-SX" },
734 { "3Com 3C996 10/100/1000 Server NIC" },
735 { "3Com 3C996 10/100/1000 Server NIC" },
736 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
737 { "3Com 3C996 Gigabit Fiber-SX Server NIC" },
738 { "3Com 3C996B Gigabit Server NIC" },
739 { "3Com 3C997 Gigabit Server NIC" },
740 { "3Com 3C997 Gigabit Fiber-SX Server NIC" },
741 { "3Com 3C1000 Gigabit NIC" },
742 { "3Com 3C1000B-T 10/100/1000 PCI" },
743 { "3Com 3C940 Gigabit LOM (21X21)" },
744 { "3Com 3C942 Gigabit LOM (31X31)" },
745 { "3Com 3C998-T Dual Port 10/100/1000 PCI-X Server NIC" },
746 { "3Com 3C998-SX Dual Port 1000-SX PCI-X Server NIC" },
747 { "3Com 3C999-T Quad Port 10/100/1000 PCI-X Server NIC" },
748 { "HP NC6770 Gigabit Server Adapter" },
749 { "NC1020 HP ProLiant Gigabit Server Adapter 32 PCI" },
750 { "HP ProLiant NC 150T PCI 4-port Gigabit Combo Switch Adapter" },
751 { "HP NC7760 Gigabit Server Adapter" },
752 { "HP NC7761 Gigabit Server Adapter" },
753 { "HP NC7770 Gigabit Server Adapter" },
754 { "HP NC7771 Gigabit Server Adapter" },
755 { "HP NC7780 Gigabit Server Adapter" },
756 { "HP NC7781 Gigabit Server Adapter" },
757 { "HP NC7772 Gigabit Server Adapter" },
758 { "HP NC7782 Gigabit Server Adapter" },
759 { "HP NC7783 Gigabit Server Adapter" },
760 { "HP ProLiant NC 320T PCI Express Gigabit Server Adapter" },
761 { "HP ProLiant NC 320i PCI Express Gigabit Server Adapter" },
762 { "HP NC325i Integrated Dual Port PCI Express Gigabit Server Adapter" },
763 { "HP NC324i Integrated Dual Port PCI Express Gigabit Server Adapter" },
764 { "HP NC326i Integrated Dual Port PCI Express Gigabit Server Adapter" },
765 { "Broadcom BCM5704 CIOB-E 1000Base-T" },
766 { "Broadcom BCM5704 1000Base-T" },
767 { "Broadcom BCM5704 1000Base-SX" },
768 { "Broadcom BCM5705 1000Base-T" },
769 { "Broadcom BCM5705M 1000Base-T" },
770 { "Broadcom 570x 10/100 Integrated Controller" },
771 { "Broadcom BCM5901 100Base-TX" },
772 { "Broadcom NetXtreme Gigabit Ethernet for hp" },
773 { "Broadcom BCM5788 NetLink 1000Base-T" },
774 { "Broadcom BCM5789 NetLink 1000Base-T PCI Express" },
775 { "Broadcom BCM5750 1000Base-T PCI" },
776 { "Broadcom BCM5750M 1000Base-T PCI" },
777 { "Broadcom BCM5720 1000Base-T PCI" },
778 { "Broadcom BCM5751 1000Base-T PCI Express" },
779 { "Broadcom BCM5751M 1000Base-T PCI Express" },
780 { "Broadcom BCM5751F 100Base-TX PCI Express" },
781 { "Broadcom BCM5721 1000Base-T PCI Express" },
782 { "Broadcom BCM5753 1000Base-T PCI Express" },
783 { "Broadcom BCM5753M 1000Base-T PCI Express" },
784 { "Broadcom BCM5753F 100Base-TX PCI Express" },
785 { "Broadcom BCM5781 NetLink 1000Base-T PCI Express" },
786 { "Broadcom BCM5752 1000Base-T PCI Express" },
787 { "Broadcom BCM5752M 1000Base-T PCI Express" },
788 { "Broadcom BCM5714 1000Base-T " },
789 { "Broadcom BCM5780 1000Base-T" },
790 { "Broadcom BCM5780S 1000Base-SX" },
791 { "Broadcom BCM5715 1000Base-T " },
792 { "Broadcom BCM4785 10/100/1000 Integrated Controller" },
793 { "Broadcom BCM5903M Gigabit Ethernet " },
794 { "Unknown BCM5788 Gigabit Ethernet " },
795 { 0 }
798 static struct pci_device_id bcm5700_pci_tbl[] __devinitdata = {
799 {0x14e4, 0x1644, 0x14e4, 0x1644, 0, 0, BCM5700A6 },
800 {0x14e4, 0x1644, 0x14e4, 0x2, 0, 0, BCM5700T6 },
801 {0x14e4, 0x1644, 0x14e4, 0x3, 0, 0, BCM5700A9 },
802 {0x14e4, 0x1644, 0x14e4, 0x4, 0, 0, BCM5700T9 },
803 {0x14e4, 0x1644, 0x1028, 0xd1, 0, 0, BCM5700 },
804 {0x14e4, 0x1644, 0x1028, 0x0106, 0, 0, BCM5700 },
805 {0x14e4, 0x1644, 0x1028, 0x0109, 0, 0, BCM5700 },
806 {0x14e4, 0x1644, 0x1028, 0x010a, 0, 0, BCM5700 },
807 {0x14e4, 0x1644, 0x10b7, 0x1000, 0, 0, TC996T },
808 {0x14e4, 0x1644, 0x10b7, 0x1001, 0, 0, TC996ST },
809 {0x14e4, 0x1644, 0x10b7, 0x1002, 0, 0, TC996SSX },
810 {0x14e4, 0x1644, 0x10b7, 0x1003, 0, 0, TC997T },
811 {0x14e4, 0x1644, 0x10b7, 0x1005, 0, 0, TC997SX },
812 {0x14e4, 0x1644, 0x10b7, 0x1008, 0, 0, TC942BR01 },
813 {0x14e4, 0x1644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5700 },
814 {0x14e4, 0x1645, 0x14e4, 1, 0, 0, BCM5701A5 },
815 {0x14e4, 0x1645, 0x14e4, 5, 0, 0, BCM5701T1 },
816 {0x14e4, 0x1645, 0x14e4, 6, 0, 0, BCM5701T8 },
817 {0x14e4, 0x1645, 0x14e4, 7, 0, 0, BCM5701A7 },
818 {0x14e4, 0x1645, 0x14e4, 8, 0, 0, BCM5701A10 },
819 {0x14e4, 0x1645, 0x14e4, 0x8008, 0, 0, BCM5701A12 },
820 {0x14e4, 0x1645, 0x0e11, 0xc1, 0, 0, NC6770 },
821 {0x14e4, 0x1645, 0x0e11, 0x7c, 0, 0, NC7770 },
822 {0x14e4, 0x1645, 0x0e11, 0x85, 0, 0, NC7780 },
823 {0x14e4, 0x1645, 0x1028, 0x0121, 0, 0, BCM5701 },
824 {0x14e4, 0x1645, 0x10b7, 0x1004, 0, 0, TC996SX },
825 {0x14e4, 0x1645, 0x10b7, 0x1006, 0, 0, TC996BT },
826 {0x14e4, 0x1645, 0x10b7, 0x1007, 0, 0, TC1000T },
827 {0x14e4, 0x1645, 0x10b7, 0x1008, 0, 0, TC940BR01 },
828 {0x14e4, 0x1645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5701 },
829 {0x14e4, 0x1646, 0x14e4, 0x8009, 0, 0, BCM5702 },
830 {0x14e4, 0x1646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
831 {0x14e4, 0x16a6, 0x14e4, 0x8009, 0, 0, BCM5702 },
832 {0x14e4, 0x16a6, 0x14e4, 0x000c, 0, 0, BCM5702 },
833 {0x14e4, 0x16a6, 0x0e11, 0xbb, 0, 0, NC7760 },
834 {0x14e4, 0x16a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
835 {0x14e4, 0x16c6, 0x10b7, 0x1100, 0, 0, TC1000BT },
836 {0x14e4, 0x16c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
837 {0x14e4, 0x1647, 0x14e4, 0x0009, 0, 0, BCM5703 },
838 {0x14e4, 0x1647, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
839 {0x14e4, 0x1647, 0x14e4, 0x000b, 0, 0, BCM5703 },
840 {0x14e4, 0x1647, 0x14e4, 0x800a, 0, 0, BCM5703 },
841 {0x14e4, 0x1647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
842 {0x14e4, 0x16a7, 0x14e4, 0x0009, 0, 0, BCM5703 },
843 {0x14e4, 0x16a7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
844 {0x14e4, 0x16a7, 0x14e4, 0x000b, 0, 0, BCM5703 },
845 {0x14e4, 0x16a7, 0x14e4, 0x800a, 0, 0, BCM5703 },
846 {0x14e4, 0x16a7, 0x0e11, 0xca, 0, 0, NC7771 },
847 {0x14e4, 0x16a7, 0x0e11, 0xcb, 0, 0, NC7781 },
848 {0x14e4, 0x16a7, 0x1014, 0x0281, 0, 0, BCM5703ARBUCKLE },
849 {0x14e4, 0x16a7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
850 {0x14e4, 0x16c7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
851 {0x14e4, 0x16c7, 0x0e11, 0xca, 0, 0, NC7771 },
852 {0x14e4, 0x16c7, 0x0e11, 0xcb, 0, 0, NC7781 },
853 {0x14e4, 0x16c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
854 {0x14e4, 0x1648, 0x0e11, 0xcf, 0, 0, NC7772 },
855 {0x14e4, 0x1648, 0x0e11, 0xd0, 0, 0, NC7782 },
856 {0x14e4, 0x1648, 0x0e11, 0xd1, 0, 0, NC7783 },
857 {0x14e4, 0x1648, 0x10b7, 0x2000, 0, 0, TC998T },
858 {0x14e4, 0x1648, 0x10b7, 0x3000, 0, 0, TC999T },
859 {0x14e4, 0x1648, 0x1166, 0x1648, 0, 0, BCM5704CIOBE },
860 {0x14e4, 0x1648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704 },
861 {0x14e4, 0x1649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
862 {0x14e4, 0x16a8, 0x14e4, 0x16a8, 0, 0, BCM5704S },
863 {0x14e4, 0x16a8, 0x10b7, 0x2001, 0, 0, TC998SX },
864 {0x14e4, 0x16a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
865 {0x14e4, 0x1653, 0x0e11, 0x00e3, 0, 0, NC7761 },
866 {0x14e4, 0x1653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
867 {0x14e4, 0x1654, 0x0e11, 0x00e3, 0, 0, NC7761 },
868 {0x14e4, 0x1654, 0x103c, 0x3100, 0, 0, NC1020 },
869 {0x14e4, 0x1654, 0x103c, 0x3226, 0, 0, NC150T },
870 {0x14e4, 0x1654, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
871 {0x14e4, 0x165d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
872 {0x14e4, 0x165e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
873 {0x14e4, 0x166e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705F },
874 {0x14e4, 0x1696, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5782 },
875 {0x14e4, 0x169c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5788 },
876 {0x14e4, 0x169d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5789 },
877 {0x14e4, 0x170d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
878 {0x14e4, 0x170e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
879 {0x14e4, 0x1676, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750 },
880 {0x14e4, 0x167c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750M },
881 {0x14e4, 0x1677, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751 },
882 {0x14e4, 0x167d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751M },
883 {0x14e4, 0x167e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751F },
884 {0x14e4, 0x1658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5720 },
885 {0x14e4, 0x1659, 0x103c, 0x7031, 0, 0, NC320T },
886 {0x14e4, 0x1659, 0x103c, 0x7032, 0, 0, NC320T },
887 {0x14e4, 0x166a, 0x103c, 0x7035, 0, 0, NC325I },
888 {0x14e4, 0x166b, 0x103c, 0x7036, 0, 0, NC325I },
889 {0x14e4, 0x1668, 0x103c, 0x7039, 0, 0, NC324I },
890 {0x14e4, 0x1669, 0x103c, 0x703a, 0, 0, NC324I },
891 {0x14e4, 0x1678, 0x103c, 0x703e, 0, 0, NC326I },
892 {0x14e4, 0x1679, 0x103c, 0x703c, 0, 0, NC326I },
893 {0x14e4, 0x1659, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5721 },
894 {0x14e4, 0x16f7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753 },
895 {0x14e4, 0x16fd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753M },
896 {0x14e4, 0x16fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753F },
897 {0x14e4, 0x16dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5781 },
898 {0x14e4, 0x1600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752 },
899 {0x14e4, 0x1601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752M },
900 {0x14e4, 0x1668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5714 },
901 {0x14e4, 0x166a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780 },
902 {0x14e4, 0x166b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780S },
903 {0x14e4, 0x1678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5715 },
904 {0x14e4, 0x471f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM4785 },
905 {0x14e4, 0x16ff, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5903M },
906 {0x173b, 0x03ed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, UNK5788 },
907 {0,}
910 MODULE_DEVICE_TABLE(pci, bcm5700_pci_tbl);
912 #ifdef BCM_PROC_FS
913 extern int bcm5700_proc_create(void);
914 extern int bcm5700_proc_create_dev(struct net_device *dev);
915 extern int bcm5700_proc_remove_dev(struct net_device *dev);
916 extern int bcm5700_proc_remove_notifier(void);
917 #endif
919 #if (LINUX_VERSION_CODE >= 0x2060a)
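/* Used with pci_dev_present() to detect an AMD-762 northbridge, which reorders posted
   I/O writes; when present, bcm5700_init_board() flushes posted writes and disables
   NIC send BDs. */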
920 static struct pci_device_id pci_AMD762id[]={
921 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
922 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
925 #endif
927 static int sbgige = -1;
929 /*******************************************************************************
930 *******************************************************************************
933 int get_csum_flag(LM_UINT32 ChipRevId)
935 return NETIF_F_IP_CSUM;
938 /*******************************************************************************
939 *******************************************************************************
941 This function returns true if the device passed to it is attached to an
942 ICH-ICH4. If the chip is not attached to an ICH, or is attached to an ICH5
943 or newer, it returns false.
 945 This function determines which bridge it is attached to by scanning the PCI
 946 bus looking for bridge chips (hdr_type=1). When a bridge chip is detected,
 947 the bridge's subordinate's secondary bus number is compared with this
 948 device's bus number. If they match, then the device is attached to this
 949 bridge. The bridge's device id is compared to a list of known device ids for
 950 ICH-ICH4. Since many older ICHs (ICH2-ICH7) share the same device id, the
 951 chip revision must also be checked to determine if the chip is older than an
 952 ICH5.
 954 To scan the bus, one of two functions is used depending on the kernel
 955 version. For 2.4 kernels, the pci_find_device function is used. This
 956 function has been deprecated in the 2.6 kernel and replaced with the
 957 function pci_get_device. The macro walk_pci_bus determines which function to
 958 use when the driver is built.
961 #if (LINUX_VERSION_CODE >= 0x2060a)
962 #define walk_pci_bus(d) while ((d = pci_get_device( \
963 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
965 #define unwalk_pci_bus(d) pci_dev_put(d)
967 #else
968 #define walk_pci_bus(d) while ((d = pci_find_device( \
969 PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
970 #define unwalk_pci_bus(d)
972 #endif
974 #define ICH5_CHIP_VERSION 0xc0
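/* PCI IDs of the Intel ICH (ICH-ICH4) bridges checked by attached_to_ICH4_or_older();
   a match makes bcm5700_init_board() set UNDI_FIX_FLAG (indirect register access). */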
976 static struct pci_device_id pci_ICHtable[] = {
977 {0x8086, 0x2418}, /* PCI_DEVICE_ID_INTEL_82801AA_8 */
978 {0x8086, 0x2428}, /* PCI_DEVICE_ID_INTEL_82801AB_8 */
979 {0x8086, 0x244e}, /* PCI_DEVICE_ID_INTEL_82801BA_6 */
980 {0x8086, 0x2448}, /* PCI_DEVICE_ID_INTEL_82801BA_11 */
981 {0, 0}
984 int attached_to_ICH4_or_older( struct pci_dev *pdev)
986 struct pci_dev *tmp_pdev = NULL;
987 struct pci_device_id *ich_table;
988 u8 chip_rev;
990 walk_pci_bus (tmp_pdev) {
991 if ((tmp_pdev->hdr_type == 1) &&
992 (tmp_pdev->subordinate != NULL) &&
993 (tmp_pdev->subordinate->secondary == pdev->bus->number)) {
995 ich_table = pci_ICHtable;
997 while (ich_table->vendor) {
998 if ((ich_table->vendor == tmp_pdev->vendor) &&
999 (ich_table->device == tmp_pdev->device)) {
1001 pci_read_config_byte( tmp_pdev,
1002 PCI_REVISION_ID, &chip_rev);
1004 if (chip_rev < ICH5_CHIP_VERSION) {
1005 unwalk_pci_bus( tmp_pdev);
1006 return 1;
1009 ich_table++;
1013 return 0;
1016 static int
1017 __devinit bcm5700_init_board(struct pci_dev *pdev, struct net_device **dev_out, int board_idx)
1019 struct net_device *dev;
1020 PUM_DEVICE_BLOCK pUmDevice;
1021 PLM_DEVICE_BLOCK pDevice;
1022 bool rgmii = FALSE;
1023 sb_t *sbh = NULL;
1024 int rc;
1026 *dev_out = NULL;
1028 /* dev zeroed in init_etherdev */
1029 #if (LINUX_VERSION_CODE >= 0x20600)
1030 dev = alloc_etherdev(sizeof(*pUmDevice));
1031 #else
1032 dev = init_etherdev(NULL, sizeof(*pUmDevice));
1033 #endif
1034 if (dev == NULL) {
1035 printk(KERN_ERR "%s: unable to alloc new ethernet\n", bcm5700_driver);
1036 return -ENOMEM;
1038 SET_MODULE_OWNER(dev);
1039 #if (LINUX_VERSION_CODE >= 0x20600)
1040 SET_NETDEV_DEV(dev, &pdev->dev);
1041 #endif
1042 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1044 /* enable device (incl. PCI PM wakeup), and bus-mastering */
1045 rc = pci_enable_device(pdev);
1046 if (rc)
1047 goto err_out;
1049 /* init core specific stuff */
1050 if (pdev->device == T3_PCI_DEVICE_ID(T3_PCI_ID_BCM471F)) {
1051 sbh = sb_kattach(SB_OSH);
1052 sb_gige_init(sbh, ++sbgige, &rgmii);
1055 rc = pci_request_regions(pdev, bcm5700_driver);
1056 if (rc) {
1057 if (!sbh)
1058 goto err_out;
1059 printk(KERN_INFO "bcm5700_init_board: pci_request_regions returned error %d\n"
1060 "This may be because the region is already requested by"
1061 " the SMBus driver. Ignore the PCI error messages.\n", rc);
1064 pci_set_master(pdev);
1066 if (pci_set_dma_mask(pdev, BCM_64BIT_DMA_MASK) == 0) {
1067 pUmDevice->using_dac = 1;
1068 if (pci_set_consistent_dma_mask(pdev, BCM_64BIT_DMA_MASK) != 0) {
1069 printk(KERN_ERR "pci_set_consistent_dma_mask failed\n");
1070 pci_release_regions(pdev);
1071 goto err_out;
1073 } else if (pci_set_dma_mask(pdev, BCM_32BIT_DMA_MASK) == 0) {
1074 pUmDevice->using_dac = 0;
1075 } else {
1076 printk(KERN_ERR "System does not support DMA\n");
1077 pci_release_regions(pdev);
1078 goto err_out;
1081 pUmDevice->dev = dev;
1082 pUmDevice->pdev = pdev;
1083 pUmDevice->mem_list_num = 0;
1084 pUmDevice->next_module = root_tigon3_dev;
1085 pUmDevice->index = board_idx;
1086 pUmDevice->sbh = (void *)sbh;
1087 root_tigon3_dev = dev;
1089 spin_lock_init(&pUmDevice->global_lock);
1091 spin_lock_init(&pUmDevice->undi_lock);
1093 spin_lock_init(&pUmDevice->phy_lock);
1095 pDevice = &pUmDevice->lm_dev;
1096 pDevice->Flags = 0;
1097 pDevice->FunctNum = PCI_FUNC(pUmDevice->pdev->devfn);
1098 pUmDevice->boardflags = getintvar(NULL, "boardflags");
1099 if (sbh) {
1100 if (pUmDevice->boardflags & BFL_ENETROBO)
1101 pDevice->Flags |= ROBO_SWITCH_FLAG;
1102 pDevice->Flags |= rgmii ? RGMII_MODE_FLAG : 0;
1103 if (sb_chip(sbh) == BCM4785_CHIP_ID && sb_chiprev(sbh) < 2)
1104 pDevice->Flags |= ONE_DMA_AT_ONCE_FLAG;
1105 pDevice->Flags |= SB_CORE_FLAG;
1106 if (sb_chip(sbh) == BCM4785_CHIP_ID)
1107 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1110 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1111 if (board_idx < MAX_UNITS) {
1112 bcm5700_validate_param_range(pUmDevice, &mtu[board_idx], "mtu", 1500, 9000, 1500);
1113 dev->mtu = mtu[board_idx];
1115 #endif
1117 if (attached_to_ICH4_or_older(pdev)) {
1118 pDevice->Flags |= UNDI_FIX_FLAG;
1121 #if (LINUX_VERSION_CODE >= 0x2060a)
1122 if (pci_dev_present(pci_AMD762id)) {
1123 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1124 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1126 #else
1127 if (pci_find_device(0x1022, 0x700c, NULL)) {
1128 /* AMD762 writes I/O out of order */
1129 /* Setting bit 1 in 762's register 0x4C still doesn't work */
1130 /* in all cases */
1131 pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
1132 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
1134 #endif
1135 if (LM_GetAdapterInfo(pDevice) != LM_STATUS_SUCCESS) {
1136 rc = -ENODEV;
1137 goto err_out_unmap;
1140 if (pDevice->Flags & ROBO_SWITCH_FLAG) {
1141 robo_info_t *robo;
1143 if ((robo = bcm_robo_attach(sbh, pDevice, NULL,
1144 robo_miird, robo_miiwr)) == NULL) {
1145 B57_ERR(("robo_setup: failed to attach robo switch \n"));
1146 goto robo_fail;
1149 if (bcm_robo_enable_device(robo)) {
1150 B57_ERR(("robo_setup: failed to enable robo switch \n"));
1151 goto robo_fail;
1154 /* Configure the switch to do VLAN */
1155 if ((pUmDevice->boardflags & BFL_ENETVLAN) &&
1156 bcm_robo_config_vlan(robo, pDevice->PermanentNodeAddress)) {
1157 B57_ERR(("robo_setup: robo_config_vlan failed\n"));
1158 goto robo_fail;
1161 /* Enable the switch */
1162 if (bcm_robo_enable_switch(robo)) {
1163 B57_ERR(("robo_setup: robo_enable_switch failed\n"));
1164 robo_fail:
1165 bcm_robo_detach(robo);
1166 rc = -ENODEV;
1167 goto err_out_unmap;
1169 pUmDevice->robo = (void *)robo;
1172 if ((pDevice->Flags & JUMBO_CAPABLE_FLAG) == 0) {
1173 if (dev->mtu > 1500) {
1174 dev->mtu = 1500;
1175 printk(KERN_WARNING
1176 "%s-%d: Jumbo mtu sizes not supported, using mtu=1500\n",
1177 bcm5700_driver, pUmDevice->index);
1181 pUmDevice->do_global_lock = 0;
1182 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
1183 /* The 5700 chip works best without interleaved register */
1184 /* accesses on certain machines. */
1185 pUmDevice->do_global_lock = 1;
1188 if ((T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5701) &&
1189 ((pDevice->PciState & T3_PCI_STATE_NOT_PCI_X_BUS) == 0)) {
1191 pUmDevice->rx_buf_align = 0;
1192 } else {
1193 pUmDevice->rx_buf_align = 2;
1195 dev->mem_start = pci_resource_start(pdev, 0);
1196 dev->mem_end = dev->mem_start + sizeof(T3_STD_MEM_MAP);
1197 dev->irq = pdev->irq;
1199 *dev_out = dev;
1200 return 0;
1202 err_out_unmap:
1203 pci_release_regions(pdev);
1204 bcm5700_freemem(dev);
1206 err_out:
1207 #if (LINUX_VERSION_CODE < 0x020600)
1208 unregister_netdev(dev);
1209 kfree(dev);
1210 #else
1211 free_netdev(dev);
1212 #endif
1213 return rc;
1216 static int __devinit
1217 bcm5700_print_ver(void)
1219 printk(KERN_INFO "Broadcom Gigabit Ethernet Driver %s ",
1220 bcm5700_driver);
1221 #ifdef NICE_SUPPORT
1222 printk("with Broadcom NIC Extension (NICE) ");
1223 #endif
1224 printk("ver. %s %s\n", bcm5700_version, bcm5700_date);
1225 return 0;
1228 static int __devinit
1229 bcm5700_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1231 struct net_device *dev = NULL;
1232 PUM_DEVICE_BLOCK pUmDevice;
1233 PLM_DEVICE_BLOCK pDevice;
1234 int i;
1235 static int board_idx = -1;
1236 static int printed_version = 0;
1237 struct pci_dev *pci_dev;
1239 board_idx++;
1241 if (!printed_version) {
1242 bcm5700_print_ver();
1243 #ifdef BCM_PROC_FS
1244 bcm5700_proc_create();
1245 #endif
1246 printed_version = 1;
1249 i = bcm5700_init_board(pdev, &dev, board_idx);
1250 if (i < 0) {
1251 return i;
1254 if (dev == NULL)
1255 return -ENOMEM;
1257 #ifdef BCM_IOCTL32
1258 if (atomic_read(&bcm5700_load_count) == 0) {
1259 register_ioctl32_conversion(SIOCNICE, bcm5700_ioctl32);
1261 atomic_inc(&bcm5700_load_count);
1262 #endif
1263 dev->open = bcm5700_open;
1264 dev->hard_start_xmit = bcm5700_start_xmit;
1265 dev->stop = bcm5700_close;
1266 dev->get_stats = bcm5700_get_stats;
1267 dev->set_multicast_list = bcm5700_set_rx_mode;
1268 dev->do_ioctl = bcm5700_ioctl;
1269 dev->set_mac_address = &bcm5700_set_mac_addr;
1270 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1271 dev->change_mtu = &bcm5700_change_mtu;
1272 #endif
1273 #if (LINUX_VERSION_CODE >= 0x20400)
1274 dev->tx_timeout = bcm5700_reset;
1275 dev->watchdog_timeo = TX_TIMEOUT;
1276 #endif
1277 #ifdef BCM_VLAN
1278 dev->vlan_rx_register = &bcm5700_vlan_rx_register;
1279 dev->vlan_rx_kill_vid = &bcm5700_vlan_rx_kill_vid;
1280 #endif
1281 #ifdef BCM_NAPI_RXPOLL
1282 dev->poll = bcm5700_poll;
1283 dev->weight = 64;
1284 #endif
1286 pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1287 pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1289 dev->base_addr = pci_resource_start(pdev, 0);
1290 dev->irq = pdev->irq;
1291 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
1292 dev->poll_controller = poll_bcm5700;
1293 #endif
1295 #if (LINUX_VERSION_CODE >= 0x20600)
1296 if ((i = register_netdev(dev))) {
1297 printk(KERN_ERR "%s: Cannot register net device\n",
1298 bcm5700_driver);
1299 if (pUmDevice->lm_dev.pMappedMemBase)
1300 iounmap(pUmDevice->lm_dev.pMappedMemBase);
1301 pci_release_regions(pdev);
1302 bcm5700_freemem(dev);
1303 free_netdev(dev);
1304 return i;
1306 #endif
1309 pci_set_drvdata(pdev, dev);
1311 memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1312 pUmDevice->name = board_info[ent->driver_data].name,
1313 printk(KERN_INFO "%s: %s found at mem %lx, IRQ %d, ",
1314 dev->name, pUmDevice->name, dev->base_addr,
1315 dev->irq);
1316 printk("node addr ");
1317 for (i = 0; i < 6; i++) {
1318 printk("%2.2x", dev->dev_addr[i]);
1320 printk("\n");
1322 printk(KERN_INFO "%s: ", dev->name);
1323 if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5400_PHY_ID)
1324 printk("Broadcom BCM5400 Copper ");
1325 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5401_PHY_ID)
1326 printk("Broadcom BCM5401 Copper ");
1327 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5411_PHY_ID)
1328 printk("Broadcom BCM5411 Copper ");
1329 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5461_PHY_ID)
1330 printk("Broadcom BCM5461 Copper ");
1331 else if (((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5701_PHY_ID) &&
1332 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
1333 printk("Broadcom BCM5701 Integrated Copper ");
1335 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5703_PHY_ID) {
1336 printk("Broadcom BCM5703 Integrated ");
1337 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1338 printk("SerDes ");
1339 else
1340 printk("Copper ");
1342 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5704_PHY_ID) {
1343 printk("Broadcom BCM5704 Integrated ");
1344 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1345 printk("SerDes ");
1346 else
1347 printk("Copper ");
1349 else if (pDevice->PhyFlags & PHY_IS_FIBER){
1350 if(( pDevice->PhyId & PHY_ID_MASK ) == PHY_BCM5780_PHY_ID)
1351 printk("Broadcom BCM5780S Integrated Serdes ");
1354 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5705_PHY_ID)
1355 printk("Broadcom BCM5705 Integrated Copper ");
1356 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5750_PHY_ID)
1357 printk("Broadcom BCM5750 Integrated Copper ");
1359 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5714_PHY_ID)
1360 printk("Broadcom BCM5714 Integrated Copper ");
1361 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5780_PHY_ID)
1362 printk("Broadcom BCM5780 Integrated Copper ");
1364 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5752_PHY_ID)
1365 printk("Broadcom BCM5752 Integrated Copper ");
1366 else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM8002_PHY_ID)
1367 printk("Broadcom BCM8002 SerDes ");
1368 else if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
1369 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1370 printk("Broadcom BCM5703 Integrated SerDes ");
1372 else if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1373 printk("Broadcom BCM5704 Integrated SerDes ");
1375 else {
1376 printk("Agilent HDMP-1636 SerDes ");
1379 else {
1380 printk("Unknown ");
1382 printk("transceiver found\n");
1384 #if (LINUX_VERSION_CODE >= 0x20400)
1385 if (scatter_gather[board_idx]) {
1386 dev->features |= NETIF_F_SG;
1387 if (pUmDevice->using_dac && !(pDevice->Flags & BCM5788_FLAG))
1388 dev->features |= NETIF_F_HIGHDMA;
1390 if ((pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM) &&
1391 tx_checksum[board_idx]) {
1393 dev->features |= get_csum_flag( pDevice->ChipRevId);
1395 #ifdef BCM_VLAN
1396 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1397 #endif
1398 #ifdef BCM_TSO
1399 /* On 5714/15/80 chips, Jumbo Frames and TSO cannot both be enabled at
 1400 the same time. Since only one of these features can be enabled at a
1401 time, we'll enable only Jumbo Frames and disable TSO when the user
1402 tries to enable both.
1404 dev->features &= ~NETIF_F_TSO;
1406 if ((pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION) &&
1407 (enable_tso[board_idx])) {
1408 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
1409 (dev->mtu > 1500)) {
1410 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
1411 } else {
1412 dev->features |= NETIF_F_TSO;
1415 #endif
1416 printk(KERN_INFO "%s: Scatter-gather %s, 64-bit DMA %s, Tx Checksum %s, ",
1417 dev->name,
1418 (char *) ((dev->features & NETIF_F_SG) ? "ON" : "OFF"),
1419 (char *) ((dev->features & NETIF_F_HIGHDMA) ? "ON" : "OFF"),
1420 (char *) ((dev->features & get_csum_flag( pDevice->ChipRevId)) ? "ON" : "OFF"));
1421 #endif
1422 if ((pDevice->ChipRevId != T3_CHIP_ID_5700_B0) &&
1423 rx_checksum[board_idx])
1424 printk("Rx Checksum ON");
1425 else
1426 printk("Rx Checksum OFF");
1427 #ifdef BCM_VLAN
1428 printk(", 802.1Q VLAN ON");
1429 #endif
1430 #ifdef BCM_TSO
1431 if (dev->features & NETIF_F_TSO) {
1432 printk(", TSO ON");
1434 else
1435 #endif
1436 #ifdef BCM_NAPI_RXPOLL
1437 printk(", NAPI ON");
1438 #endif
1439 printk("\n");
1441 #ifdef BCM_PROC_FS
1442 bcm5700_proc_create_dev(dev);
1443 #endif
1444 register_reboot_notifier(&bcm5700_reboot_notifier);
1445 #ifdef BCM_TASKLET
1446 tasklet_init(&pUmDevice->tasklet, bcm5700_tasklet,
1447 (unsigned long) pUmDevice);
1448 #endif
1449 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1450 if ((REG_RD(pDevice, PciCfg.DualMacCtrl) &
1451 T3_DUAL_MAC_CH_CTRL_MASK) == 3) {
1453 printk(KERN_WARNING "%s: Device is configured for Hardware Based Teaming which is not supported with this operating system. Please consult the user diagnostic guide to disable Turbo Teaming.\n", dev->name);
1457 #if (LINUX_VERSION_CODE > 0x20605)
1459 if ((pci_dev = pci_get_device(0x1022, 0x700c, NULL)))
1460 #else
1461 if ((pci_dev = pci_find_device(0x1022, 0x700c, NULL)))
1462 #endif
1464 u32 val;
1466 /* Found AMD 762 North bridge */
1467 pci_read_config_dword(pci_dev, 0x4c, &val);
1468 if ((val & 0x02) == 0) {
1469 pci_write_config_dword(pci_dev, 0x4c, val | 0x02);
1470 printk(KERN_INFO "%s: Setting AMD762 Northbridge to enable PCI ordering compliance\n", bcm5700_driver);
1474 #if (LINUX_VERSION_CODE > 0x20605)
1476 pci_dev_put(pci_dev);
1478 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1480 if ((pci_dev = pci_get_device(0x1066, 0x0017, NULL))) {
1481 bcm_msi_chipset_bug = 1;
1483 pci_dev_put(pci_dev);
1484 #endif
1485 #endif
1487 return 0;
1491 static void __devexit
1492 bcm5700_remove_one (struct pci_dev *pdev)
1494 struct net_device *dev = pci_get_drvdata (pdev);
1495 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1497 #ifdef BCM_PROC_FS
1498 bcm5700_proc_remove_dev(dev);
1499 #endif
1500 #ifdef BCM_IOCTL32
1501 atomic_dec(&bcm5700_load_count);
1502 if (atomic_read(&bcm5700_load_count) == 0)
1503 unregister_ioctl32_conversion(SIOCNICE);
1504 #endif
1505 unregister_netdev(dev);
1507 if (pUmDevice->lm_dev.pMappedMemBase)
1508 iounmap(pUmDevice->lm_dev.pMappedMemBase);
1510 pci_release_regions(pdev);
1512 #if (LINUX_VERSION_CODE < 0x020600)
1513 kfree(dev);
1514 #else
1515 free_netdev(dev);
1516 #endif
1518 pci_set_drvdata(pdev, NULL);
1522 int b57_test_intr(UM_DEVICE_BLOCK *pUmDevice);
1524 #ifdef BCM_WL_EMULATOR
1525 /* new transmit callback */
1526 static int bcm5700emu_start_xmit(struct sk_buff *skb, struct net_device *dev);
1527 /* keep track of the 2 gige devices */
1528 static PLM_DEVICE_BLOCK pDev1;
1529 static PLM_DEVICE_BLOCK pDev2;
1531 static void
1532 bcm5700emu_open(struct net_device *dev)
1534 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1535 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1536 static int instance = 0;
1537 static char *wlemu_if = NULL;
1538 char *wlemu_mode = NULL;
1539 //int wlemu_idx = 0;
1540 static int rx_enable = 0;
1541 static int tx_enable = 0;
1543 /* which interface is the emulator ? */
1544 if(instance == 0) {
1545 wlemu_if = nvram_get("wlemu_if");
1546 /* do we emulate rx, tx or both */
1547 wlemu_mode = nvram_get("wlemu_mode");
1548 if(wlemu_mode) {
1549 if (!strcmp(wlemu_mode,"rx"))
1551 rx_enable = 1;
1553 else if (!strcmp(wlemu_mode,"tx"))
1556 tx_enable = 1;
1559 else if (!strcmp(wlemu_mode,"rx_tx"))
1562 rx_enable = 1;
1563 tx_enable = 1;
1568 instance++;
1570 /* The context is used for accessing the OSL for emulating devices */
1571 pDevice->wlc = NULL;
1573 /* determines if this device is an emulator */
1574 pDevice->wl_emulate_rx = 0;
1575 pDevice->wl_emulate_tx = 0;
1577 if(wlemu_if && !strcmp(dev->name,wlemu_if))
1579 /* create an emulator context. */
1580 pDevice->wlc = (void *)wlcemu_wlccreate((void *)dev);
1581 B57_INFO(("Using %s for wl emulation \n", dev->name));
1582 if(rx_enable)
1584 B57_INFO(("Enabling wl RX emulation \n"));
1585 pDevice->wl_emulate_rx = 1;
1587 /* re-direct transmit callback to emulator */
1588 if(tx_enable)
1590 pDevice->wl_emulate_tx = 1;
1591 dev->hard_start_xmit = bcm5700emu_start_xmit;
1592 B57_INFO(("Enabling wl TX emulation \n"));
1595 /* for debug access to configured devices only */
1596 if(instance == 1)
1597 pDev1 = pDevice;
1598 else if (instance == 2)
1599 pDev2 = pDevice;
1602 /* Public API to get current emulation info */
1603 int bcm5700emu_get_info(char *buf)
1605 int len = 0;
1606 PLM_DEVICE_BLOCK p;
1608 /* look for an emulating device */
1609 if(pDev1->wlc) {
1610 p = pDev1;
1611 len += sprintf(buf+len,"emulation device : eth0\n");
1613 else if (pDev2->wlc) {
1614 p = pDev2;
1615 len += sprintf(buf+len,"emulation device : eth1\n");
1617 else {
1618 len += sprintf(buf+len,"emulation not activated\n");
1619 return len;
1621 if(p->wl_emulate_rx)
1622 len += sprintf(buf+len,"RX emulation enabled\n");
1623 else
1624 len += sprintf(buf+len,"RX emulation disabled\n");
1625 if(p->wl_emulate_tx)
1626 len += sprintf(buf+len,"TX emulation enabled\n");
1627 else
1628 len += sprintf(buf+len,"TX emulation disabled\n");
1629 return len;
1634 /* Public API to access the bcm5700_start_xmit callback */
1636 int
1637 bcm5700emu_forward_xmit(struct sk_buff *skb, struct net_device *dev)
1639 return bcm5700_start_xmit(skb, dev);
1643 /* hook to kernel txmit callback */
1644 STATIC int
1645 bcm5700emu_start_xmit(struct sk_buff *skb, struct net_device *dev)
1648 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1649 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1650 return wlcemu_start_xmit(skb,pDevice->wlc);
1653 #endif /* BCM_WL_EMULATOR */
1656 bcm5700_open(struct net_device *dev)
1658 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1659 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1660 int rc;
1662 if (pUmDevice->suspended){
1663 return -EAGAIN;
1666 #ifdef BCM_WL_EMULATOR
1667 bcm5700emu_open(dev);
1668 #endif
1670 /* delay for 6 seconds */
1671 pUmDevice->delayed_link_ind = (6 * HZ) / pUmDevice->timer_interval;
1673 #ifdef BCM_INT_COAL
1674 #ifndef BCM_NAPI_RXPOLL
1675 pUmDevice->adaptive_expiry = HZ / pUmDevice->timer_interval;
1676 #endif
1677 #endif
1679 #ifdef INCLUDE_TBI_SUPPORT
1680 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
1681 (pDevice->TbiFlags & TBI_POLLING_FLAGS)) {
1682 pUmDevice->poll_tbi_interval = HZ / pUmDevice->timer_interval;
1683 if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1684 pUmDevice->poll_tbi_interval /= 4;
1686 pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1688 #endif
1689 /* set this timer for 2 seconds */
1690 pUmDevice->asf_heartbeat = (2 * HZ) / pUmDevice->timer_interval;
1692 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1695 if ( ( (T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ) &&
1696 (T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5714_A0 ) &&
1697 (T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_AX ) &&
1698 (T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_BX ) ) &&
1699 !bcm_msi_chipset_bug ){
1701 if (disable_msi[pUmDevice->index]==1){
1702 /* do nothing-it's not turned on */
1703 }else{
1704 pDevice->Flags |= USING_MSI_FLAG;
1706 REG_WR(pDevice, Msi.Mode, 2 );
1708 rc = pci_enable_msi(pUmDevice->pdev);
1710 if(rc!=0){
1711 pDevice->Flags &= ~ USING_MSI_FLAG;
1712 REG_WR(pDevice, Msi.Mode, 1 );
1718 #endif
1720 if ((rc= request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt, SA_SHIRQ, dev->name, dev)))
1723 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1725 if(pDevice->Flags & USING_MSI_FLAG) {
1727 pci_disable_msi(pUmDevice->pdev);
1728 pDevice->Flags &= ~USING_MSI_FLAG;
1729 REG_WR(pDevice, Msi.Mode, 1 );
1732 #endif
1733 return rc;
1736 pUmDevice->opened = 1;
1737 if (LM_InitializeAdapter(pDevice) != LM_STATUS_SUCCESS) {
1738 pUmDevice->opened = 0;
1739 free_irq(dev->irq, dev);
1740 bcm5700_freemem(dev);
1741 return -EAGAIN;
1744 bcm5700_set_vlan_mode(pUmDevice);
1745 bcm5700_init_counters(pUmDevice);
1747 if (pDevice->Flags & UNDI_FIX_FLAG) {
1748 printk(KERN_INFO "%s: Using indirect register access\n", dev->name);
1751 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6))
1753 /* Do not use invalid eth addrs: any multicast & all zeros */
1754 if( is_valid_ether_addr(dev->dev_addr) ){
1755 LM_SetMacAddress(pDevice, dev->dev_addr);
1757 else
1759 printk(KERN_INFO "%s: Invalid administered node address\n",dev->name);
1760 memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1764 if (tigon3_debug > 1)
1765 printk(KERN_DEBUG "%s: tigon3_open() irq %d.\n", dev->name, dev->irq);
1767 QQ_InitQueue(&pUmDevice->rx_out_of_buf_q.Container,
1768 MAX_RX_PACKET_DESC_COUNT);
1771 #if (LINUX_VERSION_CODE < 0x020300)
1772 MOD_INC_USE_COUNT;
1773 #endif
1775 atomic_set(&pUmDevice->intr_sem, 0);
1777 LM_EnableInterrupt(pDevice);
1779 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1781 if (pDevice->Flags & USING_MSI_FLAG){
1783 /* int test to check support on older machines */
1784 if (b57_test_intr(pUmDevice) != 1) {
1786 LM_DisableInterrupt(pDevice);
1787 free_irq(pUmDevice->pdev->irq, dev);
1788 pci_disable_msi(pUmDevice->pdev);
1789 REG_WR(pDevice, Msi.Mode, 1 );
1790 pDevice->Flags &= ~USING_MSI_FLAG;
1792 rc = LM_ResetAdapter(pDevice);
1793 printk(KERN_ALERT " The MSI support in this system is not functional.\n");
1795 if (rc == LM_STATUS_SUCCESS)
1796 rc = 0;
1797 else
1798 rc = -ENODEV;
1800 if(rc == 0){
1801 rc = request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt,
1802 SA_SHIRQ, dev->name, dev);
1805 if(rc){
1806 LM_Halt(pDevice);
1807 bcm5700_freemem(dev);
1808 pUmDevice->opened = 0;
1809 return rc;
1813 pDevice->InitDone = TRUE;
1814 atomic_set(&pUmDevice->intr_sem, 0);
1815 LM_EnableInterrupt(pDevice);
1818 #endif
1820 init_timer(&pUmDevice->timer);
1821 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1822 pUmDevice->timer.data = (unsigned long)dev;
1823 pUmDevice->timer.function = &bcm5700_timer;
1824 add_timer(&pUmDevice->timer);
1826 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
1827 init_timer(&pUmDevice->statstimer);
1828 pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1829 pUmDevice->statstimer.data = (unsigned long)dev;
1830 pUmDevice->statstimer.function = &bcm5700_stats_timer;
1831 add_timer(&pUmDevice->statstimer);
1834 if(pDevice->Flags & USING_MSI_FLAG)
1835 printk(KERN_INFO "%s: Using Message Signaled Interrupt (MSI) \n", dev->name);
1836 else
1837 printk(KERN_INFO "%s: Using PCI INTX interrupt \n", dev->name);
1839 netif_start_queue(dev);
1841 return 0;
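/*
 * Statistics timer, started from bcm5700_open() on 5705-and-newer
 * parts: refreshes the statistics block via LM_GetStats() while the
 * device is open, not suspended and the link is up, then re-arms
 * itself.
 */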
1845 STATIC void
1846 bcm5700_stats_timer(unsigned long data)
1848 struct net_device *dev = (struct net_device *)data;
1849 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1850 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1851 unsigned long flags = 0;
1853 if (!pUmDevice->opened)
1854 return;
1856 if (!atomic_read(&pUmDevice->intr_sem) &&
1857 !pUmDevice->suspended &&
1858 (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE)) {
1859 BCM5700_LOCK(pUmDevice, flags);
1860 LM_GetStats(pDevice);
1861 BCM5700_UNLOCK(pUmDevice, flags);
1864 pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1866 add_timer(&pUmDevice->statstimer);
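/*
 * Main housekeeping timer.  Depending on configuration it polls the
 * TBI/fiber link, delivers a delayed link indication, nudges the chip
 * to raise an interrupt when a status-block update is sitting
 * unserviced, detects a hung transmitter on older kernels, drives the
 * adaptive coalescing logic, sends the ASF "driver alive" heartbeat,
 * and finally re-arms itself.
 */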
1870 STATIC void
1871 bcm5700_timer(unsigned long data)
1873 struct net_device *dev = (struct net_device *)data;
1874 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1875 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1876 unsigned long flags = 0;
1877 LM_UINT32 value32;
1879 if (!pUmDevice->opened)
1880 return;
1882 /* BCM4785: Flush posted writes from GbE to host memory. */
1883 if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
1884 REG_RD(pDevice, HostCoalesce.Mode);
1886 if (atomic_read(&pUmDevice->intr_sem) || pUmDevice->suspended) {
1887 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1888 add_timer(&pUmDevice->timer);
1889 return;
1892 #ifdef INCLUDE_TBI_SUPPORT
1893 if ((pDevice->TbiFlags & TBI_POLLING_FLAGS) &&
1894 (--pUmDevice->poll_tbi_expiry <= 0)) {
1896 BCM5700_PHY_LOCK(pUmDevice, flags);
1897 value32 = REG_RD(pDevice, MacCtrl.Status);
1898 if (((pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) &&
1899 ((value32 & (MAC_STATUS_LINK_STATE_CHANGED |
1900 MAC_STATUS_CFG_CHANGED)) ||
1901 !(value32 & MAC_STATUS_PCS_SYNCED)))
1903 ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
1904 (value32 & (MAC_STATUS_PCS_SYNCED |
1905 MAC_STATUS_SIGNAL_DETECTED))))
1907 LM_SetupPhy(pDevice);
1909 BCM5700_PHY_UNLOCK(pUmDevice, flags);
1910 pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1913 #endif
1915 if (pUmDevice->delayed_link_ind > 0) {
1916 if (pUmDevice->delayed_link_ind == 1)
1917 MM_IndicateStatus(pDevice, pDevice->LinkStatus);
1918 else
1919 pUmDevice->delayed_link_ind--;
1922 if (pUmDevice->crc_counter_expiry > 0)
1923 pUmDevice->crc_counter_expiry--;
1925 if (!pUmDevice->interrupt) {
1926 if (!(pDevice->Flags & USE_TAGGED_STATUS_FLAG)) {
1927 BCM5700_LOCK(pUmDevice, flags);
1928 if (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) {
1929 /* This will generate an interrupt */
1930 REG_WR(pDevice, Grc.LocalCtrl,
1931 pDevice->GrcLocalCtrl |
1932 GRC_MISC_LOCAL_CTRL_SET_INT);
1934 else {
1935 REG_WR(pDevice, HostCoalesce.Mode,
1936 pDevice->CoalesceMode |
1937 HOST_COALESCE_ENABLE |
1938 HOST_COALESCE_NOW);
1940 if (!(REG_RD(pDevice, DmaWrite.Mode) &
1941 DMA_WRITE_MODE_ENABLE)) {
1942 BCM5700_UNLOCK(pUmDevice, flags);
1943 bcm5700_reset(dev);
1945 else {
1946 BCM5700_UNLOCK(pUmDevice, flags);
1948 if (pUmDevice->tx_queued) {
1949 pUmDevice->tx_queued = 0;
1950 netif_wake_queue(dev);
1953 #if (LINUX_VERSION_CODE < 0x02032b)
1954 if ((QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) !=
1955 pDevice->TxPacketDescCnt) &&
1956 ((jiffies - dev->trans_start) > TX_TIMEOUT)) {
1958 printk(KERN_WARNING "%s: Tx hung\n", dev->name);
1959 bcm5700_reset(dev);
1961 #endif
1963 #ifdef BCM_INT_COAL
1964 #ifndef BCM_NAPI_RXPOLL
1965 if (pUmDevice->adaptive_coalesce) {
1966 pUmDevice->adaptive_expiry--;
1967 if (pUmDevice->adaptive_expiry == 0) {
1968 pUmDevice->adaptive_expiry = HZ /
1969 pUmDevice->timer_interval;
1970 bcm5700_adapt_coalesce(pUmDevice);
1973 #endif
1974 #endif
1975 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container) >
1976 (unsigned int) pUmDevice->rx_buf_repl_panic_thresh) {
1977 /* Generate an interrupt and let the ISR allocate buffers */
1978 REG_WR(pDevice, HostCoalesce.Mode, pDevice->CoalesceMode |
1979 HOST_COALESCE_ENABLE | HOST_COALESCE_NOW);
1982 #ifdef BCM_ASF
1983 if (pDevice->AsfFlags & ASF_ENABLED) {
1984 pUmDevice->asf_heartbeat--;
1985 if (pUmDevice->asf_heartbeat == 0) {
1986 if( (pDevice->Flags & UNDI_FIX_FLAG) ||
1987 (pDevice->Flags & ENABLE_PCIX_FIX_FLAG)) {
1988 MEM_WR_OFFSET(pDevice, T3_CMD_MAILBOX,
1989 T3_CMD_NICDRV_ALIVE2);
1990 MEM_WR_OFFSET(pDevice, T3_CMD_LENGTH_MAILBOX,
1992 MEM_WR_OFFSET(pDevice, T3_CMD_DATA_MAILBOX, 5);
1993 } else {
1994 LM_RegWr(pDevice,
1995 (T3_NIC_MBUF_POOL_ADDR +
1996 T3_CMD_MAILBOX),
1997 T3_CMD_NICDRV_ALIVE2, 1);
1998 LM_RegWr(pDevice,
1999 (T3_NIC_MBUF_POOL_ADDR +
2000 T3_CMD_LENGTH_MAILBOX),4,1);
2001 LM_RegWr(pDevice,
2002 (T3_NIC_MBUF_POOL_ADDR +
2003 T3_CMD_DATA_MAILBOX),5,1);
2006 value32 = REG_RD(pDevice, Grc.RxCpuEvent);
2007 REG_WR(pDevice, Grc.RxCpuEvent, value32 | BIT_14);
2008 pUmDevice->asf_heartbeat = (2 * HZ) /
2009 pUmDevice->timer_interval;
2012 #endif
2014 if (pDevice->PhyFlags & PHY_IS_FIBER){
2015 BCM5700_PHY_LOCK(pUmDevice, flags);
2016 LM_5714_FamFiberCheckLink(pDevice);
2017 BCM5700_PHY_UNLOCK(pUmDevice, flags);
2020 pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
2021 add_timer(&pUmDevice->timer);
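/*
 * Reset the driver-maintained counters (adaptive-coalescing state and
 * the CRC/checksum/TSO debug counters) to their defaults.  Called from
 * bcm5700_open() and bcm5700_reset().
 */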
2024 STATIC int
2025 bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice)
2027 #ifdef BCM_INT_COAL
2028 #ifndef BCM_NAPI_RXPOLL
2029 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2031 pUmDevice->rx_curr_coalesce_frames = pDevice->RxMaxCoalescedFrames;
2032 pUmDevice->rx_curr_coalesce_ticks = pDevice->RxCoalescingTicks;
2033 pUmDevice->tx_curr_coalesce_frames = pDevice->TxMaxCoalescedFrames;
2034 pUmDevice->rx_last_cnt = 0;
2035 pUmDevice->tx_last_cnt = 0;
2036 #endif
2037 #endif
2038 pUmDevice->phy_crc_count = 0;
2039 #if TIGON3_DEBUG
2040 pUmDevice->tx_zc_count = 0;
2041 pUmDevice->tx_chksum_count = 0;
2042 pUmDevice->tx_himem_count = 0;
2043 pUmDevice->rx_good_chksum_count = 0;
2044 pUmDevice->rx_bad_chksum_count = 0;
2045 #endif
2046 #ifdef BCM_TSO
2047 pUmDevice->tso_pkt_count = 0;
2048 #endif
2049 return 0;
2052 #ifdef BCM_INT_COAL
2053 #ifndef BCM_NAPI_RXPOLL
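/*
 * Adaptive interrupt coalescing (non-NAPI builds only).  Once per
 * second the timer samples the unicast rx/tx packet counters and
 * computes a weighted rate,
 *
 *     total_delta = ((rx_delta + rx_delta + tx_delta) / 3) << 1;
 *
 * which selects one of three coalescing profiles (ADAPTIVE_LO /
 * DEFAULT / ADAPTIVE_HI).  bcm5700_do_adapt_coalesce() then writes the
 * chosen frame and tick limits into the HostCoalesce registers.
 */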
2054 STATIC int
2055 bcm5700_do_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice,
2056 int rx_frames, int rx_ticks, int tx_frames, int rx_frames_intr)
2058 unsigned long flags = 0;
2059 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2061 if (pUmDevice->do_global_lock) {
2062 if (spin_is_locked(&pUmDevice->global_lock))
2063 return 0;
2064 spin_lock_irqsave(&pUmDevice->global_lock, flags);
2066 pUmDevice->rx_curr_coalesce_frames = rx_frames;
2067 pUmDevice->rx_curr_coalesce_ticks = rx_ticks;
2068 pUmDevice->tx_curr_coalesce_frames = tx_frames;
2069 pUmDevice->rx_curr_coalesce_frames_intr = rx_frames_intr;
2070 REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFrames, rx_frames);
2072 REG_WR(pDevice, HostCoalesce.RxCoalescingTicks, rx_ticks);
2074 REG_WR(pDevice, HostCoalesce.TxMaxCoalescedFrames, tx_frames);
2076 REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFramesDuringInt,
2077 rx_frames_intr);
2079 BCM5700_UNLOCK(pUmDevice, flags);
2080 return 0;
2083 STATIC int
2084 bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice)
2086 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
2087 uint rx_curr_cnt, tx_curr_cnt, rx_delta, tx_delta, total_delta;
2089 rx_curr_cnt = pDevice->pStatsBlkVirt->ifHCInUcastPkts.Low;
2090 tx_curr_cnt = pDevice->pStatsBlkVirt->ifHCOutUcastPkts.Low;
2091 if ((rx_curr_cnt <= pUmDevice->rx_last_cnt) ||
2092 (tx_curr_cnt < pUmDevice->tx_last_cnt)) {
2094 /* skip if there is counter rollover */
2095 pUmDevice->rx_last_cnt = rx_curr_cnt;
2096 pUmDevice->tx_last_cnt = tx_curr_cnt;
2097 return 0;
2100 rx_delta = rx_curr_cnt - pUmDevice->rx_last_cnt;
2101 tx_delta = tx_curr_cnt - pUmDevice->tx_last_cnt;
2102 total_delta = (((rx_delta + rx_delta) + tx_delta) / 3) << 1;
2104 pUmDevice->rx_last_cnt = rx_curr_cnt;
2105 pUmDevice->tx_last_cnt = tx_curr_cnt;
2107 if (total_delta < ADAPTIVE_LO_PKT_THRESH) {
2108 if (pUmDevice->rx_curr_coalesce_frames !=
2109 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES) {
2111 bcm5700_do_adapt_coalesce(pUmDevice,
2112 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES,
2113 ADAPTIVE_LO_RX_COALESCING_TICKS,
2114 ADAPTIVE_LO_TX_MAX_COALESCED_FRAMES,
2115 ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES_DURING_INT);
2118 else if (total_delta < ADAPTIVE_HI_PKT_THRESH) {
2119 if (pUmDevice->rx_curr_coalesce_frames !=
2120 DEFAULT_RX_MAX_COALESCED_FRAMES) {
2122 bcm5700_do_adapt_coalesce(pUmDevice,
2123 DEFAULT_RX_MAX_COALESCED_FRAMES,
2124 DEFAULT_RX_COALESCING_TICKS,
2125 DEFAULT_TX_MAX_COALESCED_FRAMES,
2126 DEFAULT_RX_MAX_COALESCED_FRAMES_DURING_INT);
2129 else {
2130 if (pUmDevice->rx_curr_coalesce_frames !=
2131 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES) {
2133 bcm5700_do_adapt_coalesce(pUmDevice,
2134 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES,
2135 ADAPTIVE_HI_RX_COALESCING_TICKS,
2136 ADAPTIVE_HI_TX_MAX_COALESCED_FRAMES,
2137 ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES_DURING_INT);
2140 return 0;
2142 #endif
2143 #endif
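/*
 * Full software reset path: stop the queue, mask interrupts,
 * reinitialize the hardware with LM_ResetAdapter(), restore the rx
 * mode, VLAN mode, counters and MAC address, then re-enable interrupts
 * and wake the queue.  TSO is dropped from dev->features when the tx
 * ring was full, presumably to work around a TSO-related hang.
 */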
2145 STATIC void
2146 bcm5700_reset(struct net_device *dev)
2148 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2149 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2150 unsigned long flags;
2152 #ifdef BCM_TSO
2154 if( (dev->features & NETIF_F_TSO) &&
2155 (pUmDevice->tx_full) ) {
2157 dev->features &= ~NETIF_F_TSO;
2159 #endif
2161 netif_stop_queue(dev);
2162 bcm5700_intr_off(pUmDevice);
2163 BCM5700_PHY_LOCK(pUmDevice, flags);
2164 LM_ResetAdapter(pDevice);
2165 pDevice->InitDone = TRUE;
2166 bcm5700_do_rx_mode(dev);
2167 bcm5700_set_vlan_mode(pUmDevice);
2168 bcm5700_init_counters(pUmDevice);
2169 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
2170 LM_SetMacAddress(pDevice, dev->dev_addr);
2172 BCM5700_PHY_UNLOCK(pUmDevice, flags);
2173 atomic_set(&pUmDevice->intr_sem, 1);
2174 bcm5700_intr_on(pUmDevice);
2175 netif_wake_queue(dev);
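/*
 * Select how VLAN tags are handled on receive.  AUTO_STRIP resolves to
 * FORCED_STRIP when ASF is enabled and to NORMAL_STRIP otherwise; in
 * normal mode tags are kept unless a VLAN group (or the NICE rx
 * handler) is registered, while forced mode always strips them.
 */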
2178 STATIC void
2179 bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice)
2181 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2182 LM_UINT32 ReceiveMask = pDevice->ReceiveMask;
2183 int vlan_tag_mode = pUmDevice->vlan_tag_mode;
2185 if (vlan_tag_mode == VLAN_TAG_MODE_AUTO_STRIP) {
2186 if (pDevice->AsfFlags & ASF_ENABLED) {
2187 vlan_tag_mode = VLAN_TAG_MODE_FORCED_STRIP;
2189 else {
2190 vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
2193 if (vlan_tag_mode == VLAN_TAG_MODE_NORMAL_STRIP) {
2194 ReceiveMask |= LM_KEEP_VLAN_TAG;
2195 #ifdef BCM_VLAN
2196 if (pUmDevice->vlgrp)
2197 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2198 #endif
2199 #ifdef NICE_SUPPORT
2200 if (pUmDevice->nice_rx)
2201 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2202 #endif
2204 else if (vlan_tag_mode == VLAN_TAG_MODE_FORCED_STRIP) {
2205 ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2207 if (ReceiveMask != pDevice->ReceiveMask)
2209 LM_SetReceiveMask(pDevice, ReceiveMask);
2213 static void
2214 bcm5700_poll_wait(UM_DEVICE_BLOCK *pUmDevice)
2216 #ifdef BCM_NAPI_RXPOLL
2217 while (pUmDevice->lm_dev.RxPoll) {
2218 current->state = TASK_INTERRUPTIBLE;
2219 schedule_timeout(1);
2221 #endif
2225 #ifdef BCM_VLAN
2226 STATIC void
2227 bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
2229 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2231 bcm5700_intr_off(pUmDevice);
2232 bcm5700_poll_wait(pUmDevice);
2233 pUmDevice->vlgrp = vlgrp;
2234 bcm5700_set_vlan_mode(pUmDevice);
2235 bcm5700_intr_on(pUmDevice);
2238 STATIC void
2239 bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
2241 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2243 bcm5700_intr_off(pUmDevice);
2244 bcm5700_poll_wait(pUmDevice);
2245 if (pUmDevice->vlgrp) {
2246 pUmDevice->vlgrp->vlan_devices[vid] = NULL;
2248 bcm5700_intr_on(pUmDevice);
2250 #endif
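/*
 * Transmit entry point.  Drops the skb when the link is down or the
 * device is suspended, takes a packet descriptor from TxPacketFreeQ,
 * stops the queue when descriptors or send BDs run low, fills in the
 * checksum, VLAN and TSO flags, and finally hands the packet to
 * LM_SendPacket().
 */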
2252 STATIC int
2253 bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev)
2255 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2256 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2257 PLM_PACKET pPacket;
2258 PUM_PACKET pUmPacket;
2259 unsigned long flags = 0;
2260 int frag_no;
2261 #ifdef NICE_SUPPORT
2262 vlan_tag_t *vlan_tag;
2263 #endif
2264 #ifdef BCM_TSO
2265 LM_UINT32 mss = 0 ;
2266 uint16_t ip_tcp_len, tcp_opt_len, tcp_seg_flags;
2267 #endif
2269 if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) ||
2270 !pDevice->InitDone || pUmDevice->suspended)
2272 dev_kfree_skb(skb);
2273 return 0;
2276 #if (LINUX_VERSION_CODE < 0x02032b)
2277 if (test_and_set_bit(0, &dev->tbusy)) {
2278 return 1;
2280 #endif
2282 if (pUmDevice->do_global_lock && pUmDevice->interrupt) {
2283 netif_stop_queue(dev);
2284 pUmDevice->tx_queued = 1;
2285 if (!pUmDevice->interrupt) {
2286 netif_wake_queue(dev);
2287 pUmDevice->tx_queued = 0;
2289 return 1;
2292 pPacket = (PLM_PACKET)
2293 QQ_PopHead(&pDevice->TxPacketFreeQ.Container);
2294 if (pPacket == 0) {
2295 netif_stop_queue(dev);
2296 pUmDevice->tx_full = 1;
2297 if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container)) {
2298 netif_wake_queue(dev);
2299 pUmDevice->tx_full = 0;
2301 return 1;
2303 pUmPacket = (PUM_PACKET) pPacket;
2304 pUmPacket->skbuff = skb;
2305 pUmDevice->stats.tx_bytes += skb->len;
2307 if (skb->ip_summed == CHECKSUM_HW) {
2308 pPacket->Flags = SND_BD_FLAG_TCP_UDP_CKSUM;
2309 #if TIGON3_DEBUG
2310 pUmDevice->tx_chksum_count++;
2311 #endif
2313 else {
2314 pPacket->Flags = 0;
2316 #if MAX_SKB_FRAGS
2317 frag_no = skb_shinfo(skb)->nr_frags;
2318 #else
2319 frag_no = 0;
2320 #endif
2321 if (atomic_read(&pDevice->SendBdLeft) < (frag_no + 1)) {
2322 netif_stop_queue(dev);
2323 pUmDevice->tx_full = 1;
2324 QQ_PushHead(&pDevice->TxPacketFreeQ.Container, pPacket);
2325 if (atomic_read(&pDevice->SendBdLeft) >= (frag_no + 1)) {
2326 netif_wake_queue(dev);
2327 pUmDevice->tx_full = 0;
2329 return 1;
2332 pPacket->u.Tx.FragCount = frag_no + 1;
2333 #if TIGON3_DEBUG
2334 if (pPacket->u.Tx.FragCount > 1)
2335 pUmDevice->tx_zc_count++;
2336 #endif
2338 #ifdef BCM_VLAN
2339 if (pUmDevice->vlgrp && vlan_tx_tag_present(skb)) {
2340 pPacket->VlanTag = vlan_tx_tag_get(skb);
2341 pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2343 #endif
2344 #ifdef NICE_SUPPORT
2345 vlan_tag = (vlan_tag_t *) &skb->cb[0];
2346 if (vlan_tag->signature == 0x5555) {
2347 pPacket->VlanTag = vlan_tag->tag;
2348 pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
2349 vlan_tag->signature = 0;
2351 #endif
2353 #ifdef BCM_TSO
2354 if ((mss = (LM_UINT32) skb_shinfo(skb)->tso_size) &&
2355 (skb->len > pDevice->TxMtu)) {
2357 #if (LINUX_VERSION_CODE >= 0x02060c)
2359 if (skb_header_cloned(skb) &&
2360 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
2362 dev_kfree_skb(skb);
2363 return 0;
2365 #endif
2366 pUmDevice->tso_pkt_count++;
2368 pPacket->Flags |= SND_BD_FLAG_CPU_PRE_DMA |
2369 SND_BD_FLAG_CPU_POST_DMA;
2371 tcp_opt_len = 0;
2372 if (skb->h.th->doff > 5) {
2373 tcp_opt_len = (skb->h.th->doff - 5) << 2;
2375 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
2376 skb->nh.iph->check = 0;
2378 if ( T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ){
2379 skb->h.th->check = 0;
2380 pPacket->Flags &= ~SND_BD_FLAG_TCP_UDP_CKSUM;
2382 else {
2383 skb->h.th->check = ~csum_tcpudp_magic(
2384 skb->nh.iph->saddr, skb->nh.iph->daddr,
2385 0, IPPROTO_TCP, 0);
2388 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
2389 tcp_seg_flags = 0;
2391 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
2392 if ( T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId) ){
2393 tcp_seg_flags =
2394 ((skb->nh.iph->ihl - 5) +
2395 (tcp_opt_len >> 2)) << 11;
2397 else {
2398 pPacket->Flags |=
2399 ((skb->nh.iph->ihl - 5) +
2400 (tcp_opt_len >> 2)) << 12;
2403 pPacket->u.Tx.MaxSegmentSize = mss | tcp_seg_flags;
2405 else
2407 pPacket->u.Tx.MaxSegmentSize = 0;
2409 #endif
2410 BCM5700_LOCK(pUmDevice, flags);
2411 LM_SendPacket(pDevice, pPacket);
2412 BCM5700_UNLOCK(pUmDevice, flags);
2414 #if (LINUX_VERSION_CODE < 0x02032b)
2415 netif_wake_queue(dev);
2416 #endif
2417 dev->trans_start = jiffies;
2420 return 0;
2423 #ifdef BCM_NAPI_RXPOLL
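/*
 * NAPI poll routine (old *budget/quota interface).  Services received
 * packets via LM_ServiceRxPoll(), replenishes rx buffers, and when the
 * budget is not exhausted completes the poll and re-enables chip
 * interrupts via the Grc.Mode write, taking care not to lose a
 * status-block update that raced with the re-enable.
 */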
2424 STATIC int
2425 bcm5700_poll(struct net_device *dev, int *budget)
2427 int orig_budget = *budget;
2428 int work_done;
2429 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
2430 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2431 unsigned long flags = 0;
2432 LM_UINT32 tag;
2434 if (orig_budget > dev->quota)
2435 orig_budget = dev->quota;
2437 BCM5700_LOCK(pUmDevice, flags);
2438 /* BCM4785: Flush posted writes from GbE to host memory. */
2439 if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
2440 REG_RD(pDevice, HostCoalesce.Mode);
2441 work_done = LM_ServiceRxPoll(pDevice, orig_budget);
2442 *budget -= work_done;
2443 dev->quota -= work_done;
2445 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2446 replenish_rx_buffers(pUmDevice, 0);
2448 BCM5700_UNLOCK(pUmDevice, flags);
2449 if (work_done) {
2450 MM_IndicateRxPackets(pDevice);
2451 BCM5700_LOCK(pUmDevice, flags);
2452 LM_QueueRxPackets(pDevice);
2453 BCM5700_UNLOCK(pUmDevice, flags);
2455 if ((work_done < orig_budget) || atomic_read(&pUmDevice->intr_sem) ||
2456 pUmDevice->suspended) {
2458 netif_rx_complete(dev);
2459 BCM5700_LOCK(pUmDevice, flags);
2460 REG_WR(pDevice, Grc.Mode, pDevice->GrcMode);
2461 pDevice->RxPoll = FALSE;
2462 if (pDevice->RxPoll) {
2463 BCM5700_UNLOCK(pUmDevice, flags);
2464 return 0;
2466 /* Take care of possible missed rx interrupts */
2467 REG_RD_BACK(pDevice, Grc.Mode); /* flush the register write */
2468 tag = pDevice->pStatusBlkVirt->StatusTag;
2469 if ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2470 (pDevice->pStatusBlkVirt->Idx[0].RcvProdIdx !=
2471 pDevice->RcvRetConIdx)) {
2473 REG_WR(pDevice, HostCoalesce.Mode,
2474 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2475 HOST_COALESCE_NOW);
2477 /* If a new status block is pending in the WDMA state machine */
2478 /* before the register write to enable the rx interrupt, */
2479 /* the new status block may DMA with no interrupt. In this */
2480 /* scenario, the tag read above will be older than the tag in */
2481 /* the pending status block, and writing back the older tag will */
2482 /* cause an interrupt to be generated. */
2483 else if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2484 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low,
2485 tag << 24);
2486 /* Make sure we service tx in case some tx interrupts */
2487 /* are cleared */
2488 if (atomic_read(&pDevice->SendBdLeft) <
2489 (T3_SEND_RCB_ENTRY_COUNT / 2)) {
2490 REG_WR(pDevice, HostCoalesce.Mode,
2491 pDevice->CoalesceMode |
2492 HOST_COALESCE_ENABLE |
2493 HOST_COALESCE_NOW);
2496 BCM5700_UNLOCK(pUmDevice, flags);
2497 return 0;
2499 return 1;
2501 #endif /* BCM_NAPI_RXPOLL */
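/*
 * Hard interrupt handler.  Guards against re-entry and against
 * interrupts arriving while intr_sem holds them off, then services the
 * status block: with tagged status the loop re-reads the tag until it
 * stops changing; otherwise it loops while STATUS_BLOCK_UPDATED stays
 * set (both bounded by max_intr_loop).  Rx buffer replenishment is
 * done inline, deferred to a tasklet, or handed to the NAPI poll,
 * depending on the build options.
 */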
2503 STATIC irqreturn_t
2504 bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
2506 struct net_device *dev = (struct net_device *)dev_instance;
2507 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2508 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2509 LM_UINT32 oldtag, newtag;
2510 int i, max_intr_loop;
2511 #ifdef BCM_TASKLET
2512 int repl_buf_count;
2513 #endif
2514 unsigned int handled = 1;
2516 if (!pDevice->InitDone) {
2517 handled = 0;
2518 return IRQ_RETVAL(handled);
2521 bcm5700_intr_lock(pUmDevice);
2522 if (atomic_read(&pUmDevice->intr_sem)) {
2523 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2524 bcm5700_intr_unlock(pUmDevice);
2525 handled = 0;
2526 return IRQ_RETVAL(handled);
2529 if (test_and_set_bit(0, (void*)&pUmDevice->interrupt)) {
2530 printk(KERN_ERR "%s: Duplicate entry of the interrupt handler\n",
2531 dev->name);
2532 bcm5700_intr_unlock(pUmDevice);
2533 handled = 0;
2534 return IRQ_RETVAL(handled);
2537 /* BCM4785: Flush posted writes from GbE to host memory. */
2538 if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
2539 REG_RD(pDevice, HostCoalesce.Mode);
2541 if ((pDevice->Flags & USING_MSI_FLAG) ||
2542 (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
2543 !(REG_RD(pDevice,PciCfg.PciState) & T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) )
2546 if (pUmDevice->intr_test) {
2547 if (!(REG_RD(pDevice, PciCfg.PciState) &
2548 T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) ||
2549 pDevice->Flags & USING_MSI_FLAG ) {
2550 pUmDevice->intr_test_result = 1;
2552 pUmDevice->intr_test = 0;
2555 #ifdef BCM_NAPI_RXPOLL
2556 max_intr_loop = 1;
2557 #else
2558 max_intr_loop = 50;
2559 #endif
2560 if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
2561 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2562 oldtag = pDevice->pStatusBlkVirt->StatusTag;
2564 for (i = 0; ; i++) {
2565 pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2567 LM_ServiceInterrupts(pDevice);
2568 /* BCM4785: Flush GbE posted writes to host memory. */
2569 if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
2570 MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
2571 newtag = pDevice->pStatusBlkVirt->StatusTag;
2572 if ((newtag == oldtag) || (i > max_intr_loop)) {
2573 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, oldtag << 24);
2574 pDevice->LastTag = oldtag;
2575 if (pDevice->Flags & UNDI_FIX_FLAG) {
2576 REG_WR(pDevice, Grc.LocalCtrl,
2577 pDevice->GrcLocalCtrl | 0x2);
2579 break;
2581 oldtag = newtag;
2584 else
2586 i = 0;
2587 do {
2588 uint dummy;
2590 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
2591 pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
2592 LM_ServiceInterrupts(pDevice);
2593 MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 0);
2594 dummy = MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
2595 i++;
2597 while ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) &&
2598 (i < max_intr_loop));
2600 if (pDevice->Flags & UNDI_FIX_FLAG) {
2601 REG_WR(pDevice, Grc.LocalCtrl,
2602 pDevice->GrcLocalCtrl | 0x2);
2606 else
2608 /* not my interrupt */
2609 handled = 0;
2612 #ifdef BCM_TASKLET
2613 repl_buf_count = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
2614 if (((repl_buf_count > pUmDevice->rx_buf_repl_panic_thresh) ||
2615 pDevice->QueueAgain) &&
2616 (!test_and_set_bit(0, &pUmDevice->tasklet_busy))) {
2618 replenish_rx_buffers(pUmDevice, pUmDevice->rx_buf_repl_isr_limit);
2619 clear_bit(0, (void*)&pUmDevice->tasklet_busy);
2621 else if ((repl_buf_count > pUmDevice->rx_buf_repl_thresh) &&
2622 !pUmDevice->tasklet_pending) {
2624 pUmDevice->tasklet_pending = 1;
2625 tasklet_schedule(&pUmDevice->tasklet);
2627 #else
2628 #ifdef BCM_NAPI_RXPOLL
2629 if (!pDevice->RxPoll &&
2630 QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2631 pDevice->RxPoll = 1;
2632 MM_ScheduleRxPoll(pDevice);
2634 #else
2635 if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
2636 replenish_rx_buffers(pUmDevice, 0);
2639 if (QQ_GetEntryCnt(&pDevice->RxPacketFreeQ.Container) ||
2640 pDevice->QueueAgain) {
2642 LM_QueueRxPackets(pDevice);
2644 #endif
2645 #endif
2647 clear_bit(0, (void*)&pUmDevice->interrupt);
2648 bcm5700_intr_unlock(pUmDevice);
2649 if (pUmDevice->tx_queued) {
2650 pUmDevice->tx_queued = 0;
2651 netif_wake_queue(dev);
2653 return IRQ_RETVAL(handled);
2657 #ifdef BCM_TASKLET
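/*
 * Bottom-half tasklet: replenishes rx buffers outside hard-IRQ
 * context.  The test_and_set_bit() guard protects against the
 * re-entrant tasklets noted below for RH 7.2 Beta 3 kernels.
 */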
2658 STATIC void
2659 bcm5700_tasklet(unsigned long data)
2661 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)data;
2662 unsigned long flags = 0;
2664 /* RH 7.2 Beta 3 tasklets are reentrant */
2665 if (test_and_set_bit(0, &pUmDevice->tasklet_busy)) {
2666 pUmDevice->tasklet_pending = 0;
2667 return;
2670 pUmDevice->tasklet_pending = 0;
2671 if (pUmDevice->opened && !pUmDevice->suspended) {
2672 BCM5700_LOCK(pUmDevice, flags);
2673 replenish_rx_buffers(pUmDevice, 0);
2674 BCM5700_UNLOCK(pUmDevice, flags);
2677 clear_bit(0, &pUmDevice->tasklet_busy);
2679 #endif
2681 STATIC int
2682 bcm5700_close(struct net_device *dev)
2685 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2686 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2688 #if (LINUX_VERSION_CODE < 0x02032b)
2689 dev->start = 0;
2690 #endif
2691 netif_stop_queue(dev);
2692 pUmDevice->opened = 0;
2694 #ifdef BCM_ASF
2695 if( !(pDevice->AsfFlags & ASF_ENABLED) )
2696 #endif
2697 #ifdef BCM_WOL
2698 if( enable_wol[pUmDevice->index] == 0 )
2699 #endif
2700 B57_INFO(("%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name));
2702 if (tigon3_debug > 1)
2703 printk(KERN_DEBUG "%s: Shutting down Tigon3\n",
2704 dev->name);
2706 LM_MulticastClear(pDevice);
2707 bcm5700_shutdown(pUmDevice);
2709 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
2710 del_timer_sync(&pUmDevice->statstimer);
2713 del_timer_sync(&pUmDevice->timer);
2715 free_irq(pUmDevice->pdev->irq, dev);
2717 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
2719 if(pDevice->Flags & USING_MSI_FLAG) {
2720 pci_disable_msi(pUmDevice->pdev);
2721 REG_WR(pDevice, Msi.Mode, 1 );
2722 pDevice->Flags &= ~USING_MSI_FLAG;
2725 #endif
2728 #if (LINUX_VERSION_CODE < 0x020300)
2729 MOD_DEC_USE_COUNT;
2730 #endif
2732 /* BCM4785: Don't go to low-power state because it will power down the smbus block. */
2733 if (!(pDevice->Flags & SB_CORE_FLAG))
2734 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
2737 bcm5700_freemem(dev);
2739 QQ_InitQueue(&pDevice->RxPacketFreeQ.Container,
2740 MAX_RX_PACKET_DESC_COUNT);
2742 return 0;
2745 STATIC int
2746 bcm5700_freemem(struct net_device *dev)
2748 int i;
2749 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2750 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2752 for (i = 0; i < pUmDevice->mem_list_num; i++) {
2753 if (pUmDevice->mem_size_list[i] == 0) {
2754 kfree(pUmDevice->mem_list[i]);
2756 else {
2757 pci_free_consistent(pUmDevice->pdev,
2758 (size_t) pUmDevice->mem_size_list[i],
2759 pUmDevice->mem_list[i],
2760 pUmDevice->dma_list[i]);
2764 pDevice->pStatusBlkVirt = 0;
2765 pDevice->pStatsBlkVirt = 0;
2766 pUmDevice->mem_list_num = 0;
2768 #ifdef NICE_SUPPORT
2769 if (!pUmDevice->opened) {
2770 for (i = 0; i < MAX_MEM2; i++) {
2771 if (pUmDevice->mem_size_list2[i]) {
2772 bcm5700_freemem2(pUmDevice, i);
2776 #endif
2777 return 0;
2780 #ifdef NICE_SUPPORT
2781 /* Frees consistent memory allocated through ioctl */
2782 /* The memory to be freed is in mem_list2[index] */
2783 STATIC int
2784 bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index)
2786 #if (LINUX_VERSION_CODE >= 0x020400)
2787 void *ptr;
2788 struct page *pg, *last_pg;
2790 /* Probably won't work on some architectures */
2791 ptr = pUmDevice->mem_list2[index],
2792 pg = virt_to_page(ptr);
2793 last_pg = virt_to_page(ptr + pUmDevice->mem_size_list2[index] - 1);
2794 for (; ; pg++) {
2795 #if (LINUX_VERSION_CODE > 0x020500)
2796 ClearPageReserved(pg);
2797 #else
2798 mem_map_unreserve(pg);
2799 #endif
2800 if (pg == last_pg)
2801 break;
2803 pci_free_consistent(pUmDevice->pdev,
2804 (size_t) pUmDevice->mem_size_list2[index],
2805 pUmDevice->mem_list2[index],
2806 pUmDevice->dma_list2[index]);
2807 pUmDevice->mem_size_list2[index] = 0;
2808 #endif
2809 return 0;
2811 #endif
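/*
 * Return the accumulated CRC error count.  On 5700/5701 copper parts
 * the count is read from PHY register 0x14 (after setting bit 15 of
 * register 0x1e to enable the counter), rate-limited to one MDIO
 * access every 5 seconds; 0xffff readings are discarded as probable
 * MDIO corruption.  All other parts use dot3StatsFCSErrors from the
 * statistics block.
 */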
2813 uint64_t
2814 bcm5700_crc_count(PUM_DEVICE_BLOCK pUmDevice)
2816 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
2817 LM_UINT32 Value32;
2818 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2819 unsigned long flags;
2821 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5700 ||
2822 T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5701) &&
2823 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
2825 if (!pUmDevice->opened || !pDevice->InitDone)
2828 return 0;
2831 /* regulate MDIO access during run time */
2832 if (pUmDevice->crc_counter_expiry > 0)
2833 return pUmDevice->phy_crc_count;
2835 pUmDevice->crc_counter_expiry = (5 * HZ) /
2836 pUmDevice->timer_interval;
2838 BCM5700_PHY_LOCK(pUmDevice, flags);
2839 LM_ReadPhy(pDevice, 0x1e, &Value32);
2840 if ((Value32 & 0x8000) == 0)
2841 LM_WritePhy(pDevice, 0x1e, Value32 | 0x8000);
2842 LM_ReadPhy(pDevice, 0x14, &Value32);
2843 BCM5700_PHY_UNLOCK(pUmDevice, flags);
2844 /* Sometimes data on the MDIO bus can be corrupted */
2845 if (Value32 != 0xffff)
2846 pUmDevice->phy_crc_count += Value32;
2847 return pUmDevice->phy_crc_count;
2849 else if (pStats == 0) {
2850 return 0;
2852 else {
2853 return (MM_GETSTATS64(pStats->dot3StatsFCSErrors));
2857 uint64_t
2858 bcm5700_rx_err_count(UM_DEVICE_BLOCK *pUmDevice)
2860 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2861 T3_STATS_BLOCK *pStats = (T3_STATS_BLOCK *) pDevice->pStatsBlkVirt;
2863 if (pStats == 0)
2864 return 0;
2865 return (bcm5700_crc_count(pUmDevice) +
2866 MM_GETSTATS64(pStats->dot3StatsAlignmentErrors) +
2867 MM_GETSTATS64(pStats->etherStatsUndersizePkts) +
2868 MM_GETSTATS64(pStats->etherStatsFragments) +
2869 MM_GETSTATS64(pStats->dot3StatsFramesTooLong) +
2870 MM_GETSTATS64(pStats->etherStatsJabbers));
2873 STATIC struct net_device_stats *
2874 bcm5700_get_stats(struct net_device *dev)
2876 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2877 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2878 PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2879 struct net_device_stats *p_netstats = &pUmDevice->stats;
2881 if (pStats == 0)
2882 return p_netstats;
2884 /* Get stats from LM */
2885 p_netstats->rx_packets =
2886 MM_GETSTATS(pStats->ifHCInUcastPkts) +
2887 MM_GETSTATS(pStats->ifHCInMulticastPkts) +
2888 MM_GETSTATS(pStats->ifHCInBroadcastPkts);
2889 p_netstats->tx_packets =
2890 MM_GETSTATS(pStats->ifHCOutUcastPkts) +
2891 MM_GETSTATS(pStats->ifHCOutMulticastPkts) +
2892 MM_GETSTATS(pStats->ifHCOutBroadcastPkts);
2893 /* These counters seem to be inaccurate. Use byte count accumulation
2894 instead.
2895 p_netstats->rx_bytes = MM_GETSTATS(pStats->ifHCInOctets);
2896 p_netstats->tx_bytes = MM_GETSTATS(pStats->ifHCOutOctets);
2898 p_netstats->tx_errors =
2899 MM_GETSTATS(pStats->dot3StatsInternalMacTransmitErrors) +
2900 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors) +
2901 MM_GETSTATS(pStats->ifOutDiscards) +
2902 MM_GETSTATS(pStats->ifOutErrors);
2903 p_netstats->multicast = MM_GETSTATS(pStats->ifHCInMulticastPkts);
2904 p_netstats->collisions = MM_GETSTATS(pStats->etherStatsCollisions);
2905 p_netstats->rx_length_errors =
2906 MM_GETSTATS(pStats->dot3StatsFramesTooLong) +
2907 MM_GETSTATS(pStats->etherStatsUndersizePkts);
2908 p_netstats->rx_over_errors = MM_GETSTATS(pStats->nicNoMoreRxBDs);
2909 p_netstats->rx_frame_errors =
2910 MM_GETSTATS(pStats->dot3StatsAlignmentErrors);
2911 p_netstats->rx_crc_errors = (unsigned long)
2912 bcm5700_crc_count(pUmDevice);
2913 p_netstats->rx_errors = (unsigned long)
2914 bcm5700_rx_err_count(pUmDevice);
2916 p_netstats->tx_aborted_errors = MM_GETSTATS(pStats->ifOutDiscards);
2917 p_netstats->tx_carrier_errors =
2918 MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors);
2920 return p_netstats;
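/*
 * Suspend/resume hooks used by the power-management and self-test
 * paths.  Suspend masks interrupts, stops the queue and shuts the chip
 * down with LM_SUSPEND_RESET; resume either re-runs the full
 * bcm5700_reset() path (if the interface is open) or simply shuts the
 * chip down again with LM_SHUTDOWN_RESET.
 */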
2923 void
2924 b57_suspend_chip(UM_DEVICE_BLOCK *pUmDevice)
2926 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2928 if (pUmDevice->opened) {
2929 bcm5700_intr_off(pUmDevice);
2930 netif_carrier_off(pUmDevice->dev);
2931 netif_stop_queue(pUmDevice->dev);
2932 #ifdef BCM_TASKLET
2933 tasklet_kill(&pUmDevice->tasklet);
2934 #endif
2935 bcm5700_poll_wait(pUmDevice);
2937 pUmDevice->suspended = 1;
2938 LM_ShutdownChip(pDevice, LM_SUSPEND_RESET);
2941 void
2942 b57_resume_chip(UM_DEVICE_BLOCK *pUmDevice)
2944 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2946 if (pUmDevice->suspended) {
2947 pUmDevice->suspended = 0;
2948 if (pUmDevice->opened) {
2949 bcm5700_reset(pUmDevice->dev);
2951 else {
2952 LM_ShutdownChip(pDevice, LM_SHUTDOWN_RESET);
2957 /* Returns 0 on failure, 1 on success */
2959 b57_test_intr(UM_DEVICE_BLOCK *pUmDevice)
2961 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2962 int j;
2964 if (!pUmDevice->opened)
2965 return 0;
2966 pUmDevice->intr_test_result = 0;
2967 pUmDevice->intr_test = 1;
2969 REG_WR(pDevice, HostCoalesce.Mode,
2970 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2971 HOST_COALESCE_NOW);
2973 for (j = 0; j < 10; j++) {
2974 if (pUmDevice->intr_test_result){
2975 break;
2978 REG_WR(pDevice, HostCoalesce.Mode,
2979 pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2980 HOST_COALESCE_NOW);
2982 MM_Sleep(pDevice, 1);
2985 return pUmDevice->intr_test_result;
2989 #ifdef SIOCETHTOOL
2991 #ifdef ETHTOOL_GSTRINGS
2993 #define ETH_NUM_STATS 30
2994 #define RX_CRC_IDX 5
2995 #define RX_MAC_ERR_IDX 14
2997 struct {
2998 char string[ETH_GSTRING_LEN];
2999 } bcm5700_stats_str_arr[ETH_NUM_STATS] = {
3000 { "rx_unicast_packets" },
3001 { "rx_multicast_packets" },
3002 { "rx_broadcast_packets" },
3003 { "rx_bytes" },
3004 { "rx_fragments" },
3005 { "rx_crc_errors" }, /* this needs to be calculated */
3006 { "rx_align_errors" },
3007 { "rx_xon_frames" },
3008 { "rx_xoff_frames" },
3009 { "rx_long_frames" },
3010 { "rx_short_frames" },
3011 { "rx_jabber" },
3012 { "rx_discards" },
3013 { "rx_errors" },
3014 { "rx_mac_errors" }, /* this needs to be calculated */
3015 { "tx_unicast_packets" },
3016 { "tx_multicast_packets" },
3017 { "tx_broadcast_packets" },
3018 { "tx_bytes" },
3019 { "tx_deferred" },
3020 { "tx_single_collisions" },
3021 { "tx_multi_collisions" },
3022 { "tx_total_collisions" },
3023 { "tx_excess_collisions" },
3024 { "tx_late_collisions" },
3025 { "tx_xon_frames" },
3026 { "tx_xoff_frames" },
3027 { "tx_internal_mac_errors" },
3028 { "tx_carrier_errors" },
3029 { "tx_errors" },
3032 #define STATS_OFFSET(offset_name) ((OFFSETOF(T3_STATS_BLOCK, offset_name)) / sizeof(uint64_t))
3034 #ifdef __BIG_ENDIAN
3035 #define SWAP_DWORD_64(x) (x)
3036 #else
3037 #define SWAP_DWORD_64(x) ((x << 32) | (x >> 32))
3038 #endif
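/*
 * The statistics block is an array of 64-bit counters; the table below
 * gives each ethtool statistic's index into it.  Entries left at zero
 * mark values computed in software (rx_crc_errors and rx_mac_errors).
 * On little-endian hosts SWAP_DWORD_64() swaps the two 32-bit halves
 * of each counter before it is reported, as in
 *
 *     stats[i] = SWAP_DWORD_64(*(pStats + bcm5700_stats_offset_arr[i]));
 */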
3040 unsigned long bcm5700_stats_offset_arr[ETH_NUM_STATS] = {
3041 STATS_OFFSET(ifHCInUcastPkts),
3042 STATS_OFFSET(ifHCInMulticastPkts),
3043 STATS_OFFSET(ifHCInBroadcastPkts),
3044 STATS_OFFSET(ifHCInOctets),
3045 STATS_OFFSET(etherStatsFragments),
3047 STATS_OFFSET(dot3StatsAlignmentErrors),
3048 STATS_OFFSET(xonPauseFramesReceived),
3049 STATS_OFFSET(xoffPauseFramesReceived),
3050 STATS_OFFSET(dot3StatsFramesTooLong),
3051 STATS_OFFSET(etherStatsUndersizePkts),
3052 STATS_OFFSET(etherStatsJabbers),
3053 STATS_OFFSET(ifInDiscards),
3054 STATS_OFFSET(ifInErrors),
3056 STATS_OFFSET(ifHCOutUcastPkts),
3057 STATS_OFFSET(ifHCOutMulticastPkts),
3058 STATS_OFFSET(ifHCOutBroadcastPkts),
3059 STATS_OFFSET(ifHCOutOctets),
3060 STATS_OFFSET(dot3StatsDeferredTransmissions),
3061 STATS_OFFSET(dot3StatsSingleCollisionFrames),
3062 STATS_OFFSET(dot3StatsMultipleCollisionFrames),
3063 STATS_OFFSET(etherStatsCollisions),
3064 STATS_OFFSET(dot3StatsExcessiveCollisions),
3065 STATS_OFFSET(dot3StatsLateCollisions),
3066 STATS_OFFSET(outXonSent),
3067 STATS_OFFSET(outXoffSent),
3068 STATS_OFFSET(dot3StatsInternalMacTransmitErrors),
3069 STATS_OFFSET(dot3StatsCarrierSenseErrors),
3070 STATS_OFFSET(ifOutErrors),
3073 #endif /* ETHTOOL_GSTRINGS */
3075 #ifdef ETHTOOL_TEST
3076 #define ETH_NUM_TESTS 6
3077 struct {
3078 char string[ETH_GSTRING_LEN];
3079 } bcm5700_tests_str_arr[ETH_NUM_TESTS] = {
3080 { "register test (offline)" },
3081 { "memory test (offline)" },
3082 { "loopback test (offline)" },
3083 { "nvram test (online)" },
3084 { "interrupt test (online)" },
3085 { "link test (online)" },
3088 extern LM_STATUS b57_test_registers(UM_DEVICE_BLOCK *pUmDevice);
3089 extern LM_STATUS b57_test_memory(UM_DEVICE_BLOCK *pUmDevice);
3090 extern LM_STATUS b57_test_nvram(UM_DEVICE_BLOCK *pUmDevice);
3091 extern LM_STATUS b57_test_link(UM_DEVICE_BLOCK *pUmDevice);
3092 extern LM_STATUS b57_test_loopback(UM_DEVICE_BLOCK *pUmDevice, int looptype, int linespeed);
3093 #endif
3095 #ifdef ETHTOOL_GREGS
3096 #if (LINUX_VERSION_CODE >= 0x02040f)
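/*
 * Copy one register range into the ethtool register dump.  Ranges
 * flagged "reserved" are zero-filled instead of read; on 5705-and-newer
 * parts the 0x3400-0x3bff, 0x5400-0x57ff and 0x6400-0x67ff windows are
 * also returned as zero rather than read from the chip.
 */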
3097 static void
3098 bcm5700_get_reg_blk(UM_DEVICE_BLOCK *pUmDevice, u32 **buf, u32 start, u32 end,
3099 int reserved)
3101 u32 offset;
3102 LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
3104 if (reserved) {
3105 memset(*buf, 0, end - start);
3106 *buf = *buf + (end - start)/4;
3107 return;
3109 for (offset = start; offset < end; offset+=4, *buf = *buf + 1) {
3110 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
3111 if (((offset >= 0x3400) && (offset < 0x3c00)) ||
3112 ((offset >= 0x5400) && (offset < 0x5800)) ||
3113 ((offset >= 0x6400) && (offset < 0x6800))) {
3114 **buf = 0;
3115 continue;
3118 **buf = REG_RD_OFFSET(pDevice, offset);
3121 #endif
3122 #endif
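/*
 * Dispatcher for the legacy SIOCETHTOOL ioctl.  Each supported ethtool
 * command (driver info, link settings, WOL, EEPROM and register
 * access, pause parameters, offload toggles, statistics and self-test)
 * is handled as a separate case in the switch below.
 */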
3124 static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
3126 struct ethtool_cmd ethcmd;
3127 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
3128 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
3130 if (mm_copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
3131 return -EFAULT;
3133 switch (ethcmd.cmd) {
3134 #ifdef ETHTOOL_GDRVINFO
3135 case ETHTOOL_GDRVINFO: {
3136 struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
3138 strcpy(info.driver, bcm5700_driver);
3139 #ifdef INCLUDE_5701_AX_FIX
3140 if(pDevice->ChipRevId == T3_CHIP_ID_5701_A0) {
3141 extern int t3FwReleaseMajor;
3142 extern int t3FwReleaseMinor;
3143 extern int t3FwReleaseFix;
3145 sprintf(info.fw_version, "%i.%i.%i",
3146 t3FwReleaseMajor, t3FwReleaseMinor,
3147 t3FwReleaseFix);
3149 #endif
3150 strcpy(info.fw_version, pDevice->BootCodeVer);
3151 strcpy(info.version, bcm5700_version);
3152 #if (LINUX_VERSION_CODE <= 0x020422)
3153 strcpy(info.bus_info, pUmDevice->pdev->slot_name);
3154 #else
3155 strcpy(info.bus_info, pci_name(pUmDevice->pdev));
3156 #endif
3160 #ifdef ETHTOOL_GEEPROM
3161 BCM_EEDUMP_LEN(&info, pDevice->NvramSize);
3162 #endif
3163 #ifdef ETHTOOL_GREGS
3164 /* dump everything, including holes in the register space */
3165 info.regdump_len = 0x6c00;
3166 #endif
3167 #ifdef ETHTOOL_GSTATS
3168 info.n_stats = ETH_NUM_STATS;
3169 #endif
3170 #ifdef ETHTOOL_TEST
3171 info.testinfo_len = ETH_NUM_TESTS;
3172 #endif
3173 if (mm_copy_to_user(useraddr, &info, sizeof(info)))
3174 return -EFAULT;
3175 return 0;
3177 #endif
3178 case ETHTOOL_GSET: {
3179 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG)||
3180 (pDevice->PhyFlags & PHY_IS_FIBER)) {
3181 ethcmd.supported =
3182 (SUPPORTED_1000baseT_Full |
3183 SUPPORTED_Autoneg);
3184 ethcmd.supported |= SUPPORTED_FIBRE;
3185 ethcmd.port = PORT_FIBRE;
3186 } else {
3187 ethcmd.supported =
3188 (SUPPORTED_10baseT_Half |
3189 SUPPORTED_10baseT_Full |
3190 SUPPORTED_100baseT_Half |
3191 SUPPORTED_100baseT_Full |
3192 SUPPORTED_1000baseT_Half |
3193 SUPPORTED_1000baseT_Full |
3194 SUPPORTED_Autoneg);
3195 ethcmd.supported |= SUPPORTED_TP;
3196 ethcmd.port = PORT_TP;
3199 ethcmd.transceiver = XCVR_INTERNAL;
3200 ethcmd.phy_address = 0;
3202 if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
3203 ethcmd.speed = SPEED_1000;
3204 else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
3205 ethcmd.speed = SPEED_100;
3206 else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
3207 ethcmd.speed = SPEED_10;
3208 else
3209 ethcmd.speed = 0;
3211 if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
3212 ethcmd.duplex = DUPLEX_FULL;
3213 else
3214 ethcmd.duplex = DUPLEX_HALF;
3216 if (pDevice->DisableAutoNeg == FALSE) {
3217 ethcmd.autoneg = AUTONEG_ENABLE;
3218 ethcmd.advertising = ADVERTISED_Autoneg;
3219 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
3220 (pDevice->PhyFlags & PHY_IS_FIBER)) {
3221 ethcmd.advertising |=
3222 ADVERTISED_1000baseT_Full |
3223 ADVERTISED_FIBRE;
3225 else {
3226 ethcmd.advertising |=
3227 ADVERTISED_TP;
3228 if (pDevice->advertising &
3229 PHY_AN_AD_10BASET_HALF) {
3231 ethcmd.advertising |=
3232 ADVERTISED_10baseT_Half;
3234 if (pDevice->advertising &
3235 PHY_AN_AD_10BASET_FULL) {
3237 ethcmd.advertising |=
3238 ADVERTISED_10baseT_Full;
3240 if (pDevice->advertising &
3241 PHY_AN_AD_100BASETX_HALF) {
3243 ethcmd.advertising |=
3244 ADVERTISED_100baseT_Half;
3246 if (pDevice->advertising &
3247 PHY_AN_AD_100BASETX_FULL) {
3249 ethcmd.advertising |=
3250 ADVERTISED_100baseT_Full;
3252 if (pDevice->advertising1000 &
3253 BCM540X_AN_AD_1000BASET_HALF) {
3255 ethcmd.advertising |=
3256 ADVERTISED_1000baseT_Half;
3258 if (pDevice->advertising1000 &
3259 BCM540X_AN_AD_1000BASET_FULL) {
3261 ethcmd.advertising |=
3262 ADVERTISED_1000baseT_Full;
3266 else {
3267 ethcmd.autoneg = AUTONEG_DISABLE;
3268 ethcmd.advertising = 0;
3271 ethcmd.maxtxpkt = pDevice->TxMaxCoalescedFrames;
3272 ethcmd.maxrxpkt = pDevice->RxMaxCoalescedFrames;
3274 if(mm_copy_to_user(useraddr, &ethcmd, sizeof(ethcmd)))
3275 return -EFAULT;
3276 return 0;
3278 case ETHTOOL_SSET: {
3279 unsigned long flags;
3281 if(!capable(CAP_NET_ADMIN))
3282 return -EPERM;
3283 if (ethcmd.autoneg == AUTONEG_ENABLE) {
3284 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3285 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_UNKNOWN;
3286 pDevice->DisableAutoNeg = FALSE;
3288 else {
3289 if (ethcmd.speed == SPEED_1000 &&
3290 pDevice->PhyFlags & PHY_NO_GIGABIT)
3291 return -EINVAL;
3293 if (ethcmd.speed == SPEED_1000 &&
3294 (pDevice->TbiFlags & ENABLE_TBI_FLAG ||
3295 pDevice->PhyFlags & PHY_IS_FIBER ) ) {
3297 pDevice->RequestedLineSpeed =
3298 LM_LINE_SPEED_1000MBPS;
3300 pDevice->RequestedDuplexMode =
3301 LM_DUPLEX_MODE_FULL;
3303 else if (ethcmd.speed == SPEED_100 &&
3304 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3305 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3307 pDevice->RequestedLineSpeed =
3308 LM_LINE_SPEED_100MBPS;
3310 else if (ethcmd.speed == SPEED_10 &&
3311 !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3312 !(pDevice->PhyFlags & PHY_IS_FIBER)) {
3314 pDevice->RequestedLineSpeed =
3315 LM_LINE_SPEED_10MBPS;
3317 else {
3318 return -EINVAL;
3321 pDevice->DisableAutoNeg = TRUE;
3322 if (ethcmd.duplex == DUPLEX_FULL) {
3323 pDevice->RequestedDuplexMode =
3324 LM_DUPLEX_MODE_FULL;
3326 else {
3327 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3328 !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
3330 pDevice->RequestedDuplexMode =
3331 LM_DUPLEX_MODE_HALF;
3335 if (netif_running(dev)) {
3336 BCM5700_PHY_LOCK(pUmDevice, flags);
3337 LM_SetupPhy(pDevice);
3338 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3340 return 0;
3342 #ifdef ETHTOOL_GWOL
3343 #ifdef BCM_WOL
3344 case ETHTOOL_GWOL: {
3345 struct ethtool_wolinfo wol = {ETHTOOL_GWOL};
3347 if (((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3348 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3349 (pDevice->Flags & DISABLE_D3HOT_FLAG)) {
3350 wol.supported = 0;
3351 wol.wolopts = 0;
3353 else {
3354 wol.supported = WAKE_MAGIC;
3355 if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET)
3357 wol.wolopts = WAKE_MAGIC;
3359 else {
3360 wol.wolopts = 0;
3363 if (mm_copy_to_user(useraddr, &wol, sizeof(wol)))
3364 return -EFAULT;
3365 return 0;
3367 case ETHTOOL_SWOL: {
3368 struct ethtool_wolinfo wol;
3370 if(!capable(CAP_NET_ADMIN))
3371 return -EPERM;
3372 if (mm_copy_from_user(&wol, useraddr, sizeof(wol)))
3373 return -EFAULT;
3374 if ((((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3375 !(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3376 (pDevice->Flags & DISABLE_D3HOT_FLAG)) &&
3377 wol.wolopts) {
3378 return -EINVAL;
3381 if ((wol.wolopts & ~WAKE_MAGIC) != 0) {
3382 return -EINVAL;
3384 if (wol.wolopts & WAKE_MAGIC) {
3385 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
3386 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
3388 else {
3389 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
3390 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
3392 return 0;
3394 #endif
3395 #endif
3396 #ifdef ETHTOOL_GLINK
3397 case ETHTOOL_GLINK: {
3398 struct ethtool_value edata = {ETHTOOL_GLINK};
3400 /* ifup waits only 5 seconds for link up, but the */
3401 /* NIC may take more than 5 seconds to establish link */
3402 if ((pUmDevice->delayed_link_ind > 0) &&
3403 delay_link[pUmDevice->index])
3404 return -EOPNOTSUPP;
3406 if (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) {
3407 edata.data = 1;
3409 else {
3410 edata.data = 0;
3412 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3413 return -EFAULT;
3414 return 0;
3416 #endif
3417 #ifdef ETHTOOL_NWAY_RST
3418 case ETHTOOL_NWAY_RST: {
3419 LM_UINT32 phyctrl;
3420 unsigned long flags;
3422 if(!capable(CAP_NET_ADMIN))
3423 return -EPERM;
3424 if (pDevice->DisableAutoNeg) {
3425 return -EINVAL;
3427 if (!netif_running(dev))
3428 return -EAGAIN;
3429 BCM5700_PHY_LOCK(pUmDevice, flags);
3430 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
3431 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
3432 pDevice->DisableAutoNeg = TRUE;
3433 LM_SetupPhy(pDevice);
3435 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3436 pDevice->DisableAutoNeg = FALSE;
3437 LM_SetupPhy(pDevice);
3439 else {
3440 if ((T3_ASIC_REV(pDevice->ChipRevId) ==
3441 T3_ASIC_REV_5703) ||
3442 (T3_ASIC_REV(pDevice->ChipRevId) ==
3443 T3_ASIC_REV_5704) ||
3444 (T3_ASIC_REV(pDevice->ChipRevId) ==
3445 T3_ASIC_REV_5705))
3447 LM_ResetPhy(pDevice);
3448 LM_SetupPhy(pDevice);
3450 pDevice->PhyFlags &= ~PHY_FIBER_FALLBACK;
3451 LM_ReadPhy(pDevice, PHY_CTRL_REG, &phyctrl);
3452 LM_WritePhy(pDevice, PHY_CTRL_REG, phyctrl |
3453 PHY_CTRL_AUTO_NEG_ENABLE |
3454 PHY_CTRL_RESTART_AUTO_NEG);
3456 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3457 return 0;
3459 #endif
3460 #ifdef ETHTOOL_GEEPROM
3461 case ETHTOOL_GEEPROM: {
3462 struct ethtool_eeprom eeprom;
3463 LM_UINT32 *buf = 0;
3464 LM_UINT32 buf1[64/4];
3465 int i, j, offset, len;
3467 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3468 return -EFAULT;
3470 if (eeprom.offset >= pDevice->NvramSize)
3471 return -EFAULT;
3473 /* maximum data per call limited to 0x800 bytes; */
3474 /* to read more, call again with a different offset */
3475 if (eeprom.len > 0x800) {
3476 eeprom.len = 0x800;
3477 if (mm_copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
3478 return -EFAULT;
3481 if (eeprom.len > 64) {
3482 buf = kmalloc(eeprom.len, GFP_KERNEL);
3483 if (!buf)
3484 return -ENOMEM;
3486 else {
3487 buf = buf1;
3489 useraddr += offsetof(struct ethtool_eeprom, data);
3491 offset = eeprom.offset;
3492 len = eeprom.len;
3493 if (offset & 3) {
3494 offset &= 0xfffffffc;
3495 len += (offset & 3);
3497 len = (len + 3) & 0xfffffffc;
3498 for (i = 0, j = 0; j < len; i++, j += 4) {
3499 if (LM_NvramRead(pDevice, offset + j, buf + i) !=
3500 LM_STATUS_SUCCESS) {
3501 break;
3504 if (j >= len) {
3505 buf += (eeprom.offset & 3);
3506 i = mm_copy_to_user(useraddr, buf, eeprom.len);
3508 if (eeprom.len > 64) {
3509 kfree(buf);
3511 if ((j < len) || i)
3512 return -EFAULT;
3513 return 0;
3515 case ETHTOOL_SEEPROM: {
3516 struct ethtool_eeprom eeprom;
3517 LM_UINT32 buf[64/4];
3518 int i, offset, len;
3520 if(!capable(CAP_NET_ADMIN))
3521 return -EPERM;
3522 if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3523 return -EFAULT;
3525 if ((eeprom.offset & 3) || (eeprom.len & 3) ||
3526 (eeprom.offset >= pDevice->NvramSize)) {
3527 return -EFAULT;
3530 if ((eeprom.offset + eeprom.len) >= pDevice->NvramSize) {
3531 eeprom.len = pDevice->NvramSize - eeprom.offset;
3534 useraddr += offsetof(struct ethtool_eeprom, data);
3536 len = eeprom.len;
3537 offset = eeprom.offset;
3538 for (; len > 0; ) {
3539 if (len < 64)
3540 i = len;
3541 else
3542 i = 64;
3543 if (mm_copy_from_user(&buf, useraddr, i))
3544 return -EFAULT;
3546 bcm5700_intr_off(pUmDevice);
3547 /* Prevent race condition on Grc.Mode register */
3548 bcm5700_poll_wait(pUmDevice);
3550 if (LM_NvramWriteBlock(pDevice, offset, buf, i/4) !=
3551 LM_STATUS_SUCCESS) {
3552 bcm5700_intr_on(pUmDevice);
3553 return -EFAULT;
3555 bcm5700_intr_on(pUmDevice);
3556 len -= i;
3557 offset += i;
3558 useraddr += i;
3560 return 0;
3562 #endif
3563 #ifdef ETHTOOL_GREGS
3564 #if (LINUX_VERSION_CODE >= 0x02040f)
3565 case ETHTOOL_GREGS: {
3566 struct ethtool_regs eregs;
3567 LM_UINT32 *buf, *buf1;
3568 unsigned int i;
3570 if(!capable(CAP_NET_ADMIN))
3571 return -EPERM;
3572 if (pDevice->Flags & UNDI_FIX_FLAG)
3573 return -EOPNOTSUPP;
3574 if (mm_copy_from_user(&eregs, useraddr, sizeof(eregs)))
3575 return -EFAULT;
3576 if (eregs.len > 0x6c00)
3577 eregs.len = 0x6c00;
3578 eregs.version = 0x0;
3579 if (mm_copy_to_user(useraddr, &eregs, sizeof(eregs)))
3580 return -EFAULT;
3581 buf = buf1 = kmalloc(eregs.len, GFP_KERNEL);
3582 if (!buf)
3583 return -ENOMEM;
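/*
 * The calls below build a contiguous 0x6c00-byte register image: each
 * pair copies one valid register window and then zero-fills the
 * reserved hole that follows it, matching the "dump everything,
 * including holes" note in ETHTOOL_GDRVINFO above.
 */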
3584 bcm5700_get_reg_blk(pUmDevice, &buf, 0, 0xb0, 0);
3585 bcm5700_get_reg_blk(pUmDevice, &buf, 0xb0, 0x200, 1);
3586 bcm5700_get_reg_blk(pUmDevice, &buf, 0x200, 0x8f0, 0);
3587 bcm5700_get_reg_blk(pUmDevice, &buf, 0x8f0, 0xc00, 1);
3588 bcm5700_get_reg_blk(pUmDevice, &buf, 0xc00, 0xce0, 0);
3589 bcm5700_get_reg_blk(pUmDevice, &buf, 0xce0, 0x1000, 1);
3590 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1000, 0x1004, 0);
3591 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1004, 0x1400, 1);
3592 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1400, 0x1480, 0);
3593 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1480, 0x1800, 1);
3594 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1800, 0x1848, 0);
3595 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1848, 0x1c00, 1);
3596 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c00, 0x1c04, 0);
3597 bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c04, 0x2000, 1);
3598 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2000, 0x225c, 0);
3599 bcm5700_get_reg_blk(pUmDevice, &buf, 0x225c, 0x2400, 1);
3600 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2400, 0x24c4, 0);
3601 bcm5700_get_reg_blk(pUmDevice, &buf, 0x24c4, 0x2800, 1);
3602 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2800, 0x2804, 0);
3603 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2804, 0x2c00, 1);
3604 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c00, 0x2c20, 0);
3605 bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c20, 0x3000, 1);
3606 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3000, 0x3014, 0);
3607 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3014, 0x3400, 1);
3608 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3400, 0x3408, 0);
3609 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3408, 0x3800, 1);
3610 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3800, 0x3808, 0);
3611 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3808, 0x3c00, 1);
3612 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3c00, 0x3d00, 0);
3613 bcm5700_get_reg_blk(pUmDevice, &buf, 0x3d00, 0x4000, 1);
3614 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4000, 0x4010, 0);
3615 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4010, 0x4400, 1);
3616 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4400, 0x4458, 0);
3617 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4458, 0x4800, 1);
3618 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4800, 0x4808, 0);
3619 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4808, 0x4c00, 1);
3620 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c00, 0x4c08, 0);
3621 bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c08, 0x5000, 1);
3622 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5000, 0x5050, 0);
3623 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5050, 0x5400, 1);
3624 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5400, 0x5450, 0);
3625 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5450, 0x5800, 1);
3626 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5800, 0x5a10, 0);
3627 bcm5700_get_reg_blk(pUmDevice, &buf, 0x5a10, 0x6000, 1);
3628 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6000, 0x600c, 0);
3629 bcm5700_get_reg_blk(pUmDevice, &buf, 0x600c, 0x6400, 1);
3630 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6400, 0x6404, 0);
3631 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6404, 0x6800, 1);
3632 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6800, 0x6848, 0);
3633 bcm5700_get_reg_blk(pUmDevice, &buf, 0x6848, 0x6c00, 1);
3635 i = mm_copy_to_user(useraddr + sizeof(eregs), buf1, eregs.len);
3636 kfree(buf1);
3637 if (i)
3638 return -EFAULT;
3639 return 0;
3641 #endif
3642 #endif
3643 #ifdef ETHTOOL_GPAUSEPARAM
3644 case ETHTOOL_GPAUSEPARAM: {
3645 struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
3647 if (!pDevice->DisableAutoNeg) {
3648 epause.autoneg = (pDevice->FlowControlCap &
3649 LM_FLOW_CONTROL_AUTO_PAUSE) != 0;
3651 else {
3652 epause.autoneg = 0;
3654 epause.rx_pause =
3655 (pDevice->FlowControl &
3656 LM_FLOW_CONTROL_RECEIVE_PAUSE) != 0;
3657 epause.tx_pause =
3658 (pDevice->FlowControl &
3659 LM_FLOW_CONTROL_TRANSMIT_PAUSE) != 0;
3660 if (mm_copy_to_user(useraddr, &epause, sizeof(epause)))
3661 return -EFAULT;
3663 return 0;
3665 case ETHTOOL_SPAUSEPARAM: {
3666 struct ethtool_pauseparam epause;
3667 unsigned long flags;
3669 if(!capable(CAP_NET_ADMIN))
3670 return -EPERM;
3671 if (mm_copy_from_user(&epause, useraddr, sizeof(epause)))
3672 return -EFAULT;
3673 pDevice->FlowControlCap = 0;
3674 if (epause.autoneg && !pDevice->DisableAutoNeg) {
3675 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
3677 if (epause.rx_pause) {
3678 pDevice->FlowControlCap |=
3679 LM_FLOW_CONTROL_RECEIVE_PAUSE;
3681 if (epause.tx_pause) {
3682 pDevice->FlowControlCap |=
3683 LM_FLOW_CONTROL_TRANSMIT_PAUSE;
3685 if (netif_running(dev)) {
3686 BCM5700_PHY_LOCK(pUmDevice, flags);
3687 LM_SetupPhy(pDevice);
3688 BCM5700_PHY_UNLOCK(pUmDevice, flags);
3691 return 0;
3693 #endif
3694 #ifdef ETHTOOL_GRXCSUM
3695 case ETHTOOL_GRXCSUM: {
3696 struct ethtool_value edata = { ETHTOOL_GRXCSUM };
3698 edata.data =
3699 (pDevice->TaskToOffload &
3700 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM) != 0;
3701 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3702 return -EFAULT;
3704 return 0;
3706 case ETHTOOL_SRXCSUM: {
3707 struct ethtool_value edata;
3709 if(!capable(CAP_NET_ADMIN))
3710 return -EPERM;
3711 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3712 return -EFAULT;
3713 if (edata.data) {
3714 if (!(pDevice->TaskOffloadCap &
3715 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3717 return -EINVAL;
3719 pDevice->TaskToOffload |=
3720 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3721 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
3723 else {
3724 pDevice->TaskToOffload &=
3725 ~(LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3726 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM);
3728 return 0;
3730 case ETHTOOL_GTXCSUM: {
3731 struct ethtool_value edata = { ETHTOOL_GTXCSUM };
3733 edata.data =
3734 (dev->features & get_csum_flag( pDevice->ChipRevId)) != 0;
3735 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3736 return -EFAULT;
3738 return 0;
3740 case ETHTOOL_STXCSUM: {
3741 struct ethtool_value edata;
3743 if(!capable(CAP_NET_ADMIN))
3744 return -EPERM;
3745 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3746 return -EFAULT;
3747 if (edata.data) {
3748 if (!(pDevice->TaskOffloadCap &
3749 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3751 return -EINVAL;
3753 dev->features |= get_csum_flag( pDevice->ChipRevId);
3754 pDevice->TaskToOffload |=
3755 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3756 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
3758 else {
3759 dev->features &= ~get_csum_flag( pDevice->ChipRevId);
3760 pDevice->TaskToOffload &=
3761 ~(LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3762 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM);
3764 return 0;
3766 case ETHTOOL_GSG: {
3767 struct ethtool_value edata = { ETHTOOL_GSG };
3769 edata.data =
3770 (dev->features & NETIF_F_SG) != 0;
3771 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3772 return -EFAULT;
3773 return 0;
3775 case ETHTOOL_SSG: {
3776 struct ethtool_value edata;
3778 if(!capable(CAP_NET_ADMIN))
3779 return -EPERM;
3780 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3781 return -EFAULT;
3782 if (edata.data) {
3783 dev->features |= NETIF_F_SG;
3785 else {
3786 dev->features &= ~NETIF_F_SG;
3788 return 0;
3790 #endif
3791 #ifdef ETHTOOL_GRINGPARAM
3792 case ETHTOOL_GRINGPARAM: {
3793 struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
3795 ering.rx_max_pending = T3_STD_RCV_RCB_ENTRY_COUNT - 1;
3796 ering.rx_pending = pDevice->RxStdDescCnt;
3797 ering.rx_mini_max_pending = 0;
3798 ering.rx_mini_pending = 0;
3799 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
3800 ering.rx_jumbo_max_pending = T3_JUMBO_RCV_RCB_ENTRY_COUNT - 1;
3801 ering.rx_jumbo_pending = pDevice->RxJumboDescCnt;
3802 #else
3803 ering.rx_jumbo_max_pending = 0;
3804 ering.rx_jumbo_pending = 0;
3805 #endif
3806 ering.tx_max_pending = MAX_TX_PACKET_DESC_COUNT - 1;
3807 ering.tx_pending = pDevice->TxPacketDescCnt;
3808 if (mm_copy_to_user(useraddr, &ering, sizeof(ering)))
3809 return -EFAULT;
3810 return 0;
3812 #endif
3813 #ifdef ETHTOOL_PHYS_ID
3814 case ETHTOOL_PHYS_ID: {
3815 struct ethtool_value edata;
3817 if(!capable(CAP_NET_ADMIN))
3818 return -EPERM;
3819 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3820 return -EFAULT;
3821 if (LM_BlinkLED(pDevice, edata.data) == LM_STATUS_SUCCESS)
3822 return 0;
3823 return -EINTR;
3825 #endif
3826 #ifdef ETHTOOL_GSTRINGS
3827 case ETHTOOL_GSTRINGS: {
3828 struct ethtool_gstrings egstr = { ETHTOOL_GSTRINGS };
3830 if (mm_copy_from_user(&egstr, useraddr, sizeof(egstr)))
3831 return -EFAULT;
3832 switch(egstr.string_set) {
3833 #ifdef ETHTOOL_GSTATS
3834 case ETH_SS_STATS:
3835 egstr.len = ETH_NUM_STATS;
3836 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3837 return -EFAULT;
3838 if (mm_copy_to_user(useraddr + sizeof(egstr),
3839 bcm5700_stats_str_arr,
3840 sizeof(bcm5700_stats_str_arr)))
3841 return -EFAULT;
3842 return 0;
3843 #endif
3844 #ifdef ETHTOOL_TEST
3845 case ETH_SS_TEST:
3846 egstr.len = ETH_NUM_TESTS;
3847 if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3848 return -EFAULT;
3849 if (mm_copy_to_user(useraddr + sizeof(egstr),
3850 bcm5700_tests_str_arr,
3851 sizeof(bcm5700_tests_str_arr)))
3852 return -EFAULT;
3853 return 0;
3854 #endif
3855 default:
3856 return -EOPNOTSUPP;
3859 #endif
3860 #ifdef ETHTOOL_GSTATS
3861 case ETHTOOL_GSTATS: {
3862 struct ethtool_stats estats = { ETHTOOL_GSTATS };
3863 uint64_t stats[ETH_NUM_STATS];
3864 int i;
3865 uint64_t *pStats =
3866 (uint64_t *) pDevice->pStatsBlkVirt;
3868 estats.n_stats = ETH_NUM_STATS;
3869 if (pStats == 0) {
3870 memset(stats, 0, sizeof(stats));
3872 else {
3874 for (i = 0; i < ETH_NUM_STATS; i++) {
3875 if (bcm5700_stats_offset_arr[i] != 0) {
3876 stats[i] = SWAP_DWORD_64(*(pStats +
3877 bcm5700_stats_offset_arr[i]));
3879 else if (i == RX_CRC_IDX) {
3880 stats[i] =
3881 bcm5700_crc_count(pUmDevice);
3883 else if (i == RX_MAC_ERR_IDX) {
3884 stats[i] =
3885 bcm5700_rx_err_count(pUmDevice);
3889 if (mm_copy_to_user(useraddr, &estats, sizeof(estats))) {
3890 return -EFAULT;
3892 if (mm_copy_to_user(useraddr + sizeof(estats), &stats,
3893 sizeof(stats))) {
3894 return -EFAULT;
3896 return 0;
3898 #endif
3899 #ifdef ETHTOOL_TEST
3900 case ETHTOOL_TEST: {
3901 struct ethtool_test etest;
3902 uint64_t tests[ETH_NUM_TESTS] = {0, 0, 0, 0, 0, 0};
3903 LM_POWER_STATE old_power_level;
3905 printk( KERN_ALERT "Performing ethtool test.\n"
3906 "This test will take a few seconds to complete.\n" );
3908 if (mm_copy_from_user(&etest, useraddr, sizeof(etest)))
3909 return -EFAULT;
3911 etest.len = ETH_NUM_TESTS;
3912 old_power_level = pDevice->PowerLevel;
3913 if (old_power_level != LM_POWER_STATE_D0) {
3914 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
3915 LM_SwitchClocks(pDevice);
3917 MM_Sleep(pDevice, 1000);
3918 if (etest.flags & ETH_TEST_FL_OFFLINE) {
3919 b57_suspend_chip(pUmDevice);
3920 MM_Sleep(pDevice, 1000);
3921 LM_HaltCpu(pDevice,T3_RX_CPU_ID | T3_TX_CPU_ID);
3922 MM_Sleep(pDevice, 1000);
3923 if (b57_test_registers(pUmDevice) == 0) {
3924 etest.flags |= ETH_TEST_FL_FAILED;
3925 tests[0] = 1;
3927 MM_Sleep(pDevice, 1000);
3928 if (b57_test_memory(pUmDevice) == 0) {
3929 etest.flags |= ETH_TEST_FL_FAILED;
3930 tests[1] = 1;
3932 MM_Sleep(pDevice, 1000);
3933 if (b57_test_loopback(pUmDevice, NICE_LOOPBACK_TESTTYPE_MAC, 0) == 0) {
3934 etest.flags |= ETH_TEST_FL_FAILED;
3935 tests[2] = 1;
3937 MM_Sleep(pDevice, 1000);
3938 b57_resume_chip(pUmDevice);
3939 /* wait for link to come up for the link test */
3940 MM_Sleep(pDevice, 4000);
3941 if ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
3942 !(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
3944 /* wait a little longer for linkup on copper */
3945 MM_Sleep(pDevice, 3000);
3948 if (b57_test_nvram(pUmDevice) == 0) {
3949 etest.flags |= ETH_TEST_FL_FAILED;
3950 tests[3] = 1;
3952 MM_Sleep(pDevice, 1000);
3953 if (b57_test_intr(pUmDevice) == 0) {
3954 etest.flags |= ETH_TEST_FL_FAILED;
3955 tests[4] = 1;
3957 MM_Sleep(pDevice, 1000);
3958 if (b57_test_link(pUmDevice) == 0) {
3959 etest.flags |= ETH_TEST_FL_FAILED;
3960 tests[5] = 1;
3962 MM_Sleep(pDevice, 1000);
3963 if (old_power_level != LM_POWER_STATE_D0) {
3964 LM_SetPowerState(pDevice, old_power_level);
3966 if (mm_copy_to_user(useraddr, &etest, sizeof(etest))) {
3967 return -EFAULT;
3969 if (mm_copy_to_user(useraddr + sizeof(etest), tests,
3970 sizeof(tests))) {
3971 return -EFAULT;
3973 return 0;
3975 #endif
3976 #ifdef ETHTOOL_GTSO
3977 case ETHTOOL_GTSO: {
3978 struct ethtool_value edata = { ETHTOOL_GTSO };
3980 #ifdef BCM_TSO
3981 edata.data =
3982 (dev->features & NETIF_F_TSO) != 0;
3983 #else
3984 edata.data = 0;
3985 #endif
3986 if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3987 return -EFAULT;
3988 return 0;
3990 #endif
3991 #ifdef ETHTOOL_STSO
3992 case ETHTOOL_STSO: {
3993 #ifdef BCM_TSO
3994 struct ethtool_value edata;
3996 if (!capable(CAP_NET_ADMIN))
3997 return -EPERM;
3999 if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
4000 return -EFAULT;
4002 if (!(pDevice->TaskToOffload &
4003 LM_TASK_OFFLOAD_TCP_SEGMENTATION)) {
4004 return -EINVAL;
4007 dev->features &= ~NETIF_F_TSO;
4009 if (edata.data) {
4010 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4011 (dev->mtu > 1500)) {
4012 printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
4013 return -EINVAL;
4014 } else {
4015 dev->features |= NETIF_F_TSO;
4018 return 0;
4019 #else
4020 return -EINVAL;
4021 #endif
4023 #endif
4026 return -EOPNOTSUPP;
4028 #endif /* #ifdef SIOCETHTOOL */
4030 #if (LINUX_VERSION_CODE >= 0x20400) && (LINUX_VERSION_CODE < 0x20600)
4031 #include <linux/iobuf.h>
4032 #endif
4035 /* Provide ioctl() calls to examine the MII xcvr state. */
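/*
 * Besides the standard MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG),
 * this handler services Broadcom-private requests: raw PHY and ROBO
 * switch register access, message-level and QoS controls, a log dump,
 * the NICE diagnostic interface and SIOCETHTOOL (forwarded to
 * netdev_ethtool_ioctl() above).
 */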
4036 STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4038 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4039 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4040 u16 *data = (u16 *)&rq->ifr_data;
4041 u32 value;
4042 unsigned long flags;
4044 switch(cmd) {
4045 #ifdef SIOCGMIIPHY
4046 case SIOCGMIIPHY: /* Get the address of the PHY in use. */
4048 data[0] = pDevice->PhyAddr;
4049 return 0;
4050 #endif
4052 #ifdef SIOCGMIIREG
4053 case SIOCGMIIREG: /* Read the specified MII register. */
4055 uint32 savephyaddr = 0;
4057 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4058 return -EOPNOTSUPP;
4060 /* ifup only waits for 5 seconds for link up */
4061 /* NIC may take more than 5 seconds to establish link */
4062 if ((pUmDevice->delayed_link_ind > 0) &&
4063 delay_link[pUmDevice->index]) {
4064 return -EAGAIN;
4067 BCM5700_PHY_LOCK(pUmDevice, flags);
4068 if (data[0] != 0xffff) {
4069 savephyaddr = pDevice->PhyAddr;
4070 pDevice->PhyAddr = data[0];
4072 LM_ReadPhy(pDevice, data[1] & 0x1f, (LM_UINT32 *)&value);
4073 if (data[0] != 0xffff)
4074 pDevice->PhyAddr = savephyaddr;
4075 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4076 data[3] = value & 0xffff;
4077 return 0;
4079 #endif
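/*
 * The CPHYRD/CPHYWR ioctls exchange an int args[2] array with user space:
 * args[0] holds the PHY register offset in its low 16 bits (and, for the
 * *2 variants, the PHY address in its high 16 bits); args[1] carries the
 * 16-bit register value read back or to be written.
 */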
4081 case SIOCGETCPHYRD: /* Read the specified MII register. */
4082 case SIOCGETCPHYRD2:
4084 int args[2];
4085 uint32 savephyaddr = 0;
4087 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4088 return -EOPNOTSUPP;
4090 /* ifup only waits for 5 seconds for link up */
4091 /* NIC may take more than 5 seconds to establish link */
4092 if ((pUmDevice->delayed_link_ind > 0) &&
4093 delay_link[pUmDevice->index]) {
4094 return -EAGAIN;
4097 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4098 return -EFAULT;
4100 BCM5700_PHY_LOCK(pUmDevice, flags);
4101 if (cmd == SIOCGETCPHYRD2) {
4102 savephyaddr = pDevice->PhyAddr;
4103 pDevice->PhyAddr = (args[0] >> 16) & 0xffff;
4105 LM_ReadPhy(pDevice, args[0] & 0xffff, (LM_UINT32 *)&value);
4106 if (cmd == SIOCGETCPHYRD2)
4107 pDevice->PhyAddr = savephyaddr;
4108 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4110 args[1] = value & 0xffff;
4111 if (mm_copy_to_user(rq->ifr_data, &args, sizeof(args)))
4112 return -EFAULT;
4114 return 0;
4117 #ifdef SIOCSMIIREG
4118 case SIOCSMIIREG: /* Write the specified MII register */
4120 uint32 savephyaddr = 0;
4122 if (!capable(CAP_NET_ADMIN))
4123 return -EPERM;
4125 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4126 return -EOPNOTSUPP;
4128 BCM5700_PHY_LOCK(pUmDevice, flags);
4129 if (data[0] != 0xffff) {
4130 savephyaddr = pDevice->PhyAddr;
4131 pDevice->PhyAddr = data[0];
4133 LM_WritePhy(pDevice, data[1] & 0x1f, data[2]);
4134 if (data[0] != 0xffff)
4135 pDevice->PhyAddr = savephyaddr;
4136 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4137 data[3] = 0;
4138 return 0;
4140 #endif
4142 case SIOCSETCPHYWR: /* Write the specified MII register */
4143 case SIOCSETCPHYWR2:
4145 int args[2];
4146 uint32 savephyaddr = 0;
4148 if (!capable(CAP_NET_ADMIN))
4149 return -EPERM;
4151 if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4152 return -EOPNOTSUPP;
4154 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4155 return -EFAULT;
4157 BCM5700_PHY_LOCK(pUmDevice, flags);
4158 if (cmd == SIOCSETCPHYWR2) {
4159 savephyaddr = pDevice->PhyAddr;
4160 pDevice->PhyAddr = (args[0] >> 16) & 0xffff;
4162 LM_WritePhy(pDevice, args[0] & 0xffff, args[1]);
4163 if (cmd == SIOCSETCPHYWR2)
4164 pDevice->PhyAddr = savephyaddr;
4165 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4166 return 0;
4169 case SIOCGETCROBORD: /* Read the specified ROBO register. */
4171 int args[2];
4172 robo_info_t *robo = (robo_info_t *)pUmDevice->robo;
4174 if (((pDevice->Flags & ROBO_SWITCH_FLAG) == 0) || (robo == NULL))
4175 return -ENXIO;
4177 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4178 return -EFAULT;
4180 if (robo->ops->read_reg(robo, (args[0] >> 16) & 0xffff, args[0] & 0xffff, &value, 2))
4181 return -EIO;
4183 args[1] = value & 0xffff;
4184 if (mm_copy_to_user(rq->ifr_data, &args, sizeof(args)))
4185 return -EFAULT;
4187 return 0;
4190 case SIOCSETCROBOWR: /* Write the specified ROBO register. */
4192 int args[2];
4193 robo_info_t *robo = (robo_info_t *)pUmDevice->robo;
4195 if (!capable(CAP_NET_ADMIN))
4196 return -EPERM;
4198 if (((pDevice->Flags & ROBO_SWITCH_FLAG) == 0) || (robo == NULL))
4199 return -ENXIO;
4201 if (mm_copy_from_user(&args, rq->ifr_data, sizeof(args)))
4202 return -EFAULT;
4204 if (robo->ops->write_reg(robo, (args[0] >> 16) & 0xffff, args[0] & 0xffff,
4205 &args[1], 2))
4206 return -EIO;
4208 return 0;
4211 case SIOCSETCSETMSGLEVEL:
4212 if (mm_copy_from_user(&value, rq->ifr_data, sizeof(value)))
4213 return -EFAULT;
4215 b57_msg_level = value;
4216 printf("%s: msglevel set to 0x%x\n", __FUNCTION__, b57_msg_level);
4217 return 0;
4219 case SIOCSETCQOS: /* Set the qos flag */
4220 if (mm_copy_from_user(&value, rq->ifr_data, sizeof(value)))
4221 return -EFAULT;
4223 pUmDevice->qos = value;
4224 B57_INFO(("Qos flag now: %d\n", pUmDevice->qos));
4225 return 0;
4227 case SIOCGETCDUMP:
4229 char *buf;
4231 if ((buf = MALLOC(SB_OSH, 4096)) == NULL) {
4232 B57_ERR(("%s: out of memory, malloced %d bytes\n", __FUNCTION__,
4233 MALLOCED(SB_OSH)));
4234 return (-ENOMEM);
4237 if (b57_msg_level & 0x10000)
4238 bcmdumplog(buf, 4096);
4239 value = mm_copy_to_user(rq->ifr_data, buf, 4096);
4241 MFREE(SB_OSH, buf, 4096);
4243 if (value)
4244 return -EFAULT;
4245 else
4246 return 0;
4249 #ifdef NICE_SUPPORT
4250 case SIOCNICE:
4252 struct nice_req* nrq;
4254 if (!capable(CAP_NET_ADMIN))
4255 return -EPERM;
4257 nrq = (struct nice_req*)&rq->ifr_ifru;
4258 if( nrq->cmd == NICE_CMD_QUERY_SUPPORT ) {
4259 nrq->nrq_magic = NICE_DEVICE_MAGIC;
4260 nrq->nrq_support_rx = 1;
4261 nrq->nrq_support_vlan = 1;
4262 nrq->nrq_support_get_speed = 1;
4263 #ifdef BCM_NAPI_RXPOLL
4264 nrq->nrq_support_rx_napi = 1;
4265 #endif
4266 return 0;
4268 #ifdef BCM_NAPI_RXPOLL
4269 else if( nrq->cmd == NICE_CMD_SET_RX_NAPI )
4270 #else
4271 else if( nrq->cmd == NICE_CMD_SET_RX )
4272 #endif
4274 pUmDevice->nice_rx = nrq->nrq_rx;
4275 pUmDevice->nice_ctx = nrq->nrq_ctx;
4276 bcm5700_set_vlan_mode(pUmDevice);
4277 return 0;
4279 #ifdef BCM_NAPI_RXPOLL
4280 else if( nrq->cmd == NICE_CMD_GET_RX_NAPI )
4281 #else
4282 else if( nrq->cmd == NICE_CMD_GET_RX )
4283 #endif
4285 nrq->nrq_rx = pUmDevice->nice_rx;
4286 nrq->nrq_ctx = pUmDevice->nice_ctx;
4287 return 0;
4289 else if( nrq->cmd == NICE_CMD_GET_SPEED ) {
4290 if(pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE){
4291 nrq->nrq_speed = 0;
4293 else if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS) {
4294 nrq->nrq_speed = SPEED_1000;
4295 } else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS) {
4296 nrq->nrq_speed = SPEED_100;
4297 } else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS) {
4298 nrq->nrq_speed = SPEED_10;
4299 } else {
4300 nrq->nrq_speed = 0;
4302 return 0;
4304 else {
4305 if (!pUmDevice->opened)
4306 return -EINVAL;
4308 switch (nrq->cmd) {
4309 case NICE_CMD_BLINK_LED:
4310 if (LM_BlinkLED(pDevice, nrq->nrq_blink_time) ==
4311 LM_STATUS_SUCCESS) {
4312 return 0;
4314 return -EINTR;
4316 case NICE_CMD_DIAG_SUSPEND:
4317 b57_suspend_chip(pUmDevice);
4318 return 0;
4320 case NICE_CMD_DIAG_RESUME:
4321 b57_resume_chip(pUmDevice);
4322 return 0;
4324 case NICE_CMD_REG_READ:
4325 if (nrq->nrq_offset >= 0x10000) {
4326 nrq->nrq_data = LM_RegRdInd(pDevice,
4327 nrq->nrq_offset);
4329 else {
4330 nrq->nrq_data = LM_RegRd(pDevice,
4331 nrq->nrq_offset);
4333 return 0;
4335 case NICE_CMD_REG_WRITE:
4336 if (nrq->nrq_offset >= 0x10000) {
4337 LM_RegWrInd(pDevice, nrq->nrq_offset,
4338 nrq->nrq_data);
4340 else {
4341 LM_RegWr(pDevice, nrq->nrq_offset,
4342 nrq->nrq_data, FALSE);
4344 return 0;
4346 case NICE_CMD_REG_READ_DIRECT:
4347 case NICE_CMD_REG_WRITE_DIRECT:
4348 if ((nrq->nrq_offset >= 0x10000) ||
4349 (pDevice->Flags & UNDI_FIX_FLAG)) {
4350 return -EINVAL;
4353 if (nrq->cmd == NICE_CMD_REG_READ_DIRECT) {
4354 nrq->nrq_data = REG_RD_OFFSET(pDevice,
4355 nrq->nrq_offset);
4357 else {
4358 REG_WR_OFFSET(pDevice, nrq->nrq_offset,
4359 nrq->nrq_data);
4361 return 0;
4363 case NICE_CMD_MEM_READ:
4364 nrq->nrq_data = LM_MemRdInd(pDevice,
4365 nrq->nrq_offset);
4366 return 0;
4368 case NICE_CMD_MEM_WRITE:
4369 LM_MemWrInd(pDevice, nrq->nrq_offset,
4370 nrq->nrq_data);
4371 return 0;
4373 case NICE_CMD_CFG_READ32:
4374 pci_read_config_dword(pUmDevice->pdev,
4375 nrq->nrq_offset, (u32 *)&nrq->nrq_data);
4376 return 0;
4378 case NICE_CMD_CFG_READ16:
4379 pci_read_config_word(pUmDevice->pdev,
4380 nrq->nrq_offset, (u16 *)&nrq->nrq_data);
4381 return 0;
4383 case NICE_CMD_CFG_READ8:
4384 pci_read_config_byte(pUmDevice->pdev,
4385 nrq->nrq_offset, (u8 *)&nrq->nrq_data);
4386 return 0;
4388 case NICE_CMD_CFG_WRITE32:
4389 pci_write_config_dword(pUmDevice->pdev,
4390 nrq->nrq_offset, (u32)nrq->nrq_data);
4391 return 0;
4393 case NICE_CMD_CFG_WRITE16:
4394 pci_write_config_word(pUmDevice->pdev,
4395 nrq->nrq_offset, (u16)nrq->nrq_data);
4396 return 0;
4398 case NICE_CMD_CFG_WRITE8:
4399 pci_write_config_byte(pUmDevice->pdev,
4400 nrq->nrq_offset, (u8)nrq->nrq_data);
4401 return 0;
4403 case NICE_CMD_RESET:
4404 bcm5700_reset(dev);
4405 return 0;
4407 case NICE_CMD_ENABLE_MAC_LOOPBACK:
4408 if (pDevice->LoopBackMode != 0) {
4409 return -EINVAL;
4412 BCM5700_PHY_LOCK(pUmDevice, flags);
4413 LM_EnableMacLoopBack(pDevice);
4414 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4415 return 0;
4417 case NICE_CMD_DISABLE_MAC_LOOPBACK:
4418 if (pDevice->LoopBackMode !=
4419 LM_MAC_LOOP_BACK_MODE) {
4420 return -EINVAL;
4423 BCM5700_PHY_LOCK(pUmDevice, flags);
4424 LM_DisableMacLoopBack(pDevice);
4425 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4426 return 0;
4428 case NICE_CMD_ENABLE_PHY_LOOPBACK:
4429 if (pDevice->LoopBackMode != 0) {
4430 return -EINVAL;
4433 BCM5700_PHY_LOCK(pUmDevice, flags);
4434 LM_EnablePhyLoopBack(pDevice);
4435 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4436 return 0;
4438 case NICE_CMD_DISABLE_PHY_LOOPBACK:
4439 if (pDevice->LoopBackMode !=
4440 LM_PHY_LOOP_BACK_MODE) {
4441 return -EINVAL;
4444 BCM5700_PHY_LOCK(pUmDevice, flags);
4445 LM_DisablePhyLoopBack(pDevice);
4446 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4447 return 0;
4449 case NICE_CMD_ENABLE_EXT_LOOPBACK:
4450 if (pDevice->LoopBackMode != 0) {
4451 return -EINVAL;
4454 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
4455 if (nrq->nrq_speed != 1000)
4456 return -EINVAL;
4458 else {
4459 if ((nrq->nrq_speed != 1000) &&
4460 (nrq->nrq_speed != 100) &&
4461 (nrq->nrq_speed != 10)) {
4462 return -EINVAL;
4465 BCM5700_PHY_LOCK(pUmDevice, flags);
4466 LM_EnableExtLoopBack(pDevice, nrq->nrq_speed);
4467 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4468 return 0;
4470 case NICE_CMD_DISABLE_EXT_LOOPBACK:
4471 if (pDevice->LoopBackMode !=
4472 LM_EXT_LOOP_BACK_MODE) {
4473 return -EINVAL;
4476 BCM5700_PHY_LOCK(pUmDevice, flags);
4477 LM_DisableExtLoopBack(pDevice);
4478 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4479 return 0;
4481 case NICE_CMD_INTERRUPT_TEST:
4482 nrq->nrq_intr_test_result =
4483 b57_test_intr(pUmDevice);
4484 return 0;
4486 case NICE_CMD_LOOPBACK_TEST:
4487 value = 0;
4488 switch (nrq->nrq_looptype) {
4489 case NICE_LOOPBACK_TESTTYPE_EXT:
4490 if ((nrq->nrq_loopspeed & ~NICE_LOOPBACK_TEST_SPEEDMASK) ||
4491 !(nrq->nrq_loopspeed & NICE_LOOPBACK_TEST_SPEEDMASK))
4492 break;
4493 switch (nrq->nrq_loopspeed) {
4494 case NICE_LOOPBACK_TEST_10MBPS:
4495 value = LM_LINE_SPEED_10MBPS;
4496 break;
4497 case NICE_LOOPBACK_TEST_100MBPS:
4498 value = LM_LINE_SPEED_100MBPS;
4499 break;
4500 case NICE_LOOPBACK_TEST_1000MBPS:
4501 value = LM_LINE_SPEED_1000MBPS;
4502 break;
4504 /* Fall through */
4506 case NICE_LOOPBACK_TESTTYPE_MAC:
4507 case NICE_LOOPBACK_TESTTYPE_PHY:
4508 b57_suspend_chip(pUmDevice);
4509 value = b57_test_loopback(pUmDevice,
4510 nrq->nrq_looptype, value);
4511 b57_resume_chip(pUmDevice);
4512 break;
4515 if (value == 1) {
4516 /* A '1' indicates success */
4517 value = 0;
4518 } else {
4519 value = -EINTR;
4522 return value;
4524 case NICE_CMD_KMALLOC_PHYS: {
4525 #if (LINUX_VERSION_CODE >= 0x020400)
4526 dma_addr_t mapping;
4527 __u64 cpu_pa;
4528 void *ptr;
4529 int i;
4530 struct page *pg, *last_pg;
4532 for (i = 0; i < MAX_MEM2; i++) {
4533 if (pUmDevice->mem_size_list2[i] == 0)
4534 break;
4536 if (i >= MAX_MEM2)
4537 return -EFAULT;
4538 ptr = pci_alloc_consistent(pUmDevice->pdev,
4539 nrq->nrq_size, &mapping);
4540 if (!ptr) {
4541 return -EFAULT;
4543 pUmDevice->mem_size_list2[i] = nrq->nrq_size;
4544 pUmDevice->mem_list2[i] = ptr;
4545 pUmDevice->dma_list2[i] = mapping;
4547 /* put pci mapping at the beginning of buffer */
4548 *((__u64 *) ptr) = (__u64) mapping;
4550 /* Probably won't work on some architectures */
4551 /* get CPU mapping */
4552 cpu_pa = (__u64) virt_to_phys(ptr);
4553 pUmDevice->cpu_pa_list2[i] = cpu_pa;
4554 nrq->nrq_phys_addr_lo = (__u32) cpu_pa;
4555 nrq->nrq_phys_addr_hi = (__u32) (cpu_pa >> 32);
4557 pg = virt_to_page(ptr);
4558 last_pg = virt_to_page(ptr + nrq->nrq_size - 1);
4559 for (; ; pg++) {
4560 #if (LINUX_VERSION_CODE > 0x020500)
4561 SetPageReserved(pg);
4562 #else
4563 mem_map_reserve(pg);
4564 #endif
4565 if (pg == last_pg)
4566 break;
4568 return 0;
4569 #else
4570 return -EOPNOTSUPP;
4571 #endif
4574 case NICE_CMD_KFREE_PHYS: {
4575 int i;
4576 __u64 cpu_pa;
4578 cpu_pa = (__u64) nrq->nrq_phys_addr_lo +
4579 ((__u64) nrq->nrq_phys_addr_hi << 32);
4580 for (i = 0; i < MAX_MEM2; i++) {
4581 if (pUmDevice->cpu_pa_list2[i] ==
4582 cpu_pa)
4584 break;
4587 if (i >= MAX_MEM2)
4588 return -EFAULT;
4590 bcm5700_freemem2(pUmDevice, i);
4591 return 0;
4594 case NICE_CMD_SET_WRITE_PROTECT:
4595 if (nrq->nrq_write_protect)
4596 pDevice->Flags |= EEPROM_WP_FLAG;
4597 else
4598 pDevice->Flags &= ~EEPROM_WP_FLAG;
4599 return 0;
4600 case NICE_CMD_GET_STATS_BLOCK: {
4601 PT3_STATS_BLOCK pStats =
4602 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4603 if (mm_copy_to_user(nrq->nrq_stats_useraddr,
4604 pStats, nrq->nrq_stats_size)) {
4605 return -EFAULT;
4607 return 0;
4609 case NICE_CMD_CLR_STATS_BLOCK: {
4610 int j;
4611 PT3_STATS_BLOCK pStats =
4612 (PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4614 memset(pStats, 0, sizeof(T3_STATS_BLOCK));
4615 if (T3_ASIC_REV(pDevice->ChipRevId) ==
4616 T3_ASIC_REV_5705) {
4617 return 0;
4619 for(j = 0x0300; j < 0x0b00; j = j + 4) {
4620 MEM_WR_OFFSET(pDevice, j, 0);
4623 return 0;
4628 return -EOPNOTSUPP;
4630 #endif /* NICE_SUPPORT */
4631 #ifdef SIOCETHTOOL
4632 case SIOCETHTOOL:
4633 return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
4634 #endif
4635 default:
4636 return -EOPNOTSUPP;
4638 return -EOPNOTSUPP;
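/*
 * Rebuild the multicast filter and receive mask from dev->flags.  Unlike
 * bcm5700_set_rx_mode() below, no PHY lock is taken here; the caller
 * (e.g. bcm5700_change_mtu()) is expected to hold it already.
 */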
4641 STATIC void bcm5700_do_rx_mode(struct net_device *dev)
4643 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4644 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4645 int i;
4646 struct dev_mc_list *mclist;
4648 LM_MulticastClear(pDevice);
4649 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4650 i++, mclist = mclist->next) {
4651 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4653 if (dev->flags & IFF_ALLMULTI) {
4654 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4655 LM_SetReceiveMask(pDevice,
4656 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4659 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4660 LM_SetReceiveMask(pDevice,
4661 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4663 if (dev->flags & IFF_PROMISC) {
4664 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4665 LM_SetReceiveMask(pDevice,
4666 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4669 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4670 LM_SetReceiveMask(pDevice,
4671 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4676 STATIC void bcm5700_set_rx_mode(struct net_device *dev)
4678 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4679 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4680 int i;
4681 struct dev_mc_list *mclist;
4682 unsigned long flags;
4684 BCM5700_PHY_LOCK(pUmDevice, flags);
4686 LM_MulticastClear(pDevice);
4687 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4688 i++, mclist = mclist->next) {
4689 LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4691 if (dev->flags & IFF_ALLMULTI) {
4692 if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4693 LM_SetReceiveMask(pDevice,
4694 pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4697 else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4698 LM_SetReceiveMask(pDevice,
4699 pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4701 if (dev->flags & IFF_PROMISC) {
4702 if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4703 LM_SetReceiveMask(pDevice,
4704 pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4707 else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4708 LM_SetReceiveMask(pDevice,
4709 pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4712 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4716 * Set the hardware MAC address.
4718 STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p)
4720 struct sockaddr *addr=p;
4721 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) dev->priv;
4722 UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4724 if(is_valid_ether_addr(addr->sa_data)){
4726 memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
4727 if (pUmDevice->opened)
4728 LM_SetMacAddress(pDevice, dev->dev_addr);
4729 return 0;
4731 return -EINVAL;
4734 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
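/*
 * Change the interface MTU.  Moving to or from a jumbo MTU changes the
 * receive descriptor layout, so if the device is open it is shut down,
 * its buffers are freed and the adapter is re-initialized with the new
 * RxMtu/TxMtu and jumbo descriptor count.
 */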
4735 STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu)
4737 int pkt_size = new_mtu + ETHERNET_PACKET_HEADER_SIZE;
4738 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4739 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4740 unsigned long flags;
4741 int reinit = 0;
4743 if ((pkt_size < MIN_ETHERNET_PACKET_SIZE_NO_CRC) ||
4744 (pkt_size > MAX_ETHERNET_JUMBO_PACKET_SIZE_NO_CRC)) {
4746 return -EINVAL;
4748 if ( !(pDevice->Flags & JUMBO_CAPABLE_FLAG) &&
4749 (pkt_size > MAX_ETHERNET_PACKET_SIZE_NO_CRC) ) {
4751 return -EINVAL;
4753 if (pUmDevice->suspended)
4754 return -EAGAIN;
4756 if (pUmDevice->opened && (new_mtu != dev->mtu) &&
4757 (pDevice->Flags & JUMBO_CAPABLE_FLAG)) {
4758 reinit = 1;
4761 BCM5700_PHY_LOCK(pUmDevice, flags);
4762 if (reinit) {
4763 netif_stop_queue(dev);
4764 bcm5700_shutdown(pUmDevice);
4765 bcm5700_freemem(dev);
4768 dev->mtu = new_mtu;
4769 if (pkt_size < MAX_ETHERNET_PACKET_SIZE_NO_CRC) {
4770 pDevice->RxMtu = pDevice->TxMtu =
4771 MAX_ETHERNET_PACKET_SIZE_NO_CRC;
4773 else {
4774 pDevice->RxMtu = pDevice->TxMtu = pkt_size;
4777 if (dev->mtu <= 1514) {
4778 pDevice->RxJumboDescCnt = 0;
4780 else if (pDevice->Flags & JUMBO_CAPABLE_FLAG){
4781 pDevice->RxJumboDescCnt =
4782 rx_jumbo_desc_cnt[pUmDevice->index];
4784 pDevice->RxPacketDescCnt = pDevice->RxJumboDescCnt +
4785 pDevice->RxStdDescCnt;
4787 pDevice->RxJumboBufferSize = (pDevice->RxMtu + 8 /* CRC + VLAN */ +
4788 COMMON_CACHE_LINE_SIZE-1) & ~COMMON_CACHE_LINE_MASK;
4790 #ifdef BCM_TSO
4791 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4792 (dev->mtu > 1514) ) {
4793 if (dev->features & NETIF_F_TSO) {
4794 dev->features &= ~NETIF_F_TSO;
4795 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
4798 #endif
4800 if (reinit) {
4801 LM_InitializeAdapter(pDevice);
4802 bcm5700_do_rx_mode(dev);
4803 bcm5700_set_vlan_mode(pUmDevice);
4804 bcm5700_init_counters(pUmDevice);
4805 if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
4806 LM_SetMacAddress(pDevice, dev->dev_addr);
4808 netif_start_queue(dev);
4809 bcm5700_intr_on(pUmDevice);
4811 BCM5700_PHY_UNLOCK(pUmDevice, flags);
4813 return 0;
4815 #endif
4818 #if (LINUX_VERSION_CODE < 0x020300)
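/*
 * Legacy (pre-2.3) probe path: walk every Ethernet-class PCI device and
 * match it against bcm5700_pci_tbl by vendor, device and subsystem IDs,
 * calling bcm5700_init_one() for each hit.
 */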
4820 bcm5700_probe(struct net_device *dev)
4822 int cards_found = 0;
4823 struct pci_dev *pdev = NULL;
4824 struct pci_device_id *pci_tbl;
4825 u16 ssvid, ssid;
4827 if ( ! pci_present())
4828 return -ENODEV;
4830 pci_tbl = bcm5700_pci_tbl;
4831 while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
4832 int idx;
4834 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &ssvid);
4835 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &ssid);
4836 for (idx = 0; pci_tbl[idx].vendor; idx++) {
4837 if ((pci_tbl[idx].vendor == PCI_ANY_ID ||
4838 pci_tbl[idx].vendor == pdev->vendor) &&
4839 (pci_tbl[idx].device == PCI_ANY_ID ||
4840 pci_tbl[idx].device == pdev->device) &&
4841 (pci_tbl[idx].subvendor == PCI_ANY_ID ||
4842 pci_tbl[idx].subvendor == ssvid) &&
4843 (pci_tbl[idx].subdevice == PCI_ANY_ID ||
4844 pci_tbl[idx].subdevice == ssid))
4847 break;
4850 if (pci_tbl[idx].vendor == 0)
4851 continue;
4854 if (bcm5700_init_one(pdev, &pci_tbl[idx]) == 0)
4855 cards_found++;
4858 return cards_found ? 0 : -ENODEV;
4861 #ifdef MODULE
4862 int init_module(void)
4864 return bcm5700_probe(NULL);
4867 void cleanup_module(void)
4869 struct net_device *next_dev;
4870 PUM_DEVICE_BLOCK pUmDevice;
4872 #ifdef BCM_PROC_FS
4873 bcm5700_proc_remove_notifier();
4874 #endif
4875 /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
4876 while (root_tigon3_dev) {
4877 pUmDevice = (PUM_DEVICE_BLOCK)root_tigon3_dev->priv;
4878 #ifdef BCM_PROC_FS
4879 bcm5700_proc_remove_dev(root_tigon3_dev);
4880 #endif
4881 next_dev = pUmDevice->next_module;
4882 unregister_netdev(root_tigon3_dev);
4883 if (pUmDevice->lm_dev.pMappedMemBase)
4884 iounmap(pUmDevice->lm_dev.pMappedMemBase);
4885 #if (LINUX_VERSION_CODE < 0x020600)
4886 kfree(root_tigon3_dev);
4887 #else
4888 free_netdev(root_tigon3_dev);
4889 #endif
4890 root_tigon3_dev = next_dev;
4892 #ifdef BCM_IOCTL32
4893 unregister_ioctl32_conversion(SIOCNICE);
4894 #endif
4897 #endif /* MODULE */
4898 #else /* LINUX_VERSION_CODE < 0x020300 */
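/*
 * Power-management hooks for 2.4+ kernels: suspend detaches the netdev,
 * halts the chip and drops it to D3; resume restores D0 power,
 * repopulates the RX buffers and resets the device.
 */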
4900 #if (LINUX_VERSION_CODE >= 0x020406)
4901 static int bcm5700_suspend (struct pci_dev *pdev, u32 state)
4902 #else
4903 static void bcm5700_suspend (struct pci_dev *pdev)
4904 #endif
4906 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
4907 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
4908 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4910 if (!netif_running(dev))
4911 #if (LINUX_VERSION_CODE >= 0x020406)
4912 return 0;
4913 #else
4914 return;
4915 #endif
4917 netif_device_detach (dev);
4918 bcm5700_shutdown(pUmDevice);
4920 LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
4922 /* pci_power_off(pdev, -1);*/
4923 #if (LINUX_VERSION_CODE >= 0x020406)
4924 return 0;
4925 #endif
4929 #if (LINUX_VERSION_CODE >= 0x020406)
4930 static int bcm5700_resume(struct pci_dev *pdev)
4931 #else
4932 static void bcm5700_resume(struct pci_dev *pdev)
4933 #endif
4935 struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
4936 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
4937 PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
4939 if (!netif_running(dev))
4940 #if (LINUX_VERSION_CODE >= 0x020406)
4941 return 0;
4942 #else
4943 return;
4944 #endif
4945 /* pci_power_on(pdev);*/
4946 netif_device_attach(dev);
4947 LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
4948 MM_InitializeUmPackets(pDevice);
4949 bcm5700_reset(dev);
4950 #if (LINUX_VERSION_CODE >= 0x020406)
4951 return 0;
4952 #endif
4956 static struct pci_driver bcm5700_pci_driver = {
4957 name: bcm5700_driver,
4958 id_table: bcm5700_pci_tbl,
4959 probe: bcm5700_init_one,
4960 remove: __devexit_p(bcm5700_remove_one),
4961 suspend: bcm5700_suspend,
4962 resume: bcm5700_resume,
4965 static int
4966 bcm5700_notify_reboot(struct notifier_block *this, unsigned long event, void *unused)
4968 switch (event) {
4969 case SYS_HALT:
4970 case SYS_POWER_OFF:
4971 case SYS_RESTART:
4972 break;
4973 default:
4974 return NOTIFY_DONE;
4977 B57_INFO(("bcm5700 reboot notification\n"));
4978 pci_unregister_driver(&bcm5700_pci_driver);
4979 return NOTIFY_DONE;
4982 static int __init bcm5700_init_module (void)
4984 if (msglevel != 0xdeadbeef) {
4985 b57_msg_level = msglevel;
4986 printf("%s: msglevel set to 0x%x\n", __FUNCTION__, b57_msg_level);
4987 } else
4988 b57_msg_level = B57_ERR_VAL;
4990 return pci_module_init(&bcm5700_pci_driver);
4993 static void __exit bcm5700_cleanup_module (void)
4995 #ifdef BCM_PROC_FS
4996 bcm5700_proc_remove_notifier();
4997 #endif
4998 unregister_reboot_notifier(&bcm5700_reboot_notifier);
4999 pci_unregister_driver(&bcm5700_pci_driver);
5002 module_init(bcm5700_init_module);
5003 module_exit(bcm5700_cleanup_module);
5004 #endif
5007 * Middle Module
5012 #ifdef BCM_NAPI_RXPOLL
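/* Ask the stack to schedule our NAPI receive poll for this device. */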
5013 LM_STATUS
5014 MM_ScheduleRxPoll(LM_DEVICE_BLOCK *pDevice)
5016 struct net_device *dev = ((UM_DEVICE_BLOCK *) pDevice)->dev;
5018 if (netif_rx_schedule_prep(dev)) {
5019 __netif_rx_schedule(dev);
5020 return LM_STATUS_SUCCESS;
5022 return LM_STATUS_FAILURE;
5024 #endif
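/*
 * PCI configuration-space accessors used by the shared LM (low/middle)
 * layer; they simply forward to the kernel's pci_*_config_* helpers.
 */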
5026 LM_STATUS
5027 MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5028 LM_UINT16 *pValue16)
5030 UM_DEVICE_BLOCK *pUmDevice;
5032 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5033 pci_read_config_word(pUmDevice->pdev, Offset, (u16 *) pValue16);
5034 return LM_STATUS_SUCCESS;
5037 LM_STATUS
5038 MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5039 LM_UINT32 *pValue32)
5041 UM_DEVICE_BLOCK *pUmDevice;
5043 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5044 pci_read_config_dword(pUmDevice->pdev, Offset, (u32 *) pValue32);
5045 return LM_STATUS_SUCCESS;
5048 LM_STATUS
5049 MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5050 LM_UINT16 Value16)
5052 UM_DEVICE_BLOCK *pUmDevice;
5054 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5055 pci_write_config_word(pUmDevice->pdev, Offset, Value16);
5056 return LM_STATUS_SUCCESS;
5059 LM_STATUS
5060 MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
5061 LM_UINT32 Value32)
5063 UM_DEVICE_BLOCK *pUmDevice;
5065 pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5066 pci_write_config_dword(pUmDevice->pdev, Offset, Value32);
5067 return LM_STATUS_SUCCESS;
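/*
 * Allocate a DMA-coherent block (rings, status and statistics blocks).
 * The virtual address, bus address and size are recorded in the per-unit
 * lists so the memory can be released when the device is torn down.
 */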
5070 LM_STATUS
5071 MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
5072 PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy,
5073 LM_BOOL Cached)
5075 PLM_VOID pvirt;
5076 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5077 dma_addr_t mapping;
5079 pvirt = pci_alloc_consistent(pUmDevice->pdev, BlockSize,
5080 &mapping);
5081 if (!pvirt) {
5082 return LM_STATUS_FAILURE;
5084 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
5085 pUmDevice->dma_list[pUmDevice->mem_list_num] = mapping;
5086 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = BlockSize;
5087 memset(pvirt, 0, BlockSize);
5088 *pMemoryBlockVirt = (PLM_VOID) pvirt;
5089 MM_SetAddr(pMemoryBlockPhy, mapping);
5090 return LM_STATUS_SUCCESS;
5093 LM_STATUS
5094 MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
5095 PLM_VOID *pMemoryBlockVirt)
5097 PLM_VOID pvirt;
5098 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5101 /* Maximum in slab.c */
5102 if (BlockSize > 131072) {
5103 goto MM_Alloc_error;
5106 pvirt = kmalloc(BlockSize, GFP_ATOMIC);
5107 if (!pvirt) {
5108 goto MM_Alloc_error;
5110 pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
5111 pUmDevice->dma_list[pUmDevice->mem_list_num] = 0;
5112 pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = 0;
5113 /* mem_size_list[i] == 0 indicates that the memory should be freed */
5114 /* using kfree */
5115 memset(pvirt, 0, BlockSize);
5116 *pMemoryBlockVirt = pvirt;
5117 return LM_STATUS_SUCCESS;
5119 MM_Alloc_error:
5120 printk(KERN_WARNING "%s: Memory allocation failed - buffer parameters may be set too high\n", pUmDevice->dev->name);
5121 return LM_STATUS_FAILURE;
5124 LM_STATUS
5125 MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
5127 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5129 pDevice->pMappedMemBase = ioremap_nocache(
5130 pci_resource_start(pUmDevice->pdev, 0), sizeof(T3_STD_MEM_MAP));
5131 if (pDevice->pMappedMemBase == 0)
5132 return LM_STATUS_FAILURE;
5134 return LM_STATUS_SUCCESS;
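/*
 * Attach an skb to every descriptor on RxPacketFreeQ and derive the
 * RX buffer replenish thresholds used when refilling buffers at
 * interrupt time.
 */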
5137 LM_STATUS
5138 MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice)
5140 unsigned int i;
5141 struct sk_buff *skb;
5142 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5143 PUM_PACKET pUmPacket;
5144 PLM_PACKET pPacket;
5146 for (i = 0; i < pDevice->RxPacketDescCnt; i++) {
5147 pPacket = QQ_PopHead(&pDevice->RxPacketFreeQ.Container);
5148 pUmPacket = (PUM_PACKET) pPacket;
5149 if (pPacket == 0) {
5150 printk(KERN_DEBUG "Bad RxPacketFreeQ\n");
5152 if (pUmPacket->skbuff == 0) {
5153 #ifdef BCM_WL_EMULATOR
5154 skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2);
5155 #else
5156 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR);
5157 #endif
5158 if (skb == 0) {
5159 pUmPacket->skbuff = 0;
5160 QQ_PushTail(
5161 &pUmDevice->rx_out_of_buf_q.Container,
5162 pPacket);
5163 continue;
5165 pUmPacket->skbuff = skb;
5166 skb->dev = pUmDevice->dev;
5167 #ifndef BCM_WL_EMULATOR
5168 skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
5169 #endif
5171 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5173 if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
5174 /* reallocate buffers in the ISR */
5175 pUmDevice->rx_buf_repl_thresh = 0;
5176 pUmDevice->rx_buf_repl_panic_thresh = 0;
5177 pUmDevice->rx_buf_repl_isr_limit = 0;
5179 else {
5180 pUmDevice->rx_buf_repl_thresh = pDevice->RxPacketDescCnt / 8;
5181 pUmDevice->rx_buf_repl_panic_thresh =
5182 pDevice->RxPacketDescCnt * 7 / 8;
5184 /* This limits the time spent in the ISR when the receiver */
5185 /* is in a steady state of being overrun. */
5186 pUmDevice->rx_buf_repl_isr_limit = pDevice->RxPacketDescCnt / 8;
5188 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
5189 if (pDevice->RxJumboDescCnt != 0) {
5190 if (pUmDevice->rx_buf_repl_thresh >=
5191 pDevice->RxJumboDescCnt) {
5193 pUmDevice->rx_buf_repl_thresh =
5194 pUmDevice->rx_buf_repl_panic_thresh =
5195 pDevice->RxJumboDescCnt - 1;
5197 if (pUmDevice->rx_buf_repl_thresh >=
5198 pDevice->RxStdDescCnt) {
5200 pUmDevice->rx_buf_repl_thresh =
5201 pUmDevice->rx_buf_repl_panic_thresh =
5202 pDevice->RxStdDescCnt - 1;
5205 #endif
5207 return LM_STATUS_SUCCESS;
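/*
 * Translate the per-unit module parameters (speed/duplex, flow control,
 * descriptor counts, interrupt coalescing, offloads, WOL, etc.) into
 * LM_DEVICE_BLOCK settings, validating each value against its legal range.
 */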
5210 LM_STATUS
5211 MM_GetConfig(PLM_DEVICE_BLOCK pDevice)
5213 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5214 int index = pUmDevice->index;
5215 struct net_device *dev = pUmDevice->dev;
5217 if (index >= MAX_UNITS)
5218 return LM_STATUS_SUCCESS;
5220 #if LINUX_KERNEL_VERSION < 0x0020609
5222 bcm5700_validate_param_range(pUmDevice, &auto_speed[index], "auto_speed",
5223 0, 1, 1);
5224 if (auto_speed[index] == 0)
5225 pDevice->DisableAutoNeg = TRUE;
5226 else
5227 pDevice->DisableAutoNeg = FALSE;
5229 if (line_speed[index] == 0) {
5230 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5231 pDevice->DisableAutoNeg = FALSE;
5233 else {
5234 bcm5700_validate_param_range(pUmDevice, &full_duplex[index],
5235 "full_duplex", 0, 1, 1);
5236 if (full_duplex[index]) {
5237 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
5239 else {
5240 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_HALF;
5243 if (line_speed[index] == 1000) {
5244 pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
5245 if (pDevice->PhyFlags & PHY_NO_GIGABIT) {
5246 pDevice->RequestedLineSpeed =
5247 LM_LINE_SPEED_100MBPS;
5248 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (1000), using 100\n", bcm5700_driver, index);
5250 else {
5251 if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
5252 !full_duplex[index]) {
5253 printk(KERN_WARNING "%s-%d: Invalid full_duplex parameter (0) for fiber, using 1\n", bcm5700_driver, index);
5254 pDevice->RequestedDuplexMode =
5255 LM_DUPLEX_MODE_FULL;
5258 if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
5259 !auto_speed[index] && !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
5260 printk(KERN_WARNING "%s-%d: Invalid auto_speed parameter (0) for copper, using 1\n", bcm5700_driver, index);
5261 pDevice->DisableAutoNeg = FALSE;
5265 else if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
5266 (pDevice->PhyFlags & PHY_IS_FIBER)){
5267 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5268 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
5269 pDevice->DisableAutoNeg = FALSE;
5270 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using auto\n", bcm5700_driver, index, line_speed[index]);
5272 else if (line_speed[index] == 100) {
5274 pDevice->RequestedLineSpeed = LM_LINE_SPEED_100MBPS;
5276 else if (line_speed[index] == 10) {
5278 pDevice->RequestedLineSpeed = LM_LINE_SPEED_10MBPS;
5280 else {
5281 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5282 pDevice->DisableAutoNeg = FALSE;
5283 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n", bcm5700_driver, index, line_speed[index]);
5288 #endif /* LINUX_KERNEL_VERSION */
5290 /* This is an unmanageable switch nic and will have link problems if
5291 not set to auto */
5293 if(pDevice->SubsystemVendorId==0x103c && pDevice->SubsystemId==0x3226)
5295 if(pDevice->RequestedLineSpeed != LM_LINE_SPEED_AUTO)
5297 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n",
5298 bcm5700_driver, index, line_speed[index]);
5300 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5301 pDevice->DisableAutoNeg = FALSE;
5304 #if LINUX_KERNEL_VERSION < 0x0020609
5306 pDevice->FlowControlCap = 0;
5307 bcm5700_validate_param_range(pUmDevice, &rx_flow_control[index],
5308 "rx_flow_control", 0, 1, 0);
5309 if (rx_flow_control[index] != 0) {
5310 pDevice->FlowControlCap |= LM_FLOW_CONTROL_RECEIVE_PAUSE;
5312 bcm5700_validate_param_range(pUmDevice, &tx_flow_control[index],
5313 "tx_flow_control", 0, 1, 0);
5314 if (tx_flow_control[index] != 0) {
5315 pDevice->FlowControlCap |= LM_FLOW_CONTROL_TRANSMIT_PAUSE;
5317 bcm5700_validate_param_range(pUmDevice, &auto_flow_control[index],
5318 "auto_flow_control", 0, 1, 0);
5319 if (auto_flow_control[index] != 0) {
5320 if (pDevice->DisableAutoNeg == FALSE) {
5322 pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
5323 if ((tx_flow_control[index] == 0) &&
5324 (rx_flow_control[index] == 0)) {
5326 pDevice->FlowControlCap |=
5327 LM_FLOW_CONTROL_TRANSMIT_PAUSE |
5328 LM_FLOW_CONTROL_RECEIVE_PAUSE;
5333 if (dev->mtu > 1500) {
5334 #ifdef BCM_TSO
5335 if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
5336 (dev->features & NETIF_F_TSO)) {
5337 dev->features &= ~NETIF_F_TSO;
5338 printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
5340 #endif
5341 pDevice->RxMtu = dev->mtu + 14;
5344 if ((T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5700) &&
5345 !(pDevice->Flags & BCM5788_FLAG)) {
5346 pDevice->Flags |= USE_TAGGED_STATUS_FLAG;
5347 pUmDevice->timer_interval = HZ;
5348 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) &&
5349 (pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
5350 pUmDevice->timer_interval = HZ/4;
5353 else {
5354 pUmDevice->timer_interval = HZ/10;
5357 bcm5700_validate_param_range(pUmDevice, &tx_pkt_desc_cnt[index],
5358 "tx_pkt_desc_cnt", 1, MAX_TX_PACKET_DESC_COUNT-1, TX_DESC_CNT);
5359 pDevice->TxPacketDescCnt = tx_pkt_desc_cnt[index];
5360 bcm5700_validate_param_range(pUmDevice, &rx_std_desc_cnt[index],
5361 "rx_std_desc_cnt", 1, T3_STD_RCV_RCB_ENTRY_COUNT-1,
5362 RX_DESC_CNT);
5363 pDevice->RxStdDescCnt = rx_std_desc_cnt[index];
5365 #if T3_JUMBO_RCV_RCB_ENTRY_COUNT
5366 bcm5700_validate_param_range(pUmDevice, &rx_jumbo_desc_cnt[index],
5367 "rx_jumbo_desc_cnt", 1, T3_JUMBO_RCV_RCB_ENTRY_COUNT-1,
5368 JBO_DESC_CNT);
5370 if (mtu[index] <= 1514)
5371 pDevice->RxJumboDescCnt = 0;
5372 else if(!T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
5373 pDevice->RxJumboDescCnt = rx_jumbo_desc_cnt[index];
5375 #endif
5377 #ifdef BCM_INT_COAL
5378 bcm5700_validate_param_range(pUmDevice, &adaptive_coalesce[index],
5379 "adaptive_coalesce", 0, 1, 1);
5380 #ifdef BCM_NAPI_RXPOLL
5381 if (adaptive_coalesce[index]) {
5382 printk(KERN_WARNING "%s-%d: adaptive_coalesce not used in NAPI mode\n", bcm5700_driver, index);
5383 adaptive_coalesce[index] = 0;
5386 #endif
5387 pUmDevice->adaptive_coalesce = adaptive_coalesce[index];
5388 if (!pUmDevice->adaptive_coalesce) {
5389 bcm5700_validate_param_range(pUmDevice,
5390 &rx_coalesce_ticks[index], "rx_coalesce_ticks", 0,
5391 MAX_RX_COALESCING_TICKS, RX_COAL_TK);
5392 if ((rx_coalesce_ticks[index] == 0) &&
5393 (rx_max_coalesce_frames[index] == 0)) {
5395 printk(KERN_WARNING "%s-%d: Conflicting rx_coalesce_ticks (0) and rx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5396 bcm5700_driver, index, RX_COAL_TK, RX_COAL_FM);
5398 rx_coalesce_ticks[index] = RX_COAL_TK;
5399 rx_max_coalesce_frames[index] = RX_COAL_FM;
5401 pDevice->RxCoalescingTicks = pUmDevice->rx_curr_coalesce_ticks =
5402 rx_coalesce_ticks[index];
5403 #ifdef BCM_NAPI_RXPOLL
5404 pDevice->RxCoalescingTicksDuringInt = rx_coalesce_ticks[index];
5405 #endif
5407 bcm5700_validate_param_range(pUmDevice,
5408 &rx_max_coalesce_frames[index],
5409 "rx_max_coalesce_frames", 0,
5410 MAX_RX_MAX_COALESCED_FRAMES, RX_COAL_FM);
5412 pDevice->RxMaxCoalescedFrames =
5413 pUmDevice->rx_curr_coalesce_frames =
5414 rx_max_coalesce_frames[index];
5415 #ifdef BCM_NAPI_RXPOLL
5416 pDevice->RxMaxCoalescedFramesDuringInt =
5417 rx_max_coalesce_frames[index];
5418 #endif
5420 bcm5700_validate_param_range(pUmDevice,
5421 &tx_coalesce_ticks[index], "tx_coalesce_ticks", 0,
5422 MAX_TX_COALESCING_TICKS, TX_COAL_TK);
5423 if ((tx_coalesce_ticks[index] == 0) &&
5424 (tx_max_coalesce_frames[index] == 0)) {
5426 printk(KERN_WARNING "%s-%d: Conflicting tx_coalesce_ticks (0) and tx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5427 bcm5700_driver, index, TX_COAL_TK, TX_COAL_FM);
5429 tx_coalesce_ticks[index] = TX_COAL_TK;
5430 tx_max_coalesce_frames[index] = TX_COAL_FM;
5432 pDevice->TxCoalescingTicks = tx_coalesce_ticks[index];
5433 bcm5700_validate_param_range(pUmDevice,
5434 &tx_max_coalesce_frames[index],
5435 "tx_max_coalesce_frames", 0,
5436 MAX_TX_MAX_COALESCED_FRAMES, TX_COAL_FM);
5437 pDevice->TxMaxCoalescedFrames = tx_max_coalesce_frames[index];
5438 pUmDevice->tx_curr_coalesce_frames =
5439 pDevice->TxMaxCoalescedFrames;
5441 bcm5700_validate_param_range(pUmDevice,
5442 &stats_coalesce_ticks[index], "stats_coalesce_ticks",
5443 0, MAX_STATS_COALESCING_TICKS, ST_COAL_TK);
5444 if (adaptive_coalesce[index]) {
5445 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter set with the adaptive_coalesce parameter. Using adaptive_coalesce.\n", bcm5700_driver, index);
5446 }else{
5447 if ((stats_coalesce_ticks[index] > 0) &&
5448 (stats_coalesce_ticks[index] < 100)) {
5449 printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter (%u), using 100\n", bcm5700_driver, index, (unsigned int) stats_coalesce_ticks[index]);
5450 stats_coalesce_ticks[index] = 100;
5451 pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5456 else {
5457 pUmDevice->rx_curr_coalesce_frames = RX_COAL_FM;
5458 pUmDevice->rx_curr_coalesce_ticks = RX_COAL_TK;
5459 pUmDevice->tx_curr_coalesce_frames = TX_COAL_FM;
5461 #endif
5463 if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
5464 unsigned int tmpvar;
5466 tmpvar = pDevice->StatsCoalescingTicks / BCM_TIMER_GRANULARITY;
5469 /* If the result is zero, the request is too demanding. */
5471 if (tmpvar == 0) {
5472 tmpvar = 1;
5475 pDevice->StatsCoalescingTicks = tmpvar * BCM_TIMER_GRANULARITY;
5477 pUmDevice->statstimer_interval = tmpvar;
5480 #ifdef BCM_WOL
5481 bcm5700_validate_param_range(pUmDevice, &enable_wol[index],
5482 "enable_wol", 0, 1, 0);
5483 if (enable_wol[index]) {
5484 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
5485 pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
5487 #endif
5488 #ifdef INCLUDE_TBI_SUPPORT
5489 if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
5490 if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) ||
5491 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703)) {
5492 /* just poll since we have hardware autoneg. in 5704 */
5493 pDevice->TbiFlags |= TBI_PURE_POLLING_FLAG;
5495 else {
5496 pDevice->TbiFlags |= TBI_POLLING_INTR_FLAG;
5499 #endif
5500 bcm5700_validate_param_range(pUmDevice, &scatter_gather[index],
5501 "scatter_gather", 0, 1, 1);
5502 bcm5700_validate_param_range(pUmDevice, &tx_checksum[index],
5503 "tx_checksum", 0, 1, 1);
5504 bcm5700_validate_param_range(pUmDevice, &rx_checksum[index],
5505 "rx_checksum", 0, 1, 1);
5506 if (!(pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
5507 if (tx_checksum[index] || rx_checksum[index]) {
5509 pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE;
5510 printk(KERN_WARNING "%s-%d: Checksum offload not available on this NIC\n", bcm5700_driver, index);
5513 else {
5514 if (rx_checksum[index]) {
5515 pDevice->TaskToOffload |=
5516 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
5517 LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
5519 if (tx_checksum[index]) {
5520 pDevice->TaskToOffload |=
5521 LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
5522 LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
5523 pDevice->Flags |= NO_TX_PSEUDO_HDR_CSUM_FLAG;
5526 #ifdef BCM_TSO
5527 bcm5700_validate_param_range(pUmDevice, &enable_tso[index],
5528 "enable_tso", 0, 1, 1);
5530 /* Always enable TSO firmware if supported */
5531 /* This way we can turn it on or off on the fly */
5532 if (pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TCP_SEGMENTATION)
5534 pDevice->TaskToOffload |=
5535 LM_TASK_OFFLOAD_TCP_SEGMENTATION;
5537 if (enable_tso[index] &&
5538 !(pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION))
5540 printk(KERN_WARNING "%s-%d: TSO not available on this NIC\n", bcm5700_driver, index);
5542 #endif
5543 #ifdef BCM_ASF
5544 bcm5700_validate_param_range(pUmDevice, &vlan_tag_mode[index],
5545 "vlan_strip_mode", 0, 2, 0);
5546 pUmDevice->vlan_tag_mode = vlan_tag_mode[index];
5547 #else
5548 pUmDevice->vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
5549 #endif
5551 #endif /* LINUX_KERNEL_VERSION */
5553 #ifdef BCM_NIC_SEND_BD
5554 bcm5700_validate_param_range(pUmDevice, &nic_tx_bd[index], "nic_tx_bd",
5555 0, 1, 0);
5556 if (nic_tx_bd[index])
5557 pDevice->Flags |= NIC_SEND_BD_FLAG;
5558 if ((pDevice->Flags & ENABLE_PCIX_FIX_FLAG) ||
5559 (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5705)) {
5560 if (pDevice->Flags & NIC_SEND_BD_FLAG) {
5561 pDevice->Flags &= ~NIC_SEND_BD_FLAG;
5562 printk(KERN_WARNING "%s-%d: Nic Send BDs not available on this NIC or not possible on this system\n", bcm5700_driver, index);
5565 #endif
5566 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
5567 bcm5700_validate_param_range(pUmDevice, &disable_msi[pUmDevice->index],
5568 "disable_msi", 0, 1, 0);
5569 #endif
5571 bcm5700_validate_param_range(pUmDevice, &delay_link[index],
5572 "delay_link", 0, 1, 0);
5574 bcm5700_validate_param_range(pUmDevice, &disable_d3hot[index],
5575 "disable_d3hot", 0, 1, 0);
5576 if (disable_d3hot[index]) {
5578 #ifdef BCM_WOL
5579 if (enable_wol[index]) {
5580 pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
5581 pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
5582 printk(KERN_WARNING "%s-%d: Wake-On-Lan disabled because D3Hot is disabled\n", bcm5700_driver, index);
5584 #endif
5585 pDevice->Flags |= DISABLE_D3HOT_FLAG;
5588 return LM_STATUS_SUCCESS;
5591 /* From include/proto/ethernet.h */
5592 #define ETHER_TYPE_8021Q 0x8100 /* 802.1Q */
5594 /* From include/proto/vlan.h */
5595 #define VLAN_PRI_MASK 7 /* 3 bits of priority */
5596 #define VLAN_PRI_SHIFT 13
5598 /* Replace the priority in a vlan tag */
5599 #define UPD_VLANTAG_PRIO(tag, prio) do { \
5600 tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); \
5601 tag |= prio << VLAN_PRI_SHIFT; \
5602 } while (0)
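/*
 * Drain RxPacketReceivedQ: unmap each completed buffer, fix up checksum
 * status and VLAN/DSCP priority, hand the skb to the stack (NICE hook,
 * VLAN acceleration, NAPI or netif_rx), then re-arm the descriptor with
 * a fresh skb or park it on rx_out_of_buf_q if allocation fails.
 */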
5604 LM_STATUS
5605 MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice)
5607 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5608 PLM_PACKET pPacket;
5609 PUM_PACKET pUmPacket;
5610 struct sk_buff *skb;
5611 int size;
5612 int vlan_tag_size = 0;
5613 uint16 dscp_prio;
5615 if (pDevice->ReceiveMask & LM_KEEP_VLAN_TAG)
5616 vlan_tag_size = 4;
5618 while (1) {
5619 pPacket = (PLM_PACKET)
5620 QQ_PopHead(&pDevice->RxPacketReceivedQ.Container);
5621 if (pPacket == 0)
5622 break;
5623 pUmPacket = (PUM_PACKET) pPacket;
5624 #if !defined(NO_PCI_UNMAP)
5625 pci_unmap_single(pUmDevice->pdev,
5626 pci_unmap_addr(pUmPacket, map[0]),
5627 pPacket->u.Rx.RxBufferSize,
5628 PCI_DMA_FROMDEVICE);
5629 #endif
5630 if ((pPacket->PacketStatus != LM_STATUS_SUCCESS) ||
5631 ((size = pPacket->PacketSize) >
5632 (pDevice->RxMtu + vlan_tag_size))) {
5634 /* reuse skb */
5635 #ifdef BCM_TASKLET
5636 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5637 #else
5638 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5639 #endif
5640 pUmDevice->rx_misc_errors++;
5641 continue;
5643 skb = pUmPacket->skbuff;
5644 skb_put(skb, size);
5645 skb->pkt_type = 0;
5646 /* Extract priority from payload and put it in skb->priority */
5647 dscp_prio = 0;
5648 if (pUmDevice->qos) {
5649 uint rc;
5651 rc = pktsetprio(skb, TRUE);
5652 if (rc & (PKTPRIO_VDSCP | PKTPRIO_DSCP))
5653 dscp_prio = rc & VLAN_PRI_MASK;
5654 if (rc != 0)
5655 B57_INFO(("pktsetprio returned 0x%x, skb->priority: %d\n",
5656 rc, skb->priority));
5658 skb->protocol = eth_type_trans(skb, skb->dev);
5659 if (size > pDevice->RxMtu) {
5660 /* Make sure we have a valid VLAN tag */
5661 if (htons(skb->protocol) != ETHER_TYPE_8021Q) {
5662 dev_kfree_skb_irq(skb);
5663 pUmDevice->rx_misc_errors++;
5664 goto drop_rx;
5668 pUmDevice->stats.rx_bytes += skb->len;
5670 if ((pPacket->Flags & RCV_BD_FLAG_TCP_UDP_CHKSUM_FIELD) &&
5671 (pDevice->TaskToOffload &
5672 LM_TASK_OFFLOAD_RX_TCP_CHECKSUM)) {
5673 if (pPacket->u.Rx.TcpUdpChecksum == 0xffff) {
5675 skb->ip_summed = CHECKSUM_UNNECESSARY;
5676 #if TIGON3_DEBUG
5677 pUmDevice->rx_good_chksum_count++;
5678 #endif
5680 else {
5681 skb->ip_summed = CHECKSUM_NONE;
5682 pUmDevice->rx_bad_chksum_count++;
5685 else {
5686 skb->ip_summed = CHECKSUM_NONE;
5688 #ifdef NICE_SUPPORT
5689 if( pUmDevice->nice_rx ) {
5690 vlan_tag_t *vlan_tag;
5692 vlan_tag = (vlan_tag_t *) &skb->cb[0];
5693 if (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG) {
5694 vlan_tag->signature = 0x7777;
5695 vlan_tag->tag = pPacket->VlanTag;
5696 /* Override vlan priority with dscp priority */
5697 if (dscp_prio)
5698 UPD_VLANTAG_PRIO(vlan_tag->tag, dscp_prio);
5699 } else {
5700 vlan_tag->signature = 0;
5702 pUmDevice->nice_rx(skb, pUmDevice->nice_ctx);
5703 } else
5704 #endif
5706 #ifdef BCM_VLAN
5707 if (pUmDevice->vlgrp &&
5708 (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG)) {
5709 /* Override vlan priority with dscp priority */
5710 if (dscp_prio)
5711 UPD_VLANTAG_PRIO(pPacket->VlanTag, dscp_prio);
5712 #ifdef BCM_NAPI_RXPOLL
5713 vlan_hwaccel_receive_skb(skb, pUmDevice->vlgrp,
5714 pPacket->VlanTag);
5715 #else
5716 vlan_hwaccel_rx(skb, pUmDevice->vlgrp,
5717 pPacket->VlanTag);
5718 #endif
5719 } else
5720 #endif
5722 #ifdef BCM_WL_EMULATOR
5723 if(pDevice->wl_emulate_rx) {
5724 /* bcmstats("emu recv %d %d"); */
5725 wlcemu_receive_skb(pDevice->wlc, skb);
5726 /* bcmstats("emu recv end %d %d"); */
5728 else
5729 #endif /* BCM_WL_EMULATOR */
5731 #ifdef BCM_NAPI_RXPOLL
5732 netif_receive_skb(skb);
5733 #else
5734 netif_rx(skb);
5735 #endif
5739 pUmDevice->dev->last_rx = jiffies;
5741 drop_rx:
5742 #ifdef BCM_TASKLET
5743 pUmPacket->skbuff = 0;
5744 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5745 #else
5746 #ifdef BCM_WL_EMULATOR
5747 skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2);
5748 #else
5749 skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR);
5750 #endif /* BCM_WL_EMULATOR */
5751 if (skb == 0) {
5752 pUmPacket->skbuff = 0;
5753 QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
5755 else {
5756 pUmPacket->skbuff = skb;
5757 skb->dev = pUmDevice->dev;
5758 #ifndef BCM_WL_EMULATOR
5759 skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
5760 #endif
5761 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5763 #endif
5765 return LM_STATUS_SUCCESS;
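/*
 * Coalesce a scattered TX packet: copy it into a single new skb so it
 * can be sent as one fragment, freeing the original skb either way.
 */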
5768 LM_STATUS
5769 MM_CoalesceTxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5771 PUM_PACKET pUmPacket = (PUM_PACKET) pPacket;
5772 struct sk_buff *skb = pUmPacket->skbuff;
5773 struct sk_buff *nskb;
5774 #if !defined(NO_PCI_UNMAP)
5775 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5777 pci_unmap_single(pUmDevice->pdev,
5778 pci_unmap_addr(pUmPacket, map[0]),
5779 pci_unmap_len(pUmPacket, map_len[0]),
5780 PCI_DMA_TODEVICE);
5781 #if MAX_SKB_FRAGS
5783 int i;
5785 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5786 pci_unmap_page(pUmDevice->pdev,
5787 pci_unmap_addr(pUmPacket, map[i + 1]),
5788 pci_unmap_len(pUmPacket, map_len[i + 1]),
5789 PCI_DMA_TODEVICE);
5792 #endif
5793 #endif
5794 if ((nskb = skb_copy(skb, GFP_ATOMIC))) {
5795 pUmPacket->lm_packet.u.Tx.FragCount = 1;
5796 dev_kfree_skb(skb);
5797 pUmPacket->skbuff = nskb;
5798 return LM_STATUS_SUCCESS;
5800 dev_kfree_skb(skb);
5801 pUmPacket->skbuff = 0;
5802 return LM_STATUS_FAILURE;
5805 /* Returns 1 if not all buffers are allocated */
5806 STATIC int
5807 replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max)
5809 PLM_PACKET pPacket;
5810 PUM_PACKET pUmPacket;
5811 PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
5812 struct sk_buff *skb;
5813 int queue_rx = 0;
5814 int alloc_cnt = 0;
5815 int ret = 0;
5817 while ((pUmPacket = (PUM_PACKET)
5818 QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container)) != 0) {
5819 pPacket = (PLM_PACKET) pUmPacket;
5820 if (pUmPacket->skbuff) {
5821 /* reuse an old skb */
5822 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5823 queue_rx = 1;
5824 continue;
5826 #ifdef BCM_WL_EMULATOR
5827 if ((skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2)) == 0)
5828 #else
5829 if ((skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR)) == 0)
5830 #endif /* BCM_WL_EMULATOR */
5832 QQ_PushHead(&pUmDevice->rx_out_of_buf_q.Container,
5833 pPacket);
5834 ret = 1;
5835 break;
5837 pUmPacket->skbuff = skb;
5838 skb->dev = pUmDevice->dev;
5839 #ifndef BCM_WL_EMULATOR
5840 skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
5841 #endif
5842 QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5843 queue_rx = 1;
5844 if (max > 0) {
5845 alloc_cnt++;
5846 if (alloc_cnt >= max)
5847 break;
5850 if (queue_rx || pDevice->QueueAgain) {
5851 LM_QueueRxPackets(pDevice);
5853 return ret;
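/*
 * Reclaim completed transmit descriptors: unmap the DMA buffers, free
 * the skbs and wake the transmit queue once at least half of the TX
 * descriptors are free again.
 */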
5856 LM_STATUS
5857 MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice)
5859 PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5860 PLM_PACKET pPacket;
5861 PUM_PACKET pUmPacket;
5862 struct sk_buff *skb;
5863 #if !defined(NO_PCI_UNMAP) && MAX_SKB_FRAGS
5864 int i;
5865 #endif
5867 while (1) {
5868 pPacket = (PLM_PACKET)
5869 QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);
5870 if (pPacket == 0)
5871 break;
5872 pUmPacket = (PUM_PACKET) pPacket;
5873 skb = pUmPacket->skbuff;
5874 #if !defined(NO_PCI_UNMAP)
5875 pci_unmap_single(pUmDevice->pdev,
5876 pci_unmap_addr(pUmPacket, map[0]),
5877 pci_unmap_len(pUmPacket, map_len[0]),
5878 PCI_DMA_TODEVICE);
5879 #if MAX_SKB_FRAGS
5880 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5881 pci_unmap_page(pUmDevice->pdev,
5882 pci_unmap_addr(pUmPacket, map[i + 1]),
5883 pci_unmap_len(pUmPacket, map_len[i + 1]),
5884 PCI_DMA_TODEVICE);
5886 #endif
5887 #endif
5888 dev_kfree_skb_irq(skb);
5889 pUmPacket->skbuff = 0;
5890 QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
5892 if (pUmDevice->tx_full) {
5893 if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) >=
5894 (pDevice->TxPacketDescCnt >> 1)) {
5896 pUmDevice->tx_full = 0;
5897 netif_wake_queue(pUmDevice->dev);
5900 return LM_STATUS_SUCCESS;
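/*
 * Report link-state changes: update the carrier state and, when the link
 * comes up, log the negotiated speed, duplex and flow-control settings.
 */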
LM_STATUS
MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice, LM_STATUS Status)
{
    PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
    struct net_device *dev = pUmDevice->dev;
    LM_FLOW_CONTROL flow_control;
    int speed = 0;

    if (!pUmDevice->opened)
        return LM_STATUS_SUCCESS;

    if (!pUmDevice->suspended) {
        if (Status == LM_STATUS_LINK_DOWN) {
            netif_carrier_off(dev);
        }
        else if (Status == LM_STATUS_LINK_ACTIVE) {
            netif_carrier_on(dev);
        }
    }

    if (pUmDevice->delayed_link_ind > 0) {
        pUmDevice->delayed_link_ind = 0;
        if (Status == LM_STATUS_LINK_DOWN) {
            B57_INFO(("%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name));
        }
        else if (Status == LM_STATUS_LINK_ACTIVE) {
            B57_INFO(("%s: %s NIC Link is UP, ", bcm5700_driver, dev->name));
        }
    }
    else {
        if (Status == LM_STATUS_LINK_DOWN) {
            B57_INFO(("%s: %s NIC Link is Down\n", bcm5700_driver, dev->name));
        }
        else if (Status == LM_STATUS_LINK_ACTIVE) {
            B57_INFO(("%s: %s NIC Link is Up, ", bcm5700_driver, dev->name));
        }
    }

    if (Status == LM_STATUS_LINK_ACTIVE) {
        if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
            speed = 1000;
        else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
            speed = 100;
        else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
            speed = 10;

        B57_INFO(("%d Mbps ", speed));

        if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
            B57_INFO(("full duplex"));
        else
            B57_INFO(("half duplex"));

        flow_control = pDevice->FlowControl &
            (LM_FLOW_CONTROL_RECEIVE_PAUSE |
            LM_FLOW_CONTROL_TRANSMIT_PAUSE);
        if (flow_control) {
            if (flow_control & LM_FLOW_CONTROL_RECEIVE_PAUSE) {
                B57_INFO((", receive "));
                if (flow_control & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
                    B57_INFO(("& transmit "));
            }
            else {
                B57_INFO((", transmit "));
            }
            B57_INFO(("flow control ON"));
        }
        B57_INFO(("\n"));
    }
    return LM_STATUS_SUCCESS;
}

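/* Releases the streaming DMA mapping of a receive buffer (if one is
 * attached) before the skb is freed or handed up the stack. */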
void
MM_UnmapRxDma(LM_DEVICE_BLOCK *pDevice, LM_PACKET *pPacket)
{
#if !defined(NO_PCI_UNMAP)
    UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
    UM_PACKET *pUmPacket = (UM_PACKET *) pPacket;

    if (!pUmPacket->skbuff)
        return;

    pci_unmap_single(pUmDevice->pdev,
        pci_unmap_addr(pUmPacket, map[0]),
        pPacket->u.Rx.RxBufferSize,
        PCI_DMA_FROMDEVICE);
#endif
}

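/* Frees the skb attached to a receive packet descriptor; the DMA mapping
 * must already have been released (see MM_UnmapRxDma above). */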
LM_STATUS
MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
{
    PUM_PACKET pUmPacket;
    struct sk_buff *skb;

    if (pPacket == 0)
        return LM_STATUS_SUCCESS;
    pUmPacket = (PUM_PACKET) pPacket;
    if ((skb = pUmPacket->skbuff)) {
        /* DMA address already unmapped */
        dev_kfree_skb(skb);
    }
    pUmPacket->skbuff = 0;
    return LM_STATUS_SUCCESS;
}

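/* Sleeps for roughly msec milliseconds; returns LM_STATUS_FAILURE if the
 * sleep was cut short or a signal is pending on the current task. */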
LM_STATUS
MM_Sleep(LM_DEVICE_BLOCK *pDevice, LM_UINT32 msec)
{
    current->state = TASK_INTERRUPTIBLE;
    if (schedule_timeout(HZ * msec / 1000) != 0) {
        return LM_STATUS_FAILURE;
    }
    if (signal_pending(current))
        return LM_STATUS_FAILURE;

    return LM_STATUS_SUCCESS;
}

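/* Quiesces the device: masks interrupts, drops the carrier, kills the
 * receive tasklet when enabled, waits for in-progress polling to finish,
 * halts the hardware and returns the remaining receive buffers. */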
void
bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice)
{
    LM_DEVICE_BLOCK *pDevice = (LM_DEVICE_BLOCK *) pUmDevice;

    bcm5700_intr_off(pUmDevice);
    netif_carrier_off(pUmDevice->dev);
#ifdef BCM_TASKLET
    tasklet_kill(&pUmDevice->tasklet);
#endif
    bcm5700_poll_wait(pUmDevice);

    LM_Halt(pDevice);

    pDevice->InitDone = 0;
    bcm5700_free_remaining_rx_bufs(pUmDevice);
}

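/* Drains the out-of-buffer queue: each parked receive packet is unmapped,
 * its buffer freed, and the descriptor pushed back onto RxPacketFreeQ. */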
void
bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice)
{
    LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
    UM_PACKET *pUmPacket;
    int cnt, i;

    cnt = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
    for (i = 0; i < cnt; i++) {
        if ((pUmPacket =
            QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container))
            != 0) {

            MM_UnmapRxDma(pDevice, (LM_PACKET *) pUmPacket);
            MM_FreeRxBuffer(pDevice, &pUmPacket->lm_packet);
            QQ_PushTail(&pDevice->RxPacketFreeQ.Container,
                pUmPacket);
        }
    }
}

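/* Checks a module parameter against [min, max]; an out-of-range value is
 * reported with a warning and replaced by the default. */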
void
bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
    char *param_name, int min, int max, int deflt)
{
    if (((unsigned int) *param < (unsigned int) min) ||
        ((unsigned int) *param > (unsigned int) max)) {

        printk(KERN_WARNING "%s-%d: Invalid %s parameter (%u), using %u\n",
            bcm5700_driver, pUmDevice->index, param_name,
            (unsigned int) *param, (unsigned int) deflt);
        *param = deflt;
    }
}

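/* For 5704 chips, walks the driver's device list looking for another
 * net_device on the same PCI bus and slot, i.e. the other port of the
 * same dual-port adapter; returns 0 if no peer is found. */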
struct net_device *
bcm5700_find_peer(struct net_device *dev)
{
    struct net_device *tmp_dev;
    UM_DEVICE_BLOCK *pUmDevice, *pUmTmp;
    LM_DEVICE_BLOCK *pDevice;

    tmp_dev = 0;
    pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
    pDevice = &pUmDevice->lm_dev;
    if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
        tmp_dev = root_tigon3_dev;
        while (tmp_dev) {
            pUmTmp = (PUM_DEVICE_BLOCK) tmp_dev->priv;
            if ((tmp_dev != dev) &&
                (pUmDevice->pdev->bus->number ==
                pUmTmp->pdev->bus->number) &&
                PCI_SLOT(pUmDevice->pdev->devfn) ==
                PCI_SLOT(pUmTmp->pdev->devfn)) {

                break;
            }
            tmp_dev = pUmTmp->next_module;
        }
    }
    return tmp_dev;
}

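/* LM-layer wrapper around bcm5700_find_peer(); returns the peer port's
 * LM_DEVICE_BLOCK, or 0 if there is none. */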
LM_DEVICE_BLOCK *
MM_FindPeerDev(LM_DEVICE_BLOCK *pDevice)
{
    UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
    struct net_device *dev = pUmDevice->dev;
    struct net_device *peer_dev;

    peer_dev = bcm5700_find_peer(dev);
    if (!peer_dev)
        return 0;
    return ((LM_DEVICE_BLOCK *) peer_dev->priv);
}

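/* Thin wrapper around pci_find_capability() for the LM layer. */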
int MM_FindCapability(LM_DEVICE_BLOCK *pDevice, int capability)
{
    UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
    return (pci_find_capability(pUmDevice->pdev, capability));
}

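/* Poll-controller entry point (netpoll/netconsole/netdump): services the
 * interrupt handler, and where applicable the NAPI poll routine, with the
 * device IRQ disabled so the stack can make progress when normal
 * interrupt delivery is unavailable. */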
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
STATIC void
poll_bcm5700(struct net_device *dev)
{
    UM_DEVICE_BLOCK *pUmDevice = dev->priv;

#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020605)
    if (netdump_mode) {
        bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
#ifdef BCM_NAPI_RXPOLL
        if (dev->poll_list.prev) {
            int budget = 64;

            bcm5700_poll(dev, &budget);
        }
#endif
    }
    else
#endif
    {
        disable_irq(pUmDevice->pdev->irq);
        bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
        enable_irq(pUmDevice->pdev->irq);
    }
}
#endif