/******************************************************************************/
/* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2004 Broadcom */
/* All rights reserved. */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation, located in the file LICENSE. */
/******************************************************************************/

/* $Id: mm.h,v 1.6 2006/08/27 23:31:05 Exp $ */
#include <linux/config.h>

#if defined(CONFIG_SMP) && !defined(__SMP__)
#define __SMP__
#endif

#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && !defined(MODVERSIONS)
#define MODVERSIONS
#endif

#define __NO_VERSION__
#include <linux/version.h>

#if defined(MODVERSIONS) && (LINUX_VERSION_CODE < 0x020500)
#include <linux/modversions.h>
#endif

#if (LINUX_VERSION_CODE < 0x020605)
#include <linux/module.h>
#else
#include <linux/moduleparam.h>
#endif

#define MOD_INC_USE_COUNT
#define MOD_DEC_USE_COUNT
#define SET_MODULE_OWNER(dev)
#define MODULE_DEVICE_TABLE(pci, pci_tbl)
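/*
 * Note: the empty definitions above are compatibility shims so the same
 * source builds on kernels where module use counting (MOD_INC_USE_COUNT /
 * MOD_DEC_USE_COUNT), SET_MODULE_OWNER() and MODULE_DEVICE_TABLE() are
 * either unnecessary or handled elsewhere by the kernel core.
 */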
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/reboot.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/unaligned.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <asm/uaccess.h>
#if (LINUX_VERSION_CODE >= 0x020400)
#if (LINUX_VERSION_CODE < 0x020500)
#include <linux/wrapper.h>
#endif
#include <linux/ethtool.h>
#include <linux/smp_lock.h>
#include <linux/proc_fs.h>
#endif

#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif

#define INCLUDE_TCP_SEG_SUPPORT 1

#include <net/checksum.h>
#ifndef LINUX_KERNEL_VERSION
#define LINUX_KERNEL_VERSION	0
#endif

#ifndef MAX_SKB_FRAGS
#define MAX_SKB_FRAGS	0
#endif
#if (LINUX_VERSION_CODE >= 0x020400)
#ifndef ETHTOOL_GEEPROM

#define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
#define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data */

/* for passing EEPROM chunks */
struct ethtool_eeprom {
	u32	cmd;
	u32	magic;
	u32	offset;	/* in bytes */
	u32	len;	/* in bytes */
	u8	data[0];
};

#define BCM_EEDUMP_LEN(info_p, size) *((u32 *) &((info_p)->reserved1[24]))=size

#else

#define BCM_EEDUMP_LEN(info_p, size) (info_p)->eedump_len=size

#endif
#endif
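/*
 * BCM_EEDUMP_LEN() records the length of an EEPROM dump in the ethtool
 * drvinfo structure.  On kernels whose struct ethtool_drvinfo has no
 * eedump_len field, the value is packed into the reserved1[] area instead.
 */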
#define BCM_INT_COAL	1
#define BCM_NIC_SEND_BD	1
#define BCM_TASKLET	1

#if HAVE_NETIF_RECEIVE_SKB
#define BCM_NAPI_RXPOLL	1
#endif

#if defined(CONFIG_PPC64)
#define BCM_DISCONNECT_AT_CACHELINE 1
#endif
#undef ETHTOOL_GEEPROM
#undef ETHTOOL_SEEPROM
#undef ETHTOOL_GPAUSEPARAM
#undef ETHTOOL_GRXCSUM
#undef BCM_NIC_SEND_BD
#ifdef __BIG_ENDIAN
#define BIG_ENDIAN_HOST 1
#endif

#define MM_SWAP_LE32(x) cpu_to_le32(x)
#define MM_SWAP_BE32(x) cpu_to_be32(x)

#if (LINUX_VERSION_CODE < 0x020327)
#define __raw_readl readl
#define __raw_writel writel
#endif

#define MM_MEMWRITEL(ptr, val) __raw_writel(val, ptr)
#define MM_MEMREADL(ptr) __raw_readl(ptr)
typedef atomic_t MM_ATOMIC_T;

#define MM_ATOMIC_SET(ptr, val)  atomic_set(ptr, val)
#define MM_ATOMIC_READ(ptr)      atomic_read(ptr)
#define MM_ATOMIC_INC(ptr)       atomic_inc(ptr)
#define MM_ATOMIC_ADD(ptr, val)  atomic_add(val, ptr)
#define MM_ATOMIC_DEC(ptr)       atomic_dec(ptr)
#define MM_ATOMIC_SUB(ptr, val)  atomic_sub(val, ptr)
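/*
 * Illustrative only: the MM_ATOMIC_* wrappers map the OS-independent LM/MM
 * API onto Linux atomic_t operations.  Note the argument order swap for
 * add/sub (value first in atomic_add()/atomic_sub()).  A minimal sketch:
 *
 *	MM_ATOMIC_T refcnt;
 *	MM_ATOMIC_SET(&refcnt, 0);
 *	MM_ATOMIC_INC(&refcnt);
 *	MM_ATOMIC_ADD(&refcnt, 4);              // expands to atomic_add(4, &refcnt)
 *	if (MM_ATOMIC_READ(&refcnt) == 5)
 *		MM_ATOMIC_SUB(&refcnt, 5);
 */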
#define MM_WMB()    wmb()
#define MM_RMB()    rmb()
#define MM_MMIOWB() mmiowb()
#define STATIC static

extern int MM_Packet_Desc_Size;

#define MM_PACKET_DESC_SIZE MM_Packet_Desc_Size

DECLARE_QUEUE_TYPE(UM_RX_PACKET_Q, MAX_RX_PACKET_DESC_COUNT+1);
#if (LINUX_VERSION_CODE < 0x020211)
typedef u32 dma_addr_t;
#endif

#if (LINUX_VERSION_CODE < 0x02032a)
#define pci_map_single(dev, address, size, dir) virt_to_bus(address)
#define pci_unmap_single(dev, dma_addr, size, dir)
#endif

#if MAX_SKB_FRAGS
#if (LINUX_VERSION_CODE >= 0x02040d)

typedef dma_addr_t dmaaddr_high_t;

#else
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && !defined(CONFIG_X86_64)

#if defined(CONFIG_HIGHMEM64G)
typedef unsigned long long dmaaddr_high_t;
#else
typedef dma_addr_t dmaaddr_high_t;
#endif

#define pci_map_page bcm_pci_map_page

static inline dmaaddr_high_t
bcm_pci_map_page(struct pci_dev *dev, struct page *page,
	int offset, size_t size, int dir)
{
	dmaaddr_high_t phys;

	phys = (page - mem_map) * (dmaaddr_high_t) PAGE_SIZE + offset;
	return phys;
}

#ifndef pci_unmap_page
#define pci_unmap_page(dev, map, size, dir)
#endif

#else /* #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && !defined(CONFIG_X86_64) */

typedef dma_addr_t dmaaddr_high_t;

/* Warning - This may not work for all architectures if HIGHMEM is defined */

#define pci_map_page(dev, page, offset, size, dir) \
	pci_map_single(dev, page_address(page) + (offset), size, dir)

#ifndef pci_unmap_page
#define pci_unmap_page(dev, map, size, dir) \
	pci_unmap_single(dev, map, size, dir)
#endif

#endif /* #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && !defined(CONFIG_X86_64) */

#endif /* #if (LINUX_VERSION_CODE >= 0x02040d) */
#endif /* #if MAX_SKB_FRAGS */
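/*
 * Illustrative note: on 32-bit x86 with HIGHMEM, pages in high memory have
 * no permanent kernel virtual address, so the bcm_pci_map_page() helper
 * above computes the bus address directly from the page's index in mem_map:
 *
 *	phys = (page - mem_map) * PAGE_SIZE + offset
 *
 * which assumes a flat physical/bus mapping with no IOMMU.  On all other
 * configurations the fallback simply maps page_address(page) + offset with
 * pci_map_single().
 */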
#if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
#define NO_PCI_UNMAP	1
#endif

#if (LINUX_VERSION_CODE < 0x020412)

#if !defined(NO_PCI_UNMAP)
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME;

#define pci_unmap_addr(PTR, ADDR_NAME) \
	((PTR)->ADDR_NAME)

#define pci_unmap_len(PTR, LEN_NAME) \
	((PTR)->LEN_NAME)

#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
	(((PTR)->ADDR_NAME) = (VAL))

#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
	(((PTR)->LEN_NAME) = (VAL))

#else

#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)

#define pci_unmap_addr(PTR, ADDR_NAME)			0
#define pci_unmap_len(PTR, LEN_NAME)			0
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)

#endif
#endif
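/*
 * Illustrative only: these are the standard pci_unmap compatibility macros.
 * A driver declares unmap bookkeeping inside its per-packet structure, and
 * the macros compile away to nothing on platforms (e.g. 32-bit x86) where
 * the saved address/length are not needed to unmap.  A minimal sketch with
 * hypothetical names (ring_info, ri, dma, pdev, len):
 *
 *	struct ring_info {
 *		struct sk_buff *skb;
 *		DECLARE_PCI_UNMAP_ADDR(mapping)
 *	};
 *
 *	pci_unmap_addr_set(ri, mapping, dma);
 *	...
 *	pci_unmap_single(pdev, pci_unmap_addr(ri, mapping),
 *			 len, PCI_DMA_FROMDEVICE);
 */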
#if (LINUX_VERSION_CODE < 0x02030e)
#define net_device	device
#define netif_carrier_on(dev)
#define netif_carrier_off(dev)
#endif

#if (LINUX_VERSION_CODE < 0x02032b)
#define tasklet_struct	tq_struct
#endif
typedef struct _UM_DEVICE_BLOCK {
	LM_DEVICE_BLOCK lm_dev;
	struct net_device *dev;
	struct pci_dev *pdev;
	struct net_device *next_module;
	struct proc_dir_entry *pfs_entry;
	void *mem_list[MAX_MEM];
	dma_addr_t dma_list[MAX_MEM];
	int mem_size_list[MAX_MEM];
	void *mem_list2[MAX_MEM2];	/* for diagnostics ioctl */
	dma_addr_t dma_list2[MAX_MEM2];
	__u64 cpu_pa_list2[MAX_MEM2];
	int mem_size_list2[MAX_MEM2];
	int using_dac;			/* dual address cycle */
	int delayed_link_ind;		/* Delay link status during initial load */
	int adapter_just_inited;	/* the first few seconds after init. */
	int statstimer_interval;
	int crc_counter_expiry;
	int poll_tbi_interval;
	int line_speed;			/* in Mbps, 0 if link is down */
	UM_RX_PACKET_Q rx_out_of_buf_q;
	int rx_buf_repl_thresh;
	int rx_buf_repl_panic_thresh;
	int rx_buf_repl_isr_limit;
	struct timer_list timer;
	struct timer_list statstimer;
	spinlock_t global_lock;
	spinlock_t undi_lock;
	unsigned long undi_flags;
	volatile unsigned long interrupt;
	volatile unsigned long tasklet_busy;
	struct tasklet_struct tasklet;
	struct net_device_stats stats;
#ifdef NICE_SUPPORT
	void (*nice_rx)(struct sk_buff *, void *);
#endif /* NICE_SUPPORT */
	int intr_test_result;
#ifdef NETIF_F_HW_VLAN_TX
	struct vlan_group *vlgrp;
#endif
	int vlan_tag_mode;	/* Setting to allow ASF to work properly with VLANs */
#define VLAN_TAG_MODE_AUTO_STRIP	0
#define VLAN_TAG_MODE_NORMAL_STRIP	1
#define VLAN_TAG_MODE_FORCED_STRIP	2
	/* Auto mode   - VLAN TAGs are always stripped if ASF is enabled;  */
	/*               if ASF is not enabled, it will be in normal mode. */
	/* Normal mode - VLAN TAGs are stripped when VLANs are registered. */
	/* Forced mode - VLAN TAGs are always stripped.                    */
	int adaptive_coalesce;
	uint rx_curr_coalesce_frames;
	uint rx_curr_coalesce_frames_intr;
	uint rx_curr_coalesce_ticks;
	uint tx_curr_coalesce_frames;
	unsigned long tx_zc_count;
	unsigned long tx_chksum_count;
	unsigned long tx_himem_count;
	unsigned long rx_good_chksum_count;
	unsigned long rx_bad_chksum_count;
	unsigned long tso_pkt_count;
	unsigned long rx_misc_errors;
	uint64_t phy_crc_count;
	unsigned int spurious_int;
	unsigned long boardflags;
} UM_DEVICE_BLOCK, *PUM_DEVICE_BLOCK;
typedef struct _UM_PACKET {
	LM_PACKET lm_packet;
	struct sk_buff *skbuff;
#if MAX_SKB_FRAGS
	DECLARE_PCI_UNMAP_ADDR(map[MAX_SKB_FRAGS + 1])
	DECLARE_PCI_UNMAP_LEN(map_len[MAX_SKB_FRAGS + 1])
#else
	DECLARE_PCI_UNMAP_ADDR(map[1])
	DECLARE_PCI_UNMAP_LEN(map_len[1])
#endif
} UM_PACKET, *PUM_PACKET;
static inline void MM_SetAddr(LM_PHYSICAL_ADDRESS *paddr, dma_addr_t addr)
{
#if BITS_PER_LONG == 64
	paddr->High = ((unsigned long) addr) >> 32;
	paddr->Low = ((unsigned long) addr) & 0xffffffff;
#else
	paddr->High = 0;
	paddr->Low = (unsigned long) addr;
#endif
}
static inline void MM_SetT3Addr(T3_64BIT_HOST_ADDR *paddr, dma_addr_t addr)
{
#if BITS_PER_LONG == 64
	paddr->High = ((unsigned long) addr) >> 32;
	paddr->Low = ((unsigned long) addr) & 0xffffffff;
#else
	paddr->High = 0;
	paddr->Low = (unsigned long) addr;
#endif
}
static inline void MM_SetT3AddrHigh(T3_64BIT_HOST_ADDR *paddr,
	dmaaddr_high_t addr)
{
#if defined(CONFIG_HIGHMEM64G) && defined(CONFIG_X86) && !defined(CONFIG_X86_64)
	paddr->High = (unsigned long) (addr >> 32);
	paddr->Low = (unsigned long) (addr & 0xffffffff);
#else
	MM_SetT3Addr(paddr, (dma_addr_t) addr);
#endif
}
static inline void MM_MapRxDma(PLM_DEVICE_BLOCK pDevice,
	struct _LM_PACKET *pPacket,
	T3_64BIT_HOST_ADDR *paddr)
{
	dma_addr_t map;
	struct sk_buff *skb = ((struct _UM_PACKET *) pPacket)->skbuff;

	/* Map the receive buffer for DMA from the device. */
	map = pci_map_single(((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
		skb->tail,
		pPacket->u.Rx.RxBufferSize,
		PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(((struct _UM_PACKET *) pPacket), map[0], map);
	MM_SetT3Addr(paddr, map);
}
static inline void MM_MapTxDma(PLM_DEVICE_BLOCK pDevice,
	struct _LM_PACKET *pPacket,
	T3_64BIT_HOST_ADDR *paddr,
	LM_UINT32 *len, int frag)
{
	dma_addr_t map;
	struct sk_buff *skb = ((struct _UM_PACKET *) pPacket)->skbuff;
	unsigned int length;

	if (frag == 0) {
		/* Map the linear part of the skb. */
		if (skb_shinfo(skb)->nr_frags)
			length = skb->len - skb->data_len;
		else
			length = skb->len;

		map = pci_map_single(((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
			skb->data, length, PCI_DMA_TODEVICE);
		MM_SetT3Addr(paddr, map);
		pci_unmap_addr_set(((struct _UM_PACKET *) pPacket), map[0], map);
		pci_unmap_len_set(((struct _UM_PACKET *) pPacket), map_len[0],
			length);
		*len = length;
	}
	else {
		/* Map page fragment (frag - 1) of the skb. */
		skb_frag_t *sk_frag;
		dmaaddr_high_t hi_map;

		sk_frag = &skb_shinfo(skb)->frags[frag - 1];

		hi_map = pci_map_page(
			((struct _UM_DEVICE_BLOCK *)pDevice)->pdev,
			sk_frag->page,
			sk_frag->page_offset,
			sk_frag->size, PCI_DMA_TODEVICE);

		MM_SetT3AddrHigh(paddr, hi_map);
		pci_unmap_addr_set(((struct _UM_PACKET *) pPacket), map[frag],
			hi_map);
		pci_unmap_len_set(((struct _UM_PACKET *) pPacket),
			map_len[frag], sk_frag->size);
		*len = sk_frag->size;
	}
}
#define BCM5700_PHY_LOCK(pUmDevice, flags) { \
	spinlock_t *lock; \
	if ((pUmDevice)->do_global_lock) { \
		lock = &(pUmDevice)->global_lock; \
	} \
	else { \
		lock = &(pUmDevice)->phy_lock; \
	} \
	spin_lock_irqsave(lock, flags); \
}
#define BCM5700_PHY_UNLOCK(pUmDevice, flags) { \
	spinlock_t *lock; \
	if ((pUmDevice)->do_global_lock) { \
		lock = &(pUmDevice)->global_lock; \
	} \
	else { \
		lock = &(pUmDevice)->phy_lock; \
	} \
	spin_unlock_irqrestore(lock, flags); \
}
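/*
 * Illustrative only - typical use of the PHY lock macros; "flags" must be an
 * unsigned long in the caller, exactly as with spin_lock_irqsave():
 *
 *	unsigned long flags;
 *
 *	BCM5700_PHY_LOCK(pUmDevice, flags);
 *	... access PHY registers ...
 *	BCM5700_PHY_UNLOCK(pUmDevice, flags);
 *
 * When do_global_lock is set, the single global_lock serializes PHY access
 * with the rest of the driver; otherwise a dedicated phy_lock is used.
 */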
#define MM_ACQUIRE_UNDI_LOCK(_pDevice) \
	if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
		unsigned long flags; \
		spin_lock_irqsave(&((PUM_DEVICE_BLOCK)(_pDevice))->undi_lock, flags); \
		((PUM_DEVICE_BLOCK)(_pDevice))->undi_flags = flags; \
	}
#define MM_RELEASE_UNDI_LOCK(_pDevice) \
	if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
		unsigned long flags = ((PUM_DEVICE_BLOCK) (_pDevice))->undi_flags; \
		spin_unlock_irqrestore(&((PUM_DEVICE_BLOCK)(_pDevice))->undi_lock, flags); \
	}
#define MM_ACQUIRE_PHY_LOCK_IN_IRQ(_pDevice) \
	if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
		spin_lock(&((PUM_DEVICE_BLOCK)(_pDevice))->phy_lock); \
	}

#define MM_RELEASE_PHY_LOCK_IN_IRQ(_pDevice) \
	if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
		spin_unlock(&((PUM_DEVICE_BLOCK)(_pDevice))->phy_lock); \
	}
#define MM_UINT_PTR(_ptr)	((unsigned long) (_ptr))

#define MM_GETSTATS64(_Ctr) \
	(uint64_t) (_Ctr).Low + ((uint64_t) (_Ctr).High << 32)

#define MM_GETSTATS32(_Ctr) \
	(uint32_t) (_Ctr).Low

#if BITS_PER_LONG == 64
#define MM_GETSTATS(_Ctr) (unsigned long) MM_GETSTATS64(_Ctr)
#else
#define MM_GETSTATS(_Ctr) (unsigned long) MM_GETSTATS32(_Ctr)
#endif
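/*
 * Illustrative only: hardware statistics counters are kept as {High, Low}
 * 32-bit halves; MM_GETSTATS64() recombines them as
 *
 *	value = Low + (High << 32)
 *
 * e.g. with High = 0x1 and Low = 0x00000002 the result is 0x100000002.
 * MM_GETSTATS() truncates to unsigned long, so very large counters wrap on
 * 32-bit hosts.
 */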
#if (LINUX_VERSION_CODE >= 0x020600)
#define mm_copy_to_user( to, from, size ) \
	(in_atomic() ? (memcpy((to),(from),(size)), 0) : copy_to_user((to),(from),(size)))
#define mm_copy_from_user( to, from, size ) \
	(in_atomic() ? (memcpy((to),(from),(size)), 0) : copy_from_user((to),(from),(size)))
#else
#define mm_copy_to_user( to, from, size ) \
	copy_to_user((to),(from),(size))
#define mm_copy_from_user( to, from, size ) \
	copy_from_user((to),(from),(size))
#endif
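/*
 * Note: on 2.6 kernels copy_to_user()/copy_from_user() may sleep, which is
 * not allowed while a spinlock is held or in interrupt context, so the
 * wrappers above fall back to a plain memcpy() when in_atomic() is true
 * (returning 0, i.e. "no bytes left uncopied").  This assumes the caller has
 * already validated the buffer on that path.
 */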
#define printf(fmt, args...) printk(KERN_WARNING fmt, ##args)

#define DbgPrint(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)

#if defined(CONFIG_X86)
#define DbgBreakPoint() __asm__("int $129")
#else
#define DbgBreakPoint()
#endif

#define MM_Wait(time) udelay(time)