1 /******************************************************************************/
3 /* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2004 Broadcom */
5 /* All rights reserved. */
7 /* This program is free software; you can redistribute it and/or modify */
8 /* it under the terms of the GNU General Public License as published by */
9 /* the Free Software Foundation, located in the file LICENSE. */
11 /******************************************************************************/
13 /* $Id: mm.h,v 1.10 2009-10-20 01:17:52 Exp $ */
/*
 * NOTE(review): this file is an extraction of the driver's mm.h -- each
 * line still carries its original line number as a leading token, and many
 * interior lines (matching #else/#endif, macro bodies, braces) are
 * missing.  Code text is preserved byte-for-byte; only comments added.
 */
19 #include <ctf/hndctf.h>
/* Kernel config + SMP/modversions compatibility glue for 2.4/2.6 kernels. */
22 #include <linux/config.h>
24 #if defined(CONFIG_SMP) && !defined(__SMP__)
28 #if defined(CONFIG_MODVERSIONS) && defined(MODULE) && !defined(MODVERSIONS)
35 #define __NO_VERSION__
37 #include <linux/version.h>
41 #if defined(MODVERSIONS) && (LINUX_VERSION_CODE < 0x020500)
43 #include <linux/modversions.h>
47 #if (LINUX_VERSION_CODE < 0x020605)
48 #include <linux/module.h>
50 #include <linux/moduleparam.h>
/* Newer kernels dropped the module-use-count API; stub it out to nothing. */
55 #define MOD_INC_USE_COUNT
56 #define MOD_DEC_USE_COUNT
57 #define SET_MODULE_OWNER(dev)
58 #define MODULE_DEVICE_TABLE(pci, pci_tbl)
63 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
64 #include <linux/utsrelease.h>
66 #include <linux/kernel.h>
67 #include <linux/sched.h>
68 #include <linux/string.h>
69 #include <linux/timer.h>
70 #include <linux/errno.h>
71 #include <linux/ioport.h>
72 #include <linux/slab.h>
73 #include <linux/interrupt.h>
74 #include <linux/pci.h>
75 #include <linux/init.h>
76 #include <linux/netdevice.h>
77 #include <linux/etherdevice.h>
78 #include <linux/skbuff.h>
79 #include <linux/reboot.h>
80 #include <asm/processor.h> /* Processor type for cache alignment. */
81 #include <asm/bitops.h>
83 #include <asm/unaligned.h>
84 #include <linux/delay.h>
85 #include <asm/byteorder.h>
86 #include <linux/time.h>
87 #include <asm/uaccess.h>
88 #if (LINUX_VERSION_CODE >= 0x020400)
89 #if (LINUX_VERSION_CODE < 0x020500)
90 #include <linux/wrapper.h>
92 #include <linux/ethtool.h>
95 #include <linux/smp_lock.h>
96 #include <linux/proc_fs.h>
99 #ifdef NETIF_F_HW_VLAN_TX
100 #include <linux/if_vlan.h>
/* TCP segmentation offload support flag for this build. */
105 #define INCLUDE_TCP_SEG_SUPPORT 1
108 #include <net/checksum.h>
/* Default to 0 when the kernel headers do not provide these. */
111 #ifndef LINUX_KERNEL_VERSION
112 #define LINUX_KERNEL_VERSION 0
115 #ifndef MAX_SKB_FRAGS
116 #define MAX_SKB_FRAGS 0
/* ethtool EEPROM ioctl numbers, supplied here for 2.4 kernels whose
 * headers predate them. */
119 #if (LINUX_VERSION_CODE >= 0x020400)
120 #ifndef ETHTOOL_GEEPROM
122 #define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
123 #define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
125 /* for passing EEPROM chunks */
/*
 * Local definition of struct ethtool_eeprom for kernels whose headers
 * lack it.  NOTE(review): the remaining members and the closing brace
 * are among the lines missing from this extraction.
 */
126 struct ethtool_eeprom
{
129 u32 offset
; /* in bytes */
130 u32 len
; /* in bytes */
/* Two alternative BCM_EEDUMP_LEN definitions (the #if/#else selecting
 * between them is missing here); both record `size` in the ethtool
 * drvinfo block for the EEPROM-dump ioctl. */
133 #define BCM_EEDUMP_LEN(info_p, size) *((u32 *) &((info_p)->reserved1[24]))=size
137 #define BCM_EEDUMP_LEN(info_p, size) (info_p)->eedump_len=size
/* Driver feature switches: interrupt coalescing, NIC-memory send buffer
 * descriptors, tasklet-based deferred processing. */
142 #define BCM_INT_COAL 1
143 #define BCM_NIC_SEND_BD 1
146 #define BCM_TASKLET 1
/* NAPI receive polling is enabled when the kernel has netif_receive_skb. */
148 #if HAVE_NETIF_RECEIVE_SKB
149 #define BCM_NAPI_RXPOLL 1
153 #if defined(CONFIG_PPC64)
154 #define BCM_DISCONNECT_AT_CACHELINE 1
/* Drop ethtool ops not supported in this configuration (the surrounding
 * conditional for this branch is missing from the extraction). */
159 #undef ETHTOOL_GEEPROM
160 #undef ETHTOOL_SEEPROM
162 #undef ETHTOOL_GPAUSEPARAM
163 #undef ETHTOOL_GRXCSUM
166 #undef BCM_NIC_SEND_BD
174 #define BIG_ENDIAN_HOST 1
/* Byte-order helpers for device-visible 32-bit values. */
177 #define MM_SWAP_LE32(x) cpu_to_le32(x)
178 #define MM_SWAP_BE32(x) cpu_to_be32(x)
/* Kernels before 2.3.39 lack the __raw_* MMIO accessors; alias them. */
180 #if (LINUX_VERSION_CODE < 0x020327)
181 #define __raw_readl readl
182 #define __raw_writel writel
/* MMIO word access used by the hardware-independent (LM) layer. */
185 #define MM_MEMWRITEL(ptr, val) __raw_writel(val, ptr)
186 #define MM_MEMREADL(ptr) __raw_readl(ptr)
/* Map the LM layer's atomic type and operations onto Linux atomic_t. */
188 typedef atomic_t MM_ATOMIC_T
;
190 #define MM_ATOMIC_SET(ptr, val) atomic_set(ptr, val)
191 #define MM_ATOMIC_READ(ptr) atomic_read(ptr)
192 #define MM_ATOMIC_INC(ptr) atomic_inc(ptr)
193 #define MM_ATOMIC_ADD(ptr, val) atomic_add(val, ptr)
194 #define MM_ATOMIC_DEC(ptr) atomic_dec(ptr)
195 #define MM_ATOMIC_SUB(ptr, val) atomic_sub(val, ptr)
/* Memory/IO ordering barriers used around descriptor updates. */
204 #define MM_WMB() wmb()
205 #define MM_RMB() rmb()
206 #define MM_MMIOWB() mmiowb()
215 #define STATIC static
/* Size of the UM packet descriptor; the value is defined in the .c file. */
218 extern int MM_Packet_Desc_Size
;
220 #define MM_PACKET_DESC_SIZE MM_Packet_Desc_Size
/* Queue type for RX packets waiting for replacement socket buffers. */
222 DECLARE_QUEUE_TYPE(UM_RX_PACKET_Q
, MAX_RX_PACKET_DESC_COUNT
+1);
/* DMA-address compatibility for kernels predating the PCI mapping API. */
227 #if (LINUX_VERSION_CODE < 0x020211)
228 typedef u32 dma_addr_t
;
231 #if (LINUX_VERSION_CODE < 0x02032a)
232 #define pci_map_single(dev, address, size, dir) virt_to_bus(address)
233 #define pci_unmap_single(dev, dma_addr, size, dir)
237 #if (LINUX_VERSION_CODE >= 0x02040d)
239 typedef dma_addr_t dmaaddr_high_t
;
/* 32-bit x86 with HIGHMEM: dma_addr_t may be only 32 bits wide, so a
 * 64-bit-capable pci_map_page() substitute is provided for highmem pages. */
243 #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && !defined(CONFIG_X86_64)
245 #if defined(CONFIG_HIGHMEM64G)
246 typedef unsigned long long dmaaddr_high_t
;
248 typedef dma_addr_t dmaaddr_high_t
;
252 #define pci_map_page bcm_pci_map_page
/* NOTE(review): the body of bcm_pci_map_page is incomplete in this
 * extraction (braces, local declarations and the return are missing);
 * the visible line computes the bus address from the page's index in
 * mem_map times PAGE_SIZE plus the intra-page offset. */
255 static inline dmaaddr_high_t
256 bcm_pci_map_page(struct pci_dev
*dev
, struct page
*page
,
257 int offset
, size_t size
, int dir
)
261 phys
= (page
-mem_map
) * (dmaaddr_high_t
) PAGE_SIZE
+ offset
;
266 #ifndef pci_unmap_page
267 #define pci_unmap_page(dev, map, size, dir)
270 #else /* #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && ! defined(CONFIG_X86_64)*/
272 typedef dma_addr_t dmaaddr_high_t
;
274 /* Warning - This may not work for all architectures if HIGHMEM is defined */
277 #define pci_map_page(dev, page, offset, size, dir) \
278 pci_map_single(dev, page_address(page) + (offset), size, dir)
280 #ifndef pci_unmap_page
281 #define pci_unmap_page(dev, map, size, dir) \
282 pci_unmap_single(dev, map, size, dir)
285 #endif /* #if defined(CONFIG_HIGHMEM) && defined(CONFIG_X86) && ! defined(CONFIG_X86_64)*/
287 #endif /* #if (LINUX_VERSION_CODE >= 0x02040d)*/
288 #endif /* #if MAX_SKB_FRAGS*/
/* On 32-bit x86 DMA addresses fit the CPU word; unmap bookkeeping can be
 * compiled out entirely (see NO_PCI_UNMAP below). */
290 #if defined(CONFIG_X86) && !defined(CONFIG_X86_64)
291 #define NO_PCI_UNMAP 1
/* Provide the pci_unmap_addr/len bookkeeping macros for kernels before
 * 2.4.18.  With NO_PCI_UNMAP set they expand to nothing, saving space in
 * every packet descriptor on platforms that never need to unmap. */
294 #if (LINUX_VERSION_CODE < 0x020412)
295 #if !defined(NO_PCI_UNMAP)
296 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME;
297 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME;
/* NOTE(review): the accessor bodies of pci_unmap_addr/pci_unmap_len are
 * missing from this extraction (their continuation lines are absent, so
 * the trailing backslashes run adjacent definitions together). */
299 #define pci_unmap_addr(PTR, ADDR_NAME) \
302 #define pci_unmap_len(PTR, LEN_NAME) \
305 #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
306 (((PTR)->ADDR_NAME) = (VAL))
308 #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
309 (((PTR)->LEN_NAME) = (VAL))
/* NO_PCI_UNMAP branch: all bookkeeping compiled out. */
311 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
312 #define DECLARE_PCI_UNMAP_LEN(ADDR_NAME)
314 #define pci_unmap_addr(PTR, ADDR_NAME) 0
315 #define pci_unmap_len(PTR, LEN_NAME) 0
316 #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
317 #define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
/* Pre-2.3.14 kernels: struct device instead of net_device, no carrier ops. */
321 #if (LINUX_VERSION_CODE < 0x02030e)
322 #define net_device device
323 #define netif_carrier_on(dev)
324 #define netif_carrier_off(dev)
/* Pre-2.3.43 kernels: no tasklets; fall back to task queues. */
327 #if (LINUX_VERSION_CODE < 0x02032b)
328 #define tasklet_struct tq_struct
/*
 * Per-adapter state for the Linux (UM, "upper module") half of the driver.
 * The hardware-independent LM_DEVICE_BLOCK must stay the FIRST member so
 * the two can be cast back and forth.  NOTE(review): several conditional
 * members and their surrounding #ifdefs are missing from this extraction.
 */
331 typedef struct _UM_DEVICE_BLOCK
{
332 LM_DEVICE_BLOCK lm_dev
;
/* Associated Linux net device, PCI device, and module device list link. */
333 struct net_device
*dev
;
334 struct pci_dev
*pdev
;
335 struct net_device
*next_module
;
338 struct proc_dir_entry
*pfs_entry
;
/* DMA-consistent allocations made on behalf of the LM layer:
 * virtual address, bus address and size per slot. */
341 void *mem_list
[MAX_MEM
];
342 dma_addr_t dma_list
[MAX_MEM
];
343 int mem_size_list
[MAX_MEM
];
347 void *mem_list2
[MAX_MEM2
]; /* for diagnostics ioctl */
348 dma_addr_t dma_list2
[MAX_MEM2
];
349 __u64 cpu_pa_list2
[MAX_MEM2
];
350 int mem_size_list2
[MAX_MEM2
];
355 int using_dac
; /* dual address cycle */
356 int delayed_link_ind
; /* Delay link status during initial load */
357 int adapter_just_inited
; /* the first few seconds after init. */
/* Timer intervals (units not visible here -- presumably timer ticks;
 * TODO confirm against the .c file). */
359 int statstimer_interval
;
361 int crc_counter_expiry
;
362 int poll_tbi_interval
;
367 int line_speed
; /* in Mbps, 0 if link is down */
/* RX packets parked while waiting for replacement skbuffs, plus the
 * thresholds controlling when/how aggressively to replenish. */
368 UM_RX_PACKET_Q rx_out_of_buf_q
;
370 int rx_buf_repl_thresh
;
371 int rx_buf_repl_panic_thresh
;
372 int rx_buf_repl_isr_limit
;
374 struct timer_list timer
;
375 struct timer_list statstimer
;
/* global_lock serializes everything when do_global_lock is set;
 * undi_lock/undi_flags guard the UNDI register window otherwise. */
377 spinlock_t global_lock
;
378 spinlock_t undi_lock
;
380 unsigned long undi_flags
;
381 volatile unsigned long interrupt
;
384 volatile unsigned long tasklet_busy
;
385 struct tasklet_struct tasklet
;
386 struct net_device_stats stats
;
/* NICE (Broadcom intercept) RX hook -- its surrounding #ifdef
 * NICE_SUPPORT opener is missing from this extraction. */
388 void (*nice_rx
)( struct sk_buff
*, void* );
390 #endif /* NICE_SUPPORT */
392 int intr_test_result
;
393 #ifdef NETIF_F_HW_VLAN_TX
394 struct vlan_group
*vlgrp
;
396 int vlan_tag_mode
; /* Setting to allow ASF to work properly with */
398 #define VLAN_TAG_MODE_AUTO_STRIP 0
399 #define VLAN_TAG_MODE_NORMAL_STRIP 1
400 #define VLAN_TAG_MODE_FORCED_STRIP 2
402 /* Auto mode - VLAN TAGs are always stripped if ASF is enabled, */
403 /* If ASF is not enabled, it will be in normal mode. */
404 /* Normal mode - VLAN TAGs are stripped when VLANs are registered */
405 /* Forced mode - VLAN TAGs are always stripped. */
/* Adaptive interrupt coalescing state. */
407 int adaptive_coalesce
;
410 uint rx_curr_coalesce_frames
;
411 uint rx_curr_coalesce_frames_intr
;
412 uint rx_curr_coalesce_ticks
;
413 uint tx_curr_coalesce_frames
;
/* Driver-maintained soft counters (zero-copy TX, checksum offload hits,
 * highmem TX buffers, RX checksum results, TSO packets, misc errors). */
415 unsigned long tx_zc_count
;
416 unsigned long tx_chksum_count
;
417 unsigned long tx_himem_count
;
418 unsigned long rx_good_chksum_count
;
420 unsigned long rx_bad_chksum_count
;
422 unsigned long tso_pkt_count
;
424 unsigned long rx_misc_errors
;
425 uint64_t phy_crc_count
;
426 unsigned int spurious_int
;
429 unsigned long boardflags
;
436 } UM_DEVICE_BLOCK
, *PUM_DEVICE_BLOCK
;
/*
 * Per-packet UM state appended to each LM packet: the owning sk_buff plus
 * pci-unmap bookkeeping for the linear buffer and any page fragments.
 * NOTE(review): the #if MAX_SKB_FRAGS / #else / #endif that selects
 * between the two DECLARE pairs is missing from this extraction.
 */
438 typedef struct _UM_PACKET
{
440 struct sk_buff
*skbuff
;
442 DECLARE_PCI_UNMAP_ADDR(map
[MAX_SKB_FRAGS
+ 1])
443 DECLARE_PCI_UNMAP_LEN(map_len
[MAX_SKB_FRAGS
+ 1])
445 DECLARE_PCI_UNMAP_ADDR(map
[1])
446 DECLARE_PCI_UNMAP_LEN(map_len
[1])
448 } UM_PACKET
, *PUM_PACKET
;
/*
 * Split a bus address into the LM layer's {High, Low} 32-bit pair.
 * On 64-bit builds both halves are filled from the address; on 32-bit
 * only Low is set.  NOTE(review): the function braces, the 32-bit
 * branch's High store (if any) and the #else/#endif lines are missing
 * from this extraction.
 */
450 static inline void MM_SetAddr(LM_PHYSICAL_ADDRESS
*paddr
, dma_addr_t addr
)
452 #if (BITS_PER_LONG == 64)
453 paddr
->High
= ((unsigned long) addr
) >> 32;
454 paddr
->Low
= ((unsigned long) addr
) & 0xffffffff;
457 paddr
->Low
= (unsigned long) addr
;
/*
 * Store a bus address into a T3 chip 64-bit host address ({High, Low}).
 * Same splitting logic as MM_SetAddr but for the device descriptor
 * format.  NOTE(review): braces and #else/#endif lines are missing from
 * this extraction.
 */
461 static inline void MM_SetT3Addr(T3_64BIT_HOST_ADDR
*paddr
, dma_addr_t addr
)
463 #if (BITS_PER_LONG == 64)
464 paddr
->High
= ((unsigned long) addr
) >> 32;
465 paddr
->Low
= ((unsigned long) addr
) & 0xffffffff;
468 paddr
->Low
= (unsigned long) addr
;
/*
 * Store a possibly >32-bit highmem bus address into a T3 64-bit host
 * address.  On 32-bit x86 with 64G highmem the 64-bit value is split
 * manually; otherwise it defers to MM_SetT3Addr().  NOTE(review): the
 * second parameter line (presumably dmaaddr_high_t addr), the braces
 * and the #else/#endif are missing from this extraction.
 */
473 static inline void MM_SetT3AddrHigh(T3_64BIT_HOST_ADDR
*paddr
,
476 #if defined(CONFIG_HIGHMEM64G) && defined(CONFIG_X86) && !defined(CONFIG_X86_64)
477 paddr
->High
= (unsigned long) (addr
>> 32);
478 paddr
->Low
= (unsigned long) (addr
& 0xffffffff);
480 MM_SetT3Addr(paddr
, (dma_addr_t
) addr
);
/*
 * Map an RX packet's data buffer for DMA and write the bus address into
 * *paddr, recording the mapping for the later unmap.  NOTE(review):
 * incomplete in this extraction -- the local `map` declaration, the
 * buffer-pointer argument to pci_map_single() and its DMA direction
 * argument are among the missing lines.
 */
485 static inline void MM_MapRxDma(PLM_DEVICE_BLOCK pDevice
,
486 struct _LM_PACKET
*pPacket
,
487 T3_64BIT_HOST_ADDR
*paddr
)
490 struct sk_buff
*skb
= ((struct _UM_PACKET
*) pPacket
)->skbuff
;
492 map
= pci_map_single(((struct _UM_DEVICE_BLOCK
*)pDevice
)->pdev
,
494 pPacket
->u
.Rx
.RxBufferSize
,
/* Remember the mapping so it can be passed to pci_unmap_single later. */
496 pci_unmap_addr_set(((struct _UM_PACKET
*) pPacket
), map
[0], map
);
497 MM_SetT3Addr(paddr
, map
);
/*
 * Map a TX packet for DMA: the linear skb data first, then (for packets
 * with page fragments) each fragment via pci_map_page().  Every mapping
 * is recorded with pci_unmap_addr_set/pci_unmap_len_set for the later
 * unmap, and the bus address is written out through paddr.
 * NOTE(review): heavily truncated in this extraction -- local variable
 * declarations, the remaining parameters, the fragment-loop header and
 * several argument lines are missing; code below is preserved verbatim.
 */
500 static inline void MM_MapTxDma(PLM_DEVICE_BLOCK pDevice
,
501 struct _LM_PACKET
*pPacket
,
502 T3_64BIT_HOST_ADDR
*paddr
,
507 struct sk_buff
*skb
= ((struct _UM_PACKET
*) pPacket
)->skbuff
;
/* For a fragmented skb only the linear head (len - data_len) is mapped
 * here; paged data is handled per-fragment below. */
512 if (skb_shinfo(skb
)->nr_frags
)
513 length
= skb
->len
- skb
->data_len
;
517 map
= pci_map_single(((struct _UM_DEVICE_BLOCK
*)pDevice
)->pdev
,
518 skb
->data
, length
, PCI_DMA_TODEVICE
);
519 MM_SetT3Addr(paddr
, map
);
520 pci_unmap_addr_set(((struct _UM_PACKET
*)pPacket
), map
[0], map
);
521 pci_unmap_len_set(((struct _UM_PACKET
*) pPacket
), map_len
[0],
/* Fragment path: frag indexes map[]/map_len[] starting at 1
 * (slot 0 is the linear head mapped above). */
528 dmaaddr_high_t hi_map
;
530 sk_frag
= &skb_shinfo(skb
)->frags
[frag
- 1];
532 hi_map
= pci_map_page(
533 ((struct _UM_DEVICE_BLOCK
*)pDevice
)->pdev
,
535 sk_frag
->page_offset
,
536 sk_frag
->size
, PCI_DMA_TODEVICE
);
538 MM_SetT3AddrHigh(paddr
, hi_map
);
539 pci_unmap_addr_set(((struct _UM_PACKET
*) pPacket
), map
[frag
],
541 pci_unmap_len_set(((struct _UM_PACKET
*) pPacket
),
542 map_len
[frag
], sk_frag
->size
);
543 *len
= sk_frag
->size
;
/*
 * PHY/UNDI locking helpers.  When do_global_lock is set (chips requiring
 * fully serialized register access) the single global_lock is taken;
 * otherwise the dedicated phy_lock/undi_lock is used.  The *_IN_IRQ
 * variants skip the lock entirely under the global lock, since the ISR
 * already holds it.  NOTE(review): these multi-line macros are truncated
 * in this extraction -- the `spinlock_t *lock;` declarations, closing
 * braces and several #else lines are missing, and trailing backslashes
 * make adjacent definitions run together; no comments are inserted below
 * to avoid altering the continuation structure.
 */
548 #define BCM5700_PHY_LOCK(pUmDevice, flags) { \
550 if ((pUmDevice)->do_global_lock) { \
551 lock = &(pUmDevice)->global_lock; \
554 lock = &(pUmDevice)->phy_lock; \
556 spin_lock_irqsave(lock, flags); \
559 #define BCM5700_PHY_UNLOCK(pUmDevice, flags) { \
561 if ((pUmDevice)->do_global_lock) { \
562 lock = &(pUmDevice)->global_lock; \
565 lock = &(pUmDevice)->phy_lock; \
567 spin_unlock_irqrestore(lock, flags); \
571 #define MM_ACQUIRE_UNDI_LOCK(_pDevice) \
572 if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
573 unsigned long flags; \
574 spin_lock_irqsave(&((PUM_DEVICE_BLOCK)(_pDevice))->undi_lock, flags); \
575 ((PUM_DEVICE_BLOCK)(_pDevice))->undi_flags = flags; \
578 #define MM_RELEASE_UNDI_LOCK(_pDevice) \
579 if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
580 unsigned long flags = ((PUM_DEVICE_BLOCK) (_pDevice))->undi_flags; \
581 spin_unlock_irqrestore(&((PUM_DEVICE_BLOCK)(_pDevice))->undi_lock, flags); \
584 #define MM_ACQUIRE_PHY_LOCK_IN_IRQ(_pDevice) \
585 if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
586 spin_lock(&((PUM_DEVICE_BLOCK)(_pDevice))->phy_lock); \
589 #define MM_RELEASE_PHY_LOCK_IN_IRQ(_pDevice) \
590 if (!(((PUM_DEVICE_BLOCK)(_pDevice))->do_global_lock)) { \
591 spin_unlock(&((PUM_DEVICE_BLOCK)(_pDevice))->phy_lock); \
594 #define MM_UINT_PTR(_ptr) ((unsigned long) (_ptr))
/*
 * Read a 64-bit statistics counter stored as {Low, High} 32-bit halves.
 * The expansion is fully parenthesized so the macro composes correctly
 * inside larger expressions (e.g. MM_GETSTATS64(c) * 2); the original
 * left the "High << 32" term exposed to higher-precedence neighbours.
 * NOTE(review): stray extraction line-number tokens were also removed
 * from these two definitions.
 */
#define MM_GETSTATS64(_Ctr) \
	((uint64_t) (_Ctr).Low + ((uint64_t) (_Ctr).High << 32))

/* Read only the low 32 bits of a statistics counter. */
#define MM_GETSTATS32(_Ctr) \
	((uint32_t) (_Ctr).Low)
/* MM_GETSTATS reads a counter at the platform's native word width. */
602 #if (BITS_PER_LONG == 64)
603 #define MM_GETSTATS(_Ctr) (unsigned long) MM_GETSTATS64(_Ctr)
605 #define MM_GETSTATS(_Ctr) (unsigned long) MM_GETSTATS32(_Ctr)
/* On >=2.6 kernels, fall back to plain memcpy when called in atomic
 * context, since copy_to_user/copy_from_user may sleep. */
608 #if (LINUX_VERSION_CODE >= 0x020600)
609 #define mm_copy_to_user( to, from, size ) \
610 (in_atomic() ? (memcpy((to),(from),(size)), 0) : copy_to_user((to),(from),(size)))
611 #define mm_copy_from_user( to, from, size ) \
612 (in_atomic() ? (memcpy((to),(from),(size)), 0) : copy_from_user((to),(from),(size)))
614 #define mm_copy_to_user( to, from, size ) \
615 copy_to_user((to),(from),(size) )
616 #define mm_copy_from_user( to, from, size ) \
617 copy_from_user((to),(from),(size))
/* Route the LM layer's printf/debug output to the kernel log. */
621 #define printf(fmt, args...) printk(KERN_WARNING fmt, ##args)
624 #define DbgPrint(fmt, arg...) printk(KERN_DEBUG fmt, ##arg)
/* Software breakpoint: int 0x81 (129) on x86, no-op elsewhere. */
625 #if defined(CONFIG_X86)
626 #define DbgBreakPoint() __asm__("int $129")
628 #define DbgBreakPoint()
630 #define MM_Wait(time) udelay(time)