/*
 * Linux OS Independent Layer
 *
 * Copyright (C) 2010, Broadcom Corporation. All Rights Reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $Id: linux_osl.c,v 1.172.2.21 2011-01-27 17:03:39 Exp $
 */
24 #include <bcmendian.h>
29 #include <linux/delay.h>
31 #include <asm/paccess.h>
39 #define PCI_CFG_RETRY 10
41 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognise osh */
42 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
44 typedef struct bcm_mem_link
{
45 struct bcm_mem_link
*prev
;
46 struct bcm_mem_link
*next
;
50 char file
[BCM_MEM_FILENAME_LEN
];
53 #if defined(DSLCPE_DELAY_NOT_YET)
72 bcm_mem_link_t
*dbgmem_list
;
73 spinlock_t dbgmem_lock
;
74 #if defined(DSLCPE_DELAY)
75 shared_osl_t
*oshsh
; /* osh shared */
77 #ifdef BCMDBG_PKT /* pkt logging for debugging */
78 spinlock_t pktlist_lock
;
79 pktlist_info_t pktlist
;
80 #endif /* BCMDBG_PKT */
81 spinlock_t pktalloc_lock
;
84 /* PCMCIA attribute space access macros */
85 #if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
87 dev_link_t link
; /* PCMCIA device pointer */
88 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
89 dev_node_t node
; /* PCMCIA node structure */
91 void *base
; /* Mapped attribute memory window */
92 size_t size
; /* Size of window */
93 void *drv
; /* Driver data */
95 #endif /* defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) */
97 /* Global ASSERT type flag */
98 uint32 g_assert_type
= FALSE
;
100 static int16 linuxbcmerrormap
[] =
102 -EINVAL
, /* BCME_ERROR */
103 -EINVAL
, /* BCME_BADARG */
104 -EINVAL
, /* BCME_BADOPTION */
105 -EINVAL
, /* BCME_NOTUP */
106 -EINVAL
, /* BCME_NOTDOWN */
107 -EINVAL
, /* BCME_NOTAP */
108 -EINVAL
, /* BCME_NOTSTA */
109 -EINVAL
, /* BCME_BADKEYIDX */
110 -EINVAL
, /* BCME_RADIOOFF */
111 -EINVAL
, /* BCME_NOTBANDLOCKED */
112 -EINVAL
, /* BCME_NOCLK */
113 -EINVAL
, /* BCME_BADRATESET */
114 -EINVAL
, /* BCME_BADBAND */
115 -E2BIG
, /* BCME_BUFTOOSHORT */
116 -E2BIG
, /* BCME_BUFTOOLONG */
117 -EBUSY
, /* BCME_BUSY */
118 -EINVAL
, /* BCME_NOTASSOCIATED */
119 -EINVAL
, /* BCME_BADSSIDLEN */
120 -EINVAL
, /* BCME_OUTOFRANGECHAN */
121 -EINVAL
, /* BCME_BADCHAN */
122 -EFAULT
, /* BCME_BADADDR */
123 -ENOMEM
, /* BCME_NORESOURCE */
124 -EOPNOTSUPP
, /* BCME_UNSUPPORTED */
125 -EMSGSIZE
, /* BCME_BADLENGTH */
126 -EINVAL
, /* BCME_NOTREADY */
127 -EPERM
, /* BCME_NOTPERMITTED */
128 -ENOMEM
, /* BCME_NOMEM */
129 -EINVAL
, /* BCME_ASSOCIATED */
130 -ERANGE
, /* BCME_RANGE */
131 -EINVAL
, /* BCME_NOTFOUND */
132 -EINVAL
, /* BCME_WME_NOT_ENABLED */
133 -EINVAL
, /* BCME_TSPEC_NOTFOUND */
134 -EINVAL
, /* BCME_ACM_NOTSUPPORTED */
135 -EINVAL
, /* BCME_NOT_WME_ASSOCIATION */
136 -EIO
, /* BCME_SDIO_ERROR */
137 -ENODEV
, /* BCME_DONGLE_DOWN */
138 -EINVAL
, /* BCME_VERSION */
139 -EIO
, /* BCME_TXFAIL */
140 -EIO
, /* BCME_RXFAIL */
141 -EINVAL
, /* BCME_NODEVICE */
142 -EINVAL
, /* BCME_NMODE_DISABLED */
143 -ENODATA
, /* BCME_NONRESIDENT */
145 /* When an new error code is added to bcmutils.h, add os
146 * spcecific error translation here as well
148 /* check if BCME_LAST changed since the last time this function was updated */
150 #error "You need to add a OS error translation in the linuxbcmerrormap \
151 for new error code defined in bcmutils.h"
155 /* translate bcmerrors into linux errors */
157 osl_error(int bcmerror
)
161 else if (bcmerror
< BCME_LAST
)
162 bcmerror
= BCME_ERROR
;
164 /* Array bounds covered by ASSERT in osl_attach */
165 return linuxbcmerrormap
[-bcmerror
];
169 osl_attach(void *pdev
, uint bustype
, bool pkttag
)
173 osh
= kmalloc(sizeof(osl_t
), GFP_ATOMIC
);
176 bzero(osh
, sizeof(osl_t
));
178 /* Check that error map has the right number of entries in it */
179 ASSERT(ABS(BCME_LAST
) == (ARRAYSIZE(linuxbcmerrormap
) - 1));
181 osh
->magic
= OS_HANDLE_MAGIC
;
182 atomic_set(&osh
->malloced
, 0);
184 osh
->dbgmem_list
= NULL
;
185 spin_lock_init(&(osh
->dbgmem_lock
));
187 osh
->pub
.pkttag
= pkttag
;
188 osh
->bustype
= bustype
;
194 osh
->pub
.mmbus
= TRUE
;
201 osh
->pub
.mmbus
= FALSE
;
209 spin_lock_init(&(osh
->pktlist_lock
));
211 spin_lock_init(&(osh
->pktalloc_lock
));
215 ASSERT(OSL_PKTTAG_SZ
<= sizeof(skb
->cb
));
222 osl_detach(osl_t
*osh
)
227 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
233 #ifdef CTFPOOL_SPINLOCK
234 #define CTFPOOL_LOCK(ctfpool, flags) spin_lock_irqsave(&(ctfpool)->lock, flags)
235 #define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_irqrestore(&(ctfpool)->lock, flags)
237 #define CTFPOOL_LOCK(ctfpool, flags) spin_lock_bh(&(ctfpool)->lock)
238 #define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_bh(&(ctfpool)->lock)
239 #endif /* CTFPOOL_SPINLOCK */
241 * Allocate and add an object to packet pool.
244 osl_ctfpool_add(osl_t
*osh
)
247 #ifdef CTFPOOL_SPINLOCK
249 #endif /* CTFPOOL_SPINLOCK */
251 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
254 CTFPOOL_LOCK(osh
->ctfpool
, flags
);
255 ASSERT(osh
->ctfpool
->curr_obj
<= osh
->ctfpool
->max_obj
);
257 /* No need to allocate more objects */
258 if (osh
->ctfpool
->curr_obj
== osh
->ctfpool
->max_obj
) {
259 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
263 /* Allocate a new skb and add it to the ctfpool */
264 skb
= dev_alloc_skb(osh
->ctfpool
->obj_size
);
266 printf("%s: skb alloc of len %d failed\n", __FUNCTION__
,
267 osh
->ctfpool
->obj_size
);
268 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
273 skb
->next
= (struct sk_buff
*)osh
->ctfpool
->head
;
274 osh
->ctfpool
->head
= skb
;
275 osh
->ctfpool
->fast_frees
++;
276 osh
->ctfpool
->curr_obj
++;
278 /* Hijack a skb member to store ptr to ctfpool */
279 CTFPOOLPTR(osh
, skb
) = (void *)osh
->ctfpool
;
281 /* Use bit flag to indicate skb from fast ctfpool */
282 PKTFAST(osh
, skb
) = FASTBUF
;
284 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
290 * Add new objects to the pool.
293 osl_ctfpool_replenish(osl_t
*osh
, uint thresh
)
295 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
298 /* Do nothing if no refills are required */
299 while ((osh
->ctfpool
->refills
> 0) && (thresh
--)) {
300 osl_ctfpool_add(osh
);
301 osh
->ctfpool
->refills
--;
306 * Initialize the packet pool with specified number of objects.
309 osl_ctfpool_init(osl_t
*osh
, uint numobj
, uint size
)
311 osh
->ctfpool
= kmalloc(sizeof(ctfpool_t
), GFP_ATOMIC
);
312 ASSERT(osh
->ctfpool
);
313 bzero(osh
->ctfpool
, sizeof(ctfpool_t
));
315 osh
->ctfpool
->max_obj
= numobj
;
316 osh
->ctfpool
->obj_size
= size
;
318 spin_lock_init(&osh
->ctfpool
->lock
);
321 if (!osl_ctfpool_add(osh
))
323 osh
->ctfpool
->fast_frees
--;
330 * Cleanup the packet pool objects.
333 osl_ctfpool_cleanup(osl_t
*osh
)
335 struct sk_buff
*skb
, *nskb
;
336 #ifdef CTFPOOL_SPINLOCK
338 #endif /* CTFPOOL_SPINLOCK */
340 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
343 CTFPOOL_LOCK(osh
->ctfpool
, flags
);
345 skb
= osh
->ctfpool
->head
;
347 while (skb
!= NULL
) {
351 osh
->ctfpool
->curr_obj
--;
354 ASSERT(osh
->ctfpool
->curr_obj
== 0);
355 osh
->ctfpool
->head
= NULL
;
356 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
363 osl_ctfpool_stats(osl_t
*osh
, void *b
)
365 struct bcmstrbuf
*bb
;
367 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
372 ASSERT((osh
!= NULL
) && (bb
!= NULL
));
374 bcm_bprintf(bb
, "max_obj %d obj_size %d curr_obj %d refills %d\n",
375 osh
->ctfpool
->max_obj
, osh
->ctfpool
->obj_size
,
376 osh
->ctfpool
->curr_obj
, osh
->ctfpool
->refills
);
377 bcm_bprintf(bb
, "fast_allocs %d fast_frees %d slow_allocs %d\n",
378 osh
->ctfpool
->fast_allocs
, osh
->ctfpool
->fast_frees
,
379 osh
->ctfpool
->slow_allocs
);
382 static inline struct sk_buff
*
383 osl_pktfastget(osl_t
*osh
, uint len
)
386 #ifdef CTFPOOL_SPINLOCK
388 #endif /* CTFPOOL_SPINLOCK */
390 /* Try to do fast allocate. Return null if ctfpool is not in use
391 * or if there are no items in the ctfpool.
393 if (osh
->ctfpool
== NULL
)
396 CTFPOOL_LOCK(osh
->ctfpool
, flags
);
397 if (osh
->ctfpool
->head
== NULL
) {
398 ASSERT(osh
->ctfpool
->curr_obj
== 0);
399 osh
->ctfpool
->slow_allocs
++;
400 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
404 ASSERT(len
<= osh
->ctfpool
->obj_size
);
406 /* Get an object from ctfpool */
407 skb
= (struct sk_buff
*)osh
->ctfpool
->head
;
408 osh
->ctfpool
->head
= (void *)skb
->next
;
410 osh
->ctfpool
->fast_allocs
++;
411 osh
->ctfpool
->curr_obj
--;
412 ASSERT(CTFPOOLHEAD(osh
, skb
) == (struct sock
*)osh
->ctfpool
->head
);
413 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
415 /* Init skb struct */
416 skb
->next
= skb
->prev
= NULL
;
417 skb
->data
= skb
->head
+ 16;
418 skb
->tail
= skb
->head
+ 16;
422 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
425 atomic_set(&skb
->users
, 1);
430 /* Convert a driver packet to native(OS) packet
431 * In the process, packettag is zeroed out before sending up
432 * IP code depends on skb->cb to be setup correctly with various options
433 * In our case, that means it should be 0
435 struct sk_buff
* BCMFASTPATH
436 osl_pkt_tonative(osl_t
*osh
, void *pkt
)
439 struct sk_buff
*nskb
;
444 bzero((void*)((struct sk_buff
*)pkt
)->cb
, OSL_PKTTAG_SZ
);
447 /* Decrement the packet counter */
448 for (nskb
= (struct sk_buff
*)pkt
; nskb
; nskb
= nskb
->next
) {
450 spin_lock_irqsave(&osh
->pktlist_lock
, flags
);
451 pktlist_remove(&(osh
->pktlist
), (void *) nskb
);
452 spin_unlock_irqrestore(&osh
->pktlist_lock
, flags
);
453 #endif /* BCMDBG_PKT */
454 spin_lock_irqsave(&osh
->pktalloc_lock
, flags
);
455 osh
->pub
.pktalloced
--;
456 spin_unlock_irqrestore(&osh
->pktalloc_lock
, flags
);
459 return (struct sk_buff
*)pkt
;
462 /* Convert a native(OS) packet to driver packet.
463 * In the process, native packet is destroyed, there is no copying
464 * Also, a packettag is zeroed out
468 osl_pkt_frmnative(osl_t
*osh
, void *pkt
, int line
, char *file
)
469 #else /* BCMDBG_PKT pkt logging for debugging */
471 osl_pkt_frmnative(osl_t
*osh
, void *pkt
)
472 #endif /* BCMDBG_PKT */
475 struct sk_buff
*nskb
;
480 bzero((void*)((struct sk_buff
*)pkt
)->cb
, OSL_PKTTAG_SZ
);
483 /* Increment the packet counter */
484 for (nskb
= (struct sk_buff
*)pkt
; nskb
; nskb
= nskb
->next
) {
486 spin_lock_irqsave(&osh
->pktlist_lock
, flags
);
487 pktlist_add(&(osh
->pktlist
), (void *) nskb
, line
, file
);
488 spin_unlock_irqrestore(&osh
->pktlist_lock
, flags
);
489 #endif /* BCMDBG_PKT */
490 spin_lock_irqsave(&osh
->pktalloc_lock
, flags
);
491 osh
->pub
.pktalloced
++;
492 spin_unlock_irqrestore(&osh
->pktalloc_lock
, flags
);
498 /* Return a new packet. zero out pkttag */
501 osl_pktget(osl_t
*osh
, uint len
, int line
, char *file
)
502 #else /* BCMDBG_PKT */
504 osl_pktget(osl_t
*osh
, uint len
)
505 #endif /* BCMDBG_PKT */
511 /* Allocate from local pool */
512 skb
= osl_pktfastget(osh
, len
);
513 if ((skb
!= NULL
) || ((skb
= dev_alloc_skb(len
)) != NULL
)) {
515 if ((skb
= dev_alloc_skb(len
))) {
521 spin_lock_irqsave(&osh
->pktlist_lock
, flags
);
522 pktlist_add(&(osh
->pktlist
), (void *) skb
, line
, file
);
523 spin_unlock_irqrestore(&osh
->pktlist_lock
, flags
);
526 spin_lock_irqsave(&osh
->pktalloc_lock
, flags
);
527 osh
->pub
.pktalloced
++;
528 spin_unlock_irqrestore(&osh
->pktalloc_lock
, flags
);
531 return ((void*) skb
);
536 osl_pktfastfree(osl_t
*osh
, struct sk_buff
*skb
)
539 #ifdef CTFPOOL_SPINLOCK
541 #endif /* CTFPOOL_SPINLOCK */
543 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
544 skb
->tstamp
.tv
.sec
= 0;
546 skb
->stamp
.tv_sec
= 0;
549 /* We only need to init the fields that we change */
552 memset(skb
->cb
, 0, sizeof(skb
->cb
));
554 skb
->destructor
= NULL
;
556 ctfpool
= (ctfpool_t
*)CTFPOOLPTR(osh
, skb
);
558 ASSERT(ctfpool
!= NULL
);
560 if (ctfpool
== NULL
) return;
563 /* Add object to the ctfpool */
564 CTFPOOL_LOCK(ctfpool
, flags
);
565 skb
->next
= (struct sk_buff
*)ctfpool
->head
;
566 ctfpool
->head
= (void *)skb
;
568 ctfpool
->fast_frees
++;
571 ASSERT(ctfpool
->curr_obj
<= ctfpool
->max_obj
);
572 CTFPOOL_UNLOCK(ctfpool
, flags
);
576 /* Free the driver packet. Free the tag if present */
578 osl_pktfree(osl_t
*osh
, void *p
, bool send
)
580 struct sk_buff
*skb
, *nskb
;
583 skb
= (struct sk_buff
*) p
;
585 if (send
&& osh
->pub
.tx_fn
)
586 osh
->pub
.tx_fn(osh
->pub
.tx_ctx
, p
, 0);
588 PKTDBG_TRACE(osh
, (void *) skb
, PKTLIST_PKTFREE
);
590 /* perversion: we use skb->next to chain multi-skb packets */
596 spin_lock_irqsave(&osh
->pktlist_lock
, flags
);
597 pktlist_remove(&(osh
->pktlist
), (void *) skb
);
598 spin_unlock_irqrestore(&osh
->pktlist_lock
, flags
);
602 /* Clear the map ptr before freeing */
604 CTFMAPPTR(osh
, skb
) = NULL
;
608 if ((PKTISFAST(osh
, skb
)) && (atomic_read(&skb
->users
) == 1))
609 osl_pktfastfree(osh
, skb
);
616 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
619 dev_kfree_skb_any(skb
);
621 /* can free immediately (even in_irq()) if destructor
626 spin_lock_irqsave(&osh
->pktalloc_lock
, flags
);
627 osh
->pub
.pktalloced
--;
628 spin_unlock_irqrestore(&osh
->pktalloc_lock
, flags
);
634 osl_pci_read_config(osl_t
*osh
, uint offset
, uint size
)
637 uint retry
= PCI_CFG_RETRY
;
639 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
641 /* only 4byte access supported */
645 pci_read_config_dword(osh
->pdev
, offset
, &val
);
646 if (val
!= 0xffffffff)
651 if (retry
< PCI_CFG_RETRY
)
652 printk("PCI CONFIG READ access to %d required %d retries\n", offset
,
653 (PCI_CFG_RETRY
- retry
));
660 osl_pci_write_config(osl_t
*osh
, uint offset
, uint size
, uint val
)
662 uint retry
= PCI_CFG_RETRY
;
664 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
666 /* only 4byte access supported */
670 pci_write_config_dword(osh
->pdev
, offset
, val
);
671 if (offset
!= PCI_BAR0_WIN
)
673 if (osl_pci_read_config(osh
, offset
, size
) == val
)
678 if (retry
< PCI_CFG_RETRY
)
679 printk("PCI CONFIG WRITE access to %d required %d retries\n", offset
,
680 (PCI_CFG_RETRY
- retry
));
684 /* return bus # for the pci device pointed by osh->pdev */
686 osl_pci_bus(osl_t
*osh
)
688 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
690 return ((struct pci_dev
*)osh
->pdev
)->bus
->number
;
693 /* return slot # for the pci device pointed by osh->pdev */
695 osl_pci_slot(osl_t
*osh
)
697 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
699 return PCI_SLOT(((struct pci_dev
*)osh
->pdev
)->devfn
);
703 osl_pcmcia_attr(osl_t
*osh
, uint offset
, char *buf
, int size
, bool write
)
708 osl_pcmcia_read_attr(osl_t
*osh
, uint offset
, void *buf
, int size
)
710 osl_pcmcia_attr(osh
, offset
, (char *) buf
, size
, FALSE
);
714 osl_pcmcia_write_attr(osl_t
*osh
, uint offset
, void *buf
, int size
)
716 osl_pcmcia_attr(osh
, offset
, (char *) buf
, size
, TRUE
);
720 /* In BCMDBG_MEM configurations osl_malloc is only used internally in
721 * the implementation of osl_debug_malloc. Because we are using the GCC
722 * -Wstrict-prototypes compile option, we must always have a prototype
723 * for a global/external function. So make osl_malloc static in
724 * the BCMDBG_MEM case.
729 osl_malloc(osl_t
*osh
, uint size
)
733 /* only ASSERT if osh is defined */
735 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
737 if ((addr
= kmalloc(size
, GFP_ATOMIC
)) == NULL
) {
743 atomic_add(size
, &osh
->malloced
);
749 /* In BCMDBG_MEM configurations osl_mfree is only used internally in
750 * the implementation of osl_debug_mfree. Because we are using the GCC
751 * -Wstrict-prototypes compile option, we must always have a prototype
752 * for a global/external function. So make osl_mfree static in
753 * the BCMDBG_MEM case.
758 osl_mfree(osl_t
*osh
, void *addr
, uint size
)
761 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
762 atomic_sub(size
, &osh
->malloced
);
768 osl_malloced(osl_t
*osh
)
770 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
771 return (atomic_read(&osh
->malloced
));
775 osl_malloc_failed(osl_t
*osh
)
777 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
778 return (osh
->failed
);
782 #define MEMLIST_LOCK(osh, flags) spin_lock_irqsave(&(osh)->dbgmem_lock, flags)
783 #define MEMLIST_UNLOCK(osh, flags) spin_unlock_irqrestore(&(osh)->dbgmem_lock, flags)
786 osl_debug_malloc(osl_t
*osh
, uint size
, int line
, char* file
)
790 unsigned long flags
= 0;
793 printk("%s: allocating zero sized mem at %s line %d\n", __FUNCTION__
, file
, line
);
798 MEMLIST_LOCK(osh
, flags
);
800 if ((p
= (bcm_mem_link_t
*)osl_malloc(osh
, sizeof(bcm_mem_link_t
) + size
)) == NULL
) {
802 MEMLIST_UNLOCK(osh
, flags
);
809 p
->osh
= (void *)osh
;
811 basename
= strrchr(file
, '/');
819 strncpy(p
->file
, basename
, BCM_MEM_FILENAME_LEN
);
820 p
->file
[BCM_MEM_FILENAME_LEN
- 1] = '\0';
822 /* link this block */
825 p
->next
= osh
->dbgmem_list
;
828 osh
->dbgmem_list
= p
;
829 MEMLIST_UNLOCK(osh
, flags
);
836 osl_debug_mfree(osl_t
*osh
, void *addr
, uint size
, int line
, char* file
)
838 bcm_mem_link_t
*p
= (bcm_mem_link_t
*)((int8
*)addr
- sizeof(bcm_mem_link_t
));
839 unsigned long flags
= 0;
841 ASSERT(osh
== NULL
|| osh
->magic
== OS_HANDLE_MAGIC
);
844 printk("osl_debug_mfree: double free on addr %p size %d at line %d file %s\n",
845 addr
, size
, line
, file
);
850 if (p
->size
!= size
) {
851 printk("osl_debug_mfree: dealloc size %d does not match alloc size %d on addr %p"
852 " at line %d file %s\n",
853 size
, p
->size
, addr
, line
, file
);
854 ASSERT(p
->size
== size
);
858 if (p
->osh
!= (void *)osh
) {
859 printk("osl_debug_mfree: alloc osh %p does not match dealloc osh %p\n",
861 printk("Dealloc addr %p size %d at line %d file %s\n", addr
, size
, line
, file
);
862 printk("Alloc size %d line %d file %s\n", p
->size
, p
->line
, p
->file
);
863 ASSERT(p
->osh
== (void *)osh
);
867 /* unlink this block */
869 MEMLIST_LOCK(osh
, flags
);
871 p
->prev
->next
= p
->next
;
873 p
->next
->prev
= p
->prev
;
874 if (osh
->dbgmem_list
== p
)
875 osh
->dbgmem_list
= p
->next
;
876 p
->next
= p
->prev
= NULL
;
880 osl_mfree(osh
, p
, size
+ sizeof(bcm_mem_link_t
));
882 MEMLIST_UNLOCK(osh
, flags
);
887 osl_debug_memdump(osl_t
*osh
, struct bcmstrbuf
*b
)
890 unsigned long flags
= 0;
892 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
894 MEMLIST_LOCK(osh
, flags
);
895 if (osh
->dbgmem_list
) {
897 bcm_bprintf(b
, " Address Size File:line\n");
899 printf(" Address Size File:line\n");
901 for (p
= osh
->dbgmem_list
; p
; p
= p
->next
) {
903 bcm_bprintf(b
, "%p %6d %s:%d\n", (char*)p
+ sizeof(bcm_mem_link_t
),
904 p
->size
, p
->file
, p
->line
);
906 printf("%p %6d %s:%d\n", (char*)p
+ sizeof(bcm_mem_link_t
),
907 p
->size
, p
->file
, p
->line
);
909 /* Detects loop-to-self so we don't enter infinite loop */
912 bcm_bprintf(b
, "WARNING: loop-to-self "
913 "p %p p->next %p\n", p
, p
->next
);
915 printf("WARNING: loop-to-self "
916 "p %p p->next %p\n", p
, p
->next
);
922 MEMLIST_UNLOCK(osh
, flags
);
927 #endif /* BCMDBG_MEM */
930 osl_dma_consistent_align(void)
936 osl_dma_alloc_consistent(osl_t
*osh
, uint size
, uint16 align_bits
, uint
*alloced
, ulong
*pap
)
938 uint16 align
= (1 << align_bits
);
939 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
941 if (!ISALIGNED(DMA_CONSISTENT_ALIGN
, align
))
945 return (pci_alloc_consistent(osh
->pdev
, size
, (dma_addr_t
*)pap
));
949 osl_dma_free_consistent(osl_t
*osh
, void *va
, uint size
, ulong pa
)
951 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
953 pci_free_consistent(osh
->pdev
, size
, va
, (dma_addr_t
)pa
);
957 osl_dma_map(osl_t
*osh
, void *va
, uint size
, int direction
)
961 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
962 dir
= (direction
== DMA_TX
)? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE
;
963 return (pci_map_single(osh
->pdev
, va
, size
, dir
));
967 osl_dma_unmap(osl_t
*osh
, uint pa
, uint size
, int direction
)
971 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
972 dir
= (direction
== DMA_TX
)? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE
;
973 pci_unmap_single(osh
->pdev
, (uint32
)pa
, size
, dir
);
976 #if defined(BCMDBG_ASSERT)
978 osl_assert(const char *exp
, const char *file
, int line
)
981 const char *basename
;
983 basename
= strrchr(file
, '/');
992 snprintf(tempbuf
, 256, "assertion \"%s\" failed: file \"%s\", line %d\n",
993 exp
, basename
, line
);
995 /* Print assert message and give it time to be written to /var/log/messages */
996 if (!in_interrupt()) {
998 printk("%s", tempbuf
);
999 printk("panic in %d seconds\n", delay
);
1000 set_current_state(TASK_INTERRUPTIBLE
);
1001 schedule_timeout(delay
* HZ
);
1004 switch (g_assert_type
) {
1006 panic("%s", tempbuf
);
1008 /* Inform Coverity that execution will not continue past this point */
1009 __coverity_panic__();
1010 #endif /* __COVERITY__ */
1013 printk("%s", tempbuf
);
1016 /* Inform Coverity that execution will not continue past this point */
1017 __coverity_panic__();
1018 #endif /* __COVERITY__ */
1023 #endif /* BCMDBG_ASSERT */
1029 osl_delay(uint usec
)
1034 d
= MIN(usec
, 1000);
1040 #if defined(DSLCPE_DELAY)
1043 osl_oshsh_init(osl_t
*osh
, shared_osl_t
* oshsh
)
1045 extern unsigned long loops_per_jiffy
;
1047 osh
->oshsh
->MIPS
= loops_per_jiffy
/ (500000/HZ
);
1051 in_long_delay(osl_t
*osh
)
1053 return osh
->oshsh
->long_delay
;
1057 osl_long_delay(osl_t
*osh
, uint usec
, bool yield
)
1060 bool yielded
= TRUE
;
1061 int usec_to_delay
= usec
;
1062 unsigned long tick1
, tick2
, tick_diff
= 0;
1064 /* delay at least requested usec */
1065 while (usec_to_delay
> 0) {
1066 if (!yield
|| !yielded
) {
1067 d
= MIN(usec_to_delay
, 10);
1071 if (usec_to_delay
> 0) {
1072 osh
->oshsh
->long_delay
++;
1073 OSL_GETCYCLES(tick1
);
1074 spin_unlock_bh(osh
->oshsh
->lock
);
1075 if (usec_to_delay
> 0 && !in_irq() && !in_softirq() && !in_interrupt()) {
1081 spin_lock_bh(osh
->oshsh
->lock
);
1082 OSL_GETCYCLES(tick2
);
1085 tick_diff
= TICKDIFF(tick2
, tick1
);
1086 tick_diff
= (tick_diff
* 2)/(osh
->oshsh
->MIPS
);
1088 usec_to_delay
-= tick_diff
;
1092 osh
->oshsh
->long_delay
--;
1093 ASSERT(osh
->oshsh
->long_delay
>= 0);
1097 #endif /* DSLCPE_DELAY */
1100 * The pkttag contents are NOT cloned.
1104 osl_pktdup(osl_t
*osh
, void *skb
, int line
, char *file
)
1105 #else /* BCMDBG_PKT */
1107 osl_pktdup(osl_t
*osh
, void *skb
)
1108 #endif /* BCMDBG_PKT */
1111 unsigned long flags
;
1113 /* clear the CTFBUF flag if set and map the reset of the buffer
1117 PKTCTFMAP(osh
, skb
);
1120 if ((p
= skb_clone((struct sk_buff
*)skb
, GFP_ATOMIC
)) == NULL
)
1124 if (PKTISFAST(osh
, skb
)) {
1127 /* if the buffer allocated from ctfpool is cloned then
1128 * we can't be sure when it will be freed. since there
1129 * is a chance that we will be losing a buffer
1130 * from our pool, we increment the refill count for the
1131 * object to be alloced later.
1133 ctfpool
= (ctfpool_t
*)CTFPOOLPTR(osh
, skb
);
1134 ASSERT(ctfpool
!= NULL
);
1136 PKTCLRFAST(osh
, skb
);
1139 #endif /* CTFPOOL */
1141 /* skb_clone copies skb->cb.. we don't want that */
1142 if (osh
->pub
.pkttag
)
1143 bzero((void*)((struct sk_buff
*)p
)->cb
, OSL_PKTTAG_SZ
);
1145 /* Increment the packet counter */
1146 spin_lock_irqsave(&osh
->pktalloc_lock
, flags
);
1147 osh
->pub
.pktalloced
++;
1148 spin_unlock_irqrestore(&osh
->pktalloc_lock
, flags
);
1150 spin_lock_irqsave(&osh
->pktlist_lock
, flags
);
1151 pktlist_add(&(osh
->pktlist
), (void *) p
, line
, file
);
1152 spin_unlock_irqrestore(&osh
->pktlist_lock
, flags
);
1158 #ifdef BCMDBG_PTRACE
1160 osl_pkttrace(osl_t
*osh
, void *pkt
, uint16 bit
)
1162 pktlist_trace(&(osh
->pktlist
), pkt
, bit
);
1164 #endif /* BCMDBG_PTRACE */
1167 osl_pktlist_dump(osl_t
*osh
, char *buf
)
1169 pktlist_dump(&(osh
->pktlist
), buf
);
1174 osl_pktlist_add(osl_t
*osh
, void *p
, int line
, char *file
)
1176 unsigned long flags
;
1177 spin_lock_irqsave(&osh
->pktlist_lock
, flags
);
1178 pktlist_add(&(osh
->pktlist
), p
, line
, file
);
1179 spin_unlock_irqrestore(&osh
->pktlist_lock
, flags
);
1183 osl_pktlist_remove(osl_t
*osh
, void *p
)
1185 unsigned long flags
;
1186 spin_lock_irqsave(&osh
->pktlist_lock
, flags
);
1187 pktlist_remove(&(osh
->pktlist
), p
);
1188 spin_unlock_irqrestore(&osh
->pktlist_lock
, flags
);
1190 #endif /* BCMDBG_PKT */
1193 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1195 #if defined(OSLREGOPS) || (defined(WLC_HIGH) && !defined(WLC_LOW))
1197 osl_readb(osl_t
*osh
, volatile uint8
*r
)
1199 osl_rreg_fn_t rreg
= ((osl_pubinfo_t
*)osh
)->rreg_fn
;
1200 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1202 return (uint8
)((rreg
)(ctx
, (void*)r
, sizeof(uint8
)));
1207 osl_readw(osl_t
*osh
, volatile uint16
*r
)
1209 osl_rreg_fn_t rreg
= ((osl_pubinfo_t
*)osh
)->rreg_fn
;
1210 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1212 return (uint16
)((rreg
)(ctx
, (void*)r
, sizeof(uint16
)));
1216 osl_readl(osl_t
*osh
, volatile uint32
*r
)
1218 osl_rreg_fn_t rreg
= ((osl_pubinfo_t
*)osh
)->rreg_fn
;
1219 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1221 return (uint32
)((rreg
)(ctx
, (void*)r
, sizeof(uint32
)));
1225 osl_writeb(osl_t
*osh
, volatile uint8
*r
, uint8 v
)
1227 osl_wreg_fn_t wreg
= ((osl_pubinfo_t
*)osh
)->wreg_fn
;
1228 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1230 ((wreg
)(ctx
, (void*)r
, v
, sizeof(uint8
)));
1235 osl_writew(osl_t
*osh
, volatile uint16
*r
, uint16 v
)
1237 osl_wreg_fn_t wreg
= ((osl_pubinfo_t
*)osh
)->wreg_fn
;
1238 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1240 ((wreg
)(ctx
, (void*)r
, v
, sizeof(uint16
)));
1244 osl_writel(osl_t
*osh
, volatile uint32
*r
, uint32 v
)
1246 osl_wreg_fn_t wreg
= ((osl_pubinfo_t
*)osh
)->wreg_fn
;
1247 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1249 ((wreg
)(ctx
, (void*)r
, v
, sizeof(uint32
)));
1251 #endif /* OSLREGOPS */
1254 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1261 return ((uint32
)jiffies
* (1000 / HZ
));
1265 osl_pktalloced(osl_t
*osh
)
1267 return (osh
->pub
.pktalloced
);
1271 osl_printf(const char *format
, ...)
1274 static char printbuf
[1024];
1277 /* sprintf into a local buffer because there *is* no "vprintk()".. */
1278 va_start(args
, format
);
1279 len
= vsnprintf(printbuf
, 1024, format
, args
);
1282 if (len
> sizeof(printbuf
)) {
1283 printk("osl_printf: buffer overrun\n");
1287 return (printk("%s", printbuf
));
/* Variadic wrapper over vsprintf; returns the character count written. */
int
osl_sprintf(char *buf, const char *format, ...)
{
	va_list args;
	int rc;

	va_start(args, format);
	rc = vsprintf(buf, format, args);
	va_end(args);

	return (rc);
}
/* Variadic wrapper over vsnprintf; returns the length that would have
 * been written (C99 semantics), which may exceed n on truncation.
 */
int
osl_snprintf(char *buf, size_t n, const char *format, ...)
{
	va_list args;
	int rc;

	va_start(args, format);
	rc = vsnprintf(buf, n, format, args);
	va_end(args);

	return (rc);
}
/* Thin pass-through to vsprintf. */
int
osl_vsprintf(char *buf, const char *format, va_list ap)
{
	return (vsprintf(buf, format, ap));
}
/* Thin pass-through to vsnprintf. */
int
osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap)
{
	return (vsnprintf(buf, n, format, ap));
}
/* Thin pass-through to strcmp. */
int
osl_strcmp(const char *s1, const char *s2)
{
	return (strcmp(s1, s2));
}
1333 osl_strncmp(const char *s1
, const char *s2
, uint n
)
1335 return (strncmp(s1
, s2
, n
));
/* Thin pass-through to strlen (returned as int per the OSL interface). */
int
osl_strlen(const char *s)
{
	return (strlen(s));
}
/* Thin pass-through to strcpy; returns d. */
char *
osl_strcpy(char *d, const char *s)
{
	return (strcpy(d, s));
}
1351 osl_strncpy(char *d
, const char *s
, uint n
)
1353 return (strncpy(d
, s
, n
));
/* Thin pass-through to strchr. */
char *
osl_strchr(const char *s, int c)
{
	return (strchr(s, c));
}
/* Thin pass-through to strrchr. */
char *
osl_strrchr(const char *s, int c)
{
	return (strrchr(s, c));
}
/* Thin pass-through to memset; returns d. */
void *
osl_memset(void *d, int c, size_t n)
{
	return memset(d, c, n);
}
/* Thin pass-through to memcpy (regions must not overlap); returns d. */
void *
osl_memcpy(void *d, const void *s, size_t n)
{
	return memcpy(d, s, n);
}
/* Thin pass-through to memmove (safe for overlapping regions); returns d. */
void *
osl_memmove(void *d, const void *s, size_t n)
{
	return memmove(d, s, n);
}
/* Thin pass-through to memcmp. */
int
osl_memcmp(const void *s1, const void *s2, size_t n)
{
	return memcmp(s1, s2, n);
}
1393 osl_readl(volatile uint32
*r
)
1399 osl_readw(volatile uint16
*r
)
1405 osl_readb(volatile uint8
*r
)
1411 osl_writel(uint32 v
, volatile uint32
*r
)
1417 osl_writew(uint16 v
, volatile uint16
*r
)
1423 osl_writeb(uint8 v
, volatile uint8
*r
)
1429 osl_uncached(void *va
)
1432 return ((void*)KSEG1ADDR(va
));
1439 osl_cached(void *va
)
1442 return ((void*)KSEG0ADDR(va
));
1454 cycles
= read_c0_count() * 2;
1455 #elif defined(__i386__)
1459 #endif /* defined(mips) */
1464 osl_reg_map(uint32 pa
, uint size
)
1466 return (ioremap_nocache((unsigned long)pa
, (unsigned long)size
));
1470 osl_reg_unmap(void *va
)
1476 osl_busprobe(uint32
*val
, uint32 addr
)
1479 return get_dbe(*val
, (uint32
*)addr
);
1481 *val
= readl((uint32
*)(uintptr
)addr
);
1487 osl_pktshared(void *skb
)
1489 return (((struct sk_buff
*)skb
)->cloned
);
1493 osl_pktdata(osl_t
*osh
, void *skb
)
1495 return (((struct sk_buff
*)skb
)->data
);
1499 osl_pktlen(osl_t
*osh
, void *skb
)
1501 return (((struct sk_buff
*)skb
)->len
);
1505 osl_pktheadroom(osl_t
*osh
, void *skb
)
1507 return (uint
) skb_headroom((struct sk_buff
*) skb
);
1511 osl_pkttailroom(osl_t
*osh
, void *skb
)
1513 return (uint
) skb_tailroom((struct sk_buff
*) skb
);
1517 osl_pktnext(osl_t
*osh
, void *skb
)
1519 return (((struct sk_buff
*)skb
)->next
);
1523 osl_pktsetnext(void *skb
, void *x
)
1525 ((struct sk_buff
*)skb
)->next
= (struct sk_buff
*)x
;
1529 osl_pktsetlen(osl_t
*osh
, void *skb
, uint len
)
1531 __skb_trim((struct sk_buff
*)skb
, len
);
1535 osl_pktpush(osl_t
*osh
, void *skb
, int bytes
)
1537 return (skb_push((struct sk_buff
*)skb
, bytes
));
1541 osl_pktpull(osl_t
*osh
, void *skb
, int bytes
)
1543 return (skb_pull((struct sk_buff
*)skb
, bytes
));
1547 osl_pkttag(void *skb
)
1549 return ((void*)(((struct sk_buff
*)skb
)->cb
));
1553 osl_pktlink(void *skb
)
1555 return (((struct sk_buff
*)skb
)->prev
);
1559 osl_pktsetlink(void *skb
, void *x
)
1561 ((struct sk_buff
*)skb
)->prev
= (struct sk_buff
*)x
;
1565 osl_pktprio(void *skb
)
1567 return (((struct sk_buff
*)skb
)->priority
);
1571 osl_pktsetprio(void *skb
, uint x
)
1573 ((struct sk_buff
*)skb
)->priority
= x
;
1577 /* Linux Kernel: File Operations: start */
1579 osl_os_open_image(char *filename
)
1583 fp
= filp_open(filename
, O_RDONLY
, 0);
1585 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1587 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1597 osl_os_get_image_block(char *buf
, int len
, void *image
)
1599 struct file
*fp
= (struct file
*)image
;
1605 rdlen
= kernel_read(fp
, fp
->f_pos
, buf
, len
);
1613 osl_os_close_image(void *image
)
1616 filp_close((struct file
*)image
, NULL
);
1619 osl_os_image_size(void *image
)
1621 int len
= 0, curroffset
;
1624 /* store the current offset */
1625 curroffset
= generic_file_llseek(image
, 0, 1);
1626 /* goto end of file to get length */
1627 len
= generic_file_llseek(image
, 0, 2);
1628 /* restore back the offset */
1629 generic_file_llseek(image
, curroffset
, 0);
1634 /* Linux Kernel: File Operations: end */