2 * Linux OS Independent Layer
4 * Copyright (C) 2011, Broadcom Corporation. All Rights Reserved.
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 * $Id: linux_osl.c 330107 2012-04-27 22:04:17Z $
24 #include <bcmendian.h>
29 #include <linux/delay.h>
31 #include <asm/paccess.h>
#define PCI_CFG_RETRY		10

#define OS_HANDLE_MAGIC		0x1234abcd	/* Magic # to recognize osh */
#define BCM_MEM_FILENAME_LEN	24		/* Mem. filename length */

#ifdef DHD_USE_STATIC_BUF
#define STATIC_BUF_MAX_NUM	16
#define STATIC_BUF_SIZE		(PAGE_SIZE*2)
#define STATIC_BUF_TOTAL_LEN	(STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)

/* Pool of preallocated fixed-size buffers handed out by osl_malloc(). */
typedef struct bcm_static_buf {
	struct semaphore static_sem;			/* protects buf_use[] */
	unsigned char *buf_ptr;				/* base of preallocated region */
	unsigned char buf_use[STATIC_BUF_MAX_NUM];	/* nonzero = slot in use */
} bcm_static_buf_t;

static bcm_static_buf_t *bcm_static_buf = 0;

#define STATIC_PKT_MAX_NUM	8

/* Pool of preallocated skbs (4K and 8K) handed out by osl_pktget_static(). */
typedef struct bcm_static_pkt {
	struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
	struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
	struct semaphore osl_pkt_sem;			/* protects pkt_use[] */
	unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2];	/* 4k slots then 8k slots */
} bcm_static_pkt_t;

static bcm_static_pkt_t *bcm_static_skb = 0;
#endif /* DHD_USE_STATIC_BUF */
69 typedef struct bcm_mem_link
{
70 struct bcm_mem_link
*prev
;
71 struct bcm_mem_link
*next
;
75 char file
[BCM_MEM_FILENAME_LEN
];
86 atomic_t pktalloced
; /* Number of allocated packet buffers */
89 bcm_mem_link_t
*dbgmem_list
;
90 #if defined(DSLCPE_DELAY)
91 shared_osl_t
*oshsh
; /* osh shared */
93 spinlock_t dbgmem_lock
;
94 spinlock_t pktalloc_lock
;
/* Zero the 32-byte pkttag area (skb->cb) in eight aligned 32-bit stores. */
#define OSL_PKTTAG_CLEAR(p) \
do { \
	struct sk_buff *s = (struct sk_buff *)(p); \
	ASSERT(OSL_PKTTAG_SZ == 32); \
	*(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
	*(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
	*(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
	*(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
} while (0)
/* PCMCIA attribute space access macros */
#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
/* NOTE(review): struct tag reconstructed as pcmcia_dev per the canonical
 * linux_osl.c — confirm against the original.
 */
struct pcmcia_dev {
	dev_link_t link;	/* PCMCIA device pointer */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
	dev_node_t node;	/* PCMCIA node structure */
#endif
	void *base;		/* Mapped attribute memory window */
	size_t size;		/* Size of window */
	void *drv;		/* Driver data */
};
#endif /* defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) */
120 /* Global ASSERT type flag */
121 uint32 g_assert_type
= FALSE
;
123 static int16 linuxbcmerrormap
[] =
125 -EINVAL
, /* BCME_ERROR */
126 -EINVAL
, /* BCME_BADARG */
127 -EINVAL
, /* BCME_BADOPTION */
128 -EINVAL
, /* BCME_NOTUP */
129 -EINVAL
, /* BCME_NOTDOWN */
130 -EINVAL
, /* BCME_NOTAP */
131 -EINVAL
, /* BCME_NOTSTA */
132 -EINVAL
, /* BCME_BADKEYIDX */
133 -EINVAL
, /* BCME_RADIOOFF */
134 -EINVAL
, /* BCME_NOTBANDLOCKED */
135 -EINVAL
, /* BCME_NOCLK */
136 -EINVAL
, /* BCME_BADRATESET */
137 -EINVAL
, /* BCME_BADBAND */
138 -E2BIG
, /* BCME_BUFTOOSHORT */
139 -E2BIG
, /* BCME_BUFTOOLONG */
140 -EBUSY
, /* BCME_BUSY */
141 -EINVAL
, /* BCME_NOTASSOCIATED */
142 -EINVAL
, /* BCME_BADSSIDLEN */
143 -EINVAL
, /* BCME_OUTOFRANGECHAN */
144 -EINVAL
, /* BCME_BADCHAN */
145 -EFAULT
, /* BCME_BADADDR */
146 -ENOMEM
, /* BCME_NORESOURCE */
147 -EOPNOTSUPP
, /* BCME_UNSUPPORTED */
148 -EMSGSIZE
, /* BCME_BADLENGTH */
149 -EINVAL
, /* BCME_NOTREADY */
150 -EPERM
, /* BCME_EPERM */
151 -ENOMEM
, /* BCME_NOMEM */
152 -EINVAL
, /* BCME_ASSOCIATED */
153 -ERANGE
, /* BCME_RANGE */
154 -EINVAL
, /* BCME_NOTFOUND */
155 -EINVAL
, /* BCME_WME_NOT_ENABLED */
156 -EINVAL
, /* BCME_TSPEC_NOTFOUND */
157 -EINVAL
, /* BCME_ACM_NOTSUPPORTED */
158 -EINVAL
, /* BCME_NOT_WME_ASSOCIATION */
159 -EIO
, /* BCME_SDIO_ERROR */
160 -ENODEV
, /* BCME_DONGLE_DOWN */
161 -EINVAL
, /* BCME_VERSION */
162 -EIO
, /* BCME_TXFAIL */
163 -EIO
, /* BCME_RXFAIL */
164 -ENODEV
, /* BCME_NODEVICE */
165 -EINVAL
, /* BCME_NMODE_DISABLED */
166 -ENODATA
, /* BCME_NONRESIDENT */
168 /* When an new error code is added to bcmutils.h, add os
169 * specific error translation here as well
171 /* check if BCME_LAST changed since the last time this function was updated */
173 #error "You need to add a OS error translation in the linuxbcmerrormap \
174 for new error code defined in bcmutils.h"
178 /* translate bcmerrors into linux errors */
180 osl_error(int bcmerror
)
184 else if (bcmerror
< BCME_LAST
)
185 bcmerror
= BCME_ERROR
;
187 /* Array bounds covered by ASSERT in osl_attach */
188 return linuxbcmerrormap
[-bcmerror
];
191 extern uint8
* dhd_os_prealloc(void *osh
, int section
, int size
);
194 osl_attach(void *pdev
, uint bustype
, bool pkttag
)
198 osh
= kmalloc(sizeof(osl_t
), GFP_ATOMIC
);
201 bzero(osh
, sizeof(osl_t
));
203 /* Check that error map has the right number of entries in it */
204 ASSERT(ABS(BCME_LAST
) == (ARRAYSIZE(linuxbcmerrormap
) - 1));
206 osh
->magic
= OS_HANDLE_MAGIC
;
207 atomic_set(&osh
->malloced
, 0);
209 osh
->dbgmem_list
= NULL
;
210 spin_lock_init(&(osh
->dbgmem_lock
));
212 osh
->pub
.pkttag
= pkttag
;
213 osh
->bustype
= bustype
;
219 osh
->pub
.mmbus
= TRUE
;
226 osh
->pub
.mmbus
= FALSE
;
233 #if defined(DHD_USE_STATIC_BUF)
234 if (!bcm_static_buf
) {
235 if (!(bcm_static_buf
= (bcm_static_buf_t
*)dhd_os_prealloc(osh
, 3, STATIC_BUF_SIZE
+
236 STATIC_BUF_TOTAL_LEN
))) {
237 printk("can not alloc static buf!\n");
240 printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf
);
243 sema_init(&bcm_static_buf
->static_sem
, 1);
245 bcm_static_buf
->buf_ptr
= (unsigned char *)bcm_static_buf
+ STATIC_BUF_SIZE
;
248 if (!bcm_static_skb
) {
250 void *skb_buff_ptr
= 0;
251 bcm_static_skb
= (bcm_static_pkt_t
*)((char *)bcm_static_buf
+ 2048);
252 skb_buff_ptr
= dhd_os_prealloc(osh
, 4, 0);
254 bcopy(skb_buff_ptr
, bcm_static_skb
, sizeof(struct sk_buff
*)*16);
255 for (i
= 0; i
< STATIC_PKT_MAX_NUM
* 2; i
++)
256 bcm_static_skb
->pkt_use
[i
] = 0;
258 sema_init(&bcm_static_skb
->osl_pkt_sem
, 1);
260 #endif /* DHD_USE_STATIC_BUF */
262 spin_lock_init(&(osh
->pktalloc_lock
));
267 ASSERT(OSL_PKTTAG_SZ
<= sizeof(skb
->cb
));
274 osl_detach(osl_t
*osh
)
279 #ifdef DHD_USE_STATIC_BUF
280 if (bcm_static_buf
) {
283 if (bcm_static_skb
) {
288 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
292 static struct sk_buff
*osl_alloc_skb(unsigned int len
)
294 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
295 gfp_t flags
= GFP_ATOMIC
;
297 return __dev_alloc_skb(len
, flags
);
299 return dev_alloc_skb(len
);
/* Pool lock: irq-safe spinlock when CTFPOOL_SPINLOCK, else bottom-half lock
 * (the flags argument is unused in the _bh variant).
 */
#ifdef CTFPOOL_SPINLOCK
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
#else
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
#endif /* CTFPOOL_SPINLOCK */
313 * Allocate and add an object to packet pool.
316 osl_ctfpool_add(osl_t
*osh
)
319 #ifdef CTFPOOL_SPINLOCK
321 #endif /* CTFPOOL_SPINLOCK */
323 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
326 CTFPOOL_LOCK(osh
->ctfpool
, flags
);
327 ASSERT(osh
->ctfpool
->curr_obj
<= osh
->ctfpool
->max_obj
);
329 /* No need to allocate more objects */
330 if (osh
->ctfpool
->curr_obj
== osh
->ctfpool
->max_obj
) {
331 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
335 /* Allocate a new skb and add it to the ctfpool */
336 skb
= osl_alloc_skb(osh
->ctfpool
->obj_size
);
338 printf("%s: skb alloc of len %d failed\n", __FUNCTION__
,
339 osh
->ctfpool
->obj_size
);
340 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
345 skb
->next
= (struct sk_buff
*)osh
->ctfpool
->head
;
346 osh
->ctfpool
->head
= skb
;
347 osh
->ctfpool
->fast_frees
++;
348 osh
->ctfpool
->curr_obj
++;
350 /* Hijack a skb member to store ptr to ctfpool */
351 CTFPOOLPTR(osh
, skb
) = (void *)osh
->ctfpool
;
353 /* Use bit flag to indicate skb from fast ctfpool */
354 PKTFAST(osh
, skb
) = FASTBUF
;
356 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
362 * Add new objects to the pool.
365 osl_ctfpool_replenish(osl_t
*osh
, uint thresh
)
367 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
370 /* Do nothing if no refills are required */
371 while ((osh
->ctfpool
->refills
> 0) && (thresh
--)) {
372 osl_ctfpool_add(osh
);
373 osh
->ctfpool
->refills
--;
378 * Initialize the packet pool with specified number of objects.
381 osl_ctfpool_init(osl_t
*osh
, uint numobj
, uint size
)
383 osh
->ctfpool
= kmalloc(sizeof(ctfpool_t
), GFP_ATOMIC
);
384 ASSERT(osh
->ctfpool
);
385 bzero(osh
->ctfpool
, sizeof(ctfpool_t
));
387 osh
->ctfpool
->max_obj
= numobj
;
388 osh
->ctfpool
->obj_size
= size
;
390 spin_lock_init(&osh
->ctfpool
->lock
);
393 if (!osl_ctfpool_add(osh
))
395 osh
->ctfpool
->fast_frees
--;
402 * Cleanup the packet pool objects.
405 osl_ctfpool_cleanup(osl_t
*osh
)
407 struct sk_buff
*skb
, *nskb
;
408 #ifdef CTFPOOL_SPINLOCK
410 #endif /* CTFPOOL_SPINLOCK */
412 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
415 CTFPOOL_LOCK(osh
->ctfpool
, flags
);
417 skb
= osh
->ctfpool
->head
;
419 while (skb
!= NULL
) {
423 osh
->ctfpool
->curr_obj
--;
426 ASSERT(osh
->ctfpool
->curr_obj
== 0);
427 osh
->ctfpool
->head
= NULL
;
428 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
435 osl_ctfpool_stats(osl_t
*osh
, void *b
)
437 struct bcmstrbuf
*bb
;
439 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
442 #ifdef DHD_USE_STATIC_BUF
443 if (bcm_static_buf
) {
446 if (bcm_static_skb
) {
449 #endif /* DHD_USE_STATIC_BUF */
453 ASSERT((osh
!= NULL
) && (bb
!= NULL
));
455 bcm_bprintf(bb
, "max_obj %d obj_size %d curr_obj %d refills %d\n",
456 osh
->ctfpool
->max_obj
, osh
->ctfpool
->obj_size
,
457 osh
->ctfpool
->curr_obj
, osh
->ctfpool
->refills
);
458 bcm_bprintf(bb
, "fast_allocs %d fast_frees %d slow_allocs %d\n",
459 osh
->ctfpool
->fast_allocs
, osh
->ctfpool
->fast_frees
,
460 osh
->ctfpool
->slow_allocs
);
463 static inline struct sk_buff
*
464 osl_pktfastget(osl_t
*osh
, uint len
)
467 #ifdef CTFPOOL_SPINLOCK
469 #endif /* CTFPOOL_SPINLOCK */
471 /* Try to do fast allocate. Return null if ctfpool is not in use
472 * or if there are no items in the ctfpool.
474 if (osh
->ctfpool
== NULL
)
477 CTFPOOL_LOCK(osh
->ctfpool
, flags
);
478 if (osh
->ctfpool
->head
== NULL
) {
479 ASSERT(osh
->ctfpool
->curr_obj
== 0);
480 osh
->ctfpool
->slow_allocs
++;
481 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
485 ASSERT(len
<= osh
->ctfpool
->obj_size
);
487 /* Get an object from ctfpool */
488 skb
= (struct sk_buff
*)osh
->ctfpool
->head
;
489 osh
->ctfpool
->head
= (void *)skb
->next
;
491 osh
->ctfpool
->fast_allocs
++;
492 osh
->ctfpool
->curr_obj
--;
493 ASSERT(CTFPOOLHEAD(osh
, skb
) == (struct sock
*)osh
->ctfpool
->head
);
494 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
496 /* Init skb struct */
497 skb
->next
= skb
->prev
= NULL
;
498 skb
->data
= skb
->head
+ 16;
499 skb
->tail
= skb
->head
+ 16;
503 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
506 atomic_set(&skb
->users
, 1);
508 PKTSETCLINK(skb
, NULL
);
514 /* Convert a driver packet to native(OS) packet
515 * In the process, packettag is zeroed out before sending up
516 * IP code depends on skb->cb to be setup correctly with various options
517 * In our case, that means it should be 0
519 struct sk_buff
* BCMFASTPATH
520 osl_pkt_tonative(osl_t
*osh
, void *pkt
)
522 struct sk_buff
*nskb
;
525 OSL_PKTTAG_CLEAR(pkt
);
527 /* Decrement the packet counter */
528 for (nskb
= (struct sk_buff
*)pkt
; nskb
; nskb
= nskb
->next
) {
529 atomic_sub(PKTISCHAINED(nskb
) ? PKTCCNT(nskb
) : 1, &osh
->pktalloced
);
531 return (struct sk_buff
*)pkt
;
534 /* Convert a native(OS) packet to driver packet.
535 * In the process, native packet is destroyed, there is no copying
536 * Also, a packettag is zeroed out
539 osl_pkt_frmnative(osl_t
*osh
, void *pkt
)
541 struct sk_buff
*nskb
;
544 OSL_PKTTAG_CLEAR(pkt
);
546 /* Increment the packet counter */
547 for (nskb
= (struct sk_buff
*)pkt
; nskb
; nskb
= nskb
->next
) {
548 atomic_add(PKTISCHAINED(nskb
) ? PKTCCNT(nskb
) : 1, &osh
->pktalloced
);
553 /* Return a new packet. zero out pkttag */
555 osl_pktget(osl_t
*osh
, uint len
)
560 /* Allocate from local pool */
561 skb
= osl_pktfastget(osh
, len
);
562 if ((skb
!= NULL
) || ((skb
= osl_alloc_skb(len
)) != NULL
)) {
564 if ((skb
= osl_alloc_skb(len
))) {
569 atomic_inc(&osh
->pktalloced
);
572 return ((void*) skb
);
577 osl_pktfastfree(osl_t
*osh
, struct sk_buff
*skb
)
580 #ifdef CTFPOOL_SPINLOCK
582 #endif /* CTFPOOL_SPINLOCK */
584 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
585 skb
->tstamp
.tv
.sec
= 0;
587 skb
->stamp
.tv_sec
= 0;
590 /* We only need to init the fields that we change */
593 OSL_PKTTAG_CLEAR(skb
);
595 skb
->destructor
= NULL
;
597 ctfpool
= (ctfpool_t
*)CTFPOOLPTR(osh
, skb
);
598 //ASSERT(ctfpool != NULL);
599 if (ctfpool
== NULL
) return;
601 /* Add object to the ctfpool */
602 CTFPOOL_LOCK(ctfpool
, flags
);
603 skb
->next
= (struct sk_buff
*)ctfpool
->head
;
604 ctfpool
->head
= (void *)skb
;
606 ctfpool
->fast_frees
++;
609 ASSERT(ctfpool
->curr_obj
<= ctfpool
->max_obj
);
610 CTFPOOL_UNLOCK(ctfpool
, flags
);
614 /* Free the driver packet. Free the tag if present */
616 osl_pktfree(osl_t
*osh
, void *p
, bool send
)
618 struct sk_buff
*skb
, *nskb
;
620 skb
= (struct sk_buff
*) p
;
622 if (send
&& osh
->pub
.tx_fn
)
623 osh
->pub
.tx_fn(osh
->pub
.tx_ctx
, p
, 0);
625 PKTDBG_TRACE(osh
, (void *) skb
, PKTLIST_PKTFREE
);
627 /* perversion: we use skb->next to chain multi-skb packets */
634 /* Clear the map ptr before freeing */
636 CTFMAPPTR(osh
, skb
) = NULL
;
640 if ((PKTISFAST(osh
, skb
)) && (atomic_read(&skb
->users
) == 1))
641 osl_pktfastfree(osh
, skb
);
648 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
651 dev_kfree_skb_any(skb
);
653 /* can free immediately (even in_irq()) if destructor
658 atomic_dec(&osh
->pktalloced
);
663 #ifdef DHD_USE_STATIC_BUF
665 osl_pktget_static(osl_t
*osh
, uint len
)
670 if (len
> (PAGE_SIZE
*2)) {
671 printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__
, len
);
672 return osl_pktget(osh
, len
);
675 down(&bcm_static_skb
->osl_pkt_sem
);
677 if (len
<= PAGE_SIZE
) {
678 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
679 if (bcm_static_skb
->pkt_use
[i
] == 0)
683 if (i
!= STATIC_PKT_MAX_NUM
) {
684 bcm_static_skb
->pkt_use
[i
] = 1;
685 up(&bcm_static_skb
->osl_pkt_sem
);
686 skb
= bcm_static_skb
->skb_4k
[i
];
687 skb
->tail
= skb
->data
+ len
;
694 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
695 if (bcm_static_skb
->pkt_use
[i
+STATIC_PKT_MAX_NUM
] == 0)
699 if (i
!= STATIC_PKT_MAX_NUM
) {
700 bcm_static_skb
->pkt_use
[i
+STATIC_PKT_MAX_NUM
] = 1;
701 up(&bcm_static_skb
->osl_pkt_sem
);
702 skb
= bcm_static_skb
->skb_8k
[i
];
703 skb
->tail
= skb
->data
+ len
;
708 up(&bcm_static_skb
->osl_pkt_sem
);
709 printk("%s: all static pkt in use!\n", __FUNCTION__
);
710 return osl_pktget(osh
, len
);
714 osl_pktfree_static(osl_t
*osh
, void *p
, bool send
)
718 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
719 if (p
== bcm_static_skb
->skb_4k
[i
]) {
720 down(&bcm_static_skb
->osl_pkt_sem
);
721 bcm_static_skb
->pkt_use
[i
] = 0;
722 up(&bcm_static_skb
->osl_pkt_sem
);
727 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
728 if (p
== bcm_static_skb
->skb_8k
[i
]) {
729 down(&bcm_static_skb
->osl_pkt_sem
);
730 bcm_static_skb
->pkt_use
[i
+ STATIC_PKT_MAX_NUM
] = 0;
731 up(&bcm_static_skb
->osl_pkt_sem
);
736 return osl_pktfree(osh
, p
, send
);
738 #endif /* DHD_USE_STATIC_BUF */
741 osl_pci_read_config(osl_t
*osh
, uint offset
, uint size
)
744 uint retry
= PCI_CFG_RETRY
;
746 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
748 /* only 4byte access supported */
752 pci_read_config_dword(osh
->pdev
, offset
, &val
);
753 if (val
!= 0xffffffff)
758 if (retry
< PCI_CFG_RETRY
)
759 printk("PCI CONFIG READ access to %d required %d retries\n", offset
,
760 (PCI_CFG_RETRY
- retry
));
767 osl_pci_write_config(osl_t
*osh
, uint offset
, uint size
, uint val
)
769 uint retry
= PCI_CFG_RETRY
;
771 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
773 /* only 4byte access supported */
777 pci_write_config_dword(osh
->pdev
, offset
, val
);
778 if (offset
!= PCI_BAR0_WIN
)
780 if (osl_pci_read_config(osh
, offset
, size
) == val
)
785 if (retry
< PCI_CFG_RETRY
)
786 printk("PCI CONFIG WRITE access to %d required %d retries\n", offset
,
787 (PCI_CFG_RETRY
- retry
));
791 /* return bus # for the pci device pointed by osh->pdev */
793 osl_pci_bus(osl_t
*osh
)
795 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
797 return ((struct pci_dev
*)osh
->pdev
)->bus
->number
;
800 /* return slot # for the pci device pointed by osh->pdev */
802 osl_pci_slot(osl_t
*osh
)
804 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
806 return PCI_SLOT(((struct pci_dev
*)osh
->pdev
)->devfn
);
809 /* return the pci device pointed by osh->pdev */
811 osl_pci_device(osl_t
*osh
)
813 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
819 osl_pcmcia_attr(osl_t
*osh
, uint offset
, char *buf
, int size
, bool write
)
824 osl_pcmcia_read_attr(osl_t
*osh
, uint offset
, void *buf
, int size
)
826 osl_pcmcia_attr(osh
, offset
, (char *) buf
, size
, FALSE
);
830 osl_pcmcia_write_attr(osl_t
*osh
, uint offset
, void *buf
, int size
)
832 osl_pcmcia_attr(osh
, offset
, (char *) buf
, size
, TRUE
);
836 osl_malloc(osl_t
*osh
, uint size
)
840 /* only ASSERT if osh is defined */
842 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
844 #ifdef DHD_USE_STATIC_BUF
848 if ((size
>= PAGE_SIZE
)&&(size
<= STATIC_BUF_SIZE
))
850 down(&bcm_static_buf
->static_sem
);
852 for (i
= 0; i
< STATIC_BUF_MAX_NUM
; i
++)
854 if (bcm_static_buf
->buf_use
[i
] == 0)
858 if (i
== STATIC_BUF_MAX_NUM
)
860 up(&bcm_static_buf
->static_sem
);
861 printk("all static buff in use!\n");
865 bcm_static_buf
->buf_use
[i
] = 1;
866 up(&bcm_static_buf
->static_sem
);
868 bzero(bcm_static_buf
->buf_ptr
+STATIC_BUF_SIZE
*i
, size
);
870 atomic_add(size
, &osh
->malloced
);
872 return ((void *)(bcm_static_buf
->buf_ptr
+STATIC_BUF_SIZE
*i
));
876 #endif /* DHD_USE_STATIC_BUF */
878 if ((addr
= kmalloc(size
, GFP_ATOMIC
)) == NULL
) {
884 atomic_add(size
, &osh
->malloced
);
890 osl_mfree(osl_t
*osh
, void *addr
, uint size
)
892 #ifdef DHD_USE_STATIC_BUF
895 if ((addr
> (void *)bcm_static_buf
) && ((unsigned char *)addr
896 <= ((unsigned char *)bcm_static_buf
+ STATIC_BUF_TOTAL_LEN
)))
900 buf_idx
= ((unsigned char *)addr
- bcm_static_buf
->buf_ptr
)/STATIC_BUF_SIZE
;
902 down(&bcm_static_buf
->static_sem
);
903 bcm_static_buf
->buf_use
[buf_idx
] = 0;
904 up(&bcm_static_buf
->static_sem
);
907 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
908 atomic_sub(size
, &osh
->malloced
);
913 #endif /* DHD_USE_STATIC_BUF */
915 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
916 atomic_sub(size
, &osh
->malloced
);
922 osl_malloced(osl_t
*osh
)
924 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
925 return (atomic_read(&osh
->malloced
));
929 osl_malloc_failed(osl_t
*osh
)
931 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
932 return (osh
->failed
);
937 osl_dma_consistent_align(void)
943 osl_dma_alloc_consistent(osl_t
*osh
, uint size
, uint16 align_bits
, uint
*alloced
, ulong
*pap
)
945 uint16 align
= (1 << align_bits
);
946 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
948 if (!ISALIGNED(DMA_CONSISTENT_ALIGN
, align
))
952 return (pci_alloc_consistent(osh
->pdev
, size
, (dma_addr_t
*)pap
));
956 osl_dma_free_consistent(osl_t
*osh
, void *va
, uint size
, ulong pa
)
958 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
960 pci_free_consistent(osh
->pdev
, size
, va
, (dma_addr_t
)pa
);
964 osl_dma_map(osl_t
*osh
, void *va
, uint size
, int direction
)
968 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
969 dir
= (direction
== DMA_TX
)? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE
;
970 return (pci_map_single(osh
->pdev
, va
, size
, dir
));
974 osl_dma_unmap(osl_t
*osh
, uint pa
, uint size
, int direction
)
978 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
979 dir
= (direction
== DMA_TX
)? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE
;
980 pci_unmap_single(osh
->pdev
, (uint32
)pa
, size
, dir
);
#if defined(DSLCPE_DELAY)
/* Record the shared-OSL pointer and derive a rough MIPS estimate from
 * loops_per_jiffy for tick-to-usec conversion in osl_long_delay().
 */
void
osl_oshsh_init(osl_t *osh, shared_osl_t *oshsh)
{
	extern unsigned long loops_per_jiffy;
	osh->oshsh = oshsh;
	osh->oshsh->MIPS = loops_per_jiffy / (500000/HZ);
}

int
in_long_delay(osl_t *osh)
{
	return osh->oshsh->long_delay;
}

/* Delay at least `usec` microseconds, optionally yielding the CPU between
 * busy-wait slices when not in interrupt context.
 * NOTE(review): the udelay slice and the schedule()/yielded bookkeeping are
 * reconstructed from the canonical linux_osl.c — confirm.
 */
void
osl_long_delay(osl_t *osh, uint usec, bool yield)
{
	uint d;
	bool yielded = TRUE;
	int usec_to_delay = usec;
	unsigned long tick1, tick2, tick_diff = 0;

	/* delay at least requested usec */
	while (usec_to_delay > 0) {
		if (!yield || !yielded) {
			d = MIN(usec_to_delay, 10);
			udelay(d);
			usec_to_delay -= d;
		}
		if (usec_to_delay > 0) {
			osh->oshsh->long_delay++;
			OSL_GETCYCLES(tick1);
			spin_unlock_bh(osh->oshsh->lock);
			if (usec_to_delay > 0 && !in_irq() && !in_softirq() && !in_interrupt()) {
				schedule();
				yielded = TRUE;
			} else {
				yielded = FALSE;
			}
			spin_lock_bh(osh->oshsh->lock);
			OSL_GETCYCLES(tick2);

			if (yielded) {
				tick_diff = TICKDIFF(tick2, tick1);
				tick_diff = (tick_diff * 2)/(osh->oshsh->MIPS);
				if (tick_diff) {
					usec_to_delay -= tick_diff;
				} else
					yielded = 0;
			}
			osh->oshsh->long_delay--;
			ASSERT(osh->oshsh->long_delay >= 0);
		}
	}
}
#endif /* DSLCPE_DELAY */
1056 * The pkttag contents are NOT cloned.
1059 osl_pktdup(osl_t
*osh
, void *skb
)
1063 ASSERT(!PKTISCHAINED(skb
));
1065 /* clear the CTFBUF flag if set and map the rest of the buffer
1068 PKTCTFMAP(osh
, skb
);
1070 if ((p
= skb_clone((struct sk_buff
*)skb
, GFP_ATOMIC
)) == NULL
)
1074 if (PKTISFAST(osh
, skb
)) {
1077 /* if the buffer allocated from ctfpool is cloned then
1078 * we can't be sure when it will be freed. since there
1079 * is a chance that we will be losing a buffer
1080 * from our pool, we increment the refill count for the
1081 * object to be alloced later.
1083 ctfpool
= (ctfpool_t
*)CTFPOOLPTR(osh
, skb
);
1084 ASSERT(ctfpool
!= NULL
);
1086 PKTCLRFAST(osh
, skb
);
1089 #endif /* CTFPOOL */
1091 /* skb_clone copies skb->cb.. we don't want that */
1092 if (osh
->pub
.pkttag
)
1093 OSL_PKTTAG_CLEAR(p
);
1095 /* Increment the packet counter */
1096 atomic_inc(&osh
->pktalloced
);
/*
 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
 */
#ifdef OSLREGOPS
uint8
osl_readb(osl_t *osh, volatile uint8 *r)
{
	osl_rreg_fn_t rreg = ((osl_pubinfo_t *)osh)->rreg_fn;
	void *ctx = ((osl_pubinfo_t *)osh)->reg_ctx;

	return (uint8)((rreg)(ctx, (void*)r, sizeof(uint8)));
}

uint16
osl_readw(osl_t *osh, volatile uint16 *r)
{
	osl_rreg_fn_t rreg = ((osl_pubinfo_t *)osh)->rreg_fn;
	void *ctx = ((osl_pubinfo_t *)osh)->reg_ctx;

	return (uint16)((rreg)(ctx, (void*)r, sizeof(uint16)));
}

uint32
osl_readl(osl_t *osh, volatile uint32 *r)
{
	osl_rreg_fn_t rreg = ((osl_pubinfo_t *)osh)->rreg_fn;
	void *ctx = ((osl_pubinfo_t *)osh)->reg_ctx;

	return (uint32)((rreg)(ctx, (void*)r, sizeof(uint32)));
}

void
osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v)
{
	osl_wreg_fn_t wreg = ((osl_pubinfo_t *)osh)->wreg_fn;
	void *ctx = ((osl_pubinfo_t *)osh)->reg_ctx;

	((wreg)(ctx, (void*)r, v, sizeof(uint8)));
}

void
osl_writew(osl_t *osh, volatile uint16 *r, uint16 v)
{
	osl_wreg_fn_t wreg = ((osl_pubinfo_t *)osh)->wreg_fn;
	void *ctx = ((osl_pubinfo_t *)osh)->reg_ctx;

	((wreg)(ctx, (void*)r, v, sizeof(uint16)));
}

void
osl_writel(osl_t *osh, volatile uint32 *r, uint32 v)
{
	osl_wreg_fn_t wreg = ((osl_pubinfo_t *)osh)->wreg_fn;
	void *ctx = ((osl_pubinfo_t *)osh)->reg_ctx;

	((wreg)(ctx, (void*)r, v, sizeof(uint32)));
}
#endif /* OSLREGOPS */
1163 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1170 return ((uint32
)jiffies
* (1000 / HZ
));
/* printf-style wrapper over printk: format into a local buffer first because
 * there is no "vprintk()" here.
 *
 * Fix: the truncation check compared `len > sizeof(printbuf)`, which is a
 * signed/unsigned comparison and misses the len == sizeof(printbuf) case —
 * vsnprintf() returns the length the output *would* have had, and truncation
 * already occurs at len >= sizeof(printbuf). Use >= with an explicit cast.
 * Also ensure va_end() is paired with va_start().
 */
int
osl_printf(const char *format, ...)
{
	va_list args;
	static char printbuf[1024];
	int len;

	/* sprintf into a local buffer because there *is* no "vprintk()".. */
	va_start(args, format);
	len = vsnprintf(printbuf, 1024, format, args);
	va_end(args);

	if (len >= (int)sizeof(printbuf)) {
		printk("osl_printf: buffer overrun\n");
	}

	return (printk("%s", printbuf));
}
/* Thin variadic wrappers over the C formatting routines (BINOSL). */
int
osl_sprintf(char *buf, const char *format, ...)
{
	va_list args;
	int rc;

	va_start(args, format);
	rc = vsprintf(buf, format, args);
	va_end(args);
	return (rc);
}

int
osl_snprintf(char *buf, size_t n, const char *format, ...)
{
	va_list args;
	int rc;

	va_start(args, format);
	rc = vsnprintf(buf, n, format, args);
	va_end(args);
	return (rc);
}

int
osl_vsprintf(char *buf, const char *format, va_list ap)
{
	return (vsprintf(buf, format, ap));
}

int
osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap)
{
	return (vsnprintf(buf, n, format, ap));
}
1230 osl_strcmp(const char *s1
, const char *s2
)
1232 return (strcmp(s1
, s2
));
1236 osl_strncmp(const char *s1
, const char *s2
, uint n
)
1238 return (strncmp(s1
, s2
, n
));
1242 osl_strlen(const char *s
)
1248 osl_strcpy(char *d
, const char *s
)
1250 return (strcpy(d
, s
));
1254 osl_strncpy(char *d
, const char *s
, uint n
)
1256 return (strncpy(d
, s
, n
));
1260 osl_strchr(const char *s
, int c
)
1262 return (strchr(s
, c
));
1266 osl_strrchr(const char *s
, int c
)
1268 return (strrchr(s
, c
));
/* Thin wrappers over the C memory routines (BINOSL). */
void *
osl_memset(void *d, int c, size_t n)
{
	return memset(d, c, n);
}

void *
osl_memcpy(void *d, const void *s, size_t n)
{
	return memcpy(d, s, n);
}

void *
osl_memmove(void *d, const void *s, size_t n)
{
	return memmove(d, s, n);
}

int
osl_memcmp(const void *s1, const void *s2, size_t n)
{
	return memcmp(s1, s2, n);
}
1296 osl_readl(volatile uint32
*r
)
1302 osl_readw(volatile uint16
*r
)
1308 osl_readb(volatile uint8
*r
)
1314 osl_writel(uint32 v
, volatile uint32
*r
)
1320 osl_writew(uint16 v
, volatile uint16
*r
)
1326 osl_writeb(uint8 v
, volatile uint8
*r
)
1332 osl_uncached(void *va
)
1335 return ((void*)KSEG1ADDR(va
));
1342 osl_cached(void *va
)
1345 return ((void*)KSEG0ADDR(va
));
1357 cycles
= read_c0_count() * 2;
1358 #elif defined(__i386__)
1362 #endif /* defined(mips) */
1367 osl_reg_map(uint32 pa
, uint size
)
1369 return (ioremap_nocache((unsigned long)pa
, (unsigned long)size
));
1373 osl_reg_unmap(void *va
)
1379 osl_busprobe(uint32
*val
, uint32 addr
)
1382 return get_dbe(*val
, (uint32
*)addr
);
1384 *val
= readl((uint32
*)(uintptr
)addr
);
1390 osl_pktshared(void *skb
)
1392 return (((struct sk_buff
*)skb
)->cloned
);
1396 osl_pktdata(osl_t
*osh
, void *skb
)
1398 return (((struct sk_buff
*)skb
)->data
);
1402 osl_pktlen(osl_t
*osh
, void *skb
)
1404 return (((struct sk_buff
*)skb
)->len
);
1408 osl_pktheadroom(osl_t
*osh
, void *skb
)
1410 return (uint
) skb_headroom((struct sk_buff
*) skb
);
1414 osl_pkttailroom(osl_t
*osh
, void *skb
)
1416 return (uint
) skb_tailroom((struct sk_buff
*) skb
);
1420 osl_pktnext(osl_t
*osh
, void *skb
)
1422 return (((struct sk_buff
*)skb
)->next
);
1426 osl_pktsetnext(void *skb
, void *x
)
1428 ((struct sk_buff
*)skb
)->next
= (struct sk_buff
*)x
;
1432 osl_pktsetlen(osl_t
*osh
, void *skb
, uint len
)
1434 __skb_trim((struct sk_buff
*)skb
, len
);
1438 osl_pktpush(osl_t
*osh
, void *skb
, int bytes
)
1440 return (skb_push((struct sk_buff
*)skb
, bytes
));
1444 osl_pktpull(osl_t
*osh
, void *skb
, int bytes
)
1446 return (skb_pull((struct sk_buff
*)skb
, bytes
));
1450 osl_pkttag(void *skb
)
1452 return ((void*)(((struct sk_buff
*)skb
)->cb
));
1456 osl_pktlink(void *skb
)
1458 return (((struct sk_buff
*)skb
)->prev
);
1462 osl_pktsetlink(void *skb
, void *x
)
1464 ((struct sk_buff
*)skb
)->prev
= (struct sk_buff
*)x
;
1468 osl_pktprio(void *skb
)
1470 return (((struct sk_buff
*)skb
)->priority
);
1474 osl_pktsetprio(void *skb
, uint x
)
1476 ((struct sk_buff
*)skb
)->priority
= x
;
1481 osl_pktalloced(osl_t
*osh
)
1483 return (atomic_read(&osh
->pktalloced
));
1486 /* Linux Kernel: File Operations: start */
1488 osl_os_open_image(char *filename
)
1492 fp
= filp_open(filename
, O_RDONLY
, 0);
1494 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1496 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1506 osl_os_get_image_block(char *buf
, int len
, void *image
)
1508 struct file
*fp
= (struct file
*)image
;
1514 rdlen
= kernel_read(fp
, fp
->f_pos
, buf
, len
);
1522 osl_os_close_image(void *image
)
1525 filp_close((struct file
*)image
, NULL
);
1527 /* Linux Kernel: File Operations: end */