RT-AC66 3.0.0.4.374.130 core
[tomato.git] / release / src-rt-6.x / shared / linux_osl.c
blobb82a9906f44e0fe220458ba330df4738cc4da7e0
1 /*
2 * Linux OS Independent Layer
4 * Copyright (C) 2011, Broadcom Corporation. All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 * $Id: linux_osl.c 330107 2012-04-27 22:04:17Z $
21 #define LINUX_PORT
23 #include <typedefs.h>
24 #include <bcmendian.h>
25 #include <linuxver.h>
26 #include <bcmdefs.h>
27 #include <osl.h>
28 #include <bcmutils.h>
29 #include <linux/delay.h>
30 #ifdef mips
31 #include <asm/paccess.h>
32 #endif /* mips */
33 #include <pcicfg.h>
37 #include <linux/fs.h>
39 #define PCI_CFG_RETRY 10
41 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
42 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
44 #ifdef DHD_USE_STATIC_BUF
45 #define STATIC_BUF_MAX_NUM 16
46 #define STATIC_BUF_SIZE (PAGE_SIZE*2)
47 #define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
49 typedef struct bcm_static_buf {
50 struct semaphore static_sem;
51 unsigned char *buf_ptr;
52 unsigned char buf_use[STATIC_BUF_MAX_NUM];
53 } bcm_static_buf_t;
55 static bcm_static_buf_t *bcm_static_buf = 0;
57 #define STATIC_PKT_MAX_NUM 8
59 typedef struct bcm_static_pkt {
60 struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
61 struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
62 struct semaphore osl_pkt_sem;
63 unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2];
64 } bcm_static_pkt_t;
66 static bcm_static_pkt_t *bcm_static_skb = 0;
67 #endif /* DHD_USE_STATIC_BUF */
69 typedef struct bcm_mem_link {
70 struct bcm_mem_link *prev;
71 struct bcm_mem_link *next;
72 uint size;
73 int line;
74 void *osh;
75 char file[BCM_MEM_FILENAME_LEN];
76 } bcm_mem_link_t;
78 struct osl_info {
79 osl_pubinfo_t pub;
80 #ifdef CTFPOOL
81 ctfpool_t *ctfpool;
82 #endif /* CTFPOOL */
83 uint magic;
84 void *pdev;
85 atomic_t malloced;
86 atomic_t pktalloced; /* Number of allocated packet buffers */
87 uint failed;
88 uint bustype;
89 bcm_mem_link_t *dbgmem_list;
90 #if defined(DSLCPE_DELAY)
91 shared_osl_t *oshsh; /* osh shared */
92 #endif
93 spinlock_t dbgmem_lock;
94 spinlock_t pktalloc_lock;
97 #define OSL_PKTTAG_CLEAR(p) \
98 do { \
99 struct sk_buff *s = (struct sk_buff *)(p); \
100 ASSERT(OSL_PKTTAG_SZ == 32); \
101 *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
102 *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
103 *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
104 *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
105 } while (0)
107 /* PCMCIA attribute space access macros */
108 #if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
109 struct pcmcia_dev {
110 dev_link_t link; /* PCMCIA device pointer */
111 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
112 dev_node_t node; /* PCMCIA node structure */
113 #endif
114 void *base; /* Mapped attribute memory window */
115 size_t size; /* Size of window */
116 void *drv; /* Driver data */
118 #endif /* defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) */
120 /* Global ASSERT type flag */
121 uint32 g_assert_type = FALSE;
123 static int16 linuxbcmerrormap[] =
124 { 0, /* 0 */
125 -EINVAL, /* BCME_ERROR */
126 -EINVAL, /* BCME_BADARG */
127 -EINVAL, /* BCME_BADOPTION */
128 -EINVAL, /* BCME_NOTUP */
129 -EINVAL, /* BCME_NOTDOWN */
130 -EINVAL, /* BCME_NOTAP */
131 -EINVAL, /* BCME_NOTSTA */
132 -EINVAL, /* BCME_BADKEYIDX */
133 -EINVAL, /* BCME_RADIOOFF */
134 -EINVAL, /* BCME_NOTBANDLOCKED */
135 -EINVAL, /* BCME_NOCLK */
136 -EINVAL, /* BCME_BADRATESET */
137 -EINVAL, /* BCME_BADBAND */
138 -E2BIG, /* BCME_BUFTOOSHORT */
139 -E2BIG, /* BCME_BUFTOOLONG */
140 -EBUSY, /* BCME_BUSY */
141 -EINVAL, /* BCME_NOTASSOCIATED */
142 -EINVAL, /* BCME_BADSSIDLEN */
143 -EINVAL, /* BCME_OUTOFRANGECHAN */
144 -EINVAL, /* BCME_BADCHAN */
145 -EFAULT, /* BCME_BADADDR */
146 -ENOMEM, /* BCME_NORESOURCE */
147 -EOPNOTSUPP, /* BCME_UNSUPPORTED */
148 -EMSGSIZE, /* BCME_BADLENGTH */
149 -EINVAL, /* BCME_NOTREADY */
150 -EPERM, /* BCME_EPERM */
151 -ENOMEM, /* BCME_NOMEM */
152 -EINVAL, /* BCME_ASSOCIATED */
153 -ERANGE, /* BCME_RANGE */
154 -EINVAL, /* BCME_NOTFOUND */
155 -EINVAL, /* BCME_WME_NOT_ENABLED */
156 -EINVAL, /* BCME_TSPEC_NOTFOUND */
157 -EINVAL, /* BCME_ACM_NOTSUPPORTED */
158 -EINVAL, /* BCME_NOT_WME_ASSOCIATION */
159 -EIO, /* BCME_SDIO_ERROR */
160 -ENODEV, /* BCME_DONGLE_DOWN */
161 -EINVAL, /* BCME_VERSION */
162 -EIO, /* BCME_TXFAIL */
163 -EIO, /* BCME_RXFAIL */
164 -ENODEV, /* BCME_NODEVICE */
165 -EINVAL, /* BCME_NMODE_DISABLED */
166 -ENODATA, /* BCME_NONRESIDENT */
168 /* When an new error code is added to bcmutils.h, add os
169 * specific error translation here as well
171 /* check if BCME_LAST changed since the last time this function was updated */
172 #if BCME_LAST != -42
173 #error "You need to add a OS error translation in the linuxbcmerrormap \
174 for new error code defined in bcmutils.h"
175 #endif
178 /* translate bcmerrors into linux errors */
180 osl_error(int bcmerror)
182 if (bcmerror > 0)
183 bcmerror = 0;
184 else if (bcmerror < BCME_LAST)
185 bcmerror = BCME_ERROR;
187 /* Array bounds covered by ASSERT in osl_attach */
188 return linuxbcmerrormap[-bcmerror];
191 extern uint8* dhd_os_prealloc(void *osh, int section, int size);
193 osl_t *
194 osl_attach(void *pdev, uint bustype, bool pkttag)
196 osl_t *osh;
198 osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
199 ASSERT(osh);
201 bzero(osh, sizeof(osl_t));
203 /* Check that error map has the right number of entries in it */
204 ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
206 osh->magic = OS_HANDLE_MAGIC;
207 atomic_set(&osh->malloced, 0);
208 osh->failed = 0;
209 osh->dbgmem_list = NULL;
210 spin_lock_init(&(osh->dbgmem_lock));
211 osh->pdev = pdev;
212 osh->pub.pkttag = pkttag;
213 osh->bustype = bustype;
215 switch (bustype) {
216 case PCI_BUS:
217 case SI_BUS:
218 case PCMCIA_BUS:
219 osh->pub.mmbus = TRUE;
220 break;
221 case JTAG_BUS:
222 case SDIO_BUS:
223 case USB_BUS:
224 case SPI_BUS:
225 case RPC_BUS:
226 osh->pub.mmbus = FALSE;
227 break;
228 default:
229 ASSERT(FALSE);
230 break;
233 #if defined(DHD_USE_STATIC_BUF)
234 if (!bcm_static_buf) {
235 if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+
236 STATIC_BUF_TOTAL_LEN))) {
237 printk("can not alloc static buf!\n");
239 else
240 printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
243 sema_init(&bcm_static_buf->static_sem, 1);
245 bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
248 if (!bcm_static_skb) {
249 int i;
250 void *skb_buff_ptr = 0;
251 bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
252 skb_buff_ptr = dhd_os_prealloc(osh, 4, 0);
254 bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16);
255 for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++)
256 bcm_static_skb->pkt_use[i] = 0;
258 sema_init(&bcm_static_skb->osl_pkt_sem, 1);
260 #endif /* DHD_USE_STATIC_BUF */
262 spin_lock_init(&(osh->pktalloc_lock));
264 #ifdef BCMDBG
265 if (pkttag) {
266 struct sk_buff *skb;
267 ASSERT(OSL_PKTTAG_SZ <= sizeof(skb->cb));
269 #endif
270 return osh;
273 void
274 osl_detach(osl_t *osh)
276 if (osh == NULL)
277 return;
279 #ifdef DHD_USE_STATIC_BUF
280 if (bcm_static_buf) {
281 bcm_static_buf = 0;
283 if (bcm_static_skb) {
284 bcm_static_skb = 0;
286 #endif
288 ASSERT(osh->magic == OS_HANDLE_MAGIC);
289 kfree(osh);
292 static struct sk_buff *osl_alloc_skb(unsigned int len)
294 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
295 gfp_t flags = GFP_ATOMIC;
297 return __dev_alloc_skb(len, flags);
298 #else
299 return dev_alloc_skb(len);
300 #endif
303 #ifdef CTFPOOL
305 #ifdef CTFPOOL_SPINLOCK
306 #define CTFPOOL_LOCK(ctfpool, flags) spin_lock_irqsave(&(ctfpool)->lock, flags)
307 #define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_irqrestore(&(ctfpool)->lock, flags)
308 #else
309 #define CTFPOOL_LOCK(ctfpool, flags) spin_lock_bh(&(ctfpool)->lock)
310 #define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_bh(&(ctfpool)->lock)
311 #endif /* CTFPOOL_SPINLOCK */
313 * Allocate and add an object to packet pool.
315 void *
316 osl_ctfpool_add(osl_t *osh)
318 struct sk_buff *skb;
319 #ifdef CTFPOOL_SPINLOCK
320 unsigned long flags;
321 #endif /* CTFPOOL_SPINLOCK */
323 if ((osh == NULL) || (osh->ctfpool == NULL))
324 return NULL;
326 CTFPOOL_LOCK(osh->ctfpool, flags);
327 ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
329 /* No need to allocate more objects */
330 if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
331 CTFPOOL_UNLOCK(osh->ctfpool, flags);
332 return NULL;
335 /* Allocate a new skb and add it to the ctfpool */
336 skb = osl_alloc_skb(osh->ctfpool->obj_size);
337 if (skb == NULL) {
338 printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
339 osh->ctfpool->obj_size);
340 CTFPOOL_UNLOCK(osh->ctfpool, flags);
341 return NULL;
344 /* Add to ctfpool */
345 skb->next = (struct sk_buff *)osh->ctfpool->head;
346 osh->ctfpool->head = skb;
347 osh->ctfpool->fast_frees++;
348 osh->ctfpool->curr_obj++;
350 /* Hijack a skb member to store ptr to ctfpool */
351 CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
353 /* Use bit flag to indicate skb from fast ctfpool */
354 PKTFAST(osh, skb) = FASTBUF;
356 CTFPOOL_UNLOCK(osh->ctfpool, flags);
358 return skb;
362 * Add new objects to the pool.
364 void
365 osl_ctfpool_replenish(osl_t *osh, uint thresh)
367 if ((osh == NULL) || (osh->ctfpool == NULL))
368 return;
370 /* Do nothing if no refills are required */
371 while ((osh->ctfpool->refills > 0) && (thresh--)) {
372 osl_ctfpool_add(osh);
373 osh->ctfpool->refills--;
378 * Initialize the packet pool with specified number of objects.
380 int32
381 osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
383 osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
384 ASSERT(osh->ctfpool);
385 bzero(osh->ctfpool, sizeof(ctfpool_t));
387 osh->ctfpool->max_obj = numobj;
388 osh->ctfpool->obj_size = size;
390 spin_lock_init(&osh->ctfpool->lock);
392 while (numobj--) {
393 if (!osl_ctfpool_add(osh))
394 return -1;
395 osh->ctfpool->fast_frees--;
398 return 0;
402 * Cleanup the packet pool objects.
404 void
405 osl_ctfpool_cleanup(osl_t *osh)
407 struct sk_buff *skb, *nskb;
408 #ifdef CTFPOOL_SPINLOCK
409 unsigned long flags;
410 #endif /* CTFPOOL_SPINLOCK */
412 if ((osh == NULL) || (osh->ctfpool == NULL))
413 return;
415 CTFPOOL_LOCK(osh->ctfpool, flags);
417 skb = osh->ctfpool->head;
419 while (skb != NULL) {
420 nskb = skb->next;
421 dev_kfree_skb(skb);
422 skb = nskb;
423 osh->ctfpool->curr_obj--;
426 ASSERT(osh->ctfpool->curr_obj == 0);
427 osh->ctfpool->head = NULL;
428 CTFPOOL_UNLOCK(osh->ctfpool, flags);
430 kfree(osh->ctfpool);
431 osh->ctfpool = NULL;
434 void
435 osl_ctfpool_stats(osl_t *osh, void *b)
437 struct bcmstrbuf *bb;
439 if ((osh == NULL) || (osh->ctfpool == NULL))
440 return;
442 #ifdef DHD_USE_STATIC_BUF
443 if (bcm_static_buf) {
444 bcm_static_buf = 0;
446 if (bcm_static_skb) {
447 bcm_static_skb = 0;
449 #endif /* DHD_USE_STATIC_BUF */
451 bb = b;
453 ASSERT((osh != NULL) && (bb != NULL));
455 bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
456 osh->ctfpool->max_obj, osh->ctfpool->obj_size,
457 osh->ctfpool->curr_obj, osh->ctfpool->refills);
458 bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
459 osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
460 osh->ctfpool->slow_allocs);
463 static inline struct sk_buff *
464 osl_pktfastget(osl_t *osh, uint len)
466 struct sk_buff *skb;
467 #ifdef CTFPOOL_SPINLOCK
468 unsigned long flags;
469 #endif /* CTFPOOL_SPINLOCK */
471 /* Try to do fast allocate. Return null if ctfpool is not in use
472 * or if there are no items in the ctfpool.
474 if (osh->ctfpool == NULL)
475 return NULL;
477 CTFPOOL_LOCK(osh->ctfpool, flags);
478 if (osh->ctfpool->head == NULL) {
479 ASSERT(osh->ctfpool->curr_obj == 0);
480 osh->ctfpool->slow_allocs++;
481 CTFPOOL_UNLOCK(osh->ctfpool, flags);
482 return NULL;
485 ASSERT(len <= osh->ctfpool->obj_size);
487 /* Get an object from ctfpool */
488 skb = (struct sk_buff *)osh->ctfpool->head;
489 osh->ctfpool->head = (void *)skb->next;
491 osh->ctfpool->fast_allocs++;
492 osh->ctfpool->curr_obj--;
493 ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
494 CTFPOOL_UNLOCK(osh->ctfpool, flags);
496 /* Init skb struct */
497 skb->next = skb->prev = NULL;
498 skb->data = skb->head + 16;
499 skb->tail = skb->head + 16;
501 skb->len = 0;
502 skb->cloned = 0;
503 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
504 skb->list = NULL;
505 #endif
506 atomic_set(&skb->users, 1);
508 PKTSETCLINK(skb, NULL);
509 PKTCCLRATTR(skb);
511 return skb;
513 #endif /* CTFPOOL */
514 /* Convert a driver packet to native(OS) packet
515 * In the process, packettag is zeroed out before sending up
516 * IP code depends on skb->cb to be setup correctly with various options
517 * In our case, that means it should be 0
519 struct sk_buff * BCMFASTPATH
520 osl_pkt_tonative(osl_t *osh, void *pkt)
522 struct sk_buff *nskb;
524 if (osh->pub.pkttag)
525 OSL_PKTTAG_CLEAR(pkt);
527 /* Decrement the packet counter */
528 for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
529 atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced);
531 return (struct sk_buff *)pkt;
534 /* Convert a native(OS) packet to driver packet.
535 * In the process, native packet is destroyed, there is no copying
536 * Also, a packettag is zeroed out
538 void * BCMFASTPATH
539 osl_pkt_frmnative(osl_t *osh, void *pkt)
541 struct sk_buff *nskb;
543 if (osh->pub.pkttag)
544 OSL_PKTTAG_CLEAR(pkt);
546 /* Increment the packet counter */
547 for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
548 atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced);
550 return (void *)pkt;
553 /* Return a new packet. zero out pkttag */
554 void * BCMFASTPATH
555 osl_pktget(osl_t *osh, uint len)
557 struct sk_buff *skb;
559 #ifdef CTFPOOL
560 /* Allocate from local pool */
561 skb = osl_pktfastget(osh, len);
562 if ((skb != NULL) || ((skb = osl_alloc_skb(len)) != NULL)) {
563 #else /* CTFPOOL */
564 if ((skb = osl_alloc_skb(len))) {
565 #endif /* CTFPOOL */
566 skb_put(skb, len);
567 skb->priority = 0;
569 atomic_inc(&osh->pktalloced);
572 return ((void*) skb);
#ifdef CTFPOOL
/* Fast-path free: push the skb back onto its originating ctfpool. */
static inline void
osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
{
	ctfpool_t *pool;
#ifdef CTFPOOL_SPINLOCK
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
	skb->tstamp.tv.sec = 0;
#else
	skb->stamp.tv_sec = 0;
#endif

	/* Only reset the fields this layer may have dirtied */
	skb->dev = NULL;
	skb->dst = NULL;
	OSL_PKTTAG_CLEAR(skb);
	skb->ip_summed = 0;
	skb->destructor = NULL;

	pool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
	/* skb may have lost its pool association; drop it silently then */
	if (pool == NULL)
		return;

	/* Push back onto the pool's free list */
	CTFPOOL_LOCK(pool, flags);
	skb->next = (struct sk_buff *)pool->head;
	pool->head = (void *)skb;

	pool->fast_frees++;
	pool->curr_obj++;

	ASSERT(pool->curr_obj <= pool->max_obj);
	CTFPOOL_UNLOCK(pool, flags);
}
#endif /* CTFPOOL */
614 /* Free the driver packet. Free the tag if present */
615 void BCMFASTPATH
616 osl_pktfree(osl_t *osh, void *p, bool send)
618 struct sk_buff *skb, *nskb;
620 skb = (struct sk_buff*) p;
622 if (send && osh->pub.tx_fn)
623 osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
625 PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
627 /* perversion: we use skb->next to chain multi-skb packets */
628 while (skb) {
629 nskb = skb->next;
630 skb->next = NULL;
633 #ifdef CTFMAP
634 /* Clear the map ptr before freeing */
635 PKTCLRCTF(osh, skb);
636 CTFMAPPTR(osh, skb) = NULL;
637 #endif /* CTFMAP */
639 #ifdef CTFPOOL
640 if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
641 osl_pktfastfree(osh, skb);
642 else {
643 #else /* CTFPOOL */
645 #endif /* CTFPOOL */
647 if (skb->destructor)
648 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
649 * destructor exists
651 dev_kfree_skb_any(skb);
652 else
653 /* can free immediately (even in_irq()) if destructor
654 * does not exist
656 dev_kfree_skb(skb);
658 atomic_dec(&osh->pktalloced);
659 skb = nskb;
663 #ifdef DHD_USE_STATIC_BUF
664 void*
665 osl_pktget_static(osl_t *osh, uint len)
667 int i = 0;
668 struct sk_buff *skb;
670 if (len > (PAGE_SIZE*2)) {
671 printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
672 return osl_pktget(osh, len);
675 down(&bcm_static_skb->osl_pkt_sem);
677 if (len <= PAGE_SIZE) {
678 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
679 if (bcm_static_skb->pkt_use[i] == 0)
680 break;
683 if (i != STATIC_PKT_MAX_NUM) {
684 bcm_static_skb->pkt_use[i] = 1;
685 up(&bcm_static_skb->osl_pkt_sem);
686 skb = bcm_static_skb->skb_4k[i];
687 skb->tail = skb->data + len;
688 skb->len = len;
689 return skb;
694 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
695 if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0)
696 break;
699 if (i != STATIC_PKT_MAX_NUM) {
700 bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1;
701 up(&bcm_static_skb->osl_pkt_sem);
702 skb = bcm_static_skb->skb_8k[i];
703 skb->tail = skb->data + len;
704 skb->len = len;
705 return skb;
708 up(&bcm_static_skb->osl_pkt_sem);
709 printk("%s: all static pkt in use!\n", __FUNCTION__);
710 return osl_pktget(osh, len);
713 void
714 osl_pktfree_static(osl_t *osh, void *p, bool send)
716 int i;
718 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
719 if (p == bcm_static_skb->skb_4k[i]) {
720 down(&bcm_static_skb->osl_pkt_sem);
721 bcm_static_skb->pkt_use[i] = 0;
722 up(&bcm_static_skb->osl_pkt_sem);
723 return;
727 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
728 if (p == bcm_static_skb->skb_8k[i]) {
729 down(&bcm_static_skb->osl_pkt_sem);
730 bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
731 up(&bcm_static_skb->osl_pkt_sem);
732 return;
736 return osl_pktfree(osh, p, send);
738 #endif /* DHD_USE_STATIC_BUF */
740 uint32
741 osl_pci_read_config(osl_t *osh, uint offset, uint size)
743 uint val = 0;
744 uint retry = PCI_CFG_RETRY;
746 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
748 /* only 4byte access supported */
749 ASSERT(size == 4);
751 do {
752 pci_read_config_dword(osh->pdev, offset, &val);
753 if (val != 0xffffffff)
754 break;
755 } while (retry--);
757 #ifdef BCMDBG
758 if (retry < PCI_CFG_RETRY)
759 printk("PCI CONFIG READ access to %d required %d retries\n", offset,
760 (PCI_CFG_RETRY - retry));
761 #endif /* BCMDBG */
763 return (val);
766 void
767 osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
769 uint retry = PCI_CFG_RETRY;
771 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
773 /* only 4byte access supported */
774 ASSERT(size == 4);
776 do {
777 pci_write_config_dword(osh->pdev, offset, val);
778 if (offset != PCI_BAR0_WIN)
779 break;
780 if (osl_pci_read_config(osh, offset, size) == val)
781 break;
782 } while (retry--);
784 #ifdef BCMDBG
785 if (retry < PCI_CFG_RETRY)
786 printk("PCI CONFIG WRITE access to %d required %d retries\n", offset,
787 (PCI_CFG_RETRY - retry));
788 #endif /* BCMDBG */
791 /* return bus # for the pci device pointed by osh->pdev */
792 uint
793 osl_pci_bus(osl_t *osh)
795 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
797 return ((struct pci_dev *)osh->pdev)->bus->number;
800 /* return slot # for the pci device pointed by osh->pdev */
801 uint
802 osl_pci_slot(osl_t *osh)
804 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
806 return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
809 /* return the pci device pointed by osh->pdev */
810 struct pci_dev *
811 osl_pci_device(osl_t *osh)
813 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
815 return osh->pdev;
818 static void
819 osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
823 void
824 osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
826 osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
829 void
830 osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
832 osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
835 void *
836 osl_malloc(osl_t *osh, uint size)
838 void *addr;
840 /* only ASSERT if osh is defined */
841 if (osh)
842 ASSERT(osh->magic == OS_HANDLE_MAGIC);
844 #ifdef DHD_USE_STATIC_BUF
845 if (bcm_static_buf)
847 int i = 0;
848 if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
850 down(&bcm_static_buf->static_sem);
852 for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
854 if (bcm_static_buf->buf_use[i] == 0)
855 break;
858 if (i == STATIC_BUF_MAX_NUM)
860 up(&bcm_static_buf->static_sem);
861 printk("all static buff in use!\n");
862 goto original;
865 bcm_static_buf->buf_use[i] = 1;
866 up(&bcm_static_buf->static_sem);
868 bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
869 if (osh)
870 atomic_add(size, &osh->malloced);
872 return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
875 original:
876 #endif /* DHD_USE_STATIC_BUF */
878 if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
879 if (osh)
880 osh->failed++;
881 return (NULL);
883 if (osh)
884 atomic_add(size, &osh->malloced);
886 return (addr);
889 void
890 osl_mfree(osl_t *osh, void *addr, uint size)
892 #ifdef DHD_USE_STATIC_BUF
893 if (bcm_static_buf)
895 if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
896 <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
898 int buf_idx = 0;
900 buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
902 down(&bcm_static_buf->static_sem);
903 bcm_static_buf->buf_use[buf_idx] = 0;
904 up(&bcm_static_buf->static_sem);
906 if (osh) {
907 ASSERT(osh->magic == OS_HANDLE_MAGIC);
908 atomic_sub(size, &osh->malloced);
910 return;
913 #endif /* DHD_USE_STATIC_BUF */
914 if (osh) {
915 ASSERT(osh->magic == OS_HANDLE_MAGIC);
916 atomic_sub(size, &osh->malloced);
918 kfree(addr);
921 uint
922 osl_malloced(osl_t *osh)
924 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
925 return (atomic_read(&osh->malloced));
928 uint
929 osl_malloc_failed(osl_t *osh)
931 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
932 return (osh->failed);
936 uint
937 osl_dma_consistent_align(void)
939 return (PAGE_SIZE);
942 void*
943 osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
945 uint16 align = (1 << align_bits);
946 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
948 if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
949 size += align;
950 *alloced = size;
952 return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
955 void
956 osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
958 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
960 pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
963 uint BCMFASTPATH
964 osl_dma_map(osl_t *osh, void *va, uint size, int direction)
966 int dir;
968 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
969 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
970 return (pci_map_single(osh->pdev, va, size, dir));
973 void BCMFASTPATH
974 osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
976 int dir;
978 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
979 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
980 pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
984 void
985 osl_delay(uint usec)
987 uint d;
989 while (usec > 0) {
990 d = MIN(usec, 1000);
991 udelay(d);
992 usec -= d;
#if defined(DSLCPE_DELAY)

/* Attach the shared-OSL state and derive an approximate MHz figure
 * from loops_per_jiffy for use by osl_long_delay().
 */
void
osl_oshsh_init(osl_t *osh, shared_osl_t* oshsh)
{
	extern unsigned long loops_per_jiffy;
	osh->oshsh = oshsh;
	osh->oshsh->MIPS = loops_per_jiffy / (500000 / HZ);
}

/* Nonzero while some caller is inside osl_long_delay(). */
int
in_long_delay(osl_t *osh)
{
	return osh->oshsh->long_delay;
}

/* Delay at least `usec` microseconds. When `yield` is set and context
 * allows, drop the shared lock and schedule() instead of spinning,
 * crediting the elapsed cycles against the remaining delay.
 */
void
osl_long_delay(osl_t *osh, uint usec, bool yield)
{
	uint d;
	bool yielded = TRUE;
	int usec_to_delay = usec;
	unsigned long tick1, tick2, tick_diff = 0;

	/* delay at least requested usec */
	while (usec_to_delay > 0) {
		if (!yield || !yielded) {
			d = MIN(usec_to_delay, 10);
			udelay(d);
			usec_to_delay -= d;
		}
		if (usec_to_delay > 0) {
			osh->oshsh->long_delay++;
			OSL_GETCYCLES(tick1);
			spin_unlock_bh(osh->oshsh->lock);
			if (usec_to_delay > 0 && !in_irq() && !in_softirq() && !in_interrupt()) {
				schedule();
				yielded = TRUE;
			} else {
				yielded = FALSE;
			}
			spin_lock_bh(osh->oshsh->lock);
			OSL_GETCYCLES(tick2);

			if (yielded) {
				tick_diff = TICKDIFF(tick2, tick1);
				tick_diff = (tick_diff * 2) / (osh->oshsh->MIPS);
				if (tick_diff) {
					usec_to_delay -= tick_diff;
				} else
					yielded = 0;
			}
			osh->oshsh->long_delay--;
			ASSERT(osh->oshsh->long_delay >= 0);
		}
	}
}
#endif /* DSLCPE_DELAY */
1055 /* Clone a packet.
1056 * The pkttag contents are NOT cloned.
1058 void *
1059 osl_pktdup(osl_t *osh, void *skb)
1061 void * p;
1063 ASSERT(!PKTISCHAINED(skb));
1065 /* clear the CTFBUF flag if set and map the rest of the buffer
1066 * before cloning.
1068 PKTCTFMAP(osh, skb);
1070 if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
1071 return NULL;
1073 #ifdef CTFPOOL
1074 if (PKTISFAST(osh, skb)) {
1075 ctfpool_t *ctfpool;
1077 /* if the buffer allocated from ctfpool is cloned then
1078 * we can't be sure when it will be freed. since there
1079 * is a chance that we will be losing a buffer
1080 * from our pool, we increment the refill count for the
1081 * object to be alloced later.
1083 ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
1084 ASSERT(ctfpool != NULL);
1085 PKTCLRFAST(osh, p);
1086 PKTCLRFAST(osh, skb);
1087 ctfpool->refills++;
1089 #endif /* CTFPOOL */
1091 /* skb_clone copies skb->cb.. we don't want that */
1092 if (osh->pub.pkttag)
1093 OSL_PKTTAG_CLEAR(p);
1095 /* Increment the packet counter */
1096 atomic_inc(&osh->pktalloced);
1097 return (p);
/*
 * OSLREGOPS specifies the use of osl_XXX routines to be used for
 * register access: each call dispatches through the rreg/wreg hooks
 * registered in osl_pubinfo_t.
 */
#ifdef OSLREGOPS
uint8
osl_readb(osl_t *osh, volatile uint8 *r)
{
	osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
	void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;

	return (uint8)((rreg)(ctx, (void*)r, sizeof(uint8)));
}

uint16
osl_readw(osl_t *osh, volatile uint16 *r)
{
	osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
	void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;

	return (uint16)((rreg)(ctx, (void*)r, sizeof(uint16)));
}

uint32
osl_readl(osl_t *osh, volatile uint32 *r)
{
	osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
	void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;

	return (uint32)((rreg)(ctx, (void*)r, sizeof(uint32)));
}

void
osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v)
{
	osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
	void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;

	((wreg)(ctx, (void*)r, v, sizeof(uint8)));
}

void
osl_writew(osl_t *osh, volatile uint16 *r, uint16 v)
{
	osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
	void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;

	((wreg)(ctx, (void*)r, v, sizeof(uint16)));
}

void
osl_writel(osl_t *osh, volatile uint32 *r, uint32 v)
{
	osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
	void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;

	((wreg)(ctx, (void*)r, v, sizeof(uint32)));
}
#endif /* OSLREGOPS */
1163 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1165 #ifdef BINOSL
1167 uint32
1168 osl_sysuptime(void)
1170 return ((uint32)jiffies * (1000 / HZ));
1174 osl_printf(const char *format, ...)
1176 va_list args;
1177 static char printbuf[1024];
1178 int len;
1180 /* sprintf into a local buffer because there *is* no "vprintk()".. */
1181 va_start(args, format);
1182 len = vsnprintf(printbuf, 1024, format, args);
1183 va_end(args);
1185 if (len > sizeof(printbuf)) {
1186 printk("osl_printf: buffer overrun\n");
1187 return (0);
1190 return (printk("%s", printbuf));
1194 osl_sprintf(char *buf, const char *format, ...)
1196 va_list args;
1197 int rc;
1199 va_start(args, format);
1200 rc = vsprintf(buf, format, args);
1201 va_end(args);
1202 return (rc);
1206 osl_snprintf(char *buf, size_t n, const char *format, ...)
1208 va_list args;
1209 int rc;
1211 va_start(args, format);
1212 rc = vsnprintf(buf, n, format, args);
1213 va_end(args);
1214 return (rc);
1218 osl_vsprintf(char *buf, const char *format, va_list ap)
1220 return (vsprintf(buf, format, ap));
1224 osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap)
1226 return (vsnprintf(buf, n, format, ap));
1230 osl_strcmp(const char *s1, const char *s2)
1232 return (strcmp(s1, s2));
1236 osl_strncmp(const char *s1, const char *s2, uint n)
1238 return (strncmp(s1, s2, n));
1242 osl_strlen(const char *s)
1244 return (strlen(s));
1247 char*
1248 osl_strcpy(char *d, const char *s)
1250 return (strcpy(d, s));
1253 char*
1254 osl_strncpy(char *d, const char *s, uint n)
1256 return (strncpy(d, s, n));
1259 char*
1260 osl_strchr(const char *s, int c)
1262 return (strchr(s, c));
1265 char*
1266 osl_strrchr(const char *s, int c)
1268 return (strrchr(s, c));
1271 void*
1272 osl_memset(void *d, int c, size_t n)
1274 return memset(d, c, n);
1277 void*
1278 osl_memcpy(void *d, const void *s, size_t n)
1280 return memcpy(d, s, n);
1283 void*
1284 osl_memmove(void *d, const void *s, size_t n)
1286 return memmove(d, s, n);
1290 osl_memcmp(const void *s1, const void *s2, size_t n)
1292 return memcmp(s1, s2, n);
1295 uint32
1296 osl_readl(volatile uint32 *r)
1298 return (readl(r));
1301 uint16
1302 osl_readw(volatile uint16 *r)
1304 return (readw(r));
1307 uint8
1308 osl_readb(volatile uint8 *r)
1310 return (readb(r));
1313 void
1314 osl_writel(uint32 v, volatile uint32 *r)
1316 writel(v, r);
1319 void
1320 osl_writew(uint16 v, volatile uint16 *r)
1322 writew(v, r);
1325 void
1326 osl_writeb(uint8 v, volatile uint8 *r)
1328 writeb(v, r);
1331 void *
1332 osl_uncached(void *va)
1334 #ifdef mips
1335 return ((void*)KSEG1ADDR(va));
1336 #else
1337 return ((void*)va);
1338 #endif /* mips */
1341 void *
1342 osl_cached(void *va)
1344 #ifdef mips
1345 return ((void*)KSEG0ADDR(va));
1346 #else
1347 return ((void*)va);
1348 #endif /* mips */
1351 uint
1352 osl_getcycles(void)
1354 uint cycles;
1356 #if defined(mips)
1357 cycles = read_c0_count() * 2;
1358 #elif defined(__i386__)
1359 rdtscl(cycles);
1360 #else
1361 cycles = 0;
1362 #endif /* defined(mips) */
1363 return cycles;
1366 void *
1367 osl_reg_map(uint32 pa, uint size)
1369 return (ioremap_nocache((unsigned long)pa, (unsigned long)size));
1372 void
1373 osl_reg_unmap(void *va)
1375 iounmap(va);
1379 osl_busprobe(uint32 *val, uint32 addr)
1381 #ifdef mips
1382 return get_dbe(*val, (uint32 *)addr);
1383 #else
1384 *val = readl((uint32 *)(uintptr)addr);
1385 return 0;
1386 #endif /* mips */
1389 bool
1390 osl_pktshared(void *skb)
1392 return (((struct sk_buff*)skb)->cloned);
1395 uchar*
1396 osl_pktdata(osl_t *osh, void *skb)
1398 return (((struct sk_buff*)skb)->data);
1401 uint
1402 osl_pktlen(osl_t *osh, void *skb)
1404 return (((struct sk_buff*)skb)->len);
1407 uint
1408 osl_pktheadroom(osl_t *osh, void *skb)
1410 return (uint) skb_headroom((struct sk_buff *) skb);
1413 uint
1414 osl_pkttailroom(osl_t *osh, void *skb)
1416 return (uint) skb_tailroom((struct sk_buff *) skb);
1419 void*
1420 osl_pktnext(osl_t *osh, void *skb)
1422 return (((struct sk_buff*)skb)->next);
1425 void
1426 osl_pktsetnext(void *skb, void *x)
1428 ((struct sk_buff*)skb)->next = (struct sk_buff*)x;
1431 void
1432 osl_pktsetlen(osl_t *osh, void *skb, uint len)
1434 __skb_trim((struct sk_buff*)skb, len);
1437 uchar*
1438 osl_pktpush(osl_t *osh, void *skb, int bytes)
1440 return (skb_push((struct sk_buff*)skb, bytes));
1443 uchar*
1444 osl_pktpull(osl_t *osh, void *skb, int bytes)
1446 return (skb_pull((struct sk_buff*)skb, bytes));
1449 void*
1450 osl_pkttag(void *skb)
1452 return ((void*)(((struct sk_buff*)skb)->cb));
1455 void*
1456 osl_pktlink(void *skb)
1458 return (((struct sk_buff*)skb)->prev);
1461 void
1462 osl_pktsetlink(void *skb, void *x)
1464 ((struct sk_buff*)skb)->prev = (struct sk_buff*)x;
1467 uint
1468 osl_pktprio(void *skb)
1470 return (((struct sk_buff*)skb)->priority);
1473 void
1474 osl_pktsetprio(void *skb, uint x)
1476 ((struct sk_buff*)skb)->priority = x;
1478 #endif /* BINOSL */
1480 uint
1481 osl_pktalloced(osl_t *osh)
1483 return (atomic_read(&osh->pktalloced));
1486 /* Linux Kernel: File Operations: start */
1487 void *
1488 osl_os_open_image(char *filename)
1490 struct file *fp;
1492 fp = filp_open(filename, O_RDONLY, 0);
1494 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1495 * Alternative:
1496 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1497 * ???
1499 if (IS_ERR(fp))
1500 fp = NULL;
1502 return fp;
1506 osl_os_get_image_block(char *buf, int len, void *image)
1508 struct file *fp = (struct file *)image;
1509 int rdlen;
1511 if (!image)
1512 return 0;
1514 rdlen = kernel_read(fp, fp->f_pos, buf, len);
1515 if (rdlen > 0)
1516 fp->f_pos += rdlen;
1518 return rdlen;
1521 void
1522 osl_os_close_image(void *image)
1524 if (image)
1525 filp_close((struct file *)image, NULL);
1527 /* Linux Kernel: File Operations: end */