GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / shared / linux_osl.c
blobc07cee49789d7e8fdb74db85b91a53c1b12dd763
1 /*
2 * Linux OS Independent Layer
4 * Copyright (C) 2012, Broadcom Corporation. All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 * $Id: linux_osl.c 400835 2013-05-07 19:23:31Z $
21 #define LINUX_PORT
23 #include <typedefs.h>
24 #include <bcmendian.h>
25 #include <linuxver.h>
26 #include <bcmdefs.h>
27 #include <osl.h>
28 #include <bcmutils.h>
29 #include <linux/delay.h>
30 #ifdef mips
31 #include <asm/paccess.h>
32 #endif /* mips */
33 #include <pcicfg.h>
37 #include <linux/fs.h>
39 #define PCI_CFG_RETRY 10
41 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
42 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
/* Preallocated-memory support (DHD builds): fixed pools of heap buffers and
 * skbs handed out instead of kmalloc/dev_alloc_skb on memory-tight platforms.
 */
44 #ifdef DHD_USE_STATIC_BUF
45 #define STATIC_BUF_MAX_NUM 16
46 #define STATIC_BUF_SIZE (PAGE_SIZE*2)
47 #define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
/* Control block for the static buffer pool: buf_ptr points at the first
 * buffer, buf_use[i] marks slot i in use; both guarded by static_sem.
 */
49 typedef struct bcm_static_buf {
50 struct semaphore static_sem;
51 unsigned char *buf_ptr;
52 unsigned char buf_use[STATIC_BUF_MAX_NUM];
53 } bcm_static_buf_t;
55 static bcm_static_buf_t *bcm_static_buf = 0;
57 #define STATIC_PKT_MAX_NUM 8
/* Static packet pool: 4k skbs in slots [0..7], 8k skbs in slots [8..15] of
 * pkt_use; allocation state guarded by osl_pkt_sem.
 */
59 typedef struct bcm_static_pkt {
60 struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM];
61 struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM];
62 struct semaphore osl_pkt_sem;
63 unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2];
64 } bcm_static_pkt_t;
66 static bcm_static_pkt_t *bcm_static_skb = 0;
67 #endif /* DHD_USE_STATIC_BUF */
/* Node of the doubly-linked debug-allocation list (osh->dbgmem_list):
 * records size and the file/line of the allocation site.
 */
69 typedef struct bcm_mem_link {
70 struct bcm_mem_link *prev;
71 struct bcm_mem_link *next;
72 uint size;
73 int line;
74 void *osh;
75 char file[BCM_MEM_FILENAME_LEN];
76 } bcm_mem_link_t;
/* Per-instance OSL state. pub (osl_pubinfo_t) must stay first: external code
 * casts osl_t* to osl_pubinfo_t*. magic is checked against OS_HANDLE_MAGIC
 * by most entry points.
 */
78 struct osl_info {
79 osl_pubinfo_t pub;
80 #ifdef CTFPOOL
81 ctfpool_t *ctfpool;
82 #endif /* CTFPOOL */
83 uint magic;
84 void *pdev;
/* malloced/pktalloced are atomics: updated from fast paths without a lock */
85 atomic_t malloced;
86 atomic_t pktalloced; /* Number of allocated packet buffers */
87 uint failed;
88 uint bustype;
89 bcm_mem_link_t *dbgmem_list;
90 #if defined(DSLCPE_DELAY)
91 shared_osl_t *oshsh; /* osh shared */
92 #endif
93 spinlock_t dbgmem_lock;
94 #ifdef BCMDBG_CTRACE
95 spinlock_t ctrace_lock;
96 struct list_head ctrace_list;
97 int ctrace_num;
98 #endif /* BCMDBG_CTRACE */
99 spinlock_t pktalloc_lock;
/* NOTE(review): the closing "};" of this struct appears to have been lost in
 * extraction of this file view — confirm against the original source. */
/* Zero the 32-byte packet tag stored in skb->cb, 4 bytes at a time.
 * The ASSERT pins OSL_PKTTAG_SZ == 32 so the unrolled stores stay in sync
 * with the tag size. Multi-statement macro wrapped in do/while(0).
 */
102 #define OSL_PKTTAG_CLEAR(p) \
103 do { \
104 struct sk_buff *s = (struct sk_buff *)(p); \
105 ASSERT(OSL_PKTTAG_SZ == 32); \
106 *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
107 *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
108 *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
109 *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
110 } while (0)
112 /* PCMCIA attribute space access macros */
113 #if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
/* Per-device PCMCIA state: the mapped attribute-memory window and its size.
 * dev_node_t went away in 2.6.35, hence the version guard.
 */
114 struct pcmcia_dev {
115 dev_link_t link; /* PCMCIA device pointer */
116 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
117 dev_node_t node; /* PCMCIA node structure */
118 #endif
119 void *base; /* Mapped attribute memory window */
120 size_t size; /* Size of window */
121 void *drv; /* Driver data */
/* NOTE(review): closing "};" appears lost in this extracted view */
123 #endif /* defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) */
125 /* Global ASSERT type flag */
126 uint32 g_assert_type = FALSE;
/* BCME_* -> -errno translation table, indexed by -bcmerror (entry 0 is
 * success). Order must track the BCME_* definitions in bcmutils.h; the
 * BCME_LAST compile-time check below forces an update when a code is added.
 */
128 static int16 linuxbcmerrormap[] =
129 { 0, /* 0 */
130 -EINVAL, /* BCME_ERROR */
131 -EINVAL, /* BCME_BADARG */
132 -EINVAL, /* BCME_BADOPTION */
133 -EINVAL, /* BCME_NOTUP */
134 -EINVAL, /* BCME_NOTDOWN */
135 -EINVAL, /* BCME_NOTAP */
136 -EINVAL, /* BCME_NOTSTA */
137 -EINVAL, /* BCME_BADKEYIDX */
138 -EINVAL, /* BCME_RADIOOFF */
139 -EINVAL, /* BCME_NOTBANDLOCKED */
140 -EINVAL, /* BCME_NOCLK */
141 -EINVAL, /* BCME_BADRATESET */
142 -EINVAL, /* BCME_BADBAND */
143 -E2BIG, /* BCME_BUFTOOSHORT */
144 -E2BIG, /* BCME_BUFTOOLONG */
145 -EBUSY, /* BCME_BUSY */
146 -EINVAL, /* BCME_NOTASSOCIATED */
147 -EINVAL, /* BCME_BADSSIDLEN */
148 -EINVAL, /* BCME_OUTOFRANGECHAN */
149 -EINVAL, /* BCME_BADCHAN */
150 -EFAULT, /* BCME_BADADDR */
151 -ENOMEM, /* BCME_NORESOURCE */
152 -EOPNOTSUPP, /* BCME_UNSUPPORTED */
153 -EMSGSIZE, /* BCME_BADLENGTH */
154 -EINVAL, /* BCME_NOTREADY */
155 -EPERM, /* BCME_EPERM */
156 -ENOMEM, /* BCME_NOMEM */
157 -EINVAL, /* BCME_ASSOCIATED */
158 -ERANGE, /* BCME_RANGE */
159 -EINVAL, /* BCME_NOTFOUND */
160 -EINVAL, /* BCME_WME_NOT_ENABLED */
161 -EINVAL, /* BCME_TSPEC_NOTFOUND */
162 -EINVAL, /* BCME_ACM_NOTSUPPORTED */
163 -EINVAL, /* BCME_NOT_WME_ASSOCIATION */
164 -EIO, /* BCME_SDIO_ERROR */
165 -ENODEV, /* BCME_DONGLE_DOWN */
166 -EINVAL, /* BCME_VERSION */
167 -EIO, /* BCME_TXFAIL */
168 -EIO, /* BCME_RXFAIL */
169 -ENODEV, /* BCME_NODEVICE */
170 -EINVAL, /* BCME_NMODE_DISABLED */
171 -ENODATA, /* BCME_NONRESIDENT */
172 -EINVAL, /* BCME_SCANREJECT */
173 /* When an new error code is added to bcmutils.h, add os
174 * specific error translation here as well
176 /* check if BCME_LAST changed since the last time this function was updated */
177 #if BCME_LAST != -43
178 #error "You need to add a OS error translation in the linuxbcmerrormap \
179 for new error code defined in bcmutils.h"
180 #endif
183 /* translate bcmerrors into linux errors */
185 osl_error(int bcmerror)
187 if (bcmerror > 0)
188 bcmerror = 0;
189 else if (bcmerror < BCME_LAST)
190 bcmerror = BCME_ERROR;
192 /* Array bounds covered by ASSERT in osl_attach */
193 return linuxbcmerrormap[-bcmerror];
196 extern uint8* dhd_os_prealloc(void *osh, int section, int size);
/* Allocate and initialize an OSL handle for a device on the given bus.
 * Sets pub.mmbus according to whether the bus is memory-mapped, and (in
 * DHD static-buf builds) lazily sets up the preallocated buffer/skb pools.
 * Returns the new handle (allocation failure is only caught by ASSERT).
 */
198 osl_t *
199 osl_attach(void *pdev, uint bustype, bool pkttag)
201 osl_t *osh;
203 osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
204 ASSERT(osh);
206 bzero(osh, sizeof(osl_t));
208 /* Check that error map has the right number of entries in it */
209 ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
211 osh->magic = OS_HANDLE_MAGIC;
212 atomic_set(&osh->malloced, 0);
213 osh->failed = 0;
214 osh->dbgmem_list = NULL;
215 spin_lock_init(&(osh->dbgmem_lock));
216 osh->pdev = pdev;
217 osh->pub.pkttag = pkttag;
218 osh->bustype = bustype;
/* Memory-mapped buses access registers directly; the rest go through
 * bus-specific read/write routines.
 */
220 switch (bustype) {
221 case PCI_BUS:
222 case SI_BUS:
223 case PCMCIA_BUS:
224 osh->pub.mmbus = TRUE;
225 break;
226 case JTAG_BUS:
227 case SDIO_BUS:
228 case USB_BUS:
229 case SPI_BUS:
230 case RPC_BUS:
231 osh->pub.mmbus = FALSE;
232 break;
233 default:
234 ASSERT(FALSE);
235 break;
/* One-time setup of the shared static pools (first osl_attach wins) */
238 #if defined(DHD_USE_STATIC_BUF)
239 if (!bcm_static_buf) {
240 if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+
241 STATIC_BUF_TOTAL_LEN))) {
242 printk("can not alloc static buf!\n");
244 else
245 printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf);
248 sema_init(&bcm_static_buf->static_sem, 1);
250 bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
253 if (!bcm_static_skb) {
254 int i;
255 void *skb_buff_ptr = 0;
/* NOTE(review): skb pool control block is carved out at a fixed 2048-byte
 * offset inside the static buf region — confirm against dhd_os_prealloc
 * section layout. */
256 bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
257 skb_buff_ptr = dhd_os_prealloc(osh, 4, 0);
259 bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16);
260 for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++)
261 bcm_static_skb->pkt_use[i] = 0;
263 sema_init(&bcm_static_skb->osl_pkt_sem, 1);
265 #endif /* DHD_USE_STATIC_BUF */
267 #ifdef BCMDBG_CTRACE
268 spin_lock_init(&osh->ctrace_lock);
269 INIT_LIST_HEAD(&osh->ctrace_list);
270 osh->ctrace_num = 0;
271 #endif /* BCMDBG_CTRACE */
273 spin_lock_init(&(osh->pktalloc_lock));
275 #ifdef BCMDBG
276 if (pkttag) {
277 struct sk_buff *skb;
278 BCM_REFERENCE(skb);
279 ASSERT(OSL_PKTTAG_SZ <= sizeof(skb->cb));
281 #endif
282 return osh;
/* Release an OSL handle created by osl_attach. In static-buf builds the
 * pool pointers are only cleared, not freed — the memory came from
 * dhd_os_prealloc and is owned elsewhere.
 */
285 void
286 osl_detach(osl_t *osh)
288 if (osh == NULL)
289 return;
291 #ifdef DHD_USE_STATIC_BUF
292 if (bcm_static_buf) {
293 bcm_static_buf = 0;
295 if (bcm_static_skb) {
296 bcm_static_skb = 0;
298 #endif
300 ASSERT(osh->magic == OS_HANDLE_MAGIC);
301 kfree(osh);
/* Allocate an skb of the given length with GFP_ATOMIC (and GFP_DMA when the
 * kernel config requires DMA-zone memory). On CTF builds the fresh buffer is
 * also DMA-mapped for receive.
 */
304 static struct sk_buff *osl_alloc_skb(osl_t *osh, unsigned int len)
306 struct sk_buff *skb;
308 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
309 gfp_t flags = GFP_ATOMIC;
310 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
311 flags |= GFP_DMA;
312 #endif
313 skb = __dev_alloc_skb(len, flags);
314 #else
315 skb = dev_alloc_skb(len);
316 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
318 #ifdef CTFMAP
319 if (skb) {
320 _DMA_MAP(osh, PKTDATA(osh, skb), len, DMA_RX, NULL, NULL);
322 #endif
324 return skb;
327 #ifdef CTFPOOL
/* Pool locking: irqsave variant when the pool may be touched from hard-IRQ
 * context (CTFPOOL_SPINLOCK), bottom-half-disabling variant otherwise. The
 * flags argument is unused in the _bh variant but kept for a uniform call.
 */
329 #ifdef CTFPOOL_SPINLOCK
330 #define CTFPOOL_LOCK(ctfpool, flags) spin_lock_irqsave(&(ctfpool)->lock, flags)
331 #define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_irqrestore(&(ctfpool)->lock, flags)
332 #else
333 #define CTFPOOL_LOCK(ctfpool, flags) spin_lock_bh(&(ctfpool)->lock)
334 #define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_bh(&(ctfpool)->lock)
335 #endif /* CTFPOOL_SPINLOCK */
337 * Allocate and add an object to packet pool.
/* Allocates one skb of the pool's object size and pushes it on the pool's
 * free list (linked through skb->next). Returns the skb, or NULL when the
 * pool is absent, already full, or the allocation fails.
 */
339 void *
340 osl_ctfpool_add(osl_t *osh)
342 struct sk_buff *skb;
343 #ifdef CTFPOOL_SPINLOCK
344 unsigned long flags;
345 #endif /* CTFPOOL_SPINLOCK */
347 if ((osh == NULL) || (osh->ctfpool == NULL))
348 return NULL;
350 CTFPOOL_LOCK(osh->ctfpool, flags);
351 ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
353 /* No need to allocate more objects */
354 if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
355 CTFPOOL_UNLOCK(osh->ctfpool, flags);
356 return NULL;
/* NOTE(review): the pool lock is held across osl_alloc_skb below —
 * allocation under a spinlock relies on GFP_ATOMIC in osl_alloc_skb. */
359 /* Allocate a new skb and add it to the ctfpool */
360 skb = osl_alloc_skb(osh, osh->ctfpool->obj_size);
361 if (skb == NULL) {
362 printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
363 osh->ctfpool->obj_size);
364 CTFPOOL_UNLOCK(osh->ctfpool, flags);
365 return NULL;
367 skb->next = (struct sk_buff *)osh->ctfpool->head;
368 osh->ctfpool->head = skb;
369 osh->ctfpool->fast_frees++;
370 osh->ctfpool->curr_obj++;
372 /* Hijack a skb member to store ptr to ctfpool */
373 CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
375 /* Use bit flag to indicate skb from fast ctfpool */
376 PKTFAST(osh, skb) = FASTBUF;
378 CTFPOOL_UNLOCK(osh->ctfpool, flags);
380 return skb;
384 * Add new objects to the pool.
/* Top up the pool: adds at most `thresh` objects, and only while a refill
 * debt (recorded by osl_pktdup when a pooled skb was cloned) is outstanding.
 */
386 void
387 osl_ctfpool_replenish(osl_t *osh, uint thresh)
389 if ((osh == NULL) || (osh->ctfpool == NULL))
390 return;
392 /* Do nothing if no refills are required */
393 while ((osh->ctfpool->refills > 0) && (thresh--)) {
394 osl_ctfpool_add(osh);
395 osh->ctfpool->refills--;
400 * Initialize the packet pool with specified number of objects.
/* Create the ctfpool control block and prefill it with `numobj` skbs of
 * `size` bytes each. Returns 0 on success, -1 if any prefill allocation
 * fails. fast_frees is decremented to cancel the per-add increment so the
 * counter reflects only real frees.
 */
402 int32
403 osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
405 osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
406 ASSERT(osh->ctfpool);
407 bzero(osh->ctfpool, sizeof(ctfpool_t));
409 osh->ctfpool->max_obj = numobj;
410 osh->ctfpool->obj_size = size;
412 spin_lock_init(&osh->ctfpool->lock);
414 while (numobj--) {
415 if (!osl_ctfpool_add(osh))
416 return -1;
417 osh->ctfpool->fast_frees--;
420 return 0;
424 * Cleanup the packet pool objects.
/* Free every skb on the pool's free list, then the pool itself. Any pooled
 * skbs currently handed out are not tracked here — the final ASSERT on
 * curr_obj == 0 expects all of them returned before cleanup.
 */
426 void
427 osl_ctfpool_cleanup(osl_t *osh)
429 struct sk_buff *skb, *nskb;
430 #ifdef CTFPOOL_SPINLOCK
431 unsigned long flags;
432 #endif /* CTFPOOL_SPINLOCK */
434 if ((osh == NULL) || (osh->ctfpool == NULL))
435 return;
437 CTFPOOL_LOCK(osh->ctfpool, flags);
439 skb = osh->ctfpool->head;
441 while (skb != NULL) {
442 nskb = skb->next;
443 dev_kfree_skb(skb);
444 skb = nskb;
445 osh->ctfpool->curr_obj--;
448 ASSERT(osh->ctfpool->curr_obj == 0);
449 osh->ctfpool->head = NULL;
450 CTFPOOL_UNLOCK(osh->ctfpool, flags);
452 kfree(osh->ctfpool);
453 osh->ctfpool = NULL;
/* Dump pool counters into the caller-supplied bcmstrbuf (passed as void*). */
456 void
457 osl_ctfpool_stats(osl_t *osh, void *b)
459 struct bcmstrbuf *bb;
461 if ((osh == NULL) || (osh->ctfpool == NULL))
462 return;
/* NOTE(review): clearing the static-buf pool pointers inside a stats dump
 * looks out of place (same code as osl_detach) — confirm this is intended
 * and not a copy/paste remnant. */
464 #ifdef DHD_USE_STATIC_BUF
465 if (bcm_static_buf) {
466 bcm_static_buf = 0;
468 if (bcm_static_skb) {
469 bcm_static_skb = 0;
471 #endif /* DHD_USE_STATIC_BUF */
473 bb = b;
475 ASSERT((osh != NULL) && (bb != NULL));
477 bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
478 osh->ctfpool->max_obj, osh->ctfpool->obj_size,
479 osh->ctfpool->curr_obj, osh->ctfpool->refills);
480 bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
481 osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
482 osh->ctfpool->slow_allocs);
/* Fast-path packet allocation: pop an skb from the ctfpool free list instead
 * of calling the kernel allocator. Returns NULL when there is no pool or it
 * is empty (caller falls back to osl_alloc_skb). The popped skb is re-primed
 * to look freshly allocated: data/tail reset to the headroom offset, length
 * and clone state cleared, refcount set to 1, PKTC chain state cleared.
 */
485 static inline struct sk_buff *
486 osl_pktfastget(osl_t *osh, uint len)
488 struct sk_buff *skb;
489 #ifdef CTFPOOL_SPINLOCK
490 unsigned long flags;
491 #endif /* CTFPOOL_SPINLOCK */
493 /* Try to do fast allocate. Return null if ctfpool is not in use
494 * or if there are no items in the ctfpool.
496 if (osh->ctfpool == NULL)
497 return NULL;
499 CTFPOOL_LOCK(osh->ctfpool, flags);
500 if (osh->ctfpool->head == NULL) {
501 ASSERT(osh->ctfpool->curr_obj == 0);
502 osh->ctfpool->slow_allocs++;
503 CTFPOOL_UNLOCK(osh->ctfpool, flags);
504 return NULL;
507 ASSERT(len <= osh->ctfpool->obj_size);
509 /* Get an object from ctfpool */
510 skb = (struct sk_buff *)osh->ctfpool->head;
511 osh->ctfpool->head = (void *)skb->next;
513 osh->ctfpool->fast_allocs++;
514 osh->ctfpool->curr_obj--;
515 ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
516 CTFPOOL_UNLOCK(osh->ctfpool, flags);
518 /* Init skb struct */
519 skb->next = skb->prev = NULL;
/* Restore the same headroom __dev_alloc_skb would leave (NET_SKB_PAD on
 * ARMv7 builds, the historical 16 bytes elsewhere). */
520 #if defined(__ARM_ARCH_7A__)
521 skb->data = skb->head + NET_SKB_PAD;
522 skb->tail = skb->head + NET_SKB_PAD;
523 #else
524 skb->data = skb->head + 16;
525 skb->tail = skb->head + 16;
526 #endif /* __ARM_ARCH_7A__ */
527 skb->len = 0;
528 skb->cloned = 0;
529 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
530 skb->list = NULL;
531 #endif
532 atomic_set(&skb->users, 1);
534 PKTSETCLINK(skb, NULL);
535 PKTCCLRATTR(skb);
536 PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);
538 return skb;
541 /* Convert a driver packet to native(OS) packet
542 * In the process, packettag is zeroed out before sending up
543 * IP code depends on skb->cb to be setup correctly with various options
544 * In our case, that means it should be 0
/* Hands a driver packet (possibly a chain via skb->next) back to the kernel:
 * clears the pkttag (skb->cb) and decrements osh->pktalloced by the chain
 * count of each skb. Also drops ctrace bookkeeping in debug builds.
 */
546 struct sk_buff * BCMFASTPATH
547 osl_pkt_tonative(osl_t *osh, void *pkt)
549 struct sk_buff *nskb;
550 #ifdef BCMDBG_CTRACE
551 struct sk_buff *nskb1, *nskb2;
552 #endif
554 if (osh->pub.pkttag)
555 OSL_PKTTAG_CLEAR(pkt);
557 /* Decrement the packet counter */
558 for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
559 atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced);
561 #ifdef BCMDBG_CTRACE
562 for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
563 if (PKTISCHAINED(nskb1)) {
564 nskb2 = PKTCLINK(nskb1);
566 else
567 nskb2 = NULL;
569 DEL_CTRACE(osh, nskb1);
571 #endif /* BCMDBG_CTRACE */
573 return (struct sk_buff *)pkt;
576 /* Convert a native(OS) packet to driver packet.
577 * In the process, native packet is destroyed, there is no copying
578 * Also, a packettag is zeroed out
/* Mirror of osl_pkt_tonative: takes ownership of a kernel skb (chain),
 * clears the pkttag, and increments osh->pktalloced per chained skb.
 * Debug builds record the call site via ctrace.
 */
580 #ifdef BCMDBG_CTRACE
581 void * BCMFASTPATH
582 osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
583 #else
584 void * BCMFASTPATH
585 osl_pkt_frmnative(osl_t *osh, void *pkt)
586 #endif /* BCMDBG_CTRACE */
588 struct sk_buff *nskb;
589 #ifdef BCMDBG_CTRACE
590 struct sk_buff *nskb1, *nskb2;
591 #endif
593 if (osh->pub.pkttag)
594 OSL_PKTTAG_CLEAR(pkt);
596 /* Increment the packet counter */
597 for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
598 atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced);
600 #ifdef BCMDBG_CTRACE
601 for (nskb1 = nskb; nskb1 != NULL; nskb1 = nskb2) {
602 if (PKTISCHAINED(nskb1)) {
603 nskb2 = PKTCLINK(nskb1);
605 else
606 nskb2 = NULL;
608 ADD_CTRACE(osh, nskb1, file, line);
610 #endif /* BCMDBG_CTRACE */
612 return (void *)pkt;
615 /* Return a new packet. zero out pkttag */
/* Allocate a packet of `len` bytes: try the ctfpool fast path first (when
 * built with CTFPOOL), then fall back to the kernel allocator. On success
 * the data area is extended to `len` and osh->pktalloced is bumped.
 * Returns NULL on allocation failure.
 */
616 #ifdef BCMDBG_CTRACE
617 void * BCMFASTPATH
618 osl_pktget(osl_t *osh, uint len, int line, char *file)
619 #else
620 void * BCMFASTPATH
621 osl_pktget(osl_t *osh, uint len)
622 #endif /* BCMDBG_CTRACE */
624 struct sk_buff *skb;
626 #ifdef CTFPOOL
627 /* Allocate from local pool */
628 skb = osl_pktfastget(osh, len);
629 if ((skb != NULL) || ((skb = osl_alloc_skb(osh, len)) != NULL)) {
630 #else /* CTFPOOL */
631 if ((skb = osl_alloc_skb(osh, len))) {
632 #endif /* CTFPOOL */
/* Non-debug builds open-code skb_put to skip its bounds check */
633 #ifdef BCMDBG
634 skb_put(skb, len);
635 #else
636 skb->tail += len;
637 skb->len += len;
638 #endif
639 skb->priority = 0;
641 #ifdef BCMDBG_CTRACE
642 ADD_CTRACE(osh, skb, file, line);
643 #endif
644 atomic_inc(&osh->pktalloced);
647 return ((void*) skb);
650 #ifdef CTFPOOL
/* Return a pool-originated skb to its ctfpool instead of freeing it to the
 * kernel: reset the few fields the driver/stack may have touched (timestamp,
 * dev, dst, pkttag, checksum state, destructor) and push it on the pool's
 * free list found via CTFPOOLPTR.
 */
651 static inline void
652 osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
654 ctfpool_t *ctfpool;
655 #ifdef CTFPOOL_SPINLOCK
656 unsigned long flags;
657 #endif /* CTFPOOL_SPINLOCK */
659 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
660 skb->tstamp.tv.sec = 0;
661 #else
662 skb->stamp.tv_sec = 0;
663 #endif
665 /* We only need to init the fields that we change */
666 skb->dev = NULL;
667 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
668 skb->dst = NULL;
669 #endif
670 OSL_PKTTAG_CLEAR(skb);
671 skb->ip_summed = 0;
673 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
674 skb_orphan(skb);
675 #else
676 skb->destructor = NULL;
677 #endif
679 ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
680 ASSERT(ctfpool != NULL);
682 /* Add object to the ctfpool */
683 CTFPOOL_LOCK(ctfpool, flags);
684 skb->next = (struct sk_buff *)ctfpool->head;
685 ctfpool->head = (void *)skb;
687 ctfpool->fast_frees++;
688 ctfpool->curr_obj++;
690 ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
691 CTFPOOL_UNLOCK(ctfpool, flags);
693 #endif /* CTFPOOL */
695 /* Free the driver packet. Free the tag if present */
/* Free a driver packet chain. If `send` and a tx completion hook is
 * registered, it runs first. Each skb in the skb->next chain is then either
 * returned to its ctfpool (fast, unshared pool skbs) or handed back to the
 * kernel — dev_kfree_skb_any when a destructor forbids hard-IRQ freeing.
 */
696 void BCMFASTPATH
697 osl_pktfree(osl_t *osh, void *p, bool send)
699 struct sk_buff *skb, *nskb;
701 skb = (struct sk_buff*) p;
703 if (send && osh->pub.tx_fn)
704 osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
706 PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
708 /* perversion: we use skb->next to chain multi-skb packets */
709 while (skb) {
710 nskb = skb->next;
711 skb->next = NULL;
713 #ifdef BCMDBG_CTRACE
714 DEL_CTRACE(osh, skb);
715 #endif
717 #ifdef CTFMAP
718 /* Clear the map ptr before freeing */
719 PKTCLRCTF(osh, skb);
720 CTFMAPPTR(osh, skb) = NULL;
721 #endif /* CTFMAP */
723 #ifdef CTFPOOL
724 if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
725 osl_pktfastfree(osh, skb);
726 else {
727 #else /* CTFPOOL */
729 #endif /* CTFPOOL */
731 if (skb->destructor)
732 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
733 * destructor exists
735 dev_kfree_skb_any(skb);
736 else
737 /* can free immediately (even in_irq()) if destructor
738 * does not exist
740 dev_kfree_skb(skb);
742 atomic_dec(&osh->pktalloced);
743 skb = nskb;
747 #ifdef DHD_USE_STATIC_BUF
/* Allocate a packet from the static skb pools: the 4k pool for
 * len <= PAGE_SIZE, the 8k pool for len <= 2*PAGE_SIZE. Anything larger, or
 * pool exhaustion, falls back to osl_pktget.
 */
748 void*
749 osl_pktget_static(osl_t *osh, uint len)
751 int i = 0;
752 struct sk_buff *skb;
754 if (len > (PAGE_SIZE*2)) {
755 printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
756 return osl_pktget(osh, len);
759 down(&bcm_static_skb->osl_pkt_sem);
761 if (len <= PAGE_SIZE) {
762 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
763 if (bcm_static_skb->pkt_use[i] == 0)
764 break;
/* NOTE(review): skb->tail/len are written after the semaphore is released —
 * safe only because pkt_use[i] was claimed before up(); confirm. */
767 if (i != STATIC_PKT_MAX_NUM) {
768 bcm_static_skb->pkt_use[i] = 1;
769 up(&bcm_static_skb->osl_pkt_sem);
770 skb = bcm_static_skb->skb_4k[i];
771 skb->tail = skb->data + len;
772 skb->len = len;
773 return skb;
/* 4k pool full or len > PAGE_SIZE: try the 8k pool (slots 8..15) */
778 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
779 if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0)
780 break;
783 if (i != STATIC_PKT_MAX_NUM) {
784 bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1;
785 up(&bcm_static_skb->osl_pkt_sem);
786 skb = bcm_static_skb->skb_8k[i];
787 skb->tail = skb->data + len;
788 skb->len = len;
789 return skb;
792 up(&bcm_static_skb->osl_pkt_sem);
793 printk("%s: all static pkt in use!\n", __FUNCTION__);
794 return osl_pktget(osh, len);
/* Free a packet that may have come from the static pools: if `p` matches a
 * pool skb, just clear its in-use flag; otherwise it was dynamically
 * allocated and is released through osl_pktfree.
 */
797 void
798 osl_pktfree_static(osl_t *osh, void *p, bool send)
800 int i;
/* NOTE(review): the pool scan reads skb_4k/skb_8k without taking
 * osl_pkt_sem and without a bcm_static_skb NULL check — confirm callers
 * guarantee the pool exists here. */
802 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
803 if (p == bcm_static_skb->skb_4k[i]) {
804 down(&bcm_static_skb->osl_pkt_sem);
805 bcm_static_skb->pkt_use[i] = 0;
806 up(&bcm_static_skb->osl_pkt_sem);
807 return;
811 for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
812 if (p == bcm_static_skb->skb_8k[i]) {
813 down(&bcm_static_skb->osl_pkt_sem);
814 bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
815 up(&bcm_static_skb->osl_pkt_sem);
816 return;
820 return osl_pktfree(osh, p, send);
822 #endif /* DHD_USE_STATIC_BUF */
/* Read a 32-bit PCI config register, retrying up to PCI_CFG_RETRY times
 * while the read returns all-ones (bus not ready / device absent pattern).
 * Only size == 4 is supported.
 */
824 uint32
825 osl_pci_read_config(osl_t *osh, uint offset, uint size)
827 uint val = 0;
828 uint retry = PCI_CFG_RETRY;
830 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
832 /* only 4byte access supported */
833 ASSERT(size == 4);
835 do {
836 pci_read_config_dword(osh->pdev, offset, &val);
837 if (val != 0xffffffff)
838 break;
839 } while (retry--);
841 #ifdef BCMDBG
842 if (retry < PCI_CFG_RETRY)
843 printk("PCI CONFIG READ access to %d required %d retries\n", offset,
844 (PCI_CFG_RETRY - retry));
845 #endif /* BCMDBG */
847 return (val);
/* Write a 32-bit PCI config register. Writes to PCI_BAR0_WIN are read back
 * and retried until they stick (up to PCI_CFG_RETRY); other offsets are
 * written once. Only size == 4 is supported.
 */
850 void
851 osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
853 uint retry = PCI_CFG_RETRY;
855 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
857 /* only 4byte access supported */
858 ASSERT(size == 4);
860 do {
861 pci_write_config_dword(osh->pdev, offset, val);
862 if (offset != PCI_BAR0_WIN)
863 break;
864 if (osl_pci_read_config(osh, offset, size) == val)
865 break;
866 } while (retry--);
868 #ifdef BCMDBG
869 if (retry < PCI_CFG_RETRY)
870 printk("PCI CONFIG WRITE access to %d required %d retries\n", offset,
871 (PCI_CFG_RETRY - retry));
872 #endif /* BCMDBG */
875 /* return bus # for the pci device pointed by osh->pdev */
876 uint
877 osl_pci_bus(osl_t *osh)
879 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
881 return ((struct pci_dev *)osh->pdev)->bus->number;
884 /* return slot # for the pci device pointed by osh->pdev */
/* NOTE(review): ARMv7 builds on kernels > 2.6.35 return slot+1 — presumably
 * to match a platform-specific enumeration offset; confirm against the
 * board support code. */
885 uint
886 osl_pci_slot(osl_t *osh)
888 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
890 #if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
891 return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn) + 1;
892 #else
893 return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
894 #endif
897 /* return the pci device pointed by osh->pdev */
898 struct pci_dev *
899 osl_pci_device(osl_t *osh)
901 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
903 return osh->pdev;
/* PCMCIA attribute-space access. The worker below is an empty stub in this
 * build (body not present in this view); the public read/write wrappers
 * just forward with the direction flag.
 */
906 static void
907 osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
911 void
912 osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
914 osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
917 void
918 osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
920 osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
/* Allocate `size` bytes. Static-buf builds first try a pool slot for
 * PAGE_SIZE <= size <= STATIC_BUF_SIZE requests; otherwise (or when the
 * pool is exhausted) falls through to kmalloc(GFP_ATOMIC). Successful
 * allocations are added to osh->malloced; failures bump osh->failed.
 * osh may be NULL (accounting skipped).
 */
923 void *
924 osl_malloc(osl_t *osh, uint size)
926 void *addr;
928 /* only ASSERT if osh is defined */
929 if (osh)
930 ASSERT(osh->magic == OS_HANDLE_MAGIC);
932 #ifdef DHD_USE_STATIC_BUF
933 if (bcm_static_buf)
935 int i = 0;
936 if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
938 down(&bcm_static_buf->static_sem);
940 for (i = 0; i < STATIC_BUF_MAX_NUM; i++)
942 if (bcm_static_buf->buf_use[i] == 0)
943 break;
946 if (i == STATIC_BUF_MAX_NUM)
948 up(&bcm_static_buf->static_sem);
949 printk("all static buff in use!\n");
950 goto original;
953 bcm_static_buf->buf_use[i] = 1;
954 up(&bcm_static_buf->static_sem);
956 bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
957 if (osh)
958 atomic_add(size, &osh->malloced);
960 return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
963 original:
964 #endif /* DHD_USE_STATIC_BUF */
966 if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
967 if (osh)
968 osh->failed++;
969 return (NULL);
971 if (osh)
972 atomic_add(size, &osh->malloced);
974 return (addr);
/* Free memory from osl_malloc. Addresses inside the static-buf region are
 * returned to the pool by clearing their use flag (computed from the offset
 * into buf_ptr); everything else goes to kfree. osh->malloced is reduced by
 * the caller-supplied size in both paths.
 */
977 void
978 osl_mfree(osl_t *osh, void *addr, uint size)
980 #ifdef DHD_USE_STATIC_BUF
981 if (bcm_static_buf)
983 if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
984 <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
986 int buf_idx = 0;
988 buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
990 down(&bcm_static_buf->static_sem);
991 bcm_static_buf->buf_use[buf_idx] = 0;
992 up(&bcm_static_buf->static_sem);
994 if (osh) {
995 ASSERT(osh->magic == OS_HANDLE_MAGIC);
996 atomic_sub(size, &osh->malloced);
998 return;
1001 #endif /* DHD_USE_STATIC_BUF */
1002 if (osh) {
1003 ASSERT(osh->magic == OS_HANDLE_MAGIC);
1004 atomic_sub(size, &osh->malloced);
1006 kfree(addr);
1009 uint
1010 osl_malloced(osl_t *osh)
1012 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1013 return (atomic_read(&osh->malloced));
1016 uint
1017 osl_malloc_failed(osl_t *osh)
1019 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1020 return (osh->failed);
1024 uint
1025 osl_dma_consistent_align(void)
1027 return (PAGE_SIZE);
/* Allocate DMA-coherent memory. If the platform's consistent-allocation
 * alignment does not satisfy the requested 2^align_bits alignment, the size
 * is padded so the caller can align within the block; *alloced reports the
 * padded size and *pap the bus/physical address. ARMv7 builds use kmalloc +
 * virt_to_phys; others use pci_alloc_consistent.
 */
1030 void*
1031 osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
1033 void *va;
1034 uint16 align = (1 << align_bits);
1035 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1037 if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
1038 size += align;
1039 *alloced = size;
1041 #ifdef __ARM_ARCH_7A__
/* NOTE(review): kmalloc'ed memory is cacheable — presumably paired with
 * explicit cache maintenance elsewhere on this platform; confirm. */
1042 va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO);
1043 if (va)
1044 *pap = (ulong)__virt_to_phys(va);
1045 #else
1046 va = pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap);
1047 #endif
1048 return va;
/* Release memory obtained from osl_dma_alloc_consistent, using the matching
 * ARMv7 (kfree) or generic (pci_free_consistent) path.
 */
1051 void
1052 osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
1054 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1056 #ifdef __ARM_ARCH_7A__
1057 kfree(va);
1058 #else
1059 pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
1060 #endif
/* Map a buffer (or, on ARMv7 scatter-gather builds, a whole packet chain)
 * for DMA in the given direction. With a dmah, each skb in the chain is
 * mapped — via skb_to_sgvec/pci_map_sg for nonlinear skbs, pci_map_single
 * otherwise — the segment list is recorded in dmah, and the first segment's
 * address is returned. Without a dmah it is a plain pci_map_single.
 */
1063 uint BCMFASTPATH
1064 osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah)
1066 int dir;
1068 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1069 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1071 #if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
1072 if (dmah != NULL) {
1073 int32 nsegs, i, totsegs = 0, totlen = 0;
1074 struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2];
1075 struct sk_buff *skb;
1076 for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) {
1077 sg = &_sg[totsegs];
1078 if (skb_is_nonlinear(skb)) {
1079 nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb));
1080 ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS));
1081 pci_map_sg(osh->pdev, sg, nsegs, dir);
1082 } else {
1083 nsegs = 1;
1084 ASSERT(totsegs + nsegs <= MAX_DMA_SEGS);
1085 sg->page_link = 0;
1086 sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb));
1087 #ifdef CTFMAP
1088 /* Map size bytes (not skb->len) for ctf bufs */
1089 pci_map_single(osh->pdev, PKTDATA(osh, skb),
1090 PKTISCTF(osh, skb) ? CTFMAPSZ : PKTLEN(osh, skb), dir);
1091 #else
1092 pci_map_single(osh->pdev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir);
1094 #endif /* CTFMAP */
1096 totsegs += nsegs;
1097 totlen += PKTLEN(osh, skb);
1099 dmah->nsegs = totsegs;
1100 dmah->origsize = totlen;
1101 for (i = 0, sg = _sg; i < totsegs; i++, sg++) {
1102 dmah->segs[i].addr = sg_phys(sg);
1103 dmah->segs[i].length = sg->length;
1105 return dmah->segs[0].addr;
1107 #endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */
1109 return (pci_map_single(osh->pdev, va, size, dir));
/* Undo an osl_dma_map of a single buffer (pci_unmap_single counterpart). */
1112 void BCMFASTPATH
1113 osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
1115 int dir;
1117 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
1118 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
1119 pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
1123 void
1124 osl_delay(uint usec)
1126 uint d;
1128 while (usec > 0) {
1129 d = MIN(usec, 1000);
1130 udelay(d);
1131 usec -= d;
1135 #if defined(DSLCPE_DELAY)
/* Attach the shared-OSL state and derive a loops-per-half-millisecond
 * calibration value from loops_per_jiffy, used by osl_long_delay to convert
 * cycle counts into elapsed microseconds.
 */
1137 void
1138 osl_oshsh_init(osl_t *osh, shared_osl_t* oshsh)
1140 extern unsigned long loops_per_jiffy;
1141 osh->oshsh = oshsh;
1142 osh->oshsh->MIPS = loops_per_jiffy / (500000/HZ);
/* Report whether an osl_long_delay is currently in progress (the shared
 * long_delay counter is nonzero). NOTE(review): the return-type line of
 * this definition appears lost in extraction — presumably int.
 */
1146 in_long_delay(osl_t *osh)
1148 return osh->oshsh->long_delay;
/* Delay at least `usec` microseconds, optionally yielding the CPU. When
 * yielding is allowed and the previous iteration yielded successfully, the
 * function drops the shared lock, schedule()s (if not in interrupt context),
 * reacquires the lock, and credits the measured wall time (cycle delta
 * scaled by the MIPS calibration) against the remaining delay; otherwise it
 * busy-waits in 10 us udelay steps.
 */
1151 void
1152 osl_long_delay(osl_t *osh, uint usec, bool yield)
1154 uint d;
1155 bool yielded = TRUE;
1156 int usec_to_delay = usec;
1157 unsigned long tick1, tick2, tick_diff = 0;
1159 /* delay at least requested usec */
1160 while (usec_to_delay > 0) {
1161 if (!yield || !yielded) {
1162 d = MIN(usec_to_delay, 10);
1163 udelay(d);
1164 usec_to_delay -= d;
1166 if (usec_to_delay > 0) {
1167 osh->oshsh->long_delay++;
1168 OSL_GETCYCLES(tick1);
1169 spin_unlock_bh(osh->oshsh->lock);
1170 if (usec_to_delay > 0 && !in_irq() && !in_softirq() && !in_interrupt()) {
1171 schedule();
1172 yielded = TRUE;
1173 } else {
1174 yielded = FALSE;
1176 spin_lock_bh(osh->oshsh->lock);
1177 OSL_GETCYCLES(tick2);
1179 if (yielded) {
1180 tick_diff = TICKDIFF(tick2, tick1);
1181 tick_diff = (tick_diff * 2)/(osh->oshsh->MIPS);
1182 if (tick_diff) {
1183 usec_to_delay -= tick_diff;
1184 } else
1185 yielded = 0;
1187 osh->oshsh->long_delay--;
1188 ASSERT(osh->oshsh->long_delay >= 0);
1194 /* Clone a packet.
1195 * The pkttag contents are NOT cloned.
/* Duplicate a (non-chained) packet: a full data copy (pskb_copy) on newer
 * kernels, a header-only clone (skb_clone) on older ones. CTF fast flags
 * are stripped from both skbs — a cloned pool buffer may never return to
 * its pool — and the pool's refill debt is incremented instead. The copy's
 * PKTC chain state and pkttag are reset, and pktalloced is bumped.
 */
1197 #ifdef BCMDBG_CTRACE
1198 void *
1199 osl_pktdup(osl_t *osh, void *skb, int line, char *file)
1200 #else
1201 void *
1202 osl_pktdup(osl_t *osh, void *skb)
1203 #endif /* BCMDBG_CTRACE */
1205 void * p;
1207 ASSERT(!PKTISCHAINED(skb));
1209 /* clear the CTFBUF flag if set and map the rest of the buffer
1210 * before cloning.
1212 PKTCTFMAP(osh, skb);
1214 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1215 if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
1216 #else
1217 if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
1218 #endif
1219 return NULL;
1221 #ifdef CTFPOOL
1222 if (PKTISFAST(osh, skb)) {
1223 ctfpool_t *ctfpool;
1225 /* if the buffer allocated from ctfpool is cloned then
1226 * we can't be sure when it will be freed. since there
1227 * is a chance that we will be losing a buffer
1228 * from our pool, we increment the refill count for the
1229 * object to be alloced later.
1231 ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
1232 ASSERT(ctfpool != NULL);
1233 PKTCLRFAST(osh, p);
1234 PKTCLRFAST(osh, skb);
1235 ctfpool->refills++;
1237 #endif /* CTFPOOL */
1239 /* Clear PKTC context */
1240 PKTSETCLINK(p, NULL);
1241 PKTCCLRFLAGS(p);
1242 PKTCSETCNT(p, 1);
1243 PKTCSETLEN(p, PKTLEN(osh, skb));
1245 /* skb_clone copies skb->cb.. we don't want that */
1246 if (osh->pub.pkttag)
1247 OSL_PKTTAG_CLEAR(p);
1249 /* Increment the packet counter */
1250 atomic_inc(&osh->pktalloced);
1251 #ifdef BCMDBG_CTRACE
1252 ADD_CTRACE(osh, (struct sk_buff *)p, file, line);
1253 #endif
1254 return (p);
1257 #ifdef BCMDBG_CTRACE
/* Debug helper: TRUE if `pkt` is currently tracked on this osh's ctrace
 * list (i.e. was registered via ADD_CTRACE and not yet removed).
 */
1258 int osl_pkt_is_frmnative(osl_t *osh, struct sk_buff *pkt)
1260 unsigned long flags;
1261 struct sk_buff *skb;
1262 int ck = FALSE;
1264 spin_lock_irqsave(&osh->ctrace_lock, flags);
1266 list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
1267 if (pkt == skb) {
1268 ck = TRUE;
1269 break;
1273 spin_unlock_irqrestore(&osh->ctrace_lock, flags);
1274 return ck;
/* Debug helper: dump every still-tracked skb and its recorded call-site
 * history, either into a bcmstrbuf (b != NULL) or to the kernel log.
 */
1277 void osl_ctrace_dump(osl_t *osh, struct bcmstrbuf *b)
1279 unsigned long flags;
1280 struct sk_buff *skb;
1281 int idx = 0;
1282 int i, j;
1284 spin_lock_irqsave(&osh->ctrace_lock, flags);
1286 if (b != NULL)
1287 bcm_bprintf(b, " Total %d sbk not free\n", osh->ctrace_num);
1288 else
1289 printk(" Total %d sbk not free\n", osh->ctrace_num);
1291 list_for_each_entry(skb, &osh->ctrace_list, ctrace_list) {
1292 if (b != NULL)
1293 bcm_bprintf(b, "[%d] skb %p:\n", ++idx, skb);
1294 else
1295 printk("[%d] skb %p:\n", ++idx, skb);
/* The per-skb trace is a ring buffer: walk ctrace_count entries starting
 * at ctrace_start, wrapping at CTRACE_NUM. */
1297 for (i = 0; i < skb->ctrace_count; i++) {
1298 j = (skb->ctrace_start + i) % CTRACE_NUM;
1299 if (b != NULL)
1300 bcm_bprintf(b, " [%s(%d)]\n", skb->func[j], skb->line[j]);
1301 else
1302 printk(" [%s(%d)]\n", skb->func[j], skb->line[j]);
1304 if (b != NULL)
1305 bcm_bprintf(b, "\n");
1306 else
1307 printk("\n");
1310 spin_unlock_irqrestore(&osh->ctrace_lock, flags);
1312 return;
1314 #endif /* BCMDBG_CTRACE */
1318 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1320 #ifdef OSLREGOPS
1321 uint8
1322 osl_readb(osl_t *osh, volatile uint8 *r)
1324 osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
1325 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1327 return (uint8)((rreg)(ctx, (void*)r, sizeof(uint8)));
1331 uint16
1332 osl_readw(osl_t *osh, volatile uint16 *r)
1334 osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
1335 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1337 return (uint16)((rreg)(ctx, (void*)r, sizeof(uint16)));
1340 uint32
1341 osl_readl(osl_t *osh, volatile uint32 *r)
1343 osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
1344 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1346 return (uint32)((rreg)(ctx, (void*)r, sizeof(uint32)));
1349 void
1350 osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v)
1352 osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
1353 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1355 ((wreg)(ctx, (void*)r, v, sizeof(uint8)));
1359 void
1360 osl_writew(osl_t *osh, volatile uint16 *r, uint16 v)
1362 osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
1363 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1365 ((wreg)(ctx, (void*)r, v, sizeof(uint16)));
1368 void
1369 osl_writel(osl_t *osh, volatile uint32 *r, uint32 v)
1371 osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
1372 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1374 ((wreg)(ctx, (void*)r, v, sizeof(uint32)));
1376 #endif /* OSLREGOPS */
1379 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1381 #ifdef BINOSL
1383 uint32
1384 osl_sysuptime(void)
1386 return ((uint32)jiffies * (1000 / HZ));
1390 osl_printf(const char *format, ...)
1392 va_list args;
1393 static char printbuf[1024];
1394 int len;
1396 /* sprintf into a local buffer because there *is* no "vprintk()".. */
1397 va_start(args, format);
1398 len = vsnprintf(printbuf, 1024, format, args);
1399 va_end(args);
1401 if (len > sizeof(printbuf)) {
1402 printk("osl_printf: buffer overrun\n");
1403 return (0);
1406 return (printk("%s", printbuf));
/* Thin formatting wrappers around the C library equivalents, kept as
 * out-of-line functions for the binary-compatible OSL.
 */
int
osl_sprintf(char *buf, const char *format, ...)
{
	va_list ap;
	int written;

	va_start(ap, format);
	written = vsprintf(buf, format, ap);
	va_end(ap);

	return written;
}

int
osl_snprintf(char *buf, size_t n, const char *format, ...)
{
	va_list ap;
	int written;

	va_start(ap, format);
	written = vsnprintf(buf, n, format, ap);
	va_end(ap);

	return written;
}

int
osl_vsprintf(char *buf, const char *format, va_list ap)
{
	return vsprintf(buf, format, ap);
}

int
osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap)
{
	return vsnprintf(buf, n, format, ap);
}

/* String comparison wrapper; same contract as strcmp(3). */
int
osl_strcmp(const char *s1, const char *s2)
{
	return strcmp(s1, s2);
}
1452 osl_strncmp(const char *s1, const char *s2, uint n)
1454 return (strncmp(s1, s2, n));
1458 osl_strlen(const char *s)
1460 return (strlen(s));
1463 char*
1464 osl_strcpy(char *d, const char *s)
1466 return (strcpy(d, s));
1469 char*
1470 osl_strncpy(char *d, const char *s, uint n)
1472 return (strncpy(d, s, n));
1475 char*
1476 osl_strchr(const char *s, int c)
1478 return (strchr(s, c));
1481 char*
1482 osl_strrchr(const char *s, int c)
1484 return (strrchr(s, c));
/* Memory-helper wrappers around the C library, out-of-line for BINOSL. */
void*
osl_memset(void *d, int c, size_t n)
{
	return memset(d, c, n);
}

void*
osl_memcpy(void *d, const void *s, size_t n)
{
	/* Regions must not overlap; use osl_memmove for overlapping copies. */
	return memcpy(d, s, n);
}

void*
osl_memmove(void *d, const void *s, size_t n)
{
	return memmove(d, s, n);
}

int
osl_memcmp(const void *s1, const void *s2, size_t n)
{
	return memcmp(s1, s2, n);
}
1511 uint32
1512 osl_readl(volatile uint32 *r)
1514 return (readl(r));
1517 uint16
1518 osl_readw(volatile uint16 *r)
1520 return (readw(r));
1523 uint8
1524 osl_readb(volatile uint8 *r)
1526 return (readb(r));
1529 void
1530 osl_writel(uint32 v, volatile uint32 *r)
1532 writel(v, r);
1535 void
1536 osl_writew(uint16 v, volatile uint16 *r)
1538 writew(v, r);
1541 void
1542 osl_writeb(uint8 v, volatile uint8 *r)
1544 writeb(v, r);
/* Translate a virtual address into its uncached alias.
 * On MIPS this maps into the KSEG1 window; elsewhere the address is
 * returned unchanged.
 */
void *
osl_uncached(void *va)
{
#ifdef mips
	return (void *)KSEG1ADDR(va);
#else
	return va;
#endif /* mips */
}

/* Translate a virtual address into its cached alias (KSEG0 on MIPS). */
void *
osl_cached(void *va)
{
#ifdef mips
	return (void *)KSEG0ADDR(va);
#else
	return va;
#endif /* mips */
}
1567 uint
1568 osl_getcycles(void)
1570 uint cycles;
1572 #if defined(mips)
1573 cycles = read_c0_count() * 2;
1574 #elif defined(__i386__)
1575 rdtscl(cycles);
1576 #else
1577 cycles = 0;
1578 #endif /* defined(mips) */
1579 return cycles;
1582 void *
1583 osl_reg_map(uint32 pa, uint size)
1585 return (ioremap_nocache((unsigned long)pa, (unsigned long)size));
1588 void
1589 osl_reg_unmap(void *va)
1591 iounmap(va);
1595 osl_busprobe(uint32 *val, uint32 addr)
1597 #ifdef mips
1598 return get_dbe(*val, (uint32 *)addr);
1599 #else
1600 *val = readl((uint32 *)(uintptr)addr);
1601 return 0;
1602 #endif /* mips */
1605 bool
1606 osl_pktshared(void *skb)
1608 return (((struct sk_buff*)skb)->cloned);
1611 uchar*
1612 osl_pktdata(osl_t *osh, void *skb)
1614 return (((struct sk_buff*)skb)->data);
1617 uint
1618 osl_pktlen(osl_t *osh, void *skb)
1620 return (((struct sk_buff*)skb)->len);
1623 uint
1624 osl_pktheadroom(osl_t *osh, void *skb)
1626 return (uint) skb_headroom((struct sk_buff *) skb);
1629 uint
1630 osl_pkttailroom(osl_t *osh, void *skb)
1632 return (uint) skb_tailroom((struct sk_buff *) skb);
1635 void*
1636 osl_pktnext(osl_t *osh, void *skb)
1638 return (((struct sk_buff*)skb)->next);
1641 void
1642 osl_pktsetnext(void *skb, void *x)
1644 ((struct sk_buff*)skb)->next = (struct sk_buff*)x;
1647 void
1648 osl_pktsetlen(osl_t *osh, void *skb, uint len)
1650 __skb_trim((struct sk_buff*)skb, len);
1653 uchar*
1654 osl_pktpush(osl_t *osh, void *skb, int bytes)
1656 return (skb_push((struct sk_buff*)skb, bytes));
1659 uchar*
1660 osl_pktpull(osl_t *osh, void *skb, int bytes)
1662 return (skb_pull((struct sk_buff*)skb, bytes));
1665 void*
1666 osl_pkttag(void *skb)
1668 return ((void*)(((struct sk_buff*)skb)->cb));
1671 void*
1672 osl_pktlink(void *skb)
1674 return (((struct sk_buff*)skb)->prev);
1677 void
1678 osl_pktsetlink(void *skb, void *x)
1680 ((struct sk_buff*)skb)->prev = (struct sk_buff*)x;
1683 uint
1684 osl_pktprio(void *skb)
1686 return (((struct sk_buff*)skb)->priority);
1689 void
1690 osl_pktsetprio(void *skb, uint x)
1692 ((struct sk_buff*)skb)->priority = x;
1694 #endif /* BINOSL */
1696 uint
1697 osl_pktalloced(osl_t *osh)
1699 return (atomic_read(&osh->pktalloced));
1702 /* Linux Kernel: File Operations: start */
1703 void *
1704 osl_os_open_image(char *filename)
1706 struct file *fp;
1708 fp = filp_open(filename, O_RDONLY, 0);
1710 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1711 * Alternative:
1712 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1713 * ???
1715 if (IS_ERR(fp))
1716 fp = NULL;
1718 return fp;
1722 osl_os_get_image_block(char *buf, int len, void *image)
1724 struct file *fp = (struct file *)image;
1725 int rdlen;
1727 if (!image)
1728 return 0;
1730 rdlen = kernel_read(fp, fp->f_pos, buf, len);
1731 if (rdlen > 0)
1732 fp->f_pos += rdlen;
1734 return rdlen;
1737 void
1738 osl_os_close_image(void *image)
1740 if (image)
1741 filp_close((struct file *)image, NULL);
/* Return the size in bytes of an open image (0 for a NULL handle).
 * Implemented by seeking to the end and restoring the prior position.
 * NOTE(review): offsets are held in int, so sizes >= 2 GB would
 * truncate — presumably fine for firmware images; confirm if reused.
 */
int
osl_os_image_size(void *image)
{
	int len = 0, saved_pos;

	if (image) {
		/* remember the current offset */
		saved_pos = generic_file_llseek(image, 0, 1);
		/* seek to end of file to learn the length */
		len = generic_file_llseek(image, 0, 2);
		/* put the offset back where it was */
		generic_file_llseek(image, saved_pos, 0);
	}

	return len;
}
1759 /* Linux Kernel: File Operations: end */