2 * Linux OS Independent Layer
4 * Copyright (C) 2012, Broadcom Corporation. All Rights Reserved.
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 * $Id: linux_osl.c 400835 2013-05-07 19:23:31Z $
24 #include <bcmendian.h>
29 #include <linux/delay.h>
31 #include <asm/paccess.h>
39 #define PCI_CFG_RETRY 10
41 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
42 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
44 #ifdef DHD_USE_STATIC_BUF
45 #define STATIC_BUF_MAX_NUM 16
46 #define STATIC_BUF_SIZE (PAGE_SIZE*2)
47 #define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE)
49 typedef struct bcm_static_buf
{
50 struct semaphore static_sem
;
51 unsigned char *buf_ptr
;
52 unsigned char buf_use
[STATIC_BUF_MAX_NUM
];
55 static bcm_static_buf_t
*bcm_static_buf
= 0;
57 #define STATIC_PKT_MAX_NUM 8
59 typedef struct bcm_static_pkt
{
60 struct sk_buff
*skb_4k
[STATIC_PKT_MAX_NUM
];
61 struct sk_buff
*skb_8k
[STATIC_PKT_MAX_NUM
];
62 struct semaphore osl_pkt_sem
;
63 unsigned char pkt_use
[STATIC_PKT_MAX_NUM
* 2];
66 static bcm_static_pkt_t
*bcm_static_skb
= 0;
67 #endif /* DHD_USE_STATIC_BUF */
69 typedef struct bcm_mem_link
{
70 struct bcm_mem_link
*prev
;
71 struct bcm_mem_link
*next
;
75 char file
[BCM_MEM_FILENAME_LEN
];
86 atomic_t pktalloced
; /* Number of allocated packet buffers */
89 bcm_mem_link_t
*dbgmem_list
;
90 #if defined(DSLCPE_DELAY)
91 shared_osl_t
*oshsh
; /* osh shared */
93 spinlock_t dbgmem_lock
;
95 spinlock_t ctrace_lock
;
96 struct list_head ctrace_list
;
98 #endif /* BCMDBG_CTRACE */
99 spinlock_t pktalloc_lock
;
102 #define OSL_PKTTAG_CLEAR(p) \
104 struct sk_buff *s = (struct sk_buff *)(p); \
105 ASSERT(OSL_PKTTAG_SZ == 32); \
106 *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \
107 *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \
108 *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \
109 *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \
112 /* PCMCIA attribute space access macros */
113 #if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
115 dev_link_t link
; /* PCMCIA device pointer */
116 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
117 dev_node_t node
; /* PCMCIA node structure */
119 void *base
; /* Mapped attribute memory window */
120 size_t size
; /* Size of window */
121 void *drv
; /* Driver data */
123 #endif /* defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) */
125 /* Global ASSERT type flag */
126 uint32 g_assert_type
= FALSE
;
128 static int16 linuxbcmerrormap
[] =
130 -EINVAL
, /* BCME_ERROR */
131 -EINVAL
, /* BCME_BADARG */
132 -EINVAL
, /* BCME_BADOPTION */
133 -EINVAL
, /* BCME_NOTUP */
134 -EINVAL
, /* BCME_NOTDOWN */
135 -EINVAL
, /* BCME_NOTAP */
136 -EINVAL
, /* BCME_NOTSTA */
137 -EINVAL
, /* BCME_BADKEYIDX */
138 -EINVAL
, /* BCME_RADIOOFF */
139 -EINVAL
, /* BCME_NOTBANDLOCKED */
140 -EINVAL
, /* BCME_NOCLK */
141 -EINVAL
, /* BCME_BADRATESET */
142 -EINVAL
, /* BCME_BADBAND */
143 -E2BIG
, /* BCME_BUFTOOSHORT */
144 -E2BIG
, /* BCME_BUFTOOLONG */
145 -EBUSY
, /* BCME_BUSY */
146 -EINVAL
, /* BCME_NOTASSOCIATED */
147 -EINVAL
, /* BCME_BADSSIDLEN */
148 -EINVAL
, /* BCME_OUTOFRANGECHAN */
149 -EINVAL
, /* BCME_BADCHAN */
150 -EFAULT
, /* BCME_BADADDR */
151 -ENOMEM
, /* BCME_NORESOURCE */
152 -EOPNOTSUPP
, /* BCME_UNSUPPORTED */
153 -EMSGSIZE
, /* BCME_BADLENGTH */
154 -EINVAL
, /* BCME_NOTREADY */
155 -EPERM
, /* BCME_EPERM */
156 -ENOMEM
, /* BCME_NOMEM */
157 -EINVAL
, /* BCME_ASSOCIATED */
158 -ERANGE
, /* BCME_RANGE */
159 -EINVAL
, /* BCME_NOTFOUND */
160 -EINVAL
, /* BCME_WME_NOT_ENABLED */
161 -EINVAL
, /* BCME_TSPEC_NOTFOUND */
162 -EINVAL
, /* BCME_ACM_NOTSUPPORTED */
163 -EINVAL
, /* BCME_NOT_WME_ASSOCIATION */
164 -EIO
, /* BCME_SDIO_ERROR */
165 -ENODEV
, /* BCME_DONGLE_DOWN */
166 -EINVAL
, /* BCME_VERSION */
167 -EIO
, /* BCME_TXFAIL */
168 -EIO
, /* BCME_RXFAIL */
169 -ENODEV
, /* BCME_NODEVICE */
170 -EINVAL
, /* BCME_NMODE_DISABLED */
171 -ENODATA
, /* BCME_NONRESIDENT */
172 -EINVAL
, /* BCME_SCANREJECT */
173 /* When a new error code is added to bcmutils.h, add os
174 * specific error translation here as well
176 /* check if BCME_LAST changed since the last time this function was updated */
178 #error "You need to add a OS error translation in the linuxbcmerrormap \
179 for new error code defined in bcmutils.h"
183 /* translate bcmerrors into linux errors */
185 osl_error(int bcmerror
)
189 else if (bcmerror
< BCME_LAST
)
190 bcmerror
= BCME_ERROR
;
192 /* Array bounds covered by ASSERT in osl_attach */
193 return linuxbcmerrormap
[-bcmerror
];
196 extern uint8
* dhd_os_prealloc(void *osh
, int section
, int size
);
199 osl_attach(void *pdev
, uint bustype
, bool pkttag
)
203 osh
= kmalloc(sizeof(osl_t
), GFP_ATOMIC
);
206 bzero(osh
, sizeof(osl_t
));
208 /* Check that error map has the right number of entries in it */
209 ASSERT(ABS(BCME_LAST
) == (ARRAYSIZE(linuxbcmerrormap
) - 1));
211 osh
->magic
= OS_HANDLE_MAGIC
;
212 atomic_set(&osh
->malloced
, 0);
214 osh
->dbgmem_list
= NULL
;
215 spin_lock_init(&(osh
->dbgmem_lock
));
217 osh
->pub
.pkttag
= pkttag
;
218 osh
->bustype
= bustype
;
224 osh
->pub
.mmbus
= TRUE
;
231 osh
->pub
.mmbus
= FALSE
;
238 #if defined(DHD_USE_STATIC_BUF)
239 if (!bcm_static_buf
) {
240 if (!(bcm_static_buf
= (bcm_static_buf_t
*)dhd_os_prealloc(osh
, 3, STATIC_BUF_SIZE
+
241 STATIC_BUF_TOTAL_LEN
))) {
242 printk("can not alloc static buf!\n");
245 printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf
);
248 sema_init(&bcm_static_buf
->static_sem
, 1);
250 bcm_static_buf
->buf_ptr
= (unsigned char *)bcm_static_buf
+ STATIC_BUF_SIZE
;
253 if (!bcm_static_skb
) {
255 void *skb_buff_ptr
= 0;
256 bcm_static_skb
= (bcm_static_pkt_t
*)((char *)bcm_static_buf
+ 2048);
257 skb_buff_ptr
= dhd_os_prealloc(osh
, 4, 0);
259 bcopy(skb_buff_ptr
, bcm_static_skb
, sizeof(struct sk_buff
*)*16);
260 for (i
= 0; i
< STATIC_PKT_MAX_NUM
* 2; i
++)
261 bcm_static_skb
->pkt_use
[i
] = 0;
263 sema_init(&bcm_static_skb
->osl_pkt_sem
, 1);
265 #endif /* DHD_USE_STATIC_BUF */
268 spin_lock_init(&osh
->ctrace_lock
);
269 INIT_LIST_HEAD(&osh
->ctrace_list
);
271 #endif /* BCMDBG_CTRACE */
273 spin_lock_init(&(osh
->pktalloc_lock
));
279 ASSERT(OSL_PKTTAG_SZ
<= sizeof(skb
->cb
));
286 osl_detach(osl_t
*osh
)
291 #ifdef DHD_USE_STATIC_BUF
292 if (bcm_static_buf
) {
295 if (bcm_static_skb
) {
300 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
304 static struct sk_buff
*osl_alloc_skb(osl_t
*osh
, unsigned int len
)
308 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
309 gfp_t flags
= GFP_ATOMIC
;
310 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_ZONE_DMA)
313 skb
= __dev_alloc_skb(len
, flags
);
315 skb
= dev_alloc_skb(len
);
316 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */
320 _DMA_MAP(osh
, PKTDATA(osh
, skb
), len
, DMA_RX
, NULL
, NULL
);
/* ctfpool serialization primitives.
 * With CTFPOOL_SPINLOCK the pool lock must be IRQ-safe, so the lock/unlock
 * pair saves and restores interrupt flags (the caller supplies "flags").
 * Otherwise a cheaper bottom-half lock suffices and "flags" is unused.
 * NOTE(review): the #else separating the two definition pairs was missing
 * in the damaged source; without it both pairs are defined, which is a
 * macro redefinition error.
 */
#ifdef CTFPOOL_SPINLOCK
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_irqsave(&(ctfpool)->lock, flags)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_irqrestore(&(ctfpool)->lock, flags)
#else
#define CTFPOOL_LOCK(ctfpool, flags)	spin_lock_bh(&(ctfpool)->lock)
#define CTFPOOL_UNLOCK(ctfpool, flags)	spin_unlock_bh(&(ctfpool)->lock)
#endif /* CTFPOOL_SPINLOCK */
337 * Allocate and add an object to the packet pool.
340 osl_ctfpool_add(osl_t
*osh
)
343 #ifdef CTFPOOL_SPINLOCK
345 #endif /* CTFPOOL_SPINLOCK */
347 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
350 CTFPOOL_LOCK(osh
->ctfpool
, flags
);
351 ASSERT(osh
->ctfpool
->curr_obj
<= osh
->ctfpool
->max_obj
);
353 /* No need to allocate more objects */
354 if (osh
->ctfpool
->curr_obj
== osh
->ctfpool
->max_obj
) {
355 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
359 /* Allocate a new skb and add it to the ctfpool */
360 skb
= osl_alloc_skb(osh
, osh
->ctfpool
->obj_size
);
362 printf("%s: skb alloc of len %d failed\n", __FUNCTION__
,
363 osh
->ctfpool
->obj_size
);
364 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
367 skb
->next
= (struct sk_buff
*)osh
->ctfpool
->head
;
368 osh
->ctfpool
->head
= skb
;
369 osh
->ctfpool
->fast_frees
++;
370 osh
->ctfpool
->curr_obj
++;
372 /* Hijack a skb member to store ptr to ctfpool */
373 CTFPOOLPTR(osh
, skb
) = (void *)osh
->ctfpool
;
375 /* Use bit flag to indicate skb from fast ctfpool */
376 PKTFAST(osh
, skb
) = FASTBUF
;
378 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
384 * Add new objects to the pool.
387 osl_ctfpool_replenish(osl_t
*osh
, uint thresh
)
389 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
392 /* Do nothing if no refills are required */
393 while ((osh
->ctfpool
->refills
> 0) && (thresh
--)) {
394 osl_ctfpool_add(osh
);
395 osh
->ctfpool
->refills
--;
400 * Initialize the packet pool with specified number of objects.
403 osl_ctfpool_init(osl_t
*osh
, uint numobj
, uint size
)
405 osh
->ctfpool
= kmalloc(sizeof(ctfpool_t
), GFP_ATOMIC
);
406 ASSERT(osh
->ctfpool
);
407 bzero(osh
->ctfpool
, sizeof(ctfpool_t
));
409 osh
->ctfpool
->max_obj
= numobj
;
410 osh
->ctfpool
->obj_size
= size
;
412 spin_lock_init(&osh
->ctfpool
->lock
);
415 if (!osl_ctfpool_add(osh
))
417 osh
->ctfpool
->fast_frees
--;
424 * Cleanup the packet pool objects.
427 osl_ctfpool_cleanup(osl_t
*osh
)
429 struct sk_buff
*skb
, *nskb
;
430 #ifdef CTFPOOL_SPINLOCK
432 #endif /* CTFPOOL_SPINLOCK */
434 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
437 CTFPOOL_LOCK(osh
->ctfpool
, flags
);
439 skb
= osh
->ctfpool
->head
;
441 while (skb
!= NULL
) {
445 osh
->ctfpool
->curr_obj
--;
448 ASSERT(osh
->ctfpool
->curr_obj
== 0);
449 osh
->ctfpool
->head
= NULL
;
450 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
457 osl_ctfpool_stats(osl_t
*osh
, void *b
)
459 struct bcmstrbuf
*bb
;
461 if ((osh
== NULL
) || (osh
->ctfpool
== NULL
))
464 #ifdef DHD_USE_STATIC_BUF
465 if (bcm_static_buf
) {
468 if (bcm_static_skb
) {
471 #endif /* DHD_USE_STATIC_BUF */
475 ASSERT((osh
!= NULL
) && (bb
!= NULL
));
477 bcm_bprintf(bb
, "max_obj %d obj_size %d curr_obj %d refills %d\n",
478 osh
->ctfpool
->max_obj
, osh
->ctfpool
->obj_size
,
479 osh
->ctfpool
->curr_obj
, osh
->ctfpool
->refills
);
480 bcm_bprintf(bb
, "fast_allocs %d fast_frees %d slow_allocs %d\n",
481 osh
->ctfpool
->fast_allocs
, osh
->ctfpool
->fast_frees
,
482 osh
->ctfpool
->slow_allocs
);
485 static inline struct sk_buff
*
486 osl_pktfastget(osl_t
*osh
, uint len
)
489 #ifdef CTFPOOL_SPINLOCK
491 #endif /* CTFPOOL_SPINLOCK */
493 /* Try to do fast allocate. Return null if ctfpool is not in use
494 * or if there are no items in the ctfpool.
496 if (osh
->ctfpool
== NULL
)
499 CTFPOOL_LOCK(osh
->ctfpool
, flags
);
500 if (osh
->ctfpool
->head
== NULL
) {
501 ASSERT(osh
->ctfpool
->curr_obj
== 0);
502 osh
->ctfpool
->slow_allocs
++;
503 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
507 ASSERT(len
<= osh
->ctfpool
->obj_size
);
509 /* Get an object from ctfpool */
510 skb
= (struct sk_buff
*)osh
->ctfpool
->head
;
511 osh
->ctfpool
->head
= (void *)skb
->next
;
513 osh
->ctfpool
->fast_allocs
++;
514 osh
->ctfpool
->curr_obj
--;
515 ASSERT(CTFPOOLHEAD(osh
, skb
) == (struct sock
*)osh
->ctfpool
->head
);
516 CTFPOOL_UNLOCK(osh
->ctfpool
, flags
);
518 /* Init skb struct */
519 skb
->next
= skb
->prev
= NULL
;
520 #if defined(__ARM_ARCH_7A__)
521 skb
->data
= skb
->head
+ NET_SKB_PAD
;
522 skb
->tail
= skb
->head
+ NET_SKB_PAD
;
524 skb
->data
= skb
->head
+ 16;
525 skb
->tail
= skb
->head
+ 16;
526 #endif /* __ARM_ARCH_7A__ */
529 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
532 atomic_set(&skb
->users
, 1);
534 PKTSETCLINK(skb
, NULL
);
536 PKTFAST(osh
, skb
) &= ~(CTFBUF
| SKIPCT
| CHAINED
);
541 /* Convert a driver packet to native(OS) packet
542 * In the process, packettag is zeroed out before sending up
543 * IP code depends on skb->cb to be setup correctly with various options
544 * In our case, that means it should be 0
546 struct sk_buff
* BCMFASTPATH
547 osl_pkt_tonative(osl_t
*osh
, void *pkt
)
549 struct sk_buff
*nskb
;
551 struct sk_buff
*nskb1
, *nskb2
;
555 OSL_PKTTAG_CLEAR(pkt
);
557 /* Decrement the packet counter */
558 for (nskb
= (struct sk_buff
*)pkt
; nskb
; nskb
= nskb
->next
) {
559 atomic_sub(PKTISCHAINED(nskb
) ? PKTCCNT(nskb
) : 1, &osh
->pktalloced
);
562 for (nskb1
= nskb
; nskb1
!= NULL
; nskb1
= nskb2
) {
563 if (PKTISCHAINED(nskb1
)) {
564 nskb2
= PKTCLINK(nskb1
);
569 DEL_CTRACE(osh
, nskb1
);
571 #endif /* BCMDBG_CTRACE */
573 return (struct sk_buff
*)pkt
;
576 /* Convert a native(OS) packet to driver packet.
577 * In the process, native packet is destroyed, there is no copying
578 * Also, a packettag is zeroed out
582 osl_pkt_frmnative(osl_t
*osh
, void *pkt
, int line
, char *file
)
585 osl_pkt_frmnative(osl_t
*osh
, void *pkt
)
586 #endif /* BCMDBG_CTRACE */
588 struct sk_buff
*nskb
;
590 struct sk_buff
*nskb1
, *nskb2
;
594 OSL_PKTTAG_CLEAR(pkt
);
596 /* Increment the packet counter */
597 for (nskb
= (struct sk_buff
*)pkt
; nskb
; nskb
= nskb
->next
) {
598 atomic_add(PKTISCHAINED(nskb
) ? PKTCCNT(nskb
) : 1, &osh
->pktalloced
);
601 for (nskb1
= nskb
; nskb1
!= NULL
; nskb1
= nskb2
) {
602 if (PKTISCHAINED(nskb1
)) {
603 nskb2
= PKTCLINK(nskb1
);
608 ADD_CTRACE(osh
, nskb1
, file
, line
);
610 #endif /* BCMDBG_CTRACE */
615 /* Return a new packet. zero out pkttag */
618 osl_pktget(osl_t
*osh
, uint len
, int line
, char *file
)
621 osl_pktget(osl_t
*osh
, uint len
)
622 #endif /* BCMDBG_CTRACE */
627 /* Allocate from local pool */
628 skb
= osl_pktfastget(osh
, len
);
629 if ((skb
!= NULL
) || ((skb
= osl_alloc_skb(osh
, len
)) != NULL
)) {
631 if ((skb
= osl_alloc_skb(osh
, len
))) {
642 ADD_CTRACE(osh
, skb
, file
, line
);
644 atomic_inc(&osh
->pktalloced
);
647 return ((void*) skb
);
652 osl_pktfastfree(osl_t
*osh
, struct sk_buff
*skb
)
655 #ifdef CTFPOOL_SPINLOCK
657 #endif /* CTFPOOL_SPINLOCK */
659 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
660 skb
->tstamp
.tv
.sec
= 0;
662 skb
->stamp
.tv_sec
= 0;
665 /* We only need to init the fields that we change */
667 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
670 OSL_PKTTAG_CLEAR(skb
);
673 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
676 skb
->destructor
= NULL
;
679 ctfpool
= (ctfpool_t
*)CTFPOOLPTR(osh
, skb
);
680 ASSERT(ctfpool
!= NULL
);
682 /* Add object to the ctfpool */
683 CTFPOOL_LOCK(ctfpool
, flags
);
684 skb
->next
= (struct sk_buff
*)ctfpool
->head
;
685 ctfpool
->head
= (void *)skb
;
687 ctfpool
->fast_frees
++;
690 ASSERT(ctfpool
->curr_obj
<= ctfpool
->max_obj
);
691 CTFPOOL_UNLOCK(ctfpool
, flags
);
695 /* Free the driver packet. Free the tag if present */
697 osl_pktfree(osl_t
*osh
, void *p
, bool send
)
699 struct sk_buff
*skb
, *nskb
;
701 skb
= (struct sk_buff
*) p
;
703 if (send
&& osh
->pub
.tx_fn
)
704 osh
->pub
.tx_fn(osh
->pub
.tx_ctx
, p
, 0);
706 PKTDBG_TRACE(osh
, (void *) skb
, PKTLIST_PKTFREE
);
708 /* perversion: we use skb->next to chain multi-skb packets */
714 DEL_CTRACE(osh
, skb
);
718 /* Clear the map ptr before freeing */
720 CTFMAPPTR(osh
, skb
) = NULL
;
724 if ((PKTISFAST(osh
, skb
)) && (atomic_read(&skb
->users
) == 1))
725 osl_pktfastfree(osh
, skb
);
732 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
735 dev_kfree_skb_any(skb
);
737 /* can free immediately (even in_irq()) if destructor
742 atomic_dec(&osh
->pktalloced
);
747 #ifdef DHD_USE_STATIC_BUF
749 osl_pktget_static(osl_t
*osh
, uint len
)
754 if (len
> (PAGE_SIZE
*2)) {
755 printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__
, len
);
756 return osl_pktget(osh
, len
);
759 down(&bcm_static_skb
->osl_pkt_sem
);
761 if (len
<= PAGE_SIZE
) {
762 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
763 if (bcm_static_skb
->pkt_use
[i
] == 0)
767 if (i
!= STATIC_PKT_MAX_NUM
) {
768 bcm_static_skb
->pkt_use
[i
] = 1;
769 up(&bcm_static_skb
->osl_pkt_sem
);
770 skb
= bcm_static_skb
->skb_4k
[i
];
771 skb
->tail
= skb
->data
+ len
;
778 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
779 if (bcm_static_skb
->pkt_use
[i
+STATIC_PKT_MAX_NUM
] == 0)
783 if (i
!= STATIC_PKT_MAX_NUM
) {
784 bcm_static_skb
->pkt_use
[i
+STATIC_PKT_MAX_NUM
] = 1;
785 up(&bcm_static_skb
->osl_pkt_sem
);
786 skb
= bcm_static_skb
->skb_8k
[i
];
787 skb
->tail
= skb
->data
+ len
;
792 up(&bcm_static_skb
->osl_pkt_sem
);
793 printk("%s: all static pkt in use!\n", __FUNCTION__
);
794 return osl_pktget(osh
, len
);
798 osl_pktfree_static(osl_t
*osh
, void *p
, bool send
)
802 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
803 if (p
== bcm_static_skb
->skb_4k
[i
]) {
804 down(&bcm_static_skb
->osl_pkt_sem
);
805 bcm_static_skb
->pkt_use
[i
] = 0;
806 up(&bcm_static_skb
->osl_pkt_sem
);
811 for (i
= 0; i
< STATIC_PKT_MAX_NUM
; i
++) {
812 if (p
== bcm_static_skb
->skb_8k
[i
]) {
813 down(&bcm_static_skb
->osl_pkt_sem
);
814 bcm_static_skb
->pkt_use
[i
+ STATIC_PKT_MAX_NUM
] = 0;
815 up(&bcm_static_skb
->osl_pkt_sem
);
820 return osl_pktfree(osh
, p
, send
);
822 #endif /* DHD_USE_STATIC_BUF */
825 osl_pci_read_config(osl_t
*osh
, uint offset
, uint size
)
828 uint retry
= PCI_CFG_RETRY
;
830 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
832 /* only 4byte access supported */
836 pci_read_config_dword(osh
->pdev
, offset
, &val
);
837 if (val
!= 0xffffffff)
842 if (retry
< PCI_CFG_RETRY
)
843 printk("PCI CONFIG READ access to %d required %d retries\n", offset
,
844 (PCI_CFG_RETRY
- retry
));
851 osl_pci_write_config(osl_t
*osh
, uint offset
, uint size
, uint val
)
853 uint retry
= PCI_CFG_RETRY
;
855 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
857 /* only 4byte access supported */
861 pci_write_config_dword(osh
->pdev
, offset
, val
);
862 if (offset
!= PCI_BAR0_WIN
)
864 if (osl_pci_read_config(osh
, offset
, size
) == val
)
869 if (retry
< PCI_CFG_RETRY
)
870 printk("PCI CONFIG WRITE access to %d required %d retries\n", offset
,
871 (PCI_CFG_RETRY
- retry
));
875 /* return bus # for the pci device pointed by osh->pdev */
877 osl_pci_bus(osl_t
*osh
)
879 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
881 return ((struct pci_dev
*)osh
->pdev
)->bus
->number
;
884 /* return slot # for the pci device pointed by osh->pdev */
886 osl_pci_slot(osl_t
*osh
)
888 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
890 #if defined(__ARM_ARCH_7A__) && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
891 return PCI_SLOT(((struct pci_dev
*)osh
->pdev
)->devfn
) + 1;
893 return PCI_SLOT(((struct pci_dev
*)osh
->pdev
)->devfn
);
897 /* return the pci device pointed by osh->pdev */
899 osl_pci_device(osl_t
*osh
)
901 ASSERT(osh
&& (osh
->magic
== OS_HANDLE_MAGIC
) && osh
->pdev
);
907 osl_pcmcia_attr(osl_t
*osh
, uint offset
, char *buf
, int size
, bool write
)
912 osl_pcmcia_read_attr(osl_t
*osh
, uint offset
, void *buf
, int size
)
914 osl_pcmcia_attr(osh
, offset
, (char *) buf
, size
, FALSE
);
918 osl_pcmcia_write_attr(osl_t
*osh
, uint offset
, void *buf
, int size
)
920 osl_pcmcia_attr(osh
, offset
, (char *) buf
, size
, TRUE
);
924 osl_malloc(osl_t
*osh
, uint size
)
928 /* only ASSERT if osh is defined */
930 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
932 #ifdef DHD_USE_STATIC_BUF
936 if ((size
>= PAGE_SIZE
)&&(size
<= STATIC_BUF_SIZE
))
938 down(&bcm_static_buf
->static_sem
);
940 for (i
= 0; i
< STATIC_BUF_MAX_NUM
; i
++)
942 if (bcm_static_buf
->buf_use
[i
] == 0)
946 if (i
== STATIC_BUF_MAX_NUM
)
948 up(&bcm_static_buf
->static_sem
);
949 printk("all static buff in use!\n");
953 bcm_static_buf
->buf_use
[i
] = 1;
954 up(&bcm_static_buf
->static_sem
);
956 bzero(bcm_static_buf
->buf_ptr
+STATIC_BUF_SIZE
*i
, size
);
958 atomic_add(size
, &osh
->malloced
);
960 return ((void *)(bcm_static_buf
->buf_ptr
+STATIC_BUF_SIZE
*i
));
964 #endif /* DHD_USE_STATIC_BUF */
966 if ((addr
= kmalloc(size
, GFP_ATOMIC
)) == NULL
) {
972 atomic_add(size
, &osh
->malloced
);
978 osl_mfree(osl_t
*osh
, void *addr
, uint size
)
980 #ifdef DHD_USE_STATIC_BUF
983 if ((addr
> (void *)bcm_static_buf
) && ((unsigned char *)addr
984 <= ((unsigned char *)bcm_static_buf
+ STATIC_BUF_TOTAL_LEN
)))
988 buf_idx
= ((unsigned char *)addr
- bcm_static_buf
->buf_ptr
)/STATIC_BUF_SIZE
;
990 down(&bcm_static_buf
->static_sem
);
991 bcm_static_buf
->buf_use
[buf_idx
] = 0;
992 up(&bcm_static_buf
->static_sem
);
995 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
996 atomic_sub(size
, &osh
->malloced
);
1001 #endif /* DHD_USE_STATIC_BUF */
1003 ASSERT(osh
->magic
== OS_HANDLE_MAGIC
);
1004 atomic_sub(size
, &osh
->malloced
);
1010 osl_malloced(osl_t
*osh
)
1012 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1013 return (atomic_read(&osh
->malloced
));
1017 osl_malloc_failed(osl_t
*osh
)
1019 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1020 return (osh
->failed
);
1025 osl_dma_consistent_align(void)
1031 osl_dma_alloc_consistent(osl_t
*osh
, uint size
, uint16 align_bits
, uint
*alloced
, ulong
*pap
)
1034 uint16 align
= (1 << align_bits
);
1035 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1037 if (!ISALIGNED(DMA_CONSISTENT_ALIGN
, align
))
1041 #ifdef __ARM_ARCH_7A__
1042 va
= kmalloc(size
, GFP_ATOMIC
| __GFP_ZERO
);
1044 *pap
= (ulong
)__virt_to_phys(va
);
1046 va
= pci_alloc_consistent(osh
->pdev
, size
, (dma_addr_t
*)pap
);
1052 osl_dma_free_consistent(osl_t
*osh
, void *va
, uint size
, ulong pa
)
1054 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1056 #ifdef __ARM_ARCH_7A__
1059 pci_free_consistent(osh
->pdev
, size
, va
, (dma_addr_t
)pa
);
1064 osl_dma_map(osl_t
*osh
, void *va
, uint size
, int direction
, void *p
, hnddma_seg_map_t
*dmah
)
1068 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1069 dir
= (direction
== DMA_TX
)? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE
;
1071 #if defined(__ARM_ARCH_7A__) && defined(BCMDMASGLISTOSL)
1073 int32 nsegs
, i
, totsegs
= 0, totlen
= 0;
1074 struct scatterlist
*sg
, _sg
[MAX_DMA_SEGS
* 2];
1075 struct sk_buff
*skb
;
1076 for (skb
= (struct sk_buff
*)p
; skb
!= NULL
; skb
= PKTNEXT(osh
, skb
)) {
1078 if (skb_is_nonlinear(skb
)) {
1079 nsegs
= skb_to_sgvec(skb
, sg
, 0, PKTLEN(osh
, skb
));
1080 ASSERT((nsegs
> 0) && (totsegs
+ nsegs
<= MAX_DMA_SEGS
));
1081 pci_map_sg(osh
->pdev
, sg
, nsegs
, dir
);
1084 ASSERT(totsegs
+ nsegs
<= MAX_DMA_SEGS
);
1086 sg_set_buf(sg
, PKTDATA(osh
, skb
), PKTLEN(osh
, skb
));
1088 /* Map size bytes (not skb->len) for ctf bufs */
1089 pci_map_single(osh
->pdev
, PKTDATA(osh
, skb
),
1090 PKTISCTF(osh
, skb
) ? CTFMAPSZ
: PKTLEN(osh
, skb
), dir
);
1092 pci_map_single(osh
->pdev
, PKTDATA(osh
, skb
), PKTLEN(osh
, skb
), dir
);
1097 totlen
+= PKTLEN(osh
, skb
);
1099 dmah
->nsegs
= totsegs
;
1100 dmah
->origsize
= totlen
;
1101 for (i
= 0, sg
= _sg
; i
< totsegs
; i
++, sg
++) {
1102 dmah
->segs
[i
].addr
= sg_phys(sg
);
1103 dmah
->segs
[i
].length
= sg
->length
;
1105 return dmah
->segs
[0].addr
;
1107 #endif /* __ARM_ARCH_7A__ && BCMDMASGLISTOSL */
1109 return (pci_map_single(osh
->pdev
, va
, size
, dir
));
1113 osl_dma_unmap(osl_t
*osh
, uint pa
, uint size
, int direction
)
1117 ASSERT((osh
&& (osh
->magic
== OS_HANDLE_MAGIC
)));
1118 dir
= (direction
== DMA_TX
)? PCI_DMA_TODEVICE
: PCI_DMA_FROMDEVICE
;
1119 pci_unmap_single(osh
->pdev
, (uint32
)pa
, size
, dir
);
1124 osl_delay(uint usec
)
1129 d
= MIN(usec
, 1000);
1135 #if defined(DSLCPE_DELAY)
1138 osl_oshsh_init(osl_t
*osh
, shared_osl_t
* oshsh
)
1140 extern unsigned long loops_per_jiffy
;
1142 osh
->oshsh
->MIPS
= loops_per_jiffy
/ (500000/HZ
);
1146 in_long_delay(osl_t
*osh
)
1148 return osh
->oshsh
->long_delay
;
1152 osl_long_delay(osl_t
*osh
, uint usec
, bool yield
)
1155 bool yielded
= TRUE
;
1156 int usec_to_delay
= usec
;
1157 unsigned long tick1
, tick2
, tick_diff
= 0;
1159 /* delay at least requested usec */
1160 while (usec_to_delay
> 0) {
1161 if (!yield
|| !yielded
) {
1162 d
= MIN(usec_to_delay
, 10);
1166 if (usec_to_delay
> 0) {
1167 osh
->oshsh
->long_delay
++;
1168 OSL_GETCYCLES(tick1
);
1169 spin_unlock_bh(osh
->oshsh
->lock
);
1170 if (usec_to_delay
> 0 && !in_irq() && !in_softirq() && !in_interrupt()) {
1176 spin_lock_bh(osh
->oshsh
->lock
);
1177 OSL_GETCYCLES(tick2
);
1180 tick_diff
= TICKDIFF(tick2
, tick1
);
1181 tick_diff
= (tick_diff
* 2)/(osh
->oshsh
->MIPS
);
1183 usec_to_delay
-= tick_diff
;
1187 osh
->oshsh
->long_delay
--;
1188 ASSERT(osh
->oshsh
->long_delay
>= 0);
1192 #endif /* DSLCPE_DELAY */
1195 * The pkttag contents are NOT cloned.
1197 #ifdef BCMDBG_CTRACE
1199 osl_pktdup(osl_t
*osh
, void *skb
, int line
, char *file
)
1202 osl_pktdup(osl_t
*osh
, void *skb
)
1203 #endif /* BCMDBG_CTRACE */
1207 ASSERT(!PKTISCHAINED(skb
));
1209 /* clear the CTFBUF flag if set and map the rest of the buffer
1212 PKTCTFMAP(osh
, skb
);
1214 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
1215 if ((p
= pskb_copy((struct sk_buff
*)skb
, GFP_ATOMIC
)) == NULL
)
1217 if ((p
= skb_clone((struct sk_buff
*)skb
, GFP_ATOMIC
)) == NULL
)
1222 if (PKTISFAST(osh
, skb
)) {
1225 /* if the buffer allocated from ctfpool is cloned then
1226 * we can't be sure when it will be freed. since there
1227 * is a chance that we will be losing a buffer
1228 * from our pool, we increment the refill count for the
1229 * object to be alloced later.
1231 ctfpool
= (ctfpool_t
*)CTFPOOLPTR(osh
, skb
);
1232 ASSERT(ctfpool
!= NULL
);
1234 PKTCLRFAST(osh
, skb
);
1237 #endif /* CTFPOOL */
1239 /* Clear PKTC context */
1240 PKTSETCLINK(p
, NULL
);
1243 PKTCSETLEN(p
, PKTLEN(osh
, skb
));
1245 /* skb_clone copies skb->cb.. we don't want that */
1246 if (osh
->pub
.pkttag
)
1247 OSL_PKTTAG_CLEAR(p
);
1249 /* Increment the packet counter */
1250 atomic_inc(&osh
->pktalloced
);
1251 #ifdef BCMDBG_CTRACE
1252 ADD_CTRACE(osh
, (struct sk_buff
*)p
, file
, line
);
1257 #ifdef BCMDBG_CTRACE
1258 int osl_pkt_is_frmnative(osl_t
*osh
, struct sk_buff
*pkt
)
1260 unsigned long flags
;
1261 struct sk_buff
*skb
;
1264 spin_lock_irqsave(&osh
->ctrace_lock
, flags
);
1266 list_for_each_entry(skb
, &osh
->ctrace_list
, ctrace_list
) {
1273 spin_unlock_irqrestore(&osh
->ctrace_lock
, flags
);
1277 void osl_ctrace_dump(osl_t
*osh
, struct bcmstrbuf
*b
)
1279 unsigned long flags
;
1280 struct sk_buff
*skb
;
1284 spin_lock_irqsave(&osh
->ctrace_lock
, flags
);
1287 bcm_bprintf(b
, " Total %d sbk not free\n", osh
->ctrace_num
);
1289 printk(" Total %d sbk not free\n", osh
->ctrace_num
);
1291 list_for_each_entry(skb
, &osh
->ctrace_list
, ctrace_list
) {
1293 bcm_bprintf(b
, "[%d] skb %p:\n", ++idx
, skb
);
1295 printk("[%d] skb %p:\n", ++idx
, skb
);
1297 for (i
= 0; i
< skb
->ctrace_count
; i
++) {
1298 j
= (skb
->ctrace_start
+ i
) % CTRACE_NUM
;
1300 bcm_bprintf(b
, " [%s(%d)]\n", skb
->func
[j
], skb
->line
[j
]);
1302 printk(" [%s(%d)]\n", skb
->func
[j
], skb
->line
[j
]);
1305 bcm_bprintf(b
, "\n");
1310 spin_unlock_irqrestore(&osh
->ctrace_lock
, flags
);
1314 #endif /* BCMDBG_CTRACE */
1318 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
1322 osl_readb(osl_t
*osh
, volatile uint8
*r
)
1324 osl_rreg_fn_t rreg
= ((osl_pubinfo_t
*)osh
)->rreg_fn
;
1325 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1327 return (uint8
)((rreg
)(ctx
, (void*)r
, sizeof(uint8
)));
1332 osl_readw(osl_t
*osh
, volatile uint16
*r
)
1334 osl_rreg_fn_t rreg
= ((osl_pubinfo_t
*)osh
)->rreg_fn
;
1335 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1337 return (uint16
)((rreg
)(ctx
, (void*)r
, sizeof(uint16
)));
1341 osl_readl(osl_t
*osh
, volatile uint32
*r
)
1343 osl_rreg_fn_t rreg
= ((osl_pubinfo_t
*)osh
)->rreg_fn
;
1344 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1346 return (uint32
)((rreg
)(ctx
, (void*)r
, sizeof(uint32
)));
1350 osl_writeb(osl_t
*osh
, volatile uint8
*r
, uint8 v
)
1352 osl_wreg_fn_t wreg
= ((osl_pubinfo_t
*)osh
)->wreg_fn
;
1353 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1355 ((wreg
)(ctx
, (void*)r
, v
, sizeof(uint8
)));
1360 osl_writew(osl_t
*osh
, volatile uint16
*r
, uint16 v
)
1362 osl_wreg_fn_t wreg
= ((osl_pubinfo_t
*)osh
)->wreg_fn
;
1363 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1365 ((wreg
)(ctx
, (void*)r
, v
, sizeof(uint16
)));
1369 osl_writel(osl_t
*osh
, volatile uint32
*r
, uint32 v
)
1371 osl_wreg_fn_t wreg
= ((osl_pubinfo_t
*)osh
)->wreg_fn
;
1372 void *ctx
= ((osl_pubinfo_t
*)osh
)->reg_ctx
;
1374 ((wreg
)(ctx
, (void*)r
, v
, sizeof(uint32
)));
1376 #endif /* OSLREGOPS */
1379 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1386 return ((uint32
)jiffies
* (1000 / HZ
));
1390 osl_printf(const char *format
, ...)
1393 static char printbuf
[1024];
1396 /* sprintf into a local buffer because there *is* no "vprintk()".. */
1397 va_start(args
, format
);
1398 len
= vsnprintf(printbuf
, 1024, format
, args
);
1401 if (len
> sizeof(printbuf
)) {
1402 printk("osl_printf: buffer overrun\n");
1406 return (printk("%s", printbuf
));
1410 osl_sprintf(char *buf
, const char *format
, ...)
1415 va_start(args
, format
);
1416 rc
= vsprintf(buf
, format
, args
);
1422 osl_snprintf(char *buf
, size_t n
, const char *format
, ...)
1427 va_start(args
, format
);
1428 rc
= vsnprintf(buf
, n
, format
, args
);
1434 osl_vsprintf(char *buf
, const char *format
, va_list ap
)
1436 return (vsprintf(buf
, format
, ap
));
1440 osl_vsnprintf(char *buf
, size_t n
, const char *format
, va_list ap
)
1442 return (vsnprintf(buf
, n
, format
, ap
));
/* BINOSL wrapper: compare NUL-terminated strings s1 and s2.
 * Returns <0, 0 or >0 exactly as strcmp() does.
 * (Reconstructed: return type and braces were elided in the damaged source.)
 */
int
osl_strcmp(const char *s1, const char *s2)
{
	return (strcmp(s1, s2));
}
1452 osl_strncmp(const char *s1
, const char *s2
, uint n
)
1454 return (strncmp(s1
, s2
, n
));
1458 osl_strlen(const char *s
)
/* BINOSL wrapper: copy NUL-terminated string s into d.
 * Caller must guarantee d is large enough (no bounds check, as strcpy()).
 * Returns d, as strcpy() does.
 * (Reconstructed: return type and braces were elided in the damaged source.)
 */
char *
osl_strcpy(char *d, const char *s)
{
	return (strcpy(d, s));
}
1470 osl_strncpy(char *d
, const char *s
, uint n
)
1472 return (strncpy(d
, s
, n
));
/* BINOSL wrapper: locate the first occurrence of character c in string s.
 * Returns a pointer to the match, or NULL if c does not occur (strchr()
 * semantics; the terminating NUL counts as part of the string).
 * (Reconstructed: return type and braces were elided in the damaged source.)
 */
char *
osl_strchr(const char *s, int c)
{
	return (strchr(s, c));
}
/* BINOSL wrapper: locate the last occurrence of character c in string s.
 * Returns a pointer to the match, or NULL if c does not occur (strrchr()
 * semantics).
 * (Reconstructed: return type and braces were elided in the damaged source.)
 */
char *
osl_strrchr(const char *s, int c)
{
	return (strrchr(s, c));
}
/* BINOSL wrapper: fill n bytes at d with byte value c.
 * Returns d, as memset() does.
 * (Reconstructed: return type and braces were elided in the damaged source.)
 */
void *
osl_memset(void *d, int c, size_t n)
{
	return memset(d, c, n);
}
/* BINOSL wrapper: copy n bytes from s to d.
 * Regions must not overlap (memcpy() semantics; use osl_memmove otherwise).
 * Returns d, as memcpy() does.
 * (Reconstructed: return type and braces were elided in the damaged source.)
 */
void *
osl_memcpy(void *d, const void *s, size_t n)
{
	return memcpy(d, s, n);
}
/* BINOSL wrapper: copy n bytes from s to d; regions may overlap
 * (memmove() semantics). Returns d, as memmove() does.
 * (Reconstructed: return type and braces were elided in the damaged source.)
 */
void *
osl_memmove(void *d, const void *s, size_t n)
{
	return memmove(d, s, n);
}
/* BINOSL wrapper: compare n bytes of s1 and s2.
 * Returns <0, 0 or >0 exactly as memcmp() does.
 * (Reconstructed: return type and braces were elided in the damaged source.)
 */
int
osl_memcmp(const void *s1, const void *s2, size_t n)
{
	return memcmp(s1, s2, n);
}
1512 osl_readl(volatile uint32
*r
)
1518 osl_readw(volatile uint16
*r
)
1524 osl_readb(volatile uint8
*r
)
1530 osl_writel(uint32 v
, volatile uint32
*r
)
1536 osl_writew(uint16 v
, volatile uint16
*r
)
1542 osl_writeb(uint8 v
, volatile uint8
*r
)
1548 osl_uncached(void *va
)
1551 return ((void*)KSEG1ADDR(va
));
1558 osl_cached(void *va
)
1561 return ((void*)KSEG0ADDR(va
));
1573 cycles
= read_c0_count() * 2;
1574 #elif defined(__i386__)
1578 #endif /* defined(mips) */
1583 osl_reg_map(uint32 pa
, uint size
)
1585 return (ioremap_nocache((unsigned long)pa
, (unsigned long)size
));
1589 osl_reg_unmap(void *va
)
1595 osl_busprobe(uint32
*val
, uint32 addr
)
1598 return get_dbe(*val
, (uint32
*)addr
);
1600 *val
= readl((uint32
*)(uintptr
)addr
);
1606 osl_pktshared(void *skb
)
1608 return (((struct sk_buff
*)skb
)->cloned
);
1612 osl_pktdata(osl_t
*osh
, void *skb
)
1614 return (((struct sk_buff
*)skb
)->data
);
1618 osl_pktlen(osl_t
*osh
, void *skb
)
1620 return (((struct sk_buff
*)skb
)->len
);
1624 osl_pktheadroom(osl_t
*osh
, void *skb
)
1626 return (uint
) skb_headroom((struct sk_buff
*) skb
);
1630 osl_pkttailroom(osl_t
*osh
, void *skb
)
1632 return (uint
) skb_tailroom((struct sk_buff
*) skb
);
1636 osl_pktnext(osl_t
*osh
, void *skb
)
1638 return (((struct sk_buff
*)skb
)->next
);
1642 osl_pktsetnext(void *skb
, void *x
)
1644 ((struct sk_buff
*)skb
)->next
= (struct sk_buff
*)x
;
1648 osl_pktsetlen(osl_t
*osh
, void *skb
, uint len
)
1650 __skb_trim((struct sk_buff
*)skb
, len
);
1654 osl_pktpush(osl_t
*osh
, void *skb
, int bytes
)
1656 return (skb_push((struct sk_buff
*)skb
, bytes
));
1660 osl_pktpull(osl_t
*osh
, void *skb
, int bytes
)
1662 return (skb_pull((struct sk_buff
*)skb
, bytes
));
1666 osl_pkttag(void *skb
)
1668 return ((void*)(((struct sk_buff
*)skb
)->cb
));
1672 osl_pktlink(void *skb
)
1674 return (((struct sk_buff
*)skb
)->prev
);
1678 osl_pktsetlink(void *skb
, void *x
)
1680 ((struct sk_buff
*)skb
)->prev
= (struct sk_buff
*)x
;
1684 osl_pktprio(void *skb
)
1686 return (((struct sk_buff
*)skb
)->priority
);
1690 osl_pktsetprio(void *skb
, uint x
)
1692 ((struct sk_buff
*)skb
)->priority
= x
;
1697 osl_pktalloced(osl_t
*osh
)
1699 return (atomic_read(&osh
->pktalloced
));
1702 /* Linux Kernel: File Operations: start */
1704 osl_os_open_image(char *filename
)
1708 fp
= filp_open(filename
, O_RDONLY
, 0);
1710 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1712 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1722 osl_os_get_image_block(char *buf
, int len
, void *image
)
1724 struct file
*fp
= (struct file
*)image
;
1730 rdlen
= kernel_read(fp
, fp
->f_pos
, buf
, len
);
1738 osl_os_close_image(void *image
)
1741 filp_close((struct file
*)image
, NULL
);
1745 osl_os_image_size(void *image
)
1747 int len
= 0, curroffset
;
1750 /* store the current offset */
1751 curroffset
= generic_file_llseek(image
, 0, 1);
1752 /* goto end of file to get length */
1753 len
= generic_file_llseek(image
, 0, 2);
1754 /* restore back the offset */
1755 generic_file_llseek(image
, curroffset
, 0);
1759 /* Linux Kernel: File Operations: end */