tomato.git / release/src-rt/shared/linux_osl.c
1 /*
2 * Linux OS Independent Layer
4 * Copyright (C) 2010, Broadcom Corporation. All Rights Reserved.
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 * $Id: linux_osl.c,v 1.172.2.21 2011-01-27 17:03:39 Exp $
19 */
21 #define LINUX_PORT
23 #include <typedefs.h>
24 #include <bcmendian.h>
25 #include <linuxver.h>
26 #include <bcmdefs.h>
27 #include <osl.h>
28 #include <bcmutils.h>
29 #include <linux/delay.h>
30 #ifdef mips
31 #include <asm/paccess.h>
32 #endif /* mips */
33 #include <pcicfg.h>
37 #include <linux/fs.h>
39 #define PCI_CFG_RETRY 10
41 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognise osh */
42 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
44 typedef struct bcm_mem_link {
45 struct bcm_mem_link *prev;
46 struct bcm_mem_link *next;
47 uint size;
48 int line;
49 void *osh;
50 char file[BCM_MEM_FILENAME_LEN];
51 } bcm_mem_link_t;
53 #if defined(DSLCPE_DELAY_NOT_YET)
54 struct shared_osl {
55 int long_delay;
56 spinlock_t *lock;
57 void *wl;
58 unsigned long MIPS;
60 #endif
62 struct osl_info {
63 osl_pubinfo_t pub;
64 #ifdef CTFPOOL
65 ctfpool_t *ctfpool;
66 #endif /* CTFPOOL */
67 uint magic;
68 void *pdev;
69 atomic_t malloced;
70 atomic_t pktalloced; /* Number of allocated packet buffers */
71 uint failed;
72 uint bustype;
73 bcm_mem_link_t *dbgmem_list;
74 #if defined(DSLCPE_DELAY)
75 shared_osl_t *oshsh; /* osh shared */
76 #endif
77 #ifdef BCMDBG_PKT /* pkt logging for debugging */
78 spinlock_t pktlist_lock;
79 pktlist_info_t pktlist;
80 #endif /* BCMDBG_PKT */
81 spinlock_t dbgmem_lock;
82 spinlock_t pktalloc_lock;
85 /* PCMCIA attribute space access macros */
86 #if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
87 struct pcmcia_dev {
88 dev_link_t link; /* PCMCIA device pointer */
89 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
90 dev_node_t node; /* PCMCIA node structure */
91 #endif
92 void *base; /* Mapped attribute memory window */
93 size_t size; /* Size of window */
94 void *drv; /* Driver data */
96 #endif /* defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) */
98 /* Global ASSERT type flag */
99 uint32 g_assert_type = FALSE;
101 static int16 linuxbcmerrormap[] =
102 { 0, /* 0 */
103 -EINVAL, /* BCME_ERROR */
104 -EINVAL, /* BCME_BADARG */
105 -EINVAL, /* BCME_BADOPTION */
106 -EINVAL, /* BCME_NOTUP */
107 -EINVAL, /* BCME_NOTDOWN */
108 -EINVAL, /* BCME_NOTAP */
109 -EINVAL, /* BCME_NOTSTA */
110 -EINVAL, /* BCME_BADKEYIDX */
111 -EINVAL, /* BCME_RADIOOFF */
112 -EINVAL, /* BCME_NOTBANDLOCKED */
113 -EINVAL, /* BCME_NOCLK */
114 -EINVAL, /* BCME_BADRATESET */
115 -EINVAL, /* BCME_BADBAND */
116 -E2BIG, /* BCME_BUFTOOSHORT */
117 -E2BIG, /* BCME_BUFTOOLONG */
118 -EBUSY, /* BCME_BUSY */
119 -EINVAL, /* BCME_NOTASSOCIATED */
120 -EINVAL, /* BCME_BADSSIDLEN */
121 -EINVAL, /* BCME_OUTOFRANGECHAN */
122 -EINVAL, /* BCME_BADCHAN */
123 -EFAULT, /* BCME_BADADDR */
124 -ENOMEM, /* BCME_NORESOURCE */
125 -EOPNOTSUPP, /* BCME_UNSUPPORTED */
126 -EMSGSIZE, /* BCME_BADLENGTH */
127 -EINVAL, /* BCME_NOTREADY */
128 -EPERM, /* BCME_NOTPERMITTED */
129 -ENOMEM, /* BCME_NOMEM */
130 -EINVAL, /* BCME_ASSOCIATED */
131 -ERANGE, /* BCME_RANGE */
132 -EINVAL, /* BCME_NOTFOUND */
133 -EINVAL, /* BCME_WME_NOT_ENABLED */
134 -EINVAL, /* BCME_TSPEC_NOTFOUND */
135 -EINVAL, /* BCME_ACM_NOTSUPPORTED */
136 -EINVAL, /* BCME_NOT_WME_ASSOCIATION */
137 -EIO, /* BCME_SDIO_ERROR */
138 -ENODEV, /* BCME_DONGLE_DOWN */
139 -EINVAL, /* BCME_VERSION */
140 -EIO, /* BCME_TXFAIL */
141 -EIO, /* BCME_RXFAIL */
142 -EINVAL, /* BCME_NODEVICE */
143 -EINVAL, /* BCME_NMODE_DISABLED */
144 -ENODATA, /* BCME_NONRESIDENT */
146 /* When a new error code is added to bcmutils.h, add the
147 * OS-specific error translation here as well.
148 */
149 /* check if BCME_LAST changed since the last time this function was updated */
150 #if BCME_LAST != -42
151 #error "You need to add an OS error translation in the linuxbcmerrormap \
152 for each new error code defined in bcmutils.h"
153 #endif
156 /* translate bcmerrors into linux errors */
158 osl_error(int bcmerror)
160 if (bcmerror > 0)
161 bcmerror = 0;
162 else if (bcmerror < BCME_LAST)
163 bcmerror = BCME_ERROR;
165 /* Array bounds covered by ASSERT in osl_attach */
166 return linuxbcmerrormap[-bcmerror];
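/*
 * Illustrative sketch: a hypothetical ioctl handler using osl_error() to map
 * a BCME_* status from bcmutils.h onto a Linux errno.  wl_do_ioctl() and its
 * arguments are assumed names, not part of this file.
 */
#if 0
static int
example_ioctl(void *wl, uint cmd, void *arg, uint len)
{
	int bcmerror;

	/* driver internals report BCME_* codes (0 or negative) */
	bcmerror = wl_do_ioctl(wl, cmd, arg, len);

	/* e.g. BCME_NOMEM becomes -ENOMEM, BCME_UNSUPPORTED becomes -EOPNOTSUPP */
	return osl_error(bcmerror);
}
#endif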
169 osl_t *
170 osl_attach(void *pdev, uint bustype, bool pkttag)
172 osl_t *osh;
174 osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
175 ASSERT(osh);
177 bzero(osh, sizeof(osl_t));
179 /* Check that error map has the right number of entries in it */
180 ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
182 osh->magic = OS_HANDLE_MAGIC;
183 atomic_set(&osh->malloced, 0);
184 osh->failed = 0;
185 osh->dbgmem_list = NULL;
186 spin_lock_init(&(osh->dbgmem_lock));
187 osh->pdev = pdev;
188 osh->pub.pkttag = pkttag;
189 osh->bustype = bustype;
191 switch (bustype) {
192 case PCI_BUS:
193 case SI_BUS:
194 case PCMCIA_BUS:
195 osh->pub.mmbus = TRUE;
196 break;
197 case JTAG_BUS:
198 case SDIO_BUS:
199 case USB_BUS:
200 case SPI_BUS:
201 case RPC_BUS:
202 osh->pub.mmbus = FALSE;
203 break;
204 default:
205 ASSERT(FALSE);
206 break;
209 #ifdef BCMDBG_PKT
210 spin_lock_init(&(osh->pktlist_lock));
211 #endif
212 spin_lock_init(&(osh->pktalloc_lock));
213 #ifdef BCMDBG
214 if (pkttag) {
215 struct sk_buff *skb;
216 ASSERT(OSL_PKTTAG_SZ <= sizeof(skb->cb));
218 #endif
219 return osh;
222 void
223 osl_detach(osl_t *osh)
225 if (osh == NULL)
226 return;
228 ASSERT(osh->magic == OS_HANDLE_MAGIC);
229 kfree(osh);
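/*
 * Illustrative sketch: the usual osl_attach()/osl_detach() lifecycle from a
 * hypothetical PCI probe/remove pair; the example_* names are assumed.
 */
#if 0
static osl_t *example_osh;

static int
example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* pkttag == TRUE reserves OSL_PKTTAG_SZ bytes of skb->cb for the driver */
	example_osh = osl_attach((void *)pdev, PCI_BUS, TRUE);
	if (example_osh == NULL)
		return -ENOMEM;
	return 0;
}

static void
example_remove(struct pci_dev *pdev)
{
	osl_detach(example_osh);
	example_osh = NULL;
}
#endif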
232 #ifdef CTFPOOL
234 #ifdef CTFPOOL_SPINLOCK
235 #define CTFPOOL_LOCK(ctfpool, flags) spin_lock_irqsave(&(ctfpool)->lock, flags)
236 #define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_irqrestore(&(ctfpool)->lock, flags)
237 #else
238 #define CTFPOOL_LOCK(ctfpool, flags) spin_lock_bh(&(ctfpool)->lock)
239 #define CTFPOOL_UNLOCK(ctfpool, flags) spin_unlock_bh(&(ctfpool)->lock)
240 #endif /* CTFPOOL_SPINLOCK */
242 * Allocate and add an object to packet pool.
244 void *
245 osl_ctfpool_add(osl_t *osh)
247 struct sk_buff *skb;
248 #ifdef CTFPOOL_SPINLOCK
249 unsigned long flags;
250 #endif /* CTFPOOL_SPINLOCK */
252 if ((osh == NULL) || (osh->ctfpool == NULL))
253 return NULL;
255 CTFPOOL_LOCK(osh->ctfpool, flags);
256 ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
258 /* No need to allocate more objects */
259 if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
260 CTFPOOL_UNLOCK(osh->ctfpool, flags);
261 return NULL;
264 /* Allocate a new skb and add it to the ctfpool */
265 skb = dev_alloc_skb(osh->ctfpool->obj_size);
266 if (skb == NULL) {
267 printf("%s: skb alloc of len %d failed\n", __FUNCTION__,
268 osh->ctfpool->obj_size);
269 CTFPOOL_UNLOCK(osh->ctfpool, flags);
270 return NULL;
273 /* Add to ctfpool */
274 skb->next = (struct sk_buff *)osh->ctfpool->head;
275 osh->ctfpool->head = skb;
276 osh->ctfpool->fast_frees++;
277 osh->ctfpool->curr_obj++;
279 /* Hijack a skb member to store ptr to ctfpool */
280 CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
282 /* Use bit flag to indicate skb from fast ctfpool */
283 PKTFAST(osh, skb) = FASTBUF;
285 CTFPOOL_UNLOCK(osh->ctfpool, flags);
287 return skb;
291 * Add new objects to the pool.
293 void
294 osl_ctfpool_replenish(osl_t *osh, uint thresh)
296 if ((osh == NULL) || (osh->ctfpool == NULL))
297 return;
299 /* Do nothing if no refills are required */
300 while ((osh->ctfpool->refills > 0) && (thresh--)) {
301 osl_ctfpool_add(osh);
302 osh->ctfpool->refills--;
307 * Initialize the packet pool with specified number of objects.
309 int32
310 osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
312 osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
313 ASSERT(osh->ctfpool);
314 bzero(osh->ctfpool, sizeof(ctfpool_t));
316 osh->ctfpool->max_obj = numobj;
317 osh->ctfpool->obj_size = size;
319 spin_lock_init(&osh->ctfpool->lock);
321 while (numobj--) {
322 if (!osl_ctfpool_add(osh))
323 return -1;
324 osh->ctfpool->fast_frees--;
327 return 0;
331 * Cleanup the packet pool objects.
333 void
334 osl_ctfpool_cleanup(osl_t *osh)
336 struct sk_buff *skb, *nskb;
337 #ifdef CTFPOOL_SPINLOCK
338 unsigned long flags;
339 #endif /* CTFPOOL_SPINLOCK */
341 if ((osh == NULL) || (osh->ctfpool == NULL))
342 return;
344 CTFPOOL_LOCK(osh->ctfpool, flags);
346 skb = osh->ctfpool->head;
348 while (skb != NULL) {
349 nskb = skb->next;
350 dev_kfree_skb(skb);
351 skb = nskb;
352 osh->ctfpool->curr_obj--;
355 ASSERT(osh->ctfpool->curr_obj == 0);
356 osh->ctfpool->head = NULL;
357 CTFPOOL_UNLOCK(osh->ctfpool, flags);
359 kfree(osh->ctfpool);
360 osh->ctfpool = NULL;
363 void
364 osl_ctfpool_stats(osl_t *osh, void *b)
366 struct bcmstrbuf *bb;
368 if ((osh == NULL) || (osh->ctfpool == NULL))
369 return;
371 bb = b;
373 ASSERT((osh != NULL) && (bb != NULL));
375 bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
376 osh->ctfpool->max_obj, osh->ctfpool->obj_size,
377 osh->ctfpool->curr_obj, osh->ctfpool->refills);
378 bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
379 osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
380 osh->ctfpool->slow_allocs);
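/*
 * Illustrative sketch: creating, replenishing and destroying a CTF packet
 * pool.  The object count and size are arbitrary example values.
 */
#if 0
static int
example_ctfpool_setup(osl_t *osh)
{
	/* pre-allocate 128 skbs of 2048 bytes each for the fast alloc path */
	if (osl_ctfpool_init(osh, 128, 2048) != 0)
		return -1;

	/* later, e.g. from the rx path, top the pool back up in small steps */
	osl_ctfpool_replenish(osh, 16);
	return 0;
}

static void
example_ctfpool_teardown(osl_t *osh)
{
	osl_ctfpool_cleanup(osh);
}
#endif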
383 static inline struct sk_buff *
384 osl_pktfastget(osl_t *osh, uint len)
386 struct sk_buff *skb;
387 #ifdef CTFPOOL_SPINLOCK
388 unsigned long flags;
389 #endif /* CTFPOOL_SPINLOCK */
391 /* Try a fast allocation from the ctfpool. Return NULL if the ctfpool
392 * is not in use or if there are no items in it.
393 */
394 if (osh->ctfpool == NULL)
395 return NULL;
397 CTFPOOL_LOCK(osh->ctfpool, flags);
398 if (osh->ctfpool->head == NULL) {
399 ASSERT(osh->ctfpool->curr_obj == 0);
400 osh->ctfpool->slow_allocs++;
401 CTFPOOL_UNLOCK(osh->ctfpool, flags);
402 return NULL;
405 ASSERT(len <= osh->ctfpool->obj_size);
407 /* Get an object from ctfpool */
408 skb = (struct sk_buff *)osh->ctfpool->head;
409 osh->ctfpool->head = (void *)skb->next;
411 osh->ctfpool->fast_allocs++;
412 osh->ctfpool->curr_obj--;
413 ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
414 CTFPOOL_UNLOCK(osh->ctfpool, flags);
416 /* Init skb struct */
417 skb->next = skb->prev = NULL;
418 skb->data = skb->head + NET_SKB_PAD_ALLOC;
419 skb->tail = skb->data;
421 skb->len = 0;
422 skb->cloned = 0;
423 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
424 skb->list = NULL;
425 #endif
426 atomic_set(&skb->users, 1);
428 return skb;
430 #endif /* CTFPOOL */
431 /* Convert a driver packet to a native (OS) packet.
432 * In the process, the packet tag is zeroed out before sending up:
433 * IP code depends on skb->cb being set up correctly with various options,
434 * and in our case that means it should be 0.
435 */
436 struct sk_buff * BCMFASTPATH
437 osl_pkt_tonative(osl_t *osh, void *pkt)
439 #ifndef WL_UMK
440 struct sk_buff *nskb;
441 unsigned long flags;
442 #endif
444 if (osh->pub.pkttag)
445 bzero((void*)((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);
447 #ifndef WL_UMK
448 /* Decrement the packet counter */
449 for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
450 #ifdef BCMDBG_PKT
451 spin_lock_irqsave(&osh->pktlist_lock, flags);
452 pktlist_remove(&(osh->pktlist), (void *) nskb);
453 spin_unlock_irqrestore(&osh->pktlist_lock, flags);
454 #endif /* BCMDBG_PKT */
455 spin_lock_irqsave(&osh->pktalloc_lock, flags);
456 osh->pub.pktalloced--;
457 spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
459 #endif /* WL_UMK */
460 return (struct sk_buff *)pkt;
463 /* Convert a native (OS) packet to a driver packet.
464 * The native packet is taken over rather than copied,
465 * and the packet tag is zeroed out.
466 */
467 #ifdef BCMDBG_PKT
468 void *
469 osl_pkt_frmnative(osl_t *osh, void *pkt, int line, char *file)
470 #else /* BCMDBG_PKT pkt logging for debugging */
471 void * BCMFASTPATH
472 osl_pkt_frmnative(osl_t *osh, void *pkt)
473 #endif /* BCMDBG_PKT */
475 #ifndef WL_UMK
476 struct sk_buff *nskb;
477 unsigned long flags;
478 #endif
480 if (osh->pub.pkttag)
481 bzero((void*)((struct sk_buff *)pkt)->cb, OSL_PKTTAG_SZ);
483 #ifndef WL_UMK
484 /* Increment the packet counter */
485 for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
486 #ifdef BCMDBG_PKT
487 spin_lock_irqsave(&osh->pktlist_lock, flags);
488 pktlist_add(&(osh->pktlist), (void *) nskb, line, file);
489 spin_unlock_irqrestore(&osh->pktlist_lock, flags);
490 #endif /* BCMDBG_PKT */
491 spin_lock_irqsave(&osh->pktalloc_lock, flags);
492 osh->pub.pktalloced++;
493 spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
495 #endif /* WL_UMK */
496 return (void *)pkt;
499 /* Return a new packet. zero out pkttag */
500 #ifdef BCMDBG_PKT
501 void * BCMFASTPATH
502 osl_pktget(osl_t *osh, uint len, int line, char *file)
503 #else /* BCMDBG_PKT */
504 void * BCMFASTPATH
505 osl_pktget(osl_t *osh, uint len)
506 #endif /* BCMDBG_PKT */
508 struct sk_buff *skb;
509 unsigned long flags;
511 #ifdef CTFPOOL
512 /* Allocate from local pool */
513 skb = osl_pktfastget(osh, len);
514 if ((skb != NULL) || ((skb = dev_alloc_skb(len)) != NULL)) {
515 #else /* CTFPOOL */
516 if ((skb = dev_alloc_skb(len))) {
517 #endif /* CTFPOOL */
518 skb_put(skb, len);
519 skb->priority = 0;
521 #ifdef BCMDBG_PKT
522 spin_lock_irqsave(&osh->pktlist_lock, flags);
523 pktlist_add(&(osh->pktlist), (void *) skb, line, file);
524 spin_unlock_irqrestore(&osh->pktlist_lock, flags);
525 #endif
527 spin_lock_irqsave(&osh->pktalloc_lock, flags);
528 osh->pub.pktalloced++;
529 spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
532 return ((void*) skb);
535 #ifdef CTFPOOL
536 static inline void
537 osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
539 ctfpool_t *ctfpool;
540 #ifdef CTFPOOL_SPINLOCK
541 unsigned long flags;
542 #endif /* CTFPOOL_SPINLOCK */
544 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
545 skb->tstamp.tv.sec = 0;
546 #else
547 skb->stamp.tv_sec = 0;
548 #endif
550 /* We only need to init the fields that we change */
551 skb->dev = NULL;
552 skb->dst = NULL;
553 memset(skb->cb, 0, sizeof(skb->cb));
554 skb->ip_summed = 0;
555 skb->destructor = NULL;
557 ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
558 #if 0
559 ASSERT(ctfpool != NULL);
560 #else
561 if (ctfpool == NULL) return;
562 #endif
564 /* Add object to the ctfpool */
565 CTFPOOL_LOCK(ctfpool, flags);
566 skb->next = (struct sk_buff *)ctfpool->head;
567 ctfpool->head = (void *)skb;
569 ctfpool->fast_frees++;
570 ctfpool->curr_obj++;
572 ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
573 CTFPOOL_UNLOCK(ctfpool, flags);
575 #endif /* CTFPOOL */
577 /* Free the driver packet. Free the tag if present */
578 void BCMFASTPATH
579 osl_pktfree(osl_t *osh, void *p, bool send)
581 struct sk_buff *skb, *nskb;
582 unsigned long flags;
584 skb = (struct sk_buff*) p;
586 if (send && osh->pub.tx_fn)
587 osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
589 PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);
591 /* perversion: we use skb->next to chain multi-skb packets */
592 while (skb) {
593 nskb = skb->next;
594 skb->next = NULL;
596 #ifdef BCMDBG_PKT
597 spin_lock_irqsave(&osh->pktlist_lock, flags);
598 pktlist_remove(&(osh->pktlist), (void *) skb);
599 spin_unlock_irqrestore(&osh->pktlist_lock, flags);
600 #endif
602 #ifdef CTFMAP
603 /* Clear the map ptr before freeing */
604 PKTCLRCTF(osh, skb);
605 CTFMAPPTR(osh, skb) = NULL;
606 #endif /* CTFMAP */
608 #ifdef CTFPOOL
609 if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
610 osl_pktfastfree(osh, skb);
611 else {
612 #else /* CTFPOOL */
614 #endif /* CTFPOOL */
616 if (skb->destructor)
617 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
618 * destructor exists
620 dev_kfree_skb_any(skb);
621 else
622 /* can free immediately (even in_irq()) if destructor
623 * does not exist
625 dev_kfree_skb(skb);
627 spin_lock_irqsave(&osh->pktalloc_lock, flags);
628 osh->pub.pktalloced--;
629 spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
630 skb = nskb;
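/*
 * Illustrative sketch: allocating a driver packet, touching its data and
 * freeing it again, using the non-BCMDBG_PKT signatures of osl_pktget() and
 * osl_pktfree().  The length and fill size are arbitrary.
 */
#if 0
static void
example_pkt_roundtrip(osl_t *osh)
{
	struct sk_buff *skb;

	/* comes from the ctfpool fast path when possible, else dev_alloc_skb() */
	skb = (struct sk_buff *)osl_pktget(osh, 1536);
	if (skb == NULL)
		return;

	memset(skb->data, 0, 64);

	/* send == FALSE: this is not a tx completion, just releasing the buffer */
	osl_pktfree(osh, skb, FALSE);
}
#endif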
634 uint32
635 osl_pci_read_config(osl_t *osh, uint offset, uint size)
637 uint val = 0;
638 uint retry = PCI_CFG_RETRY;
640 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
642 /* only 4byte access supported */
643 ASSERT(size == 4);
645 do {
646 pci_read_config_dword(osh->pdev, offset, &val);
647 if (val != 0xffffffff)
648 break;
649 } while (retry--);
651 #ifdef BCMDBG
652 if (retry < PCI_CFG_RETRY)
653 printk("PCI CONFIG READ access to %d required %d retries\n", offset,
654 (PCI_CFG_RETRY - retry));
655 #endif /* BCMDBG */
657 return (val);
660 void
661 osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
663 uint retry = PCI_CFG_RETRY;
665 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
667 /* only 4byte access supported */
668 ASSERT(size == 4);
670 do {
671 pci_write_config_dword(osh->pdev, offset, val);
672 if (offset != PCI_BAR0_WIN)
673 break;
674 if (osl_pci_read_config(osh, offset, size) == val)
675 break;
676 } while (retry--);
678 #ifdef BCMDBG
679 if (retry < PCI_CFG_RETRY)
680 printk("PCI CONFIG WRITE access to %d required %d retries\n", offset,
681 (PCI_CFG_RETRY - retry));
682 #endif /* BCMDBG */
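/*
 * Illustrative sketch: programming the PCI BAR0 window and reading it back.
 * osl_pci_write_config() itself already re-reads PCI_BAR0_WIN until the value
 * sticks; the backplane address used here is an arbitrary example.
 */
#if 0
static void
example_bar0_window(osl_t *osh)
{
	osl_pci_write_config(osh, PCI_BAR0_WIN, sizeof(uint32), 0x18001000);

	if (osl_pci_read_config(osh, PCI_BAR0_WIN, sizeof(uint32)) != 0x18001000)
		printk("example: BAR0 window did not stick\n");
}
#endif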
685 /* return bus # for the pci device pointed by osh->pdev */
686 uint
687 osl_pci_bus(osl_t *osh)
689 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
691 return ((struct pci_dev *)osh->pdev)->bus->number;
694 /* return slot # for the pci device pointed by osh->pdev */
695 uint
696 osl_pci_slot(osl_t *osh)
698 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
700 return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
703 static void
704 osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
708 void
709 osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
711 osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
714 void
715 osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
717 osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
720 #ifdef BCMDBG_MEM
721 /* In BCMDBG_MEM configurations osl_malloc is only used internally in
722 * the implementation of osl_debug_malloc. Because we are using the GCC
723 * -Wstrict-prototypes compile option, we must always have a prototype
724 * for a global/external function. So make osl_malloc static in
725 * the BCMDBG_MEM case.
727 static
728 #endif
729 void *
730 osl_malloc(osl_t *osh, uint size)
732 void *addr;
734 /* only ASSERT if osh is defined */
735 if (osh)
736 ASSERT(osh->magic == OS_HANDLE_MAGIC);
738 if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
739 if (osh)
740 osh->failed++;
741 return (NULL);
743 if (osh)
744 atomic_add(size, &osh->malloced);
746 return (addr);
749 #ifdef BCMDBG_MEM
750 /* In BCMDBG_MEM configurations osl_mfree is only used internally in
751 * the implementation of osl_debug_mfree. Because we are using the GCC
752 * -Wstrict-prototypes compile option, we must always have a prototype
753 * for a global/external function. So make osl_mfree static in
754 * the BCMDBG_MEM case.
756 static
757 #endif
758 void
759 osl_mfree(osl_t *osh, void *addr, uint size)
761 if (osh) {
762 ASSERT(osh->magic == OS_HANDLE_MAGIC);
763 atomic_sub(size, &osh->malloced);
765 kfree(addr);
768 uint
769 osl_malloced(osl_t *osh)
771 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
772 return (atomic_read(&osh->malloced));
775 uint
776 osl_malloc_failed(osl_t *osh)
778 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
779 return (osh->failed);
782 #ifdef BCMDBG_MEM
783 #define MEMLIST_LOCK(osh, flags) spin_lock_irqsave(&(osh)->dbgmem_lock, flags)
784 #define MEMLIST_UNLOCK(osh, flags) spin_unlock_irqrestore(&(osh)->dbgmem_lock, flags)
786 void*
787 osl_debug_malloc(osl_t *osh, uint size, int line, char* file)
789 bcm_mem_link_t *p;
790 char* basename;
791 unsigned long flags = 0;
793 if (!size) {
794 printk("%s: allocating zero sized mem at %s line %d\n", __FUNCTION__, file, line);
795 ASSERT(0);
798 if (osh) {
799 MEMLIST_LOCK(osh, flags);
801 if ((p = (bcm_mem_link_t*)osl_malloc(osh, sizeof(bcm_mem_link_t) + size)) == NULL) {
802 if (osh) {
803 MEMLIST_UNLOCK(osh, flags);
805 return (NULL);
808 p->size = size;
809 p->line = line;
810 p->osh = (void *)osh;
812 basename = strrchr(file, '/');
813 /* skip the '/' */
814 if (basename)
815 basename++;
817 if (!basename)
818 basename = file;
820 strncpy(p->file, basename, BCM_MEM_FILENAME_LEN);
821 p->file[BCM_MEM_FILENAME_LEN - 1] = '\0';
823 /* link this block */
824 if (osh) {
825 p->prev = NULL;
826 p->next = osh->dbgmem_list;
827 if (p->next)
828 p->next->prev = p;
829 osh->dbgmem_list = p;
830 MEMLIST_UNLOCK(osh, flags);
833 return p + 1;
836 void
837 osl_debug_mfree(osl_t *osh, void *addr, uint size, int line, char* file)
839 bcm_mem_link_t *p = (bcm_mem_link_t *)((int8*)addr - sizeof(bcm_mem_link_t));
840 unsigned long flags = 0;
842 ASSERT(osh == NULL || osh->magic == OS_HANDLE_MAGIC);
844 if (p->size == 0) {
845 printk("osl_debug_mfree: double free on addr %p size %d at line %d file %s\n",
846 addr, size, line, file);
847 ASSERT(p->size);
848 return;
851 if (p->size != size) {
852 printk("osl_debug_mfree: dealloc size %d does not match alloc size %d on addr %p"
853 " at line %d file %s\n",
854 size, p->size, addr, line, file);
855 ASSERT(p->size == size);
856 return;
859 if (p->osh != (void *)osh) {
860 printk("osl_debug_mfree: alloc osh %p does not match dealloc osh %p\n",
861 p->osh, osh);
862 printk("Dealloc addr %p size %d at line %d file %s\n", addr, size, line, file);
863 printk("Alloc size %d line %d file %s\n", p->size, p->line, p->file);
864 ASSERT(p->osh == (void *)osh);
865 return;
868 /* unlink this block */
869 if (osh) {
870 MEMLIST_LOCK(osh, flags);
871 if (p->prev)
872 p->prev->next = p->next;
873 if (p->next)
874 p->next->prev = p->prev;
875 if (osh->dbgmem_list == p)
876 osh->dbgmem_list = p->next;
877 p->next = p->prev = NULL;
879 p->size = 0;
881 osl_mfree(osh, p, size + sizeof(bcm_mem_link_t));
882 if (osh) {
883 MEMLIST_UNLOCK(osh, flags);
888 osl_debug_memdump(osl_t *osh, struct bcmstrbuf *b)
890 bcm_mem_link_t *p;
891 unsigned long flags = 0;
893 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
895 MEMLIST_LOCK(osh, flags);
896 if (osh->dbgmem_list) {
897 if (b != NULL)
898 bcm_bprintf(b, " Address Size File:line\n");
899 else
900 printf(" Address Size File:line\n");
902 for (p = osh->dbgmem_list; p; p = p->next) {
903 if (b != NULL)
904 bcm_bprintf(b, "%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
905 p->size, p->file, p->line);
906 else
907 printf("%p %6d %s:%d\n", (char*)p + sizeof(bcm_mem_link_t),
908 p->size, p->file, p->line);
910 /* Detects a loop-to-self so we don't enter an infinite loop */
911 if (p == p->next) {
912 if (b != NULL)
913 bcm_bprintf(b, "WARNING: loop-to-self "
914 "p %p p->next %p\n", p, p->next);
915 else
916 printf("WARNING: loop-to-self "
917 "p %p p->next %p\n", p, p->next);
919 break;
923 MEMLIST_UNLOCK(osh, flags);
925 return 0;
928 #endif /* BCMDBG_MEM */
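/*
 * Illustrative sketch: with BCMDBG_MEM the caller's file and line travel in
 * the bcm_mem_link_t header stored just before the returned pointer, so any
 * block still on the list shows up in osl_debug_memdump() with its origin.
 */
#if 0
static void
example_debug_alloc(osl_t *osh, struct bcmstrbuf *b)
{
	void *buf;

	buf = osl_debug_malloc(osh, 512, __LINE__, __FILE__);
	if (buf == NULL)
		return;

	/* a missing osl_debug_mfree() would leave this entry in the dump */
	osl_debug_memdump(osh, b);

	osl_debug_mfree(osh, buf, 512, __LINE__, __FILE__);
}
#endif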
930 uint
931 osl_dma_consistent_align(void)
933 return (PAGE_SIZE);
936 void*
937 osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
939 uint16 align = (1 << align_bits);
940 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
942 if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
943 size += align;
944 *alloced = size;
946 return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
949 void
950 osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
952 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
954 pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
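/*
 * Illustrative sketch: allocating and freeing a coherent descriptor ring.
 * 'alloced' returns the possibly padded size, which must be passed back when
 * freeing; align_bits = 12 (4 KB) is an arbitrary example.
 */
#if 0
static void *
example_ring_alloc(osl_t *osh, uint size, uint *alloced, ulong *pa)
{
	return osl_dma_alloc_consistent(osh, size, 12, alloced, pa);
}

static void
example_ring_free(osl_t *osh, void *va, uint alloced, ulong pa)
{
	osl_dma_free_consistent(osh, va, alloced, pa);
}
#endif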
957 uint BCMFASTPATH
958 osl_dma_map(osl_t *osh, void *va, uint size, int direction)
960 int dir;
962 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
963 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
964 return (pci_map_single(osh->pdev, va, size, dir));
967 void BCMFASTPATH
968 osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
970 int dir;
972 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
973 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
974 pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
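/*
 * Illustrative sketch: streaming DMA for a transmit buffer.  DMA_TX selects
 * PCI_DMA_TODEVICE underneath; handing the bus address to the DMA engine is
 * left as a placeholder.
 */
#if 0
static void
example_dma_tx(osl_t *osh, void *buf, uint len)
{
	uint pa;

	pa = osl_dma_map(osh, buf, len, DMA_TX);

	/* ... program 'pa' into a descriptor and wait for the tx to complete ... */

	osl_dma_unmap(osh, pa, len, DMA_TX);
}
#endif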
977 #if defined(BCMDBG_ASSERT)
978 void
979 osl_assert(const char *exp, const char *file, int line)
981 char tempbuf[256];
982 const char *basename;
984 basename = strrchr(file, '/');
985 /* skip the '/' */
986 if (basename)
987 basename++;
989 if (!basename)
990 basename = file;
992 #ifdef BCMDBG_ASSERT
993 snprintf(tempbuf, 256, "assertion \"%s\" failed: file \"%s\", line %d\n",
994 exp, basename, line);
996 /* Print assert message and give it time to be written to /var/log/messages */
997 if (!in_interrupt()) {
998 const int delay = 3;
999 printk("%s", tempbuf);
1000 printk("panic in %d seconds\n", delay);
1001 set_current_state(TASK_INTERRUPTIBLE);
1002 schedule_timeout(delay * HZ);
1005 switch (g_assert_type) {
1006 case 0:
1007 panic("%s", tempbuf);
1008 #ifdef __COVERITY__
1009 /* Inform Coverity that execution will not continue past this point */
1010 __coverity_panic__();
1011 #endif /* __COVERITY__ */
1012 break;
1013 case 2:
1014 printk("%s", tempbuf);
1015 BUG();
1016 #ifdef __COVERITY__
1017 /* Inform Coverity that execution will not continue past this point */
1018 __coverity_panic__();
1019 #endif /* __COVERITY__ */
1020 break;
1021 default:
1022 break;
1024 #endif /* BCMDBG_ASSERT */
1027 #endif
1029 void
1030 osl_delay(uint usec)
1032 uint d;
1034 while (usec > 0) {
1035 d = MIN(usec, 1000);
1036 udelay(d);
1037 usec -= d;
1041 #if defined(DSLCPE_DELAY)
1043 void
1044 osl_oshsh_init(osl_t *osh, shared_osl_t* oshsh)
1046 extern unsigned long loops_per_jiffy;
1047 osh->oshsh = oshsh;
1048 osh->oshsh->MIPS = loops_per_jiffy / (500000/HZ);
1052 in_long_delay(osl_t *osh)
1054 return osh->oshsh->long_delay;
1057 void
1058 osl_long_delay(osl_t *osh, uint usec, bool yield)
1060 uint d;
1061 bool yielded = TRUE;
1062 int usec_to_delay = usec;
1063 unsigned long tick1, tick2, tick_diff = 0;
1065 /* delay at least requested usec */
1066 while (usec_to_delay > 0) {
1067 if (!yield || !yielded) {
1068 d = MIN(usec_to_delay, 10);
1069 udelay(d);
1070 usec_to_delay -= d;
1072 if (usec_to_delay > 0) {
1073 osh->oshsh->long_delay++;
1074 OSL_GETCYCLES(tick1);
1075 spin_unlock_bh(osh->oshsh->lock);
1076 if (usec_to_delay > 0 && !in_irq() && !in_softirq() && !in_interrupt()) {
1077 schedule();
1078 yielded = TRUE;
1079 } else {
1080 yielded = FALSE;
1082 spin_lock_bh(osh->oshsh->lock);
1083 OSL_GETCYCLES(tick2);
1085 if (yielded) {
1086 tick_diff = TICKDIFF(tick2, tick1);
1087 tick_diff = (tick_diff * 2)/(osh->oshsh->MIPS);
1088 if (tick_diff) {
1089 usec_to_delay -= tick_diff;
1090 } else
1091 yielded = 0;
1093 osh->oshsh->long_delay--;
1094 ASSERT(osh->oshsh->long_delay >= 0);
1098 #endif /* DSLCPE_DELAY */
1100 /* Clone a packet.
1101 * The pkttag contents are NOT cloned.
1103 #ifdef BCMDBG_PKT
1104 void *
1105 osl_pktdup(osl_t *osh, void *skb, int line, char *file)
1106 #else /* BCMDBG_PKT */
1107 void *
1108 osl_pktdup(osl_t *osh, void *skb)
1109 #endif /* BCMDBG_PKT */
1111 void * p;
1112 unsigned long flags;
1114 /* clear the CTFBUF flag if set and map the rest of the buffer
1115 * before cloning
1116 */
1117 #ifdef CTFMAP
1118 PKTCTFMAP(osh, skb);
1119 #endif
1121 if ((p = skb_clone((struct sk_buff*)skb, GFP_ATOMIC)) == NULL)
1122 return NULL;
1124 #ifdef CTFPOOL
1125 if (PKTISFAST(osh, skb)) {
1126 ctfpool_t *ctfpool;
1128 /* if the buffer allocated from ctfpool is cloned then
1129 * we can't be sure when it will be freed. since there
1130 * is a chance that we will be losing a buffer
1131 * from our pool, we increment the refill count for the
1132 * object to be alloced later.
1134 ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
1135 ASSERT(ctfpool != NULL);
1136 PKTCLRFAST(osh, p);
1137 PKTCLRFAST(osh, skb);
1138 ctfpool->refills++;
1140 #endif /* CTFPOOL */
1142 /* skb_clone copies skb->cb.. we don't want that */
1143 if (osh->pub.pkttag)
1144 bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);
1146 /* Increment the packet counter */
1147 spin_lock_irqsave(&osh->pktalloc_lock, flags);
1148 osh->pub.pktalloced++;
1149 spin_unlock_irqrestore(&osh->pktalloc_lock, flags);
1150 #ifdef BCMDBG_PKT
1151 spin_lock_irqsave(&osh->pktlist_lock, flags);
1152 pktlist_add(&(osh->pktlist), (void *) p, line, file);
1153 spin_unlock_irqrestore(&osh->pktlist_lock, flags);
1154 #endif
1155 return (p);
1158 #ifdef BCMDBG_PKT
1159 #ifdef BCMDBG_PTRACE
1160 void
1161 osl_pkttrace(osl_t *osh, void *pkt, uint16 bit)
1163 pktlist_trace(&(osh->pktlist), pkt, bit);
1165 #endif /* BCMDBG_PTRACE */
1167 char *
1168 osl_pktlist_dump(osl_t *osh, char *buf)
1170 pktlist_dump(&(osh->pktlist), buf);
1171 return buf;
1174 void
1175 osl_pktlist_add(osl_t *osh, void *p, int line, char *file)
1177 unsigned long flags;
1178 spin_lock_irqsave(&osh->pktlist_lock, flags);
1179 pktlist_add(&(osh->pktlist), p, line, file);
1180 spin_unlock_irqrestore(&osh->pktlist_lock, flags);
1183 void
1184 osl_pktlist_remove(osl_t *osh, void *p)
1186 unsigned long flags;
1187 spin_lock_irqsave(&osh->pktlist_lock, flags);
1188 pktlist_remove(&(osh->pktlist), p);
1189 spin_unlock_irqrestore(&osh->pktlist_lock, flags);
1191 #endif /* BCMDBG_PKT */
1194 * OSLREGOPS specifies that the osl_XXX routines are to be used for register access
1196 #if defined(OSLREGOPS) || (defined(WLC_HIGH) && !defined(WLC_LOW))
1197 uint8
1198 osl_readb(osl_t *osh, volatile uint8 *r)
1200 osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
1201 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1203 return (uint8)((rreg)(ctx, (void*)r, sizeof(uint8)));
1207 uint16
1208 osl_readw(osl_t *osh, volatile uint16 *r)
1210 osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
1211 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1213 return (uint16)((rreg)(ctx, (void*)r, sizeof(uint16)));
1216 uint32
1217 osl_readl(osl_t *osh, volatile uint32 *r)
1219 osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn;
1220 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1222 return (uint32)((rreg)(ctx, (void*)r, sizeof(uint32)));
1225 void
1226 osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v)
1228 osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
1229 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1231 ((wreg)(ctx, (void*)r, v, sizeof(uint8)));
1235 void
1236 osl_writew(osl_t *osh, volatile uint16 *r, uint16 v)
1238 osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
1239 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1241 ((wreg)(ctx, (void*)r, v, sizeof(uint16)));
1244 void
1245 osl_writel(osl_t *osh, volatile uint32 *r, uint32 v)
1247 osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn;
1248 void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx;
1250 ((wreg)(ctx, (void*)r, v, sizeof(uint32)));
1252 #endif /* OSLREGOPS */
1255 * BINOSL selects the slightly slower function-call-based binary compatible osl.
1257 #ifdef BINOSL
1259 uint32
1260 osl_sysuptime(void)
1262 return ((uint32)jiffies * (1000 / HZ));
1265 uint
1266 osl_pktalloced(osl_t *osh)
1268 return (osh->pub.pktalloced);
1272 osl_printf(const char *format, ...)
1274 va_list args;
1275 static char printbuf[1024];
1276 int len;
1278 /* sprintf into a local buffer because there *is* no "vprintk()".. */
1279 va_start(args, format);
1280 len = vsnprintf(printbuf, 1024, format, args);
1281 va_end(args);
1283 if (len > sizeof(printbuf)) {
1284 printk("osl_printf: buffer overrun\n");
1285 return (0);
1288 return (printk("%s", printbuf));
1292 osl_sprintf(char *buf, const char *format, ...)
1294 va_list args;
1295 int rc;
1297 va_start(args, format);
1298 rc = vsprintf(buf, format, args);
1299 va_end(args);
1300 return (rc);
1304 osl_snprintf(char *buf, size_t n, const char *format, ...)
1306 va_list args;
1307 int rc;
1309 va_start(args, format);
1310 rc = vsnprintf(buf, n, format, args);
1311 va_end(args);
1312 return (rc);
1316 osl_vsprintf(char *buf, const char *format, va_list ap)
1318 return (vsprintf(buf, format, ap));
1322 osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap)
1324 return (vsnprintf(buf, n, format, ap));
1328 osl_strcmp(const char *s1, const char *s2)
1330 return (strcmp(s1, s2));
1334 osl_strncmp(const char *s1, const char *s2, uint n)
1336 return (strncmp(s1, s2, n));
1340 osl_strlen(const char *s)
1342 return (strlen(s));
1345 char*
1346 osl_strcpy(char *d, const char *s)
1348 return (strcpy(d, s));
1351 char*
1352 osl_strncpy(char *d, const char *s, uint n)
1354 return (strncpy(d, s, n));
1357 char*
1358 osl_strchr(const char *s, int c)
1360 return (strchr(s, c));
1363 char*
1364 osl_strrchr(const char *s, int c)
1366 return (strrchr(s, c));
1369 void*
1370 osl_memset(void *d, int c, size_t n)
1372 return memset(d, c, n);
1375 void*
1376 osl_memcpy(void *d, const void *s, size_t n)
1378 return memcpy(d, s, n);
1381 void*
1382 osl_memmove(void *d, const void *s, size_t n)
1384 return memmove(d, s, n);
1388 osl_memcmp(const void *s1, const void *s2, size_t n)
1390 return memcmp(s1, s2, n);
1393 uint32
1394 osl_readl(volatile uint32 *r)
1396 return (readl(r));
1399 uint16
1400 osl_readw(volatile uint16 *r)
1402 return (readw(r));
1405 uint8
1406 osl_readb(volatile uint8 *r)
1408 return (readb(r));
1411 void
1412 osl_writel(uint32 v, volatile uint32 *r)
1414 writel(v, r);
1417 void
1418 osl_writew(uint16 v, volatile uint16 *r)
1420 writew(v, r);
1423 void
1424 osl_writeb(uint8 v, volatile uint8 *r)
1426 writeb(v, r);
1429 void *
1430 osl_uncached(void *va)
1432 #ifdef mips
1433 return ((void*)KSEG1ADDR(va));
1434 #else
1435 return ((void*)va);
1436 #endif /* mips */
1439 void *
1440 osl_cached(void *va)
1442 #ifdef mips
1443 return ((void*)KSEG0ADDR(va));
1444 #else
1445 return ((void*)va);
1446 #endif /* mips */
1449 uint
1450 osl_getcycles(void)
1452 uint cycles;
1454 #if defined(mips)
1455 cycles = read_c0_count() * 2;
1456 #elif defined(__i386__)
1457 rdtscl(cycles);
1458 #else
1459 cycles = 0;
1460 #endif /* defined(mips) */
1461 return cycles;
1464 void *
1465 osl_reg_map(uint32 pa, uint size)
1467 return (ioremap_nocache((unsigned long)pa, (unsigned long)size));
1470 void
1471 osl_reg_unmap(void *va)
1473 iounmap(va);
1477 osl_busprobe(uint32 *val, uint32 addr)
1479 #ifdef mips
1480 return get_dbe(*val, (uint32 *)addr);
1481 #else
1482 *val = readl((uint32 *)(uintptr)addr);
1483 return 0;
1484 #endif /* mips */
1487 bool
1488 osl_pktshared(void *skb)
1490 return (((struct sk_buff*)skb)->cloned);
1493 uchar*
1494 osl_pktdata(osl_t *osh, void *skb)
1496 return (((struct sk_buff*)skb)->data);
1499 uint
1500 osl_pktlen(osl_t *osh, void *skb)
1502 return (((struct sk_buff*)skb)->len);
1505 uint
1506 osl_pktheadroom(osl_t *osh, void *skb)
1508 return (uint) skb_headroom((struct sk_buff *) skb);
1511 uint
1512 osl_pkttailroom(osl_t *osh, void *skb)
1514 return (uint) skb_tailroom((struct sk_buff *) skb);
1517 void*
1518 osl_pktnext(osl_t *osh, void *skb)
1520 return (((struct sk_buff*)skb)->next);
1523 void
1524 osl_pktsetnext(void *skb, void *x)
1526 ((struct sk_buff*)skb)->next = (struct sk_buff*)x;
1529 void
1530 osl_pktsetlen(osl_t *osh, void *skb, uint len)
1532 __skb_trim((struct sk_buff*)skb, len);
1535 uchar*
1536 osl_pktpush(osl_t *osh, void *skb, int bytes)
1538 return (skb_push((struct sk_buff*)skb, bytes));
1541 uchar*
1542 osl_pktpull(osl_t *osh, void *skb, int bytes)
1544 return (skb_pull((struct sk_buff*)skb, bytes));
1547 void*
1548 osl_pkttag(void *skb)
1550 return ((void*)(((struct sk_buff*)skb)->cb));
1553 void*
1554 osl_pktlink(void *skb)
1556 return (((struct sk_buff*)skb)->prev);
1559 void
1560 osl_pktsetlink(void *skb, void *x)
1562 ((struct sk_buff*)skb)->prev = (struct sk_buff*)x;
1565 uint
1566 osl_pktprio(void *skb)
1568 return (((struct sk_buff*)skb)->priority);
1571 void
1572 osl_pktsetprio(void *skb, uint x)
1574 ((struct sk_buff*)skb)->priority = x;
1576 #endif /* BINOSL */
1578 /* Linux Kernel: File Operations: start */
1579 void *
1580 osl_os_open_image(char *filename)
1582 struct file *fp;
1584 fp = filp_open(filename, O_RDONLY, 0);
1586 * 2.6.11 (FC4) supports filp_open() but later revs don't?
1587 * Alternative:
1588 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
1589 * ???
1591 if (IS_ERR(fp))
1592 fp = NULL;
1594 return fp;
1598 osl_os_get_image_block(char *buf, int len, void *image)
1600 struct file *fp = (struct file *)image;
1601 int rdlen;
1603 if (!image)
1604 return 0;
1606 rdlen = kernel_read(fp, fp->f_pos, buf, len);
1607 if (rdlen > 0)
1608 fp->f_pos += rdlen;
1610 return rdlen;
1613 void
1614 osl_os_close_image(void *image)
1616 if (image)
1617 filp_close((struct file *)image, NULL);
1620 osl_os_image_size(void *image)
1622 int len = 0, curroffset;
1624 if (image) {
1625 /* store the current offset */
1626 curroffset = generic_file_llseek(image, 0, 1);
1627 /* goto end of file to get length */
1628 len = generic_file_llseek(image, 0, 2);
1629 /* restore back the offset */
1630 generic_file_llseek(image, curroffset, 0);
1632 return len;
1635 /* Linux Kernel: File Operations: end */
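/*
 * Illustrative sketch: reading a firmware image with the file helpers above.
 * The buffer handling and return convention are example choices only.
 */
#if 0
static int
example_load_image(char *path, char *buf, int buflen)
{
	void *img;
	int total = 0, got;

	img = osl_os_open_image(path);
	if (img == NULL)
		return -1;

	while (total < buflen &&
	       (got = osl_os_get_image_block(buf + total, buflen - total, img)) > 0)
		total += got;

	osl_os_close_image(img);
	return total;
}
#endif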