3 * Copyright (c) 2009, Microsoft Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
19 * Haiyang Zhang <haiyangz@microsoft.com>
20 * Hank Janssen <hjanssen@microsoft.com>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/kmap_types.h>
#include "include/osd.h"
52 typedef struct _TIMER
{
53 struct timer_list timer
;
54 PFN_TIMER_CALLBACK callback
;
59 typedef struct _WAITEVENT
{
61 wait_queue_head_t event
;
64 typedef struct _SPINLOCK
{
/* Thin handle around a dedicated kernel workqueue. */
typedef struct _WORKQUEUE {
	struct workqueue_struct *queue;	/* created by create_workqueue() */
} WORKQUEUE;
73 typedef struct _WORKITEM
{
74 struct work_struct work
;
75 PFN_WORKITEM_CALLBACK callback
;
84 void LogMsg(const char *fmt
, ...)
/*
 * Atomically set bit `bit` in the word at *addr.
 * NOTE(review): casting unsigned int* to unsigned long* assumes the
 * caller's storage is long-sized/aligned — hazardous on 64-bit; confirm
 * callers pass adequately sized buffers.
 */
void BitSet(unsigned int* addr, int bit)
{
	set_bit(bit, (unsigned long*)addr);
}
/* Return nonzero iff bit `bit` is set in the word at *addr (atomic read). */
int BitTest(unsigned int* addr, int bit)
{
	return test_bit(bit, (unsigned long*)addr);
}
/* Atomically clear bit `bit` in the word at *addr. */
void BitClear(unsigned int* addr, int bit)
{
	clear_bit(bit, (unsigned long*)addr);
}
/* Atomically clear bit `bit`; return its previous value. */
int BitTestAndClear(unsigned int* addr, int bit)
{
	return test_and_clear_bit(bit, (unsigned long*)addr);
}
/* Atomically set bit `bit`; return its previous value. */
int BitTestAndSet(unsigned int* addr, int bit)
{
	return test_and_set_bit(bit, (unsigned long*)addr);
}
119 int InterlockedIncrement(int *val
)
121 return atomic_inc_return((atomic_t
*)val
);
124 int InterlockedDecrement(int *val
)
126 return atomic_dec_return((atomic_t
*)val
);
129 #ifndef atomic_cmpxchg
130 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
132 int InterlockedCompareExchange(int *val
, int new, int curr
)
134 //return ((int)cmpxchg(((atomic_t*)val), curr, new));
135 return atomic_cmpxchg((atomic_t
*)val
, curr
, new);
/*
 * Busy-wait for `usecs` microseconds.
 * NOTE(review): body was lost in this copy — reconstructed as udelay()
 * per upstream. The name is misleading: this spins, it does not sleep,
 * so it is safe in atomic context but burns CPU.
 */
void Sleep(unsigned long usecs)
{
	udelay(usecs);
}
144 void* VirtualAllocExec(unsigned int size
)
147 return __vmalloc(size
, GFP_KERNEL
, PAGE_KERNEL_EXEC
);
149 return __vmalloc(size
, GFP_KERNEL
, __pgprot(__PAGE_KERNEL
& (~_PAGE_NX
)));
/*
 * Free memory obtained from VirtualAllocExec.
 * Fix: the original wrote `return vfree(...)` — returning a void
 * expression from a void function; the stray `return` is dropped.
 */
void VirtualFree(void* VirtAddr)
{
	vfree(VirtAddr);
}
158 void* PageAlloc(unsigned int count
)
161 p
= (void *)__get_free_pages(GFP_KERNEL
, get_order(count
* PAGE_SIZE
));
162 if (p
) memset(p
, 0, count
* PAGE_SIZE
);
165 //struct page* page = alloc_page(GFP_KERNEL|__GFP_ZERO);
168 ////BUGBUG: We need to use kmap in case we are in HIMEM region
169 //p = page_address(page);
170 //if (p) memset(p, 0, PAGE_SIZE);
174 void PageFree(void* page
, unsigned int count
)
176 free_pages((unsigned long)page
, get_order(count
* PAGE_SIZE
));
177 /*struct page* p = virt_to_page(page);
182 void* PageMapVirtualAddress(unsigned long Pfn
)
184 return kmap_atomic(pfn_to_page(Pfn
), KM_IRQ0
);
187 void PageUnmapVirtualAddress(void* VirtAddr
)
189 kunmap_atomic(VirtAddr
, KM_IRQ0
);
192 void* MemAllocZeroed(unsigned int size
)
194 void *p
= kmalloc(size
, GFP_KERNEL
);
195 if (p
) memset(p
, 0, size
);
199 void* MemAllocAtomic(unsigned int size
)
201 return kmalloc(size
, GFP_ATOMIC
);
/*
 * Free memory from MemAllocZeroed/MemAllocAtomic.
 * NOTE(review): body was lost in this copy — reconstructed as kfree()
 * per upstream; kfree(NULL) is a no-op, so no guard is needed.
 */
void MemFree(void* buf)
{
	kfree(buf);
}
/*
 * Map a physical MMIO range to a virtual address.
 * The #if structure (lost in this copy) selects between the X2V
 * environment's fixed linear offsets and GetVirtualAddress().
 * NOTE(review): the hard-coded offsets (0xFFFF83000C000000 / 0xfb000000)
 * are environment-specific remap bases — confirm before reuse; `size`
 * is unused on every path.
 */
void *MemMapIO(unsigned long phys, unsigned long size)
{
#if X2V_LINUX
#ifdef __x86_64__
	return (void*)(phys + 0xFFFF83000C000000);
#else /* i386 */
	return (void*)(phys + 0xfb000000);
#endif
#else
	return (void*)GetVirtualAddress(phys); //return ioremap_nocache(phys, size);
#endif
}
/*
 * Unmap a region mapped by MemMapIO.
 * NOTE(review): body was lost in this copy — upstream left this a no-op
 * (iounmap commented out) because MemMapIO does not ioremap; confirm.
 */
void MemUnmapIO(void *virt)
{
	/* intentionally empty: MemMapIO returns offsets, not ioremap()ed VA */
}
232 void TimerCallback(unsigned long data
)
234 TIMER
* t
= (TIMER
*)data
;
236 t
->callback(t
->context
);
239 HANDLE
TimerCreate(PFN_TIMER_CALLBACK pfnTimerCB
, void* context
)
241 TIMER
* t
= kmalloc(sizeof(TIMER
), GFP_KERNEL
);
247 t
->callback
= pfnTimerCB
;
248 t
->context
= context
;
250 init_timer(&t
->timer
);
251 t
->timer
.data
= (unsigned long)t
;
252 t
->timer
.function
= TimerCallback
;
257 void TimerStart(HANDLE hTimer
, u32 expirationInUs
)
259 TIMER
* t
= (TIMER
* )hTimer
;
261 t
->timer
.expires
= jiffies
+ usecs_to_jiffies(expirationInUs
);
262 add_timer(&t
->timer
);
265 int TimerStop(HANDLE hTimer
)
267 TIMER
* t
= (TIMER
* )hTimer
;
269 return del_timer(&t
->timer
);
272 void TimerClose(HANDLE hTimer
)
274 TIMER
* t
= (TIMER
* )hTimer
;
276 del_timer(&t
->timer
);
280 size_t GetTickCount(void)
285 signed long long GetTimestamp(void)
291 return timeval_to_ns(&t
);
294 HANDLE
WaitEventCreate(void)
296 WAITEVENT
* wait
= kmalloc(sizeof(WAITEVENT
), GFP_KERNEL
);
303 init_waitqueue_head(&wait
->event
);
307 void WaitEventClose(HANDLE hWait
)
309 WAITEVENT
* waitEvent
= (WAITEVENT
* )hWait
;
313 void WaitEventSet(HANDLE hWait
)
315 WAITEVENT
* waitEvent
= (WAITEVENT
* )hWait
;
316 waitEvent
->condition
= 1;
317 wake_up_interruptible(&waitEvent
->event
);
320 int WaitEventWait(HANDLE hWait
)
323 WAITEVENT
* waitEvent
= (WAITEVENT
* )hWait
;
325 ret
= wait_event_interruptible(waitEvent
->event
,
326 waitEvent
->condition
);
327 waitEvent
->condition
= 0;
331 int WaitEventWaitEx(HANDLE hWait
, u32 TimeoutInMs
)
334 WAITEVENT
* waitEvent
= (WAITEVENT
* )hWait
;
336 ret
= wait_event_interruptible_timeout(waitEvent
->event
,
337 waitEvent
->condition
,
338 msecs_to_jiffies(TimeoutInMs
));
339 waitEvent
->condition
= 0;
343 HANDLE
SpinlockCreate(void)
345 SPINLOCK
* spin
= kmalloc(sizeof(SPINLOCK
), GFP_KERNEL
);
350 spin_lock_init(&spin
->lock
);
355 void SpinlockAcquire(HANDLE hSpin
)
357 SPINLOCK
* spin
= (SPINLOCK
* )hSpin
;
359 spin_lock_irqsave(&spin
->lock
, spin
->flags
);
362 void SpinlockRelease(HANDLE hSpin
)
364 SPINLOCK
* spin
= (SPINLOCK
* )hSpin
;
366 spin_unlock_irqrestore(&spin
->lock
, spin
->flags
);
369 void SpinlockClose(HANDLE hSpin
)
371 SPINLOCK
* spin
= (SPINLOCK
* )hSpin
;
/*
 * Translate a physical address in the kernel's linear map to its
 * virtual address; BUG if the result is not a valid lowmem address
 * (the return statement was lost in this copy; restored).
 */
void* Physical2LogicalAddr(unsigned long PhysAddr)
{
	void *logicalAddr = phys_to_virt(PhysAddr);

	BUG_ON(!virt_addr_valid(logicalAddr));
	return logicalAddr;
}
/*
 * Translate a lowmem virtual address back to its physical address;
 * BUG on addresses outside the linear map (vmalloc/highmem would give
 * wrong answers from virt_to_phys).
 */
unsigned long Logical2PhysicalAddr(void * LogicalAddr)
{
	BUG_ON(!virt_addr_valid(LogicalAddr));
	return virt_to_phys(LogicalAddr);
}
389 unsigned long Virtual2Physical(void * VirtAddr
)
391 unsigned long pfn
= vmalloc_to_pfn(VirtAddr
);
393 return pfn
<< PAGE_SHIFT
;
396 void WorkItemCallback(struct work_struct
*work
)
398 WORKITEM
* w
= (WORKITEM
*)work
;
400 w
->callback(w
->context
);
405 HANDLE
WorkQueueCreate(char* name
)
407 WORKQUEUE
*wq
= kmalloc(sizeof(WORKQUEUE
), GFP_KERNEL
);
412 wq
->queue
= create_workqueue(name
);
417 void WorkQueueClose(HANDLE hWorkQueue
)
419 WORKQUEUE
*wq
= (WORKQUEUE
*)hWorkQueue
;
421 destroy_workqueue(wq
->queue
);
426 int WorkQueueQueueWorkItem(HANDLE hWorkQueue
, PFN_WORKITEM_CALLBACK workItem
, void* context
)
428 WORKQUEUE
*wq
= (WORKQUEUE
*)hWorkQueue
;
430 WORKITEM
* w
= kmalloc(sizeof(WORKITEM
), GFP_ATOMIC
);
436 w
->callback
= workItem
,
437 w
->context
= context
;
438 INIT_WORK(&w
->work
, WorkItemCallback
);
439 return queue_work(wq
->queue
, &w
->work
);
442 void QueueWorkItem(PFN_WORKITEM_CALLBACK workItem
, void* context
)
444 WORKITEM
* w
= kmalloc(sizeof(WORKITEM
), GFP_ATOMIC
);
450 w
->callback
= workItem
,
451 w
->context
= context
;
452 INIT_WORK(&w
->work
, WorkItemCallback
);
453 schedule_work(&w
->work
);