Staging: hv: osd: remove MemAlloc wrapper
[linux-2.6/mini2440.git] / drivers / staging / hv / osd.c
blob457698c01f8bac2165e2f9f750c529747424aff9
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
24 #include <linux/module.h>
25 #include <linux/init.h>
26 #include <linux/types.h>
27 #include <linux/mm.h>
28 #include <linux/highmem.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ioport.h>
31 #include <linux/irq.h>
32 #include <linux/interrupt.h>
33 #include <linux/wait.h>
34 #include <linux/spinlock.h>
35 #include <linux/workqueue.h>
36 #include <linux/kernel.h>
37 #include <linux/timer.h>
38 #include <linux/jiffies.h>
39 #include <linux/delay.h>
40 #include <linux/time.h>
42 #include <asm/io.h>
43 #include <asm/bitops.h>
44 #include <asm/kmap_types.h>
45 #include <asm/atomic.h>
47 #include "include/osd.h"
50 // Data types
52 typedef struct _TIMER {
53 struct timer_list timer;
54 PFN_TIMER_CALLBACK callback;
55 void* context;
56 }TIMER;
59 typedef struct _WAITEVENT {
60 int condition;
61 wait_queue_head_t event;
62 } WAITEVENT;
64 typedef struct _SPINLOCK {
65 spinlock_t lock;
66 unsigned long flags;
67 } SPINLOCK;
69 typedef struct _WORKQUEUE {
70 struct workqueue_struct *queue;
71 } WORKQUEUE;
73 typedef struct _WORKITEM {
74 struct work_struct work;
75 PFN_WORKITEM_CALLBACK callback;
76 void* context;
77 } WORKITEM;
81 // Global
84 void LogMsg(const char *fmt, ...)
86 va_list args;
88 va_start(args, fmt);
89 vprintk(fmt, args);
90 va_end(args);
/* Atomically set bit @bit in the word at @addr. */
void BitSet(unsigned int* addr, int bit)
{
	unsigned long *word = (unsigned long *)addr;

	set_bit(bit, word);
}
/* Return nonzero if bit @bit is set in the word at @addr. */
int BitTest(unsigned int* addr, int bit)
{
	unsigned long *word = (unsigned long *)addr;

	return test_bit(bit, word);
}
/* Atomically clear bit @bit in the word at @addr. */
void BitClear(unsigned int* addr, int bit)
{
	unsigned long *word = (unsigned long *)addr;

	clear_bit(bit, word);
}
/* Atomically clear bit @bit; returns its previous value. */
int BitTestAndClear(unsigned int* addr, int bit)
{
	unsigned long *word = (unsigned long *)addr;

	return test_and_clear_bit(bit, word);
}
/* Atomically set bit @bit; returns its previous value. */
int BitTestAndSet(unsigned int* addr, int bit)
{
	unsigned long *word = (unsigned long *)addr;

	return test_and_set_bit(bit, word);
}
119 int InterlockedIncrement(int *val)
121 return atomic_inc_return((atomic_t*)val);
124 int InterlockedDecrement(int *val)
126 return atomic_dec_return((atomic_t*)val);
129 #ifndef atomic_cmpxchg
130 #define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
131 #endif
132 int InterlockedCompareExchange(int *val, int new, int curr)
134 //return ((int)cmpxchg(((atomic_t*)val), curr, new));
135 return atomic_cmpxchg((atomic_t*)val, curr, new);
/*
 * Sleep() - delay for @usecs microseconds.
 * NOTE(review): despite the name this busy-waits via udelay() rather than
 * sleeping, so it burns CPU for the whole interval; udelay() also has an
 * architecture-dependent upper bound on safe arguments — confirm callers
 * only pass short delays.
 */
void Sleep(unsigned long usecs)
{
	udelay(usecs);
}
144 void* VirtualAllocExec(unsigned int size)
146 #ifdef __x86_64__
147 return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);
148 #else
149 return __vmalloc(size, GFP_KERNEL, __pgprot(__PAGE_KERNEL & (~_PAGE_NX)));
150 #endif
/*
 * VirtualFree() - release memory obtained from VirtualAllocExec().
 * @VirtAddr: address returned by __vmalloc(); NULL is a no-op for vfree().
 */
void VirtualFree(void* VirtAddr)
{
	/* vfree() returns void; "return vfree(x);" in a void function is a
	 * C constraint violation — just call it. */
	vfree(VirtAddr);
}
158 void* PageAlloc(unsigned int count)
160 void *p;
161 p = (void *)__get_free_pages(GFP_KERNEL, get_order(count * PAGE_SIZE));
162 if (p) memset(p, 0, count * PAGE_SIZE);
163 return p;
165 //struct page* page = alloc_page(GFP_KERNEL|__GFP_ZERO);
166 //void *p;
168 ////BUGBUG: We need to use kmap in case we are in HIMEM region
169 //p = page_address(page);
170 //if (p) memset(p, 0, PAGE_SIZE);
171 //return p;
174 void PageFree(void* page, unsigned int count)
176 free_pages((unsigned long)page, get_order(count * PAGE_SIZE));
177 /*struct page* p = virt_to_page(page);
178 __free_page(p);*/
182 void* PageMapVirtualAddress(unsigned long Pfn)
184 return kmap_atomic(pfn_to_page(Pfn), KM_IRQ0);
187 void PageUnmapVirtualAddress(void* VirtAddr)
189 kunmap_atomic(VirtAddr, KM_IRQ0);
192 void* MemAllocZeroed(unsigned int size)
194 void *p = kmalloc(size, GFP_KERNEL);
195 if (p) memset(p, 0, size);
196 return p;
199 void* MemAllocAtomic(unsigned int size)
201 return kmalloc(size, GFP_ATOMIC);
/* Release memory from MemAllocZeroed()/MemAllocAtomic(); NULL is a no-op. */
void MemFree(void* buf)
{
	kfree(buf);
}
/*
 * MemMapIO() - map a physical MMIO range into kernel virtual space.
 * @size is currently unused.
 */
void *MemMapIO(unsigned long phys, unsigned long size)
{
#if X2V_LINUX
	/* Fixed direct-map offsets used by the X2V environment. */
#ifdef __x86_64__
	return (void*)(phys + 0xFFFF83000C000000);
#else /* i386 */
	return (void*)(phys + 0xfb000000);
#endif
#else
	/* NOTE(review): ioremap_nocache(phys, size) would be the usual
	 * choice here; confirm why GetVirtualAddress() is used instead. */
	return (void*)GetVirtualAddress(phys);
#endif
}
/* Counterpart of MemMapIO(); deliberately a no-op because MemMapIO()
 * does not currently use ioremap(). */
void MemUnmapIO(void *virt)
{
	/* iounmap(virt) would go here if MemMapIO() used ioremap(). */
}
/* Full hardware memory barrier. */
void MemoryFence(void)
{
	/* "(void)" gives this a proper prototype; the original "()" form
	 * declares an unspecified parameter list in C. */
	mb();
}
232 void TimerCallback(unsigned long data)
234 TIMER* t = (TIMER*)data;
236 t->callback(t->context);
239 HANDLE TimerCreate(PFN_TIMER_CALLBACK pfnTimerCB, void* context)
241 TIMER* t = kmalloc(sizeof(TIMER), GFP_KERNEL);
242 if (!t)
244 return NULL;
247 t->callback = pfnTimerCB;
248 t->context = context;
250 init_timer(&t->timer);
251 t->timer.data = (unsigned long)t;
252 t->timer.function = TimerCallback;
254 return t;
257 void TimerStart(HANDLE hTimer, u32 expirationInUs)
259 TIMER* t = (TIMER* )hTimer;
261 t->timer.expires = jiffies + usecs_to_jiffies(expirationInUs);
262 add_timer(&t->timer);
265 int TimerStop(HANDLE hTimer)
267 TIMER* t = (TIMER* )hTimer;
269 return del_timer(&t->timer);
272 void TimerClose(HANDLE hTimer)
274 TIMER* t = (TIMER* )hTimer;
276 del_timer(&t->timer);
277 kfree(t);
280 size_t GetTickCount(void)
282 return jiffies;
285 signed long long GetTimestamp(void)
287 struct timeval t;
289 do_gettimeofday(&t);
291 return timeval_to_ns(&t);
294 HANDLE WaitEventCreate(void)
296 WAITEVENT* wait = kmalloc(sizeof(WAITEVENT), GFP_KERNEL);
297 if (!wait)
299 return NULL;
302 wait->condition = 0;
303 init_waitqueue_head(&wait->event);
304 return wait;
307 void WaitEventClose(HANDLE hWait)
309 WAITEVENT* waitEvent = (WAITEVENT* )hWait;
310 kfree(waitEvent);
313 void WaitEventSet(HANDLE hWait)
315 WAITEVENT* waitEvent = (WAITEVENT* )hWait;
316 waitEvent->condition = 1;
317 wake_up_interruptible(&waitEvent->event);
320 int WaitEventWait(HANDLE hWait)
322 int ret=0;
323 WAITEVENT* waitEvent = (WAITEVENT* )hWait;
325 ret= wait_event_interruptible(waitEvent->event,
326 waitEvent->condition);
327 waitEvent->condition = 0;
328 return ret;
331 int WaitEventWaitEx(HANDLE hWait, u32 TimeoutInMs)
333 int ret=0;
334 WAITEVENT* waitEvent = (WAITEVENT* )hWait;
336 ret= wait_event_interruptible_timeout(waitEvent->event,
337 waitEvent->condition,
338 msecs_to_jiffies(TimeoutInMs));
339 waitEvent->condition = 0;
340 return ret;
343 HANDLE SpinlockCreate(void)
345 SPINLOCK* spin = kmalloc(sizeof(SPINLOCK), GFP_KERNEL);
346 if (!spin)
348 return NULL;
350 spin_lock_init(&spin->lock);
352 return spin;
355 void SpinlockAcquire(HANDLE hSpin)
357 SPINLOCK* spin = (SPINLOCK* )hSpin;
359 spin_lock_irqsave(&spin->lock, spin->flags);
362 void SpinlockRelease(HANDLE hSpin)
364 SPINLOCK* spin = (SPINLOCK* )hSpin;
366 spin_unlock_irqrestore(&spin->lock, spin->flags);
369 void SpinlockClose(HANDLE hSpin)
371 SPINLOCK* spin = (SPINLOCK* )hSpin;
372 kfree(spin);
/* Translate a physical address through the kernel's linear mapping.
 * BUGs if the result is outside the direct-mapped region. */
void* Physical2LogicalAddr(unsigned long PhysAddr)
{
	void *va = phys_to_virt(PhysAddr);

	BUG_ON(!virt_addr_valid(va));
	return va;
}
/* Inverse of Physical2LogicalAddr(); only valid for direct-mapped
 * (lowmem) addresses — BUGs otherwise. */
unsigned long Logical2PhysicalAddr(void * LogicalAddr)
{
	BUG_ON(!virt_addr_valid(LogicalAddr));
	return virt_to_phys(LogicalAddr);
}
389 unsigned long Virtual2Physical(void * VirtAddr)
391 unsigned long pfn = vmalloc_to_pfn(VirtAddr);
393 return pfn << PAGE_SHIFT;
396 void WorkItemCallback(struct work_struct *work)
398 WORKITEM* w = (WORKITEM*)work;
400 w->callback(w->context);
402 kfree(w);
405 HANDLE WorkQueueCreate(char* name)
407 WORKQUEUE *wq = kmalloc(sizeof(WORKQUEUE), GFP_KERNEL);
408 if (!wq)
410 return NULL;
412 wq->queue = create_workqueue(name);
414 return wq;
417 void WorkQueueClose(HANDLE hWorkQueue)
419 WORKQUEUE *wq = (WORKQUEUE *)hWorkQueue;
421 destroy_workqueue(wq->queue);
423 return;
426 int WorkQueueQueueWorkItem(HANDLE hWorkQueue, PFN_WORKITEM_CALLBACK workItem, void* context)
428 WORKQUEUE *wq = (WORKQUEUE *)hWorkQueue;
430 WORKITEM* w = kmalloc(sizeof(WORKITEM), GFP_ATOMIC);
431 if (!w)
433 return -1;
436 w->callback = workItem,
437 w->context = context;
438 INIT_WORK(&w->work, WorkItemCallback);
439 return queue_work(wq->queue, &w->work);
442 void QueueWorkItem(PFN_WORKITEM_CALLBACK workItem, void* context)
444 WORKITEM* w = kmalloc(sizeof(WORKITEM), GFP_ATOMIC);
445 if (!w)
447 return;
450 w->callback = workItem,
451 w->context = context;
452 INIT_WORK(&w->work, WorkItemCallback);
453 schedule_work(&w->work);