Fix typo in comment
[kqemu.git] / kqemu-linux.c
blob70fe1e7816c4fc0dfd4b23ee06b061d16c43478e
/*
 * Linux kernel wrapper for KQEMU
 *
 * Copyright (C) 2004-2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/errno.h>
22 #include <linux/fs.h>
23 #include <linux/mm.h>
24 #include <linux/proc_fs.h>
25 #include <linux/version.h>
26 #include <linux/ioctl.h>
27 #include <linux/smp_lock.h>
28 #include <linux/miscdevice.h>
29 #include <asm/atomic.h>
30 #include <asm/processor.h>
31 #include <asm/uaccess.h>
32 #include <asm/io.h>
34 #include "kqemu-kernel.h"
36 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
37 #error "Linux 2.4.19 or above needed"
38 #endif
40 /* The pfn_to_page() API appeared in 2.5.14 and changed to function during 2.6.x */
41 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(pfn_to_page)
42 #define page_to_pfn(page) ((page) - mem_map)
43 #define pfn_to_page(pfn) (mem_map + (pfn))
44 #endif
46 #ifdef PAGE_KERNEL_EXEC
47 #if defined(__i386__)
48 /* problem : i386 kernels usually don't export __PAGE_KERNEL_EXEC */
49 #undef PAGE_KERNEL_EXEC
50 #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL & ~_PAGE_NX)
51 #endif
52 #else
53 #define PAGE_KERNEL_EXEC PAGE_KERNEL
54 #endif
56 //#define DEBUG
58 #ifdef DEBUG
59 int lock_count;
60 int page_alloc_count;
61 #endif
63 /* if 0 is used, then devfs/udev is used to automatically create the
64 device */
65 int major = 0;
66 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
67 module_param(major, int, 0);
68 #else
69 MODULE_PARM(major,"i");
70 #endif
72 /* Lock the page at virtual address 'user_addr' and return its
73 physical address (page index). Return a host OS private user page
74 identifier or NULL if error */
75 struct kqemu_user_page *CDECL kqemu_lock_user_page(unsigned long *ppage_index,
76 unsigned long user_addr)
78 int ret;
79 struct page *page;
81 ret = get_user_pages(current, current->mm,
82 user_addr,
83 1, /* 1 page. */
84 1, /* 'write': intent to write. */
85 0, /* 'force': ? */
86 &page,
87 NULL);
88 if (ret != 1)
89 return NULL;
90 /* we ensure here that the page cannot be swapped out by the
91 kernel. */
92 /* XXX: This test may be incorrect for 2.6 kernels */
93 if (!page->mapping) {
94 put_page(page);
95 return NULL;
97 #ifdef DEBUG
98 lock_count++;
99 #endif
100 *ppage_index = page_to_pfn(page);
101 return (struct kqemu_user_page *)page;
104 void CDECL kqemu_unlock_user_page(struct kqemu_user_page *page1)
106 struct page *page = (struct page *)page1;
107 set_page_dirty(page);
108 put_page(page);
109 #ifdef DEBUG
110 lock_count--;
111 #endif
114 /* Allocate a new page and return its physical address (page
115 index). Return a host OS private page identifier or NULL if
116 error */
117 struct kqemu_page *CDECL kqemu_alloc_zeroed_page(unsigned long *ppage_index)
119 unsigned long vaddr;
120 struct page *page;
122 vaddr = get_zeroed_page(GFP_KERNEL);
123 if (!vaddr)
124 return NULL;
125 #ifdef DEBUG
126 page_alloc_count++;
127 #endif
128 page = virt_to_page(vaddr);
129 *ppage_index = page_to_pfn(page);
130 return (struct kqemu_page *)page;
133 void CDECL kqemu_free_page(struct kqemu_page *page1)
135 struct page *page = (struct page *)page1;
136 __free_page(page);
137 #ifdef DEBUG
138 page_alloc_count--;
139 #endif
142 /* Return a host kernel address of the physical page whose private
143 identifier is 'page1' */
144 void * CDECL kqemu_page_kaddr(struct kqemu_page *page1)
146 struct page *page = (struct page *)page1;
147 return page_address(page);
150 /* Allocate 'size' bytes of memory in host kernel address space (size
151 is a multiple of 4 KB) and return the address or NULL if error. The
152 allocated memory must be marked as executable by the host kernel
153 and must be page aligned. On i386 with PAE (but not on x86_64), it
154 must be allocated in the first 4 GB of physical memory. */
155 void * CDECL kqemu_vmalloc(unsigned int size)
157 return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);
160 void CDECL kqemu_vfree(void *ptr)
162 return vfree(ptr);
165 /* Convert a page aligned address inside a memory area allocated by
166 kqemu_vmalloc() to a physical address (page index) */
167 unsigned long CDECL kqemu_vmalloc_to_phys(const void *vaddr)
169 struct page *page;
170 page = vmalloc_to_page((void *)vaddr);
171 if (!page)
172 return -1;
173 return page_to_pfn(page);
176 /* Map a IO area in the kernel address space and return its
177 address. Return NULL if error or not implemented. This function is
178 only used if an APIC is detected on the host CPU. */
179 void * CDECL kqemu_io_map(unsigned long page_index, unsigned int size)
181 return ioremap(page_index << PAGE_SHIFT, size);
184 /* Unmap the IO area */
185 void CDECL kqemu_io_unmap(void *ptr, unsigned int size)
187 return iounmap(ptr);
190 /* return TRUE if a signal is pending (i.e. the guest must stop
191 execution) */
192 int CDECL kqemu_schedule(void)
194 if (need_resched()) {
195 schedule();
197 return signal_pending(current);
200 char log_buf[4096];
202 void CDECL kqemu_log(const char *fmt, ...)
204 va_list ap;
205 va_start(ap, fmt);
206 vsnprintf(log_buf, sizeof(log_buf), fmt, ap);
207 printk("kqemu: %s", log_buf);
208 va_end(ap);
211 /*********************************************************/
213 static struct kqemu_global_state *kqemu_gs;
215 struct kqemu_instance {
216 struct semaphore sem;
217 struct kqemu_state *state;
220 static int kqemu_open(struct inode *inode, struct file *filp)
222 struct kqemu_instance *ks;
224 ks = kmalloc(sizeof(struct kqemu_instance), GFP_KERNEL);
225 if (!ks)
226 return -ENOMEM;
227 init_MUTEX(&ks->sem);
228 ks->state = NULL;
229 filp->private_data = ks;
230 return 0;
233 static int kqemu_release(struct inode *inode, struct file *filp)
235 struct kqemu_instance *ks = filp->private_data;
237 down(&ks->sem);
238 if (ks->state) {
239 kqemu_delete(ks->state);
240 ks->state = NULL;
242 up(&ks->sem);
244 kfree(ks);
246 #ifdef DEBUG
247 printk("lock_count=%d page_alloc_count=%d\n",
248 lock_count, page_alloc_count);
249 #endif
250 return 0;
253 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
254 static long kqemu_ioctl(struct file *filp,
255 unsigned int cmd, unsigned long arg)
256 #else
257 static int kqemu_ioctl(struct inode *inode, struct file *filp,
258 unsigned int cmd, unsigned long arg)
259 #endif
261 struct kqemu_instance *ks = filp->private_data;
262 struct kqemu_state *s = ks->state;
263 long ret;
265 down(&ks->sem);
266 switch(cmd) {
267 case KQEMU_INIT:
269 struct kqemu_init d1, *d = &d1;
270 if (s) {
271 ret = -EIO;
272 break;
274 if (copy_from_user(d, (void *)arg, sizeof(*d))) {
275 ret = -EFAULT;
276 break;
278 s = kqemu_init(d, kqemu_gs);
279 if (!s) {
280 ret = -ENOMEM;
281 break;
283 ks->state = s;
284 ret = 0;
286 break;
287 case KQEMU_SET_PHYS_MEM:
289 struct kqemu_phys_mem kphys_mem;
290 if (!s) {
291 ret = -EIO;
292 break;
295 if (copy_from_user(&kphys_mem, (void *)arg, sizeof(kphys_mem))) {
296 ret = -EFAULT;
297 break;
299 ret = kqemu_set_phys_mem(s, &kphys_mem);
300 if (ret != 0) {
301 ret = -EINVAL;
304 break;
305 case KQEMU_EXEC:
307 struct kqemu_cpu_state *ctx;
308 if (!s) {
309 ret = -EIO;
310 break;
313 ctx = kqemu_get_cpu_state(s);
314 if (copy_from_user(ctx, (void *)arg, sizeof(*ctx))) {
315 ret = -EFAULT;
316 break;
318 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
319 unlock_kernel();
320 #endif
321 ret = kqemu_exec(s);
322 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
323 lock_kernel();
324 #endif
325 if (copy_to_user((void *)arg, ctx, sizeof(*ctx))) {
326 ret = -EFAULT;
327 break;
330 break;
331 case KQEMU_GET_VERSION:
333 if (put_user(KQEMU_VERSION, (int *)arg) < 0) {
334 ret = -EFAULT;
335 } else {
336 ret = 0;
339 break;
340 default:
341 ret = -ENOIOCTLCMD;
342 break;
344 up(&ks->sem);
345 return ret;
348 static struct file_operations kqemu_fops = {
349 owner: THIS_MODULE,
350 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
351 compat_ioctl: kqemu_ioctl,
352 unlocked_ioctl: kqemu_ioctl,
353 #else
354 ioctl: kqemu_ioctl,
355 #endif
356 open: kqemu_open,
357 release: kqemu_release,
360 static struct miscdevice kqemu_dev =
362 .minor = MISC_DYNAMIC_MINOR,
363 .name = "kqemu",
364 .fops = &kqemu_fops,
367 int init_module(void)
369 int ret, max_locked_pages;
370 struct sysinfo si;
372 printk("QEMU Accelerator Module version %d.%d.%d, Copyright (c) 2005-2008 Fabrice Bellard\n",
373 (KQEMU_VERSION >> 16),
374 (KQEMU_VERSION >> 8) & 0xff,
375 (KQEMU_VERSION) & 0xff);
376 si_meminfo(&si);
377 max_locked_pages = si.totalram / 2;
378 kqemu_gs = kqemu_global_init(max_locked_pages);
379 if (!kqemu_gs)
380 return -ENOMEM;
382 if (major > 0) {
383 ret = register_chrdev(major, "kqemu", &kqemu_fops);
384 if (ret < 0) {
385 kqemu_global_delete(kqemu_gs);
386 printk("kqemu: could not get major %d\n", major);
387 return ret;
389 } else {
390 ret = misc_register (&kqemu_dev);
391 if (ret < 0) {
392 kqemu_global_delete(kqemu_gs);
393 printk("kqemu: could not create device\n");
394 return ret;
397 printk("KQEMU installed, max_locked_mem=%dkB.\n",
398 max_locked_pages * 4);
399 return 0;
402 void cleanup_module(void)
404 if (major > 0)
405 unregister_chrdev(major, "kqemu");
406 else
407 misc_deregister (&kqemu_dev);
408 if (kqemu_gs) {
409 kqemu_global_delete(kqemu_gs);
410 kqemu_gs = NULL;
414 MODULE_LICENSE("GPL");