/*
 * kvm: external module: fix pre-2.6.27 UP kernels
 * [qemu-kvm/fedora.git] / kvm / kernel / external-module-compat-comm.h
 */

/*
 * Compatibility header for building as an external module.
 */

/*
 * Avoid picking up the kernel's kvm.h in case we have a newer one.
 */

#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/time.h>
#include <asm/processor.h>
#include <linux/hrtimer.h>
#include <asm/bitops.h>

/* Override CONFIG_KVM_TRACE */
#ifdef EXT_CONFIG_KVM_TRACE
# define CONFIG_KVM_TRACE 1
#else
# undef CONFIG_KVM_TRACE
#endif

/*
 * 2.6.16 does not have GFP_NOWAIT
 */

#include <linux/gfp.h>

#ifndef GFP_NOWAIT
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
#endif

/*
 * kvm profiling support needs 2.6.20
 */

#include <linux/profile.h>

#ifndef KVM_PROFILING
#define KVM_PROFILING 1234
#define prof_on       4321
#endif

/*
 * smp_call_function_single() is not exported below 2.6.20, and has different
 * semantics below 2.6.23. The 'nonatomic' argument was removed in 2.6.27.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
				 void *info, int wait);
#undef smp_call_function_single
#define smp_call_function_single kvm_smp_call_function_single

#endif
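
/*
 * Usage sketch (illustrative only; bump_counter() is a hypothetical
 * callback, not part of this header). With the define above, callers
 * can use the modern four-argument form on any supported kernel:
 *
 *	static void bump_counter(void *info)
 *	{
 *		(*(int *)info)++;
 *	}
 *
 *	int counter = 0;
 *	smp_call_function_single(0, bump_counter, &counter, 1);
 */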

/* on_each_cpu() lost an argument in 2.6.27. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, 0, wait)

#else

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, wait)

#endif
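
/*
 * Callers should always go through the wrapper rather than calling
 * on_each_cpu() directly, so the 'retry' argument dropped in 2.6.27
 * stays hidden (sketch; flush_one_cpu() is hypothetical):
 *
 *	kvm_on_each_cpu(flush_one_cpu, NULL, 1);
 */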

/*
 * The cpu hotplug stubs are broken if !CONFIG_CPU_HOTPLUG
 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,15)
#define DEFINE_MUTEX(a) DECLARE_MUTEX(a)
#define mutex_lock_interruptible(a) down_interruptible(a)
#define mutex_unlock(a) up(a)
#define mutex_lock(a) down(a)
#define mutex_init(a) init_MUTEX(a)
/* down_trylock() returns 0 on success; mutex_trylock() returns 1 */
#define mutex_trylock(a) (!down_trylock(a))
#define mutex semaphore
#endif
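
/*
 * With these defines, module code written against the 2.6.16+ mutex
 * API also builds on semaphore-only kernels (sketch; my_lock is
 * hypothetical):
 *
 *	static DEFINE_MUTEX(my_lock);
 *
 *	mutex_lock(&my_lock);
 *	... critical section ...
 *	mutex_unlock(&my_lock);
 */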

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
#ifndef kzalloc
#define kzalloc(size,flags)			\
({						\
	void *__ret = kmalloc(size, flags);	\
	if (__ret)				\
		memset(__ret, 0, size);		\
	__ret;					\
})
#endif
#endif
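
/*
 * The statement expression above evaluates to the zeroed allocation or
 * NULL, so usage matches the modern helper (sketch; struct foo is
 * hypothetical):
 *
 *	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *	if (!f)
 *		return -ENOMEM;
 */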

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
#ifndef kmem_cache_zalloc
#define kmem_cache_zalloc(cache,flags)			\
({							\
	void *__ret = kmem_cache_alloc(cache, flags);	\
	if (__ret)					\
		memset(__ret, 0, kmem_cache_size(cache)); \
	__ret;						\
})
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

#ifndef CONFIG_HOTPLUG_CPU
#define register_cpu_notifier(nb) (0)
#endif

#endif

#include <linux/miscdevice.h>
#ifndef KVM_MINOR
#define KVM_MINOR 232
#endif

#include <linux/notifier.h>
#ifndef CPU_TASKS_FROZEN

#define CPU_TASKS_FROZEN	0x0010
#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)

#endif

#ifndef CPU_DYING
#define CPU_DYING 0x000A
#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
#endif
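
/*
 * A notifier can then treat the _FROZEN (suspend/resume) variants like
 * their plain counterparts on every kernel this header supports
 * (sketch; kvm_cpu_notify is hypothetical):
 *
 *	static int kvm_cpu_notify(struct notifier_block *nb,
 *				  unsigned long val, void *v)
 *	{
 *		switch (val & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			... bring up per-cpu state ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */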

#include <asm/system.h>

struct inode;

#include <linux/anon_inodes.h>
#define anon_inode_getfd kvm_anon_inode_getfd
int kvm_init_anon_inodes(void);
void kvm_exit_anon_inodes(void);
int anon_inode_getfd(const char *name,
		     const struct file_operations *fops,
		     void *priv, int flags);
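
/*
 * The module-local replacement behaves like the in-kernel helper: it
 * returns a new file descriptor backed by an anonymous inode (sketch;
 * kvm_vcpu_fops and vcpu are hypothetical):
 *
 *	int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
 *	if (fd < 0)
 *		return fd;
 */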

/*
 * 2.6.23 removed the cache destructor
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
# define kmem_cache_create(name, size, align, flags, ctor) \
	kmem_cache_create(name, size, align, flags, ctor, NULL)
#endif

/* HRTIMER_MODE_ABS started life with a different name */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
#define HRTIMER_MODE_ABS HRTIMER_ABS
#endif

/* div64_u64 is fairly new */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)

#define div64_u64 kvm_div64_u64

#ifdef CONFIG_64BIT

static inline uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

#else

uint64_t div64_u64(uint64_t dividend, uint64_t divisor);

#endif

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)

#ifdef RHEL_RELEASE_CODE
#if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
#define RHEL_BOOL 1
#endif
#endif

#ifndef RHEL_BOOL

typedef _Bool bool;

#endif

#endif

/*
 * PF_VCPU is a Linux 2.6.24 addition
 */

#include <linux/sched.h>

#ifndef PF_VCPU
#define PF_VCPU 0
#endif

/*
 * smp_call_function_mask() is not defined/exported below 2.6.24
 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)

int kvm_smp_call_function_mask(cpumask_t mask, void (*func)(void *info),
			       void *info, int wait);

#define smp_call_function_mask kvm_smp_call_function_mask

#endif

/* empty_zero_page isn't exported in all kernels */
#include <asm/pgtable.h>

#define empty_zero_page kvm_empty_zero_page

static char empty_zero_page[PAGE_SIZE];

/* Touch the array once so compilers don't warn when a translation
 * unit includes this header but never uses empty_zero_page. */
static inline void blahblah(void)
{
	(void)empty_zero_page[0];
}

/* __mmdrop() is not exported before 2.6.25 */

#include <linux/sched.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define mmdrop(x) do { (void)(x); } while (0)
#define mmget(x) do { (void)(x); } while (0)

#else

#define mmget(x) do { atomic_inc(x); } while (0)

#endif

/* pagefault_enable(), pagefault_disable() - 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
# define KVM_NEED_PAGEFAULT_DISABLE 1
# ifdef RHEL_RELEASE_CODE
#  if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
#   undef KVM_NEED_PAGEFAULT_DISABLE
#  endif
# endif
#endif

#ifdef KVM_NEED_PAGEFAULT_DISABLE

static inline void pagefault_disable(void)
{
	inc_preempt_count();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	dec_preempt_count();
	/*
	 * make sure we do..
	 */
	barrier();
	preempt_check_resched();
}

#endif
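
/*
 * Typical pairing (sketch): disable pagefaults around an atomic user
 * access so a fault fails fast with -EFAULT instead of sleeping:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, src, len);
 *	pagefault_enable();
 */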

#include <linux/uaccess.h>

/* vm ops ->fault() was introduced in 2.6.23. */
#include <linux/mm.h>

#ifdef KVM_MAIN
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct vm_fault {
	unsigned int flags;
	pgoff_t pgoff;
	void __user *virtual_address;
	struct page *page;
};

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static inline struct page *kvm_nopage_to_fault(
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf),
	struct vm_area_struct *vma,
	unsigned long address,
	int *type)
{
	struct vm_fault vmf;
	int ret;

	vmf.pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	vmf.virtual_address = (void __user *)address;
	ret = fault(vma, &vmf);
	if (ret)
		return NOPAGE_SIGBUS;
	*type = VM_FAULT_MINOR;
	return vmf.page;
}

static inline struct page *__kvm_vcpu_fault(struct vm_area_struct *vma,
					    unsigned long address,
					    int *type)
{
	return kvm_nopage_to_fault(kvm_vcpu_fault, vma, address, type);
}

static inline struct page *__kvm_vm_fault(struct vm_area_struct *vma,
					  unsigned long address,
					  int *type)
{
	return kvm_nopage_to_fault(kvm_vm_fault, vma, address, type);
}

#define VMA_OPS_FAULT(x) nopage
#define VMA_OPS_FAULT_FUNC(x) __##x

#else

#define VMA_OPS_FAULT(x) x
#define VMA_OPS_FAULT_FUNC(x) x

#endif
#endif
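
/*
 * The two macros let a single initializer serve both APIs: on old
 * kernels it expands to .nopage = __kvm_vcpu_fault, on new ones to
 * .fault = kvm_vcpu_fault (sketch):
 *
 *	static struct vm_operations_struct kvm_vcpu_vm_ops = {
 *		.VMA_OPS_FAULT(fault) = VMA_OPS_FAULT_FUNC(kvm_vcpu_fault),
 *	};
 */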

/* simple vfs attribute getter signature has changed to add a return code */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)       \
	static u64 x(void *v)                 \
	{                                     \
		u64 ret = 0;                  \
					      \
		__##x(v, &ret);               \
		return ret;                   \
	}

#else

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)       \
	static int x(void *v, u64 *val)       \
	{                                     \
		return __##x(v, val);         \
	}

#endif
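
/*
 * Usage sketch (vcpu_stat_get is illustrative): implement the worker
 * with the two-argument form and let the macro emit a wrapper matching
 * whichever signature DEFINE_SIMPLE_ATTRIBUTE expects on this kernel:
 *
 *	static int __vcpu_stat_get(void *data, u64 *val) { ... }
 *	MAKE_SIMPLE_ATTRIBUTE_GETTER(vcpu_stat_get)
 *	DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get,
 *				NULL, "%llu\n");
 */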

/* set_kset_name() is gone in 2.6.25 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)

#define set_kset_name(x) .name = x

#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef FASTCALL
#define FASTCALL(x) x
#define fastcall
#endif
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

static unsigned __attribute__((__used__)) kvm_tsc_khz = 2000000;

#else

#define kvm_tsc_khz tsc_khz

#endif

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#define ktime_get kvm_ktime_get

static inline ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}

#endif

/* __aligned arrived in 2.6.21 */
#ifndef __aligned
#define __aligned(x) __attribute__((__aligned__(x)))
#endif

#include <linux/mm.h>

/* The shrinker API changed in 2.6.23 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct kvm_shrinker {
	int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
	int seeks;
	struct shrinker *kshrinker;
};

static inline void register_shrinker(struct kvm_shrinker *shrinker)
{
	shrinker->kshrinker = set_shrinker(shrinker->seeks, shrinker->shrink);
}

static inline void unregister_shrinker(struct kvm_shrinker *shrinker)
{
	if (shrinker->kshrinker)
		remove_shrinker(shrinker->kshrinker);
}

#define shrinker kvm_shrinker

#endif
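
/*
 * Registration sketch (mmu_shrink is hypothetical): because of the
 * '#define shrinker kvm_shrinker' above, the same code compiles
 * against both the old set_shrinker() and the new register_shrinker()
 * interfaces:
 *
 *	static struct shrinker mmu_shrinker = {
 *		.shrink = mmu_shrink,
 *		.seeks = DEFAULT_SEEKS,
 *	};
 *
 *	register_shrinker(&mmu_shrinker);
 */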

/* clocksource */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
	/*  khz = cyc/(Million ns)
	 *  mult/2^shift  = ns/cyc
	 *  mult = ns/cyc * 2^shift
	 *  mult = 1Million/khz * 2^shift
	 *  mult = 1000000 * 2^shift / khz
	 *  mult = (1000000<<shift) / khz
	 */
	u64 tmp = ((u64)1000000) << shift_constant;

	tmp += khz/2; /* round for do_div */
	do_div(tmp, khz);

	return (u32)tmp;
}
#else
#include <linux/clocksource.h>
#endif

/* manually export hrtimer_init/start/cancel */
#include <linux/kallsyms.h>
extern void (*hrtimer_init_p)(struct hrtimer *timer, clockid_t which_clock,
			      enum hrtimer_mode mode);
extern int (*hrtimer_start_p)(struct hrtimer *timer, ktime_t tim,
			      const enum hrtimer_mode mode);
extern int (*hrtimer_cancel_p)(struct hrtimer *timer);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) && defined(CONFIG_KALLSYMS)
static inline void hrtimer_kallsyms_resolve(void)
{
	hrtimer_init_p = (void *) kallsyms_lookup_name("hrtimer_init");
	BUG_ON(!hrtimer_init_p);
	hrtimer_start_p = (void *) kallsyms_lookup_name("hrtimer_start");
	BUG_ON(!hrtimer_start_p);
	hrtimer_cancel_p = (void *) kallsyms_lookup_name("hrtimer_cancel");
	BUG_ON(!hrtimer_cancel_p);
}
#else
static inline void hrtimer_kallsyms_resolve(void)
{
	hrtimer_init_p = hrtimer_init;
	hrtimer_start_p = hrtimer_start;
	hrtimer_cancel_p = hrtimer_cancel;
}
#endif

/* handle old hrtimer API with data pointer */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static inline void hrtimer_data_pointer(struct hrtimer *timer)
{
	timer->data = (void *)timer;
}
#else
static inline void hrtimer_data_pointer(struct hrtimer *timer) {}
#endif
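
/*
 * Module init sketch: resolve the pointers once, then do all timer
 * setup through them (vcpu->timer is a hypothetical field):
 *
 *	hrtimer_kallsyms_resolve();
 *	hrtimer_init_p(&vcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 *	hrtimer_data_pointer(&vcpu->timer);
 */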

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

#define ns_to_timespec kvm_ns_to_timespec

struct timespec kvm_ns_to_timespec(const s64 nsec);

#endif

/* work_struct lost the 'data' field in 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

#define kvm_INIT_WORK(work, handler) \
	INIT_WORK(work, (void (*)(void *))handler, work)

#else

#define kvm_INIT_WORK(work, handler) INIT_WORK(work, handler)

#endif
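
/*
 * Handler sketch: write the handler against the modern signature and
 * initialize through the wrapper; on old kernels the work_struct
 * pointer is smuggled through the 'data' argument (my_work_fn and
 * ctx are hypothetical):
 *
 *	static void my_work_fn(struct work_struct *work) { ... }
 *
 *	kvm_INIT_WORK(&ctx->work, my_work_fn);
 *	schedule_work(&ctx->work);
 */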

/* cancel_work_sync() was flush_work() in 2.6.21 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

static inline int cancel_work_sync(struct work_struct *work)
{
	/*
	 * FIXME: actually cancel. How? Add own implementation of workqueues?
	 */
	return 0;
}

/* ... and it returned void before 2.6.23 */
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

#define cancel_work_sync(work) ({ cancel_work_sync(work); 0; })

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

struct pci_dev;

struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

#include <linux/relay.h>

/* relay_open() interface has changed on 2.6.21 */

struct rchan *kvm_relay_open(const char *base_filename,
			     struct dentry *parent,
			     size_t subbuf_size,
			     size_t n_subbufs,
			     struct rchan_callbacks *cb,
			     void *private_data);

#else

#define kvm_relay_open relay_open

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

static inline int get_user_pages_fast(unsigned long start, int nr_pages,
				      int write, struct page **pages)
{
	int npages;

	down_read(&current->mm->mmap_sem);
	npages = get_user_pages(current, current->mm, start, nr_pages, write,
				0, pages, NULL);
	up_read(&current->mm->mmap_sem);

	return npages;
}

#endif
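
/*
 * Caller sketch: pin one writable user page; on success the caller
 * owns a reference and must drop it with put_page() when done:
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast(addr, 1, 1, &page) != 1)
 *		return -EFAULT;
 *	...
 *	put_page(page);
 */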

/* spin_needbreak() was called something else in 2.6.24 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)

#define spin_needbreak need_lockbreak

#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

static inline void kvm_hrtimer_add_expires_ns(struct hrtimer *timer, u64 delta)
{
	timer->expires = ktime_add_ns(timer->expires, delta);
}

static inline ktime_t kvm_hrtimer_get_expires(struct hrtimer *timer)
{
	return timer->expires;
}

static inline u64 kvm_hrtimer_get_expires_ns(struct hrtimer *timer)
{
	return ktime_to_ns(timer->expires);
}

static inline void kvm_hrtimer_start_expires(struct hrtimer *timer, int mode)
{
	hrtimer_start_p(timer, timer->expires, mode);
}

static inline ktime_t kvm_hrtimer_expires_remaining(const struct hrtimer *timer)
{
	return ktime_sub(timer->expires, timer->base->get_time());
}

#else

#define kvm_hrtimer_add_expires_ns hrtimer_add_expires_ns
#define kvm_hrtimer_get_expires hrtimer_get_expires
#define kvm_hrtimer_get_expires_ns hrtimer_get_expires_ns
#define kvm_hrtimer_start_expires hrtimer_start_expires
#define kvm_hrtimer_expires_remaining hrtimer_expires_remaining

#endif
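
/*
 * Re-arm sketch: push a timer forward by one period and restart it at
 * its stored expiry, using only the kvm_ accessors so the code works
 * both before and after 2.6.28 hid the 'expires' field:
 *
 *	kvm_hrtimer_add_expires_ns(&timer, period_ns);
 *	kvm_hrtimer_start_expires(&timer, HRTIMER_MODE_ABS);
 */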

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

static inline int pci_reset_function(struct pci_dev *dev)
{
	return 0;
}

#endif

#include <linux/interrupt.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

typedef irqreturn_t (*kvm_irq_handler_t)(int, void *, struct pt_regs *);
static inline int kvm_request_irq(unsigned int a, kvm_irq_handler_t handler,
				  unsigned long c, const char *d, void *e)
{
	/* FIXME: allocate thunk, etc. */
	return -EINVAL;
}

#else

#define kvm_request_irq request_irq

#endif

/* dynamically allocated cpu masks introduced in 2.6.28 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

typedef cpumask_t cpumask_var_t[1];

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return 1;
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void cpumask_clear(cpumask_var_t mask)
{
	cpus_clear(*mask);
}

static inline void cpumask_set_cpu(int cpu, cpumask_var_t mask)
{
	cpu_set(cpu, *mask);
}

static inline int smp_call_function_many(cpumask_var_t cpus,
					 void (*func)(void *data), void *data,
					 int sync)
{
	return smp_call_function_mask(*cpus, func, data, sync);
}

static inline int cpumask_empty(cpumask_var_t mask)
{
	return cpus_empty(*mask);
}

static inline int cpumask_test_cpu(int cpu, cpumask_var_t mask)
{
	return cpu_isset(cpu, *mask);
}

static inline void cpumask_clear_cpu(int cpu, cpumask_var_t mask)
{
	cpu_clear(cpu, *mask);
}

#define cpu_online_mask (&cpu_online_map)

#endif
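
/*
 * Allocation sketch: with the one-element-array typedef above,
 * alloc_cpumask_var() always "succeeds" on old kernels because the
 * storage is part of the variable itself:
 *
 *	cpumask_var_t cpus;
 *
 *	if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(cpus);
 *	cpumask_set_cpu(raw_smp_processor_id(), cpus);
 *	...
 *	free_cpumask_var(cpus);
 */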

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)

#define IF_ANON_INODES_DOES_REFCOUNTS(x)

#else

#define IF_ANON_INODES_DOES_REFCOUNTS(x) x

#endif

/* Macro introduced only on newer kernels: */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
#define marker_synchronize_unregister() synchronize_sched()
#endif