/*
 * Compatibility header for building as an external module.
 */

/*
 * Avoid picking up the kernel's kvm.h in case we have a newer one.
 */

#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/time.h>
#include <asm/processor.h>
#include <linux/hrtimer.h>
#include <asm/bitops.h>
/* Override CONFIG_KVM_TRACE */
#ifdef EXT_CONFIG_KVM_TRACE
# define CONFIG_KVM_TRACE 1
#else
# undef CONFIG_KVM_TRACE
#endif
/*
 * 2.6.16 does not have GFP_NOWAIT
 */

#include <linux/gfp.h>

#ifndef GFP_NOWAIT
#define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
#endif
/*
 * kvm profiling support needs 2.6.20
 */

#include <linux/profile.h>

#ifndef KVM_PROFILING
#define KVM_PROFILING 1234
#define prof_on       4321
#endif
/*
 * smp_call_function_single() is not exported below 2.6.20, and has different
 * semantics below 2.6.23.  The 'nonatomic' argument was removed in 2.6.27.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
                                 void *info, int wait);

#define smp_call_function_single kvm_smp_call_function_single

#endif
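/*
 * Illustrative usage (hypothetical caller, not part of the KVM sources):
 * with the wrapper above, code uses the modern four-argument
 * smp_call_function_single() on any kernel and lets the compat layer
 * adapt it to the older semantics.
 */
#if 0
static void example_read_cpu_id(void *info)
{
        *(int *)info = raw_smp_processor_id();
}

static int example_query_cpu(int cpu)
{
        int id = -1;

        /* resolves to kvm_smp_call_function_single() before 2.6.27 */
        smp_call_function_single(cpu, example_read_cpu_id, &id, 1);
        return id;
}
#endif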
/* on_each_cpu() lost an argument in 2.6.27. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, 0, wait)

#else

#define kvm_on_each_cpu(func, info, wait) on_each_cpu(func, info, wait)

#endif
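/*
 * Illustrative usage (hypothetical): kvm_on_each_cpu() hides the extra
 * 'retry' argument that on_each_cpu() took before 2.6.27.
 */
#if 0
static void example_per_cpu_work(void *unused)
{
        /* per-cpu work goes here */
}

static void example_run_everywhere(void)
{
        kvm_on_each_cpu(example_per_cpu_work, NULL, 1); /* wait = 1 */
}
#endif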
/*
 * The cpu hotplug stubs are broken if !CONFIG_CPU_HOTPLUG
 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,15)
#define DEFINE_MUTEX(a) DECLARE_MUTEX(a)
#define mutex_lock_interruptible(a) down_interruptible(a)
#define mutex_unlock(a) up(a)
#define mutex_lock(a) down(a)
#define mutex_init(a) init_MUTEX(a)
/* down_trylock() returns 0 on success, mutex_trylock() returns 1 */
#define mutex_trylock(a) (!down_trylock(a))
#define mutex semaphore
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
#ifndef kzalloc
#define kzalloc(size,flags)                     \
({                                              \
        void *__ret = kmalloc(size, flags);     \
        if (__ret)                              \
                memset(__ret, 0, size);         \
        __ret;                                  \
})
#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
#ifndef kmem_cache_zalloc
#define kmem_cache_zalloc(cache,flags)                    \
({                                                        \
        void *__ret = kmem_cache_alloc(cache, flags);     \
        if (__ret)                                        \
                memset(__ret, 0, kmem_cache_size(cache)); \
        __ret;                                            \
})
#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

#ifndef CONFIG_HOTPLUG_CPU
#define register_cpu_notifier(nb) (0)
#endif

#endif

#include <linux/miscdevice.h>
#ifndef KVM_MINOR
#define KVM_MINOR 232
#endif
#include <linux/notifier.h>
#ifndef CPU_TASKS_FROZEN

#define CPU_TASKS_FROZEN        0x0010
#define CPU_ONLINE_FROZEN       (CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN   (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN  (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN  (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN         (CPU_DEAD | CPU_TASKS_FROZEN)

#endif

#ifndef CPU_DYING
#define CPU_DYING        0x000A
#define CPU_DYING_FROZEN (CPU_DYING | CPU_TASKS_FROZEN)
#endif
#include <asm/system.h>

struct inode;
#include <linux/anon_inodes.h>
#define anon_inode_getfd kvm_anon_inode_getfd
int kvm_init_anon_inodes(void);
void kvm_exit_anon_inodes(void);
int anon_inode_getfd(const char *name,
                     const struct file_operations *fops,
                     void *priv, int flags);
/*
 * 2.6.23 removed the cache destructor
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
# define kmem_cache_create(name, size, align, flags, ctor) \
        kmem_cache_create(name, size, align, flags, ctor, NULL)
#endif
/* HRTIMER_MODE_ABS started life with a different name */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
#define HRTIMER_MODE_ABS HRTIMER_ABS
#endif
/* div64_u64 is fairly new */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)

#define div64_u64 kvm_div64_u64

#ifdef CONFIG_64BIT

static inline uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
        return dividend / divisor;
}

#else

uint64_t div64_u64(uint64_t dividend, uint64_t divisor);

#endif

#endif
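/*
 * Illustrative usage (hypothetical helper): a plain '/' on two u64
 * operands would pull in libgcc's __udivdi3 on 32-bit kernels, which is
 * not available in-kernel, hence the helper above.
 */
#if 0
static inline u64 example_cycles_to_ns(u64 cycles, u64 khz)
{
        /* ns = cycles * 10^6 / khz; kvm_div64_u64() before 2.6.26 */
        return div64_u64(cycles * 1000000ULL, khz);
}
#endif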
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)

#ifdef RHEL_RELEASE_CODE
#if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,2)
#define RHEL_BOOL 1
#endif
#endif

#ifndef RHEL_BOOL

typedef _Bool bool;

#endif

#endif
/*
 * PF_VCPU is a Linux 2.6.24 addition
 */

#include <linux/sched.h>

#ifndef PF_VCPU
#define PF_VCPU 0
#endif
/*
 * smp_call_function_mask() is not defined/exported below 2.6.24
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)

int kvm_smp_call_function_mask(cpumask_t mask, void (*func)(void *info),
                               void *info, int wait);

#define smp_call_function_mask kvm_smp_call_function_mask

#endif
/* empty_zero_page isn't exported in all kernels */
#include <asm/pgtable.h>

#define empty_zero_page kvm_empty_zero_page

static char empty_zero_page[PAGE_SIZE];

/* reference the private zero page so gcc does not warn that it is unused */
static inline void blahblah(void)
{
        (void)empty_zero_page[0];
}
/* __mmdrop() is not exported before 2.6.25 */
#include <linux/sched.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define mmdrop(x) do { (void)(x); } while (0)
#define mmget(x) do { (void)(x); } while (0)

#else

#define mmget(x) do { atomic_inc(x); } while (0)

#endif
/* pagefault_enable(), pagefault_disable() - 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

static inline void pagefault_disable(void)
{
        inc_preempt_count();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
         */
        barrier();
}

static inline void pagefault_enable(void)
{
        /*
         * make sure to issue those last loads/stores before enabling
         * the pagefault handler again.
         */
        barrier();
        dec_preempt_count();
        /*
         * make sure the decrement is visible before we check for
         * rescheduling.
         */
        barrier();
        preempt_check_resched();
}

#endif
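/*
 * Illustrative usage (hypothetical helper): the pair brackets a user
 * access that must not sleep, as done around __copy_from_user_inatomic()
 * in mainline.  Returns the number of bytes NOT copied.
 */
#if 0
static inline unsigned long example_peek_user(void *dst,
                                              const void __user *src,
                                              unsigned long len)
{
        unsigned long left;

        pagefault_disable();
        left = __copy_from_user_inatomic(dst, src, len);
        pagefault_enable();
        return left;
}
#endif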
/* vm ops ->fault() was introduced in 2.6.23. */
#include <linux/mm.h>

#ifdef KVM_MAIN
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct vm_fault {
        unsigned int flags;
        pgoff_t pgoff;
        void __user *virtual_address;
        struct page *page;
};

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);

static inline struct page *kvm_nopage_to_fault(
        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf),
        struct vm_area_struct *vma,
        unsigned long address,
        int *type)
{
        struct vm_fault vmf;
        int ret;

        vmf.pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
        vmf.virtual_address = (void __user *)address;
        ret = fault(vma, &vmf);
        if (ret)
                return NOPAGE_SIGBUS;
        *type = VM_FAULT_MINOR;
        return vmf.page;
}

static inline struct page *__kvm_vcpu_fault(struct vm_area_struct *vma,
                                            unsigned long address,
                                            int *type)
{
        return kvm_nopage_to_fault(kvm_vcpu_fault, vma, address, type);
}

static inline struct page *__kvm_vm_fault(struct vm_area_struct *vma,
                                          unsigned long address,
                                          int *type)
{
        return kvm_nopage_to_fault(kvm_vm_fault, vma, address, type);
}

#define VMA_OPS_FAULT(x) nopage
#define VMA_OPS_FAULT_FUNC(x) __##x

#else

#define VMA_OPS_FAULT(x) x
#define VMA_OPS_FAULT_FUNC(x) x

#endif
#endif
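/*
 * Illustrative usage, modeled on kvm_main.c: the macro pair lets one
 * initializer populate either the old ->nopage or the new ->fault
 * callback from the same source line.
 */
#if 0
static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .VMA_OPS_FAULT(fault) = VMA_OPS_FAULT_FUNC(kvm_vcpu_fault),
};
#endif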
/* simple vfs attribute getter signature has changed to add a return code */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)         \
        static u64 x(void *v)                   \
        {                                       \
                u64 ret = 0;                    \
                                                \
                __##x(v, &ret);                 \
                return ret;                     \
        }

#else

#define MAKE_SIMPLE_ATTRIBUTE_GETTER(x)         \
        static int x(void *v, u64 *val)         \
        {                                       \
                return __##x(v, val);           \
        }

#endif
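/*
 * Illustrative usage (hypothetical names): write one two-argument
 * __foo() worker and let the macro emit foo() with whichever signature
 * the running kernel's simple attribute code expects.
 */
#if 0
static int __example_stat_get(void *data, u64 *val)
{
        *val = *(u32 *)data;
        return 0;
}
MAKE_SIMPLE_ATTRIBUTE_GETTER(example_stat_get)
#endif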
/* set_kset_name() is gone in 2.6.25 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)

#define set_kset_name(x) .name = x

#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef FASTCALL
#define FASTCALL(x) x
#define fastcall
#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

/* tsc_khz is unavailable here; fall back to an assumed 2 GHz TSC */
static unsigned __attribute__((__used__)) kvm_tsc_khz = 2000000;

#else

#define kvm_tsc_khz tsc_khz

#endif
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#define ktime_get kvm_ktime_get

static inline ktime_t ktime_get(void)
{
        struct timespec now;

        ktime_get_ts(&now);

        return timespec_to_ktime(now);
}

#endif
/* __aligned arrived in 2.6.21 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
#define __aligned(x) __attribute__((__aligned__(x)))
#endif
#include <linux/mm.h>

/* The shrinker API changed in 2.6.23 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

struct kvm_shrinker {
        int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
        int seeks;
        struct shrinker *kshrinker;
};

static inline void register_shrinker(struct kvm_shrinker *shrinker)
{
        shrinker->kshrinker = set_shrinker(shrinker->seeks, shrinker->shrink);
}

static inline void unregister_shrinker(struct kvm_shrinker *shrinker)
{
        if (shrinker->kshrinker)
                remove_shrinker(shrinker->kshrinker);
}

#define shrinker kvm_shrinker

#endif
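/*
 * Illustrative usage (hypothetical callback): below 2.6.23, 'struct
 * shrinker' and register_shrinker()/unregister_shrinker() transparently
 * map onto the old set_shrinker()/remove_shrinker() interface.
 */
#if 0
static int example_shrink(int nr_to_scan, gfp_t gfp_mask)
{
        return 0; /* nothing freeable in this sketch */
}

static struct shrinker example_shrinker = {
        .shrink = example_shrink,
        .seeks = DEFAULT_SEEKS,
};

static void example_register(void)
{
        register_shrinker(&example_shrinker);
}
#endif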
/* clocksource */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
        /* khz = cyc/(Million ns)
         * mult/2^shift = ns/cyc
         * mult = ns/cyc * 2^shift
         * mult = 1Million/khz * 2^shift
         * mult = 1000000 * 2^shift / khz
         * mult = (1000000<<shift) / khz
         */
        u64 tmp = ((u64)1000000) << shift_constant;

        tmp += khz/2; /* round for do_div */
        do_div(tmp, khz);

        return (u32)tmp;
}
#else
#include <linux/clocksource.h>
#endif
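/*
 * Worked example (illustrative figures): a 1 MHz counter has khz = 1000,
 * so with shift_constant = 22, mult = (1000000 << 22) / 1000
 * = 1000 << 22, i.e. 1000 ns per cycle in fixed-point.
 */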
/* manually export hrtimer_init/start/cancel */
#include <linux/kallsyms.h>
extern void (*hrtimer_init_p)(struct hrtimer *timer, clockid_t which_clock,
                              enum hrtimer_mode mode);
extern int (*hrtimer_start_p)(struct hrtimer *timer, ktime_t tim,
                              const enum hrtimer_mode mode);
extern int (*hrtimer_cancel_p)(struct hrtimer *timer);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) && defined(CONFIG_KALLSYMS)
static inline void hrtimer_kallsyms_resolve(void)
{
        hrtimer_init_p = (void *) kallsyms_lookup_name("hrtimer_init");
        BUG_ON(!hrtimer_init_p);
        hrtimer_start_p = (void *) kallsyms_lookup_name("hrtimer_start");
        BUG_ON(!hrtimer_start_p);
        hrtimer_cancel_p = (void *) kallsyms_lookup_name("hrtimer_cancel");
        BUG_ON(!hrtimer_cancel_p);
}
#else
static inline void hrtimer_kallsyms_resolve(void)
{
        hrtimer_init_p = hrtimer_init;
        hrtimer_start_p = hrtimer_start;
        hrtimer_cancel_p = hrtimer_cancel;
}
#endif
/* handle old hrtimer API with data pointer */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static inline void hrtimer_data_pointer(struct hrtimer *timer)
{
        timer->data = (void *)timer;
}
#else
static inline void hrtimer_data_pointer(struct hrtimer *timer) {}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

#define ns_to_timespec kvm_ns_to_timespec

struct timespec kvm_ns_to_timespec(const s64 nsec);

#endif
/* work_struct lost the 'data' field in 2.6.20 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

#define kvm_INIT_WORK(work, handler) \
        INIT_WORK(work, (void (*)(void *))handler, work)

#else

#define kvm_INIT_WORK(work, handler) INIT_WORK(work, handler)

#endif
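/*
 * Illustrative usage (hypothetical handler): handlers written against
 * the modern prototype work on both sides of the 2.6.20 change, since
 * the old INIT_WORK() is passed the work item itself as 'data'.
 */
#if 0
static void example_work_fn(struct work_struct *work)
{
        /* deferred work goes here */
}

static struct work_struct example_work;

static void example_setup(void)
{
        kvm_INIT_WORK(&example_work, example_work_fn);
}
#endif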
/* cancel_work_sync() was flush_work() in 2.6.21 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)

static inline int cancel_work_sync(struct work_struct *work)
{
        /*
         * FIXME: actually cancel.  How?  Add own implementation of workqueues?
         */
        return 0;
}

/* ... and it returned void before 2.6.23 */
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

#define cancel_work_sync(work) ({ cancel_work_sync(work); 0; })

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

struct pci_dev;

struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn);

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)

#include <linux/relay.h>

/* relay_open() interface changed in 2.6.21 */

struct rchan *kvm_relay_open(const char *base_filename,
                             struct dentry *parent,
                             size_t subbuf_size,
                             size_t n_subbufs,
                             struct rchan_callbacks *cb,
                             void *private_data);

#else

#define kvm_relay_open relay_open

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)

static inline int get_user_pages_fast(unsigned long start, int nr_pages,
                                      int write, struct page **pages)
{
        int npages;

        down_read(&current->mm->mmap_sem);
        npages = get_user_pages(current, current->mm, start, nr_pages, write,
                                0, pages, NULL);
        up_read(&current->mm->mmap_sem);

        return npages;
}

#endif
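/*
 * Illustrative usage (hypothetical helper): pin a single user page for
 * writing; pre-2.6.27 kernels take the mmap_sem fallback above.
 */
#if 0
static int example_pin_page(unsigned long uaddr, struct page **page)
{
        int npages = get_user_pages_fast(uaddr & PAGE_MASK, 1, 1, page);

        return npages == 1 ? 0 : -EFAULT;
}
#endif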
/* spin_needbreak() was called something else in 2.6.24 */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)

#define spin_needbreak need_lockbreak

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

static inline void kvm_hrtimer_add_expires_ns(struct hrtimer *timer, u64 delta)
{
        timer->expires = ktime_add_ns(timer->expires, delta);
}

static inline ktime_t kvm_hrtimer_get_expires(struct hrtimer *timer)
{
        return timer->expires;
}

static inline u64 kvm_hrtimer_get_expires_ns(struct hrtimer *timer)
{
        return ktime_to_ns(timer->expires);
}

static inline void kvm_hrtimer_start_expires(struct hrtimer *timer, int mode)
{
        hrtimer_start_p(timer, timer->expires, mode);
}

#else

#define kvm_hrtimer_add_expires_ns hrtimer_add_expires_ns
#define kvm_hrtimer_get_expires hrtimer_get_expires
#define kvm_hrtimer_get_expires_ns hrtimer_get_expires_ns
#define kvm_hrtimer_start_expires hrtimer_start_expires

#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

static inline int pci_reset_function(struct pci_dev *dev)
{
        return 0;
}

#endif
#include <linux/interrupt.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

typedef irqreturn_t (*kvm_irq_handler_t)(int, void *, struct pt_regs *);
static inline int kvm_request_irq(unsigned int a, kvm_irq_handler_t handler,
                                  unsigned long c, const char *d, void *e)
{
        /* FIXME: allocate thunk, etc. */
        return -EINVAL;
}

#else

#define kvm_request_irq request_irq

#endif
/* dynamically allocated cpu masks introduced in 2.6.28 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)

typedef cpumask_t cpumask_var_t[1];

static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
        return 1;
}

static inline void free_cpumask_var(cpumask_var_t mask)
{
}

static inline void cpumask_clear(cpumask_var_t mask)
{
        cpus_clear(*mask);
}

static inline void cpumask_set_cpu(int cpu, cpumask_var_t mask)
{
        cpu_set(cpu, *mask);
}

static inline int smp_call_function_many(cpumask_var_t cpus,
                                         void (*func)(void *data), void *data,
                                         int sync)
{
        return smp_call_function_mask(*cpus, func, data, sync);
}

static inline int cpumask_empty(cpumask_var_t mask)
{
        return cpus_empty(*mask);
}

static inline int cpumask_test_cpu(int cpu, cpumask_var_t mask)
{
        return cpu_isset(cpu, *mask);
}

static inline void cpumask_clear_cpu(int cpu, cpumask_var_t mask)
{
        cpu_clear(cpu, *mask);
}

#define cpu_online_mask (&cpu_online_map)

#endif
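/*
 * Illustrative usage (hypothetical): code written against the 2.6.28
 * cpumask_var_t API runs unchanged on older kernels through the stubs
 * above.
 */
#if 0
static void example_kick_other_cpus(void (*fn)(void *), void *data)
{
        cpumask_var_t cpus;
        int cpu;

        if (!alloc_cpumask_var(&cpus, GFP_KERNEL))
                return;
        cpumask_clear(cpus);
        for_each_online_cpu(cpu)
                cpumask_set_cpu(cpu, cpus);
        preempt_disable(); /* smp_call_function_many() requires this */
        cpumask_clear_cpu(smp_processor_id(), cpus); /* others only */
        if (!cpumask_empty(cpus))
                smp_call_function_many(cpus, fn, data, 1);
        preempt_enable();
        free_cpumask_var(cpus);
}
#endif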
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)

#define IF_ANON_INODES_DOES_REFCOUNTS(x)

#else

#define IF_ANON_INODES_DOES_REFCOUNTS(x) x

#endif
/* marker_synchronize_unregister() is only available on newer kernels */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
#define marker_synchronize_unregister() synchronize_sched()
#endif