Ppc: Fix for lost simultaneous interrupts
[qemu-kvm/fedora.git] kvm/kernel/external-module-compat.c

/*
 * smp_call_function_single() is not exported below 2.6.20.
 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)

#undef smp_call_function_single

#include <linux/spinlock.h>
#include <linux/smp.h>

struct scfs_thunk_info {
        int cpu;
        void (*func)(void *info);
        void *info;
};

/* Broadcast target: runs on every CPU, but only the CPU named in
 * thunk->cpu actually invokes the caller's function. */
static void scfs_thunk(void *_thunk)
{
        struct scfs_thunk_info *thunk = _thunk;

        if (raw_smp_processor_id() == thunk->cpu)
                thunk->func(thunk->info);
}

int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
                                 void *info, int nonatomic, int wait)
{
        int r, this_cpu;
        struct scfs_thunk_info thunk;

        this_cpu = get_cpu();
        WARN_ON(irqs_disabled());
        if (cpu == this_cpu) {
                /* calling ourselves: just run func() with irqs off */
                r = 0;
                local_irq_disable();
                func(info);
                local_irq_enable();
        } else {
                /* broadcast to all CPUs; scfs_thunk() filters on cpu */
                thunk.cpu = cpu;
                thunk.func = func;
                thunk.info = info;
                r = smp_call_function(scfs_thunk, &thunk, 0, 1);
        }
        put_cpu();
        return r;
}

#define smp_call_function_single kvm_smp_call_function_single

#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)

/*
 * pre-2.6.23 kernels don't handle smp_call_function_single() on the
 * current cpu
 */

#undef smp_call_function_single

#include <linux/smp.h>

int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
                                 void *info, int nonatomic, int wait)
{
        int this_cpu, r;

        this_cpu = get_cpu();
        WARN_ON(irqs_disabled());
        if (cpu == this_cpu) {
                /* handle the current-cpu case ourselves */
                r = 0;
                local_irq_disable();
                func(info);
                local_irq_enable();
        } else
                /* #undef above: this calls the kernel's own function */
                r = smp_call_function_single(cpu, func, info, nonatomic, wait);
        put_cpu();
        return r;
}

#define smp_call_function_single kvm_smp_call_function_single

#endif
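
/*
 * Illustrative sketch, not part of the original file: with either compat
 * branch above in effect, code written against the (pre-2.6.27, five
 * argument) smp_call_function_single() API is transparently redirected to
 * kvm_smp_call_function_single(). The callback and helper names below
 * are hypothetical.
 */
static void __attribute__((unused)) example_remote_tick(void *info)
{
        (void)info;             /* would run on the target CPU */
}

static inline int example_tick_cpu(int cpu)
{
        /* nonatomic=0, wait=1: return only after the callback has run */
        return smp_call_function_single(cpu, example_remote_tick, NULL, 0, 1);
}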

/* div64_64 is fairly new */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)

#ifndef CONFIG_64BIT

/* 64-bit divisor, dividend and result. dynamic precision */
uint64_t div64_64(uint64_t dividend, uint64_t divisor)
{
        uint32_t high, d;

        high = divisor >> 32;
        if (high) {
                /* divisor does not fit in 32 bits: shift both operands
                 * right until it does; precision drops with the shift */
                unsigned int shift = fls(high);

                d = divisor >> shift;
                dividend >>= shift;
        } else
                d = divisor;

        /* do_div() divides a 64-bit value by a 32-bit divisor in place */
        do_div(dividend, d);

        return dividend;
}
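
/*
 * Illustrative sketch, not part of the original file: when the divisor
 * overflows 32 bits, e.g. divisor = 1ULL << 32 (high = 1, fls(1) = 1),
 * both operands are shifted right once before do_div(), so the quotient
 * is approximate rather than exact. The helper below is hypothetical.
 */
static inline uint64_t example_bytes_per_sec(uint64_t bytes, uint64_t ns)
{
        /* elapsed ns exceeds 32 bits after ~4.3s, so div64_64() is needed */
        return div64_64(bytes * 1000000000ULL, ns);
}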

#endif

#endif

/*
 * smp_call_function_mask() is not defined/exported below 2.6.24
 */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)

#include <linux/smp.h>

struct kvm_call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static void kvm_ack_smp_call(void *_data)
{
        struct kvm_call_data_struct *data = _data;
        /* if wait == 0, data can be out of scope
         * after atomic_inc(&data->started), so copy everything out first
         */
        void (*func) (void *info) = data->func;
        void *info = data->info;
        int wait = data->wait;

        smp_mb();
        atomic_inc(&data->started);
        (*func)(info);
        if (wait) {
                smp_mb();
                atomic_inc(&data->finished);
        }
}
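
/*
 * Added note, not in the original file: started/finished form a two-phase
 * handshake. started acknowledges that a target CPU has copied
 * func/info/wait out of the caller's on-stack data, so the caller may let
 * it go out of scope; finished additionally acknowledges that the callback
 * itself has completed, and is only tracked when wait is set.
 */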

int kvm_smp_call_function_mask(cpumask_t mask,
                               void (*func) (void *info), void *info, int wait)
{
        struct kvm_call_data_struct data;
        cpumask_t allbutself;
        int cpus;
        int cpu;
        int me;

        me = get_cpu();
        WARN_ON(irqs_disabled());
        allbutself = cpu_online_map;
        cpu_clear(me, allbutself);

        /* never IPI offline CPUs or ourselves */
        cpus_and(mask, mask, allbutself);
        cpus = cpus_weight(mask);

        if (!cpus)
                goto out;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        for (cpu = first_cpu(mask); cpu != NR_CPUS; cpu = next_cpu(cpu, mask))
                smp_call_function_single(cpu, kvm_ack_smp_call, &data, 1, 0);

        /* wait until every target has copied data off our stack */
        while (atomic_read(&data.started) != cpus) {
                cpu_relax();
                barrier();
        }

        if (!wait)
                goto out;

        while (atomic_read(&data.finished) != cpus) {
                cpu_relax();
                barrier();
        }
out:
        put_cpu();
        return 0;
}
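
/*
 * Illustrative sketch, not part of the original file: invoke func on every
 * other online CPU and wait for all callbacks to finish. The helper name
 * is hypothetical; kvm_smp_call_function_mask() itself strips the calling
 * CPU from the mask.
 */
static inline void example_broadcast_call(void (*func)(void *info),
                                          void *info)
{
        kvm_smp_call_function_mask(cpu_online_map, func, info, 1);
}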

#endif