/*
 * kvm: external module: pci_get_bus_and_slot() compatibility
 * Source: qemu-kvm (fedora.git), kvm/kernel/x86/external-module-compat.c
 * blob 71429c7e04ca4479c480bb7cdfdec4d80ca4c699
 */
2 /*
3 * smp_call_function_single() is not exported below 2.6.20.
4 */
6 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
8 #undef smp_call_function_single
10 #include <linux/spinlock.h>
11 #include <linux/smp.h>
13 struct scfs_thunk_info {
14 int cpu;
15 void (*func)(void *info);
16 void *info;
19 static void scfs_thunk(void *_thunk)
21 struct scfs_thunk_info *thunk = _thunk;
23 if (raw_smp_processor_id() == thunk->cpu)
24 thunk->func(thunk->info);
27 int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
28 void *info, int wait)
30 int r, this_cpu;
31 struct scfs_thunk_info thunk;
33 this_cpu = get_cpu();
34 WARN_ON(irqs_disabled());
35 if (cpu == this_cpu) {
36 r = 0;
37 local_irq_disable();
38 func(info);
39 local_irq_enable();
40 } else {
41 thunk.cpu = cpu;
42 thunk.func = func;
43 thunk.info = info;
44 r = smp_call_function(scfs_thunk, &thunk, 0, 1);
46 put_cpu();
47 return r;
50 #define smp_call_function_single kvm_smp_call_function_single
52 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
54 * pre 2.6.23 doesn't handle smp_call_function_single on current cpu
57 #undef smp_call_function_single
59 #include <linux/smp.h>
61 int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
62 void *info, int wait)
64 int this_cpu, r;
66 this_cpu = get_cpu();
67 WARN_ON(irqs_disabled());
68 if (cpu == this_cpu) {
69 r = 0;
70 local_irq_disable();
71 func(info);
72 local_irq_enable();
73 } else
74 r = smp_call_function_single(cpu, func, info, 0, wait);
75 put_cpu();
76 return r;
79 #define smp_call_function_single kvm_smp_call_function_single
81 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
83 /* The 'nonatomic' argument was removed in 2.6.27. */
85 #undef smp_call_function_single
87 #include <linux/smp.h>
89 int kvm_smp_call_function_single(int cpu, void (*func)(void *info),
90 void *info, int wait)
92 return smp_call_function_single(cpu, func, info, 0, wait);
95 #define smp_call_function_single kvm_smp_call_function_single
97 #endif
99 /* div64_u64 is fairly new */
100 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
102 #ifndef CONFIG_64BIT
104 /* 64bit divisor, dividend and result. dynamic precision */
105 uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
107 uint32_t high, d;
109 high = divisor >> 32;
110 if (high) {
111 unsigned int shift = fls(high);
113 d = divisor >> shift;
114 dividend >>= shift;
115 } else
116 d = divisor;
118 do_div(dividend, d);
120 return dividend;
123 #endif
125 #endif
128 * smp_call_function_mask() is not defined/exported below 2.6.24
131 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
133 #include <linux/smp.h>
135 struct kvm_call_data_struct {
136 void (*func) (void *info);
137 void *info;
138 atomic_t started;
139 atomic_t finished;
140 int wait;
143 static void kvm_ack_smp_call(void *_data)
145 struct kvm_call_data_struct *data = _data;
146 /* if wait == 0, data can be out of scope
147 * after atomic_inc(info->started)
149 void (*func) (void *info) = data->func;
150 void *info = data->info;
151 int wait = data->wait;
153 smp_mb();
154 atomic_inc(&data->started);
155 (*func)(info);
156 if (wait) {
157 smp_mb();
158 atomic_inc(&data->finished);
162 int kvm_smp_call_function_mask(cpumask_t mask,
163 void (*func) (void *info), void *info, int wait)
165 struct kvm_call_data_struct data;
166 cpumask_t allbutself;
167 int cpus;
168 int cpu;
169 int me;
171 me = get_cpu();
172 WARN_ON(irqs_disabled());
173 allbutself = cpu_online_map;
174 cpu_clear(me, allbutself);
176 cpus_and(mask, mask, allbutself);
177 cpus = cpus_weight(mask);
179 if (!cpus)
180 goto out;
182 data.func = func;
183 data.info = info;
184 atomic_set(&data.started, 0);
185 data.wait = wait;
186 if (wait)
187 atomic_set(&data.finished, 0);
189 for (cpu = first_cpu(mask); cpu != NR_CPUS; cpu = next_cpu(cpu, mask))
190 smp_call_function_single(cpu, kvm_ack_smp_call, &data, 0);
192 while (atomic_read(&data.started) != cpus) {
193 cpu_relax();
194 barrier();
197 if (!wait)
198 goto out;
200 while (atomic_read(&data.finished) != cpus) {
201 cpu_relax();
202 barrier();
204 out:
205 put_cpu();
206 return 0;
209 #endif
211 /* manually export hrtimer_init/start/cancel */
212 void (*hrtimer_init_p)(struct hrtimer *timer, clockid_t which_clock,
213 enum hrtimer_mode mode);
214 int (*hrtimer_start_p)(struct hrtimer *timer, ktime_t tim,
215 const enum hrtimer_mode mode);
216 int (*hrtimer_cancel_p)(struct hrtimer *timer);
218 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
220 static void kvm_set_normalized_timespec(struct timespec *ts, time_t sec,
221 long nsec)
223 while (nsec >= NSEC_PER_SEC) {
224 nsec -= NSEC_PER_SEC;
225 ++sec;
227 while (nsec < 0) {
228 nsec += NSEC_PER_SEC;
229 --sec;
231 ts->tv_sec = sec;
232 ts->tv_nsec = nsec;
235 struct timespec kvm_ns_to_timespec(const s64 nsec)
237 struct timespec ts;
239 if (!nsec)
240 return (struct timespec) {0, 0};
242 ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
243 if (unlikely(nsec < 0))
244 kvm_set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);
246 return ts;
249 #endif
251 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
253 #include <linux/pci.h>
255 struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
257 struct pci_dev *dev = NULL;
259 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
260 if (pci_domain_nr(dev->bus) == 0 &&
261 (dev->bus->number == bus && dev->devfn == devfn))
262 return dev;
264 return NULL;
267 #endif