arch/powerpc/kvm/booke.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"
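
/* Base of the 64KB block holding KVM's private copies of the host
 * exception handlers; while a guest runs, IVPR points here (see
 * kvmppc_booke_init() below). */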
unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};
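
/* The generic KVM code exports each entry above as a file under
 * debugfs, typically /sys/kernel/debug/kvm/. */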

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
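
/* Queueing only marks an exception pending in the vcpu's bitmap;
 * delivery is deferred to kvmppc_core_deliver_interrupts(), which
 * honors inter-exception priorities and the guest's MSR enable bits. */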
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* BookE does flags in ESR, so ignore those we get here */
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask;
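
	/*
	 * msr_mask selects the MSR bits that stay set when the interrupt
	 * is taken; all other bits are cleared from the guest MSR, as
	 * BookE hardware would do on exception entry.
	 */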
	switch (priority) {
	case BOOKE_IRQPRIO_PROGRAM:
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.msr & MSR_EE;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

	if (allowed) {
		vcpu->arch.srr0 = vcpu->arch.pc;
		vcpu->arch.srr1 = vcpu->arch.msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask);

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;
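
	/*
	 * Scan from the least significant set bit upward: lower bit
	 * numbers are higher delivery priorities. Stop at the first
	 * exception the guest's current MSR allows to be taken.
	 */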
	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
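
	/*
	 * exit_nr is the BookE interrupt vector number recorded by the
	 * low-level guest exit path. In the return value, bit 1
	 * (RESUME_FLAG_HOST) exits to userspace, bit 0 (RESUME_FLAG_NV)
	 * asks the dispatch loop to reload guest non-volatile registers,
	 * and the remaining bits carry an error code shifted left by 2.
	 */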
	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		vcpu->arch.dear = vcpu->arch.fault_dear;
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		vcpu->arch.esr = vcpu->arch.fault_esr;
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
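
	/*
	 * The next two cases implement the shadow TLB scheme: the guest's
	 * TLB is purely software state, and misses against the real
	 * (shadow) TLB land here so KVM can either reflect the fault to
	 * the guest or fill in the shadow entry itself.
	 */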

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
			vcpu->arch.dear = vcpu->arch.fault_dear;
			vcpu->arch.esr = vcpu->arch.fault_esr;
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}
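
	/*
	 * Debug interrupts on the cores this code supports use the
	 * critical save/restore registers, so the interrupted guest PC
	 * is recovered from CSRR0 rather than SRR0.
	 */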

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = 0;
	vcpu->arch.msr = 0;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching number so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR. */
	vcpu->arch.ivpr = 0x55550000;

	kvmppc_init_timing_stats(vcpu);

	return kvmppc_core_vcpu_setup(vcpu);
}
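
/*
 * The get/set pair below backs the KVM_GET_REGS/KVM_SET_REGS vcpu
 * ioctls. A minimal userspace sketch (vcpu_fd is a hypothetical file
 * descriptor obtained via KVM_CREATE_VCPU; error handling elided):
 *
 *	struct kvm_regs regs;
 *
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.pc = guest_entry_point;
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 */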
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}
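
/*
 * Note: the init/exit routines below are not registered via
 * module_init() here; the core-specific modules (e.g. 44x, e500) are
 * expected to call them around their own kvm_init()/kvm_exit()
 * sequences.
 */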
int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR
	 * holds only the top 16 bits of the handler base address, so the
	 * handlers must sit in a 64KB-aligned block; a 64KB allocation
	 * from the page allocator is naturally aligned and guarantees
	 * that. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}