KVM: modify memslots layout in struct kvm
arch/ia64/kvm/kvm-ia64.c

/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
 *
 * Copyright (C) 2007, Intel Corporation.
 *	Xiantao Zhang  (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/pal.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/tlb.h>
#include <asm/elf.h>
#include <asm/sn/addrs.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/shub_mmr.h>

#include "misc.h"
#include "vti.h"
#include "iodev.h"
#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};
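
/*
 * Read the host time source that backs the guest's ITC. On SN2 the ITC
 * is not synchronized between nodes, so the chipset RTC is read instead
 * (see the SN2 patching code further down).
 */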
static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (vcpu->kvm->arch.is_sn2)
		return rtc_time();
	else
#endif
		return ia64_getreg(_IA64_REG_AR_ITC);
}

static void kvm_flush_icache(unsigned long start, unsigned long len)
{
	int l;

	for (l = 0; l < (len + 32); l += 32)
		ia64_fc((void *)(start + l));

	ia64_sync_i();
	ia64_srlz_i();
}
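
/*
 * Purge the entire local TLB by stepping through the purge loop
 * described by the per-cpu ptce parameters (base/count/stride),
 * with interrupts off so the loop is not disturbed.
 */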
static void kvm_flush_tlb_all(void)
{
	unsigned long i, j, count0, count1, stride0, stride1, addr;
	long flags;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();		/* srlz.i implies srlz.d */
}

long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
{
	struct ia64_pal_retval iprv;

	PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
			(u64)opt_handler);

	return iprv.status;
}

static DEFINE_SPINLOCK(vp_lock);
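
/*
 * Per-cpu VT enable: pin the VMM area with a translation register pair,
 * then ask PAL to initialize the VP environment. The first CPU through
 * here records the VSA base that PAL returns; later CPUs reuse it.
 */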
int kvm_arch_hardware_enable(void *garbage)
{
	long  status;
	long  tmp_base;
	unsigned long pte;
	unsigned long saved_psr;
	int slot;

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return -EINVAL;

	spin_lock(&vp_lock);
	status = ia64_pal_vp_init_env(kvm_vsa_base ?
				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
	if (status != 0) {
		spin_unlock(&vp_lock);
		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
		return -EINVAL;
	}

	if (!kvm_vsa_base) {
		kvm_vsa_base = tmp_base;
		printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
	}
	spin_unlock(&vp_lock);
	ia64_ptr_entry(0x3, slot);

	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
	long status;
	int slot;
	unsigned long pte;
	unsigned long saved_psr;
	unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
				PAGE_KERNEL));

	local_irq_save(saved_psr);
	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	local_irq_restore(saved_psr);
	if (slot < 0)
		return;

	status = ia64_pal_vp_exit_env(host_iva);
	if (status)
		printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
				status);
	ia64_ptr_entry(0x3, slot);
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IRQ_INJECT_STATUS:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_IOMMU:
		r = iommu_found();
		break;
	default:
		r = 0;
	}
	return r;
}

static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
	kvm_run->hw.hardware_exit_reason = 1;
	return 0;
}

static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct kvm_mmio_req *p;
	struct kvm_io_device *mmio_dev;
	int r;

	p = kvm_get_vcpu_ioreq(vcpu);

	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
		goto mmio;
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
	vcpu->mmio_size = kvm_run->mmio.len = p->size;
	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

	if (vcpu->mmio_is_write)
		memcpy(vcpu->mmio_data, &p->data, p->size);
	memcpy(kvm_run->mmio.data, &p->data, p->size);
	kvm_run->exit_reason = KVM_EXIT_MMIO;
	return 0;
mmio:
	if (p->dir)
		r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
				    p->size, &p->data);
	else
		r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
				     p->size, &p->data);
	if (r)
		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
	p->state = STATE_IORESP_READY;

	return 1;
}

static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_PAL_CALL)
		return kvm_pal_emul(vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 2;
		return 0;
	}
}

static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p;

	p = kvm_get_exit_data(vcpu);

	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
		kvm_sal_emul(vcpu);
		return 1;
	} else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = 3;
		return 0;
	}
}

static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (!test_and_set_bit(vector, &vpd->irr[0])) {
		vcpu->arch.irq_new_pending = 1;
		kvm_vcpu_kick(vcpu);
		return 1;
	}
	return 0;
}

/*
 *  offset: address offset to IPI space.
 *  value:  deliver value.
 */
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
			uint64_t vector)
{
	switch (dm) {
	case SAPIC_FIXED:
		break;
	case SAPIC_NMI:
		vector = 2;
		break;
	case SAPIC_EXTINT:
		vector = 0;
		break;
	case SAPIC_INIT:
	case SAPIC_PMI:
	default:
		printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
		return;
	}
	__apic_accept_irq(vcpu, vector);
}

static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
			unsigned long eid)
{
	union ia64_lid lid;
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		lid.val = VCPU_LID(vcpu);
		if (lid.id == id && lid.eid == eid)
			return vcpu;
	}

	return NULL;
}

static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm_vcpu *target_vcpu;
	struct kvm_pt_regs *regs;
	union ia64_ipi_a addr = p->u.ipi_data.addr;
	union ia64_ipi_d data = p->u.ipi_data.data;

	target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
	if (!target_vcpu)
		return handle_vm_error(vcpu, kvm_run);

	if (!target_vcpu->arch.launched) {
		regs = vcpu_regs(target_vcpu);

		regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
		regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

		target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		if (waitqueue_active(&target_vcpu->wq))
			wake_up_interruptible(&target_vcpu->wq);
	} else {
		vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
		if (target_vcpu != vcpu)
			kvm_vcpu_kick(target_vcpu);
	}

	return 1;
}

struct call_data {
	struct kvm_ptc_g ptc_g_data;
	struct kvm_vcpu *vcpu;
};
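
/*
 * Runs on a remote cpu via smp_call_function_single(): queue the global
 * purge request on the target vcpu, falling back to a full TLB flush
 * once the per-vcpu ptc.g buffer overflows.
 */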
static void vcpu_global_purge(void *info)
{
	struct call_data *p = (struct call_data *)info;
	struct kvm_vcpu *vcpu = p->vcpu;

	if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
		return;

	set_bit(KVM_REQ_PTC_G, &vcpu->requests);
	if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
		vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
							p->ptc_g_data;
	} else {
		clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
		vcpu->arch.ptc_g_count = 0;
		set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
	}
}

static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
	struct kvm *kvm = vcpu->kvm;
	struct call_data call_data;
	int i;
	struct kvm_vcpu *vcpui;

	call_data.ptc_g_data = p->u.ptc_g_data;

	kvm_for_each_vcpu(i, vcpui, kvm) {
		if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
				vcpu == vcpui)
			continue;

		if (waitqueue_active(&vcpui->wq))
			wake_up_interruptible(&vcpui->wq);

		if (vcpui->cpu != -1) {
			call_data.vcpu = vcpui;
			smp_call_function_single(vcpui->cpu,
					vcpu_global_purge, &call_data, 1);
		} else
			printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
	}

	return 1;
}

static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	return 1;
}
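
/*
 * On SN2 the VMM reads the chipset RTC instead of the ITC, so map the
 * RTC MMIO page uncached right above the VMM area.
 */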
static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
{
	unsigned long pte, rtc_phys_addr, map_addr;
	int slot;

	map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
	rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
	pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
	slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
	vcpu->arch.sn_rtc_tr_slot = slot;
	if (slot < 0) {
		printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
		slot = 0;
	}
	return slot;
}
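
/*
 * Emulate the guest's hlt: program an hrtimer for the time remaining
 * until the guest's next timer tick (itm), then block the vcpu until
 * it is woken by an interrupt, the timer, or an unhalt request.
 */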
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
	ktime_t kt;
	long itc_diff;
	unsigned long vcpu_now_itc;
	unsigned long expires;
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
	unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (irqchip_in_kernel(vcpu->kvm)) {

		vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;

		if (time_after(vcpu_now_itc, vpd->itm)) {
			vcpu->arch.timer_check = 1;
			return 1;
		}
		itc_diff = vpd->itm - vcpu_now_itc;
		if (itc_diff < 0)
			itc_diff = -itc_diff;

		expires = div64_u64(itc_diff, cyc_per_usec);
		kt = ktime_set(0, 1000 * expires);

		vcpu->arch.ht_active = 1;
		hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
		kvm_vcpu_block(vcpu);
		hrtimer_cancel(p_ht);
		vcpu->arch.ht_active = 0;

		if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
				kvm_cpu_has_pending_timer(vcpu))
			if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
				vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
			return -EINTR;
		return 1;
	} else {
		printk(KERN_ERR"kvm: Unsupported userspace halt!");
		return 0;
	}
}

static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	return 1;
}

static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run)
{
	printk("VMM: %s", vcpu->arch.log_buf);
	return 1;
}

static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
		struct kvm_run *kvm_run) = {
	[EXIT_REASON_VM_PANIC]              = handle_vm_error,
	[EXIT_REASON_MMIO_INSTRUCTION]      = handle_mmio,
	[EXIT_REASON_PAL_CALL]              = handle_pal_call,
	[EXIT_REASON_SAL_CALL]              = handle_sal_call,
	[EXIT_REASON_SWITCH_RR6]            = handle_switch_rr6,
	[EXIT_REASON_VM_DESTROY]            = handle_vm_shutdown,
	[EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
	[EXIT_REASON_IPI]                   = handle_ipi,
	[EXIT_REASON_PTC_G]                 = handle_global_purge,
	[EXIT_REASON_DEBUG]                 = handle_vcpu_debug,
};

static const int kvm_vti_max_exit_handlers =
		sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);

static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
{
	struct exit_ctl_data *p_exit_data;

	p_exit_data = kvm_get_exit_data(vcpu);
	return p_exit_data->exit_reason;
}

/*
 * The guest has exited.  See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	u32 exit_reason = kvm_get_exit_reason(vcpu);
	vcpu->arch.last_exit = exit_reason;

	if (exit_reason < kvm_vti_max_exit_handlers
			&& kvm_vti_exit_handlers[exit_reason])
		return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
	else {
		kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
		kvm_run->hw.hardware_exit_reason = exit_reason;
	}
	return 0;
}

static inline void vti_set_rr6(unsigned long rr6)
{
	ia64_set_rr(RR6, rr6);
	ia64_srlz_i();
}

static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
{
	unsigned long pte;
	struct kvm *kvm = vcpu->kvm;
	int r;

	/* Insert a pair of TRs (itr+dtr) to map the VMM itself. */
	pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vmm_tr_slot = r;

	/* Insert a pair of TRs to map the VM's data area. */
	pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
	r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
				pte, KVM_VM_DATA_SHIFT);
	if (r < 0)
		goto out;
	vcpu->arch.vm_tr_slot = r;

#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2) {
		r = kvm_sn2_setup_mappings(vcpu);
		if (r < 0)
			goto out;
	}
#endif

	r = 0;
out:
	return r;
}

static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
	ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
	if (kvm->arch.is_sn2)
		ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
#endif
}

static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
{
	unsigned long psr;
	int r;
	int cpu = smp_processor_id();

	if (vcpu->arch.last_run_cpu != cpu ||
			per_cpu(last_vcpu, cpu) != vcpu) {
		per_cpu(last_vcpu, cpu) = vcpu;
		vcpu->arch.last_run_cpu = cpu;
		kvm_flush_tlb_all();
	}

	vcpu->arch.host_rr6 = ia64_get_rr(RR6);
	vti_set_rr6(vcpu->arch.vmm_rr);
	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	return r;
}

static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
{
	kvm_purge_vmm_mapping(vcpu);
	vti_set_rr6(vcpu->arch.host_rr6);
}
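
/*
 * The vcpu run loop: with preemption and interrupts off, switch via the
 * trampoline into the VMM/guest context, then handle the exit when
 * control returns. Loops in kernel mode until the exit handler asks for
 * userspace assistance (r <= 0) or a signal is pending.
 */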
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	union context *host_ctx, *guest_ctx;
	int r;

	/*
	 * down_read() may sleep and return with interrupts enabled
	 */
	down_read(&vcpu->kvm->slots_lock);

again:
	if (signal_pending(current)) {
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		goto out;
	}

	preempt_disable();
	local_irq_disable();

	/* Get host and guest context with guest address space. */
	host_ctx = kvm_get_host_context(vcpu);
	guest_ctx = kvm_get_guest_context(vcpu);

	clear_bit(KVM_REQ_KICK, &vcpu->requests);

	r = kvm_vcpu_pre_transition(vcpu);
	if (r < 0)
		goto vcpu_run_fail;

	up_read(&vcpu->kvm->slots_lock);
	kvm_guest_enter();

	/*
	 * Transition to the guest
	 */
	kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);

	kvm_vcpu_post_transition(vcpu);

	vcpu->arch.launched = 1;
	set_bit(KVM_REQ_KICK, &vcpu->requests);
	local_irq_enable();

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow.  The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();
	kvm_guest_exit();
	preempt_enable();

	down_read(&vcpu->kvm->slots_lock);

	r = kvm_handle_exit(kvm_run, vcpu);

	if (r > 0) {
		if (!need_resched())
			goto again;
	}

out:
	up_read(&vcpu->kvm->slots_lock);
	if (r > 0) {
		kvm_resched(vcpu);
		down_read(&vcpu->kvm->slots_lock);
		goto again;
	}

	return r;

vcpu_run_fail:
	local_irq_enable();
	preempt_enable();
	kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
	goto out;
}

static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
{
	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);

	if (!vcpu->mmio_is_write)
		memcpy(&p->data, vcpu->mmio_data, 8);
	p->state = STATE_IORESP_READY;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		r = -EAGAIN;
		goto out;
	}

	if (vcpu->mmio_needed) {
		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
		kvm_set_mmio_data(vcpu);
		vcpu->mmio_read_completed = 1;
		vcpu->mmio_needed = 0;
	}
	r = __vcpu_run(vcpu, kvm_run);
out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);
	return r;
}
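
/*
 * The whole VM (struct kvm, the vcpu structures, VHPT/VTLB hashes and
 * the dirty log) is carved out of one physically contiguous allocation
 * of KVM_VM_DATA_SIZE, so the VMM can map it with a single translation
 * register pair (see kvm_insert_vmm_mapping() above).
 */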
static struct kvm *kvm_alloc_kvm(void)
{
	struct kvm *kvm;
	uint64_t vm_base;

	BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);

	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));

	if (!vm_base)
		return ERR_PTR(-ENOMEM);

	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
	kvm = (struct kvm *)(vm_base +
			offsetof(struct kvm_vm_data, kvm_vm_struct));
	kvm->arch.vm_base = vm_base;
	printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);

	return kvm;
}

struct kvm_io_range {
	unsigned long start;
	unsigned long size;
	unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
	{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
	{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
	{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
	{IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
	{PIB_START, PIB_SIZE, GPFN_PIB},
};

static void kvm_build_io_pmt(struct kvm *kvm)
{
	unsigned long i, j;

	/* Mark I/O ranges */
	for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
							i++) {
		for (j = io_ranges[i].start;
				j < io_ranges[i].start + io_ranges[i].size;
				j += PAGE_SIZE)
			kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
					io_ranges[i].type, 0);
	}
}

/* Use unused rids to virtualize guest rid. */
#define GUEST_PHYSICAL_RR0	0x1739
#define GUEST_PHYSICAL_RR4	0x2739
#define VMM_INIT_RR		0x1660

static void kvm_init_vm(struct kvm *kvm)
{
	BUG_ON(!kvm);

	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
	kvm->arch.vmm_init_rr = VMM_INIT_RR;

	/*
	 * Fill P2M entries for MMIO/IO ranges
	 */
	kvm_build_io_pmt(kvm);

	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kvm_alloc_kvm();

	if (IS_ERR(kvm))
		return ERR_PTR(-ENOMEM);

	kvm->arch.is_sn2 = ia64_platform_is("sn2");

	kvm_init_vm(kvm);

	return kvm;
}

static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
					struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
	int r;

	r = 0;
	switch (chip->chip_id) {
	case KVM_IRQCHIP_IOAPIC:
		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

#define RESTORE_REGS(_x)	vcpu->arch._x = regs->_x

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		vpd->vgr[i] = regs->vpd.vgr[i];
		vpd->vbgr[i] = regs->vpd.vbgr[i];
	}
	for (i = 0; i < 128; i++)
		vpd->vcr[i] = regs->vpd.vcr[i];
	vpd->vhpi = regs->vpd.vhpi;
	vpd->vnat = regs->vpd.vnat;
	vpd->vbnat = regs->vpd.vbnat;
	vpd->vpsr = regs->vpd.vpsr;

	vpd->vpr = regs->vpd.vpr;

	memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));

	RESTORE_REGS(mp_state);
	RESTORE_REGS(vmm_rr);
	memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
	memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
	RESTORE_REGS(itr_regions);
	RESTORE_REGS(dtr_regions);
	RESTORE_REGS(tc_regions);
	RESTORE_REGS(irq_check);
	RESTORE_REGS(itc_check);
	RESTORE_REGS(timer_check);
	RESTORE_REGS(timer_pending);
	RESTORE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		vcpu->arch.vrr[i] = regs->vrr[i];
		vcpu->arch.ibr[i] = regs->ibr[i];
		vcpu->arch.dbr[i] = regs->dbr[i];
	}
	for (i = 0; i < 4; i++)
		vcpu->arch.insvc[i] = regs->insvc[i];
	RESTORE_REGS(xtp);
	RESTORE_REGS(metaphysical_rr0);
	RESTORE_REGS(metaphysical_rr4);
	RESTORE_REGS(metaphysical_saved_rr0);
	RESTORE_REGS(metaphysical_saved_rr4);
	RESTORE_REGS(fp_psr);
	RESTORE_REGS(saved_gp);

	vcpu->arch.irq_new_pending = 1;
	vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
	set_bit(KVM_REQ_RESUME, &vcpu->requests);

	vcpu_put(vcpu);

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -ENOTTY;

	switch (ioctl) {
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		kvm_userspace_mem.slot = kvm_mem.slot;
		kvm_userspace_mem.flags = kvm_mem.flags;
		kvm_userspace_mem.guest_phys_addr =
					kvm_mem.guest_phys_addr;
		kvm_userspace_mem.memory_size = kvm_mem.memory_size;
		r = kvm_vm_ioctl_set_memory_region(kvm,
					&kvm_userspace_mem, 0);
		if (r)
			goto out;
		break;
		}
	case KVM_CREATE_IRQCHIP:
		r = -EFAULT;
		r = kvm_ioapic_init(kvm);
		if (r)
			goto out;
		r = kvm_setup_default_irq_routing(kvm);
		if (r) {
			kfree(kvm->arch.vioapic);
			goto out;
		}
		break;
	case KVM_IRQ_LINE_STATUS:
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		if (irqchip_in_kernel(kvm)) {
			__s32 status;
			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
				    irq_event.irq, irq_event.level);
			if (ioctl == KVM_IRQ_LINE_STATUS) {
				irq_event.status = status;
				if (copy_to_user(argp, &irq_event,
							sizeof irq_event))
					goto out;
			}
			r = 0;
		}
		break;
		}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
		}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
		}
	default:
		;
	}
out:
	return r;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
		struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
		struct kvm_translation *tr)
{
	return -EINVAL;
}

static int kvm_alloc_vmm_area(void)
{
	if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
		kvm_vmm_base = __get_free_pages(GFP_KERNEL,
				get_order(KVM_VMM_SIZE));
		if (!kvm_vmm_base)
			return -ENOMEM;

		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;

		printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
				kvm_vmm_base, kvm_vm_buffer);
	}

	return 0;
}

static void kvm_free_vmm_area(void)
{
	if (kvm_vmm_base) {
		/* Zero the area before freeing it to avoid leaking stale bits. */
		memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
		free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
		kvm_vmm_base = 0;
		kvm_vm_buffer = 0;
		kvm_vsa_base = 0;
	}
}

static int vti_init_vpd(struct kvm_vcpu *vcpu)
{
	int i;
	union cpuid3_t cpuid3;
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

	/* CPUID init */
	for (i = 0; i < 5; i++)
		vpd->vcpuid[i] = ia64_get_cpuid(i);

	/* Limit the CPUID number to 5 */
	cpuid3.value = vpd->vcpuid[3];
	cpuid3.number = 4;	/* 5 - 1 */
	vpd->vcpuid[3] = cpuid3.value;

	/* Set vac and vdc fields */
	vpd->vac.a_from_int_cr = 1;
	vpd->vac.a_to_int_cr = 1;
	vpd->vac.a_from_psr = 1;
	vpd->vac.a_from_cpuid = 1;
	vpd->vac.a_cover = 1;
	vpd->vac.a_bsw = 1;
	vpd->vac.a_int = 1;
	vpd->vdc.d_vmsw = 1;

	/* Set virtual buffer */
	vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;

	return 0;
}

static int vti_create_vp(struct kvm_vcpu *vcpu)
{
	long ret;
	struct vpd *vpd = vcpu->arch.vpd;
	unsigned long vmm_ivt;

	vmm_ivt = kvm_vmm_info->vmm_ivt;

	printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);

	ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);

	if (ret) {
		printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
		return -EINVAL;
	}
	return 0;
}

static void init_ptce_info(struct kvm_vcpu *vcpu)
{
	ia64_ptce_info_t ptce = {0};

	ia64_get_ptce(&ptce);
	vcpu->arch.ptce_base = ptce.base;
	vcpu->arch.ptce_count[0] = ptce.count[0];
	vcpu->arch.ptce_count[1] = ptce.count[1];
	vcpu->arch.ptce_stride[0] = ptce.stride[0];
	vcpu->arch.ptce_stride[1] = ptce.stride[1];
}

static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *p_ht = &vcpu->arch.hlt_timer;

	if (hrtimer_cancel(p_ht))
		hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);
}

static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
{
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *q;

	vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);
	q = &vcpu->wq;

	if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
		goto out;

	if (waitqueue_active(q))
		wake_up_interruptible(q);

out:
	vcpu->arch.timer_fired = 1;
	vcpu->arch.timer_check = 1;
	return HRTIMER_NORESTART;
}

#define PALE_RESET_ENTRY	0x80000000ffffffb0UL
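
/*
 * First-run vcpu setup: the bootstrap vcpu starts runnable at the PAL
 * reset entry point and establishes the shared ITC offset; application
 * processors stay uninitialized until they receive a wakeup IPI (see
 * handle_ipi()). The rest seeds the VMM-side register context.
 */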
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;
	int r;
	int i;
	long itc_offset;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_pt_regs *regs = vcpu_regs(vcpu);

	union context *p_ctx = &vcpu->arch.guest;
	struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

	/* Init vcpu context for first run. */
	if (IS_ERR(vmm_vcpu))
		return PTR_ERR(vmm_vcpu);

	if (kvm_vcpu_is_bsp(vcpu)) {
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

		/* Set entry address for first run. */
		regs->cr_iip = PALE_RESET_ENTRY;

		/* Initialize itc offset for vcpus */
		itc_offset = 0UL - kvm_get_itc(vcpu);
		for (i = 0; i < KVM_MAX_VCPUS; i++) {
			v = (struct kvm_vcpu *)((char *)vcpu +
					sizeof(struct kvm_vcpu_data) * i);
			v->arch.itc_offset = itc_offset;
			v->arch.last_itc = 0;
		}
	} else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	r = -ENOMEM;
	vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
	if (!vcpu->arch.apic)
		goto out;
	vcpu->arch.apic->vcpu = vcpu;

	p_ctx->gr[1] = 0;
	p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
	p_ctx->gr[13] = (unsigned long)vmm_vcpu;
	p_ctx->psr = 0x1008522000UL;
	p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
	p_ctx->caller_unat = 0;
	p_ctx->pr = 0x0;
	p_ctx->ar[36] = 0x0; /*unat*/
	p_ctx->ar[19] = 0x0; /*rnat*/
	p_ctx->ar[18] = (unsigned long)vmm_vcpu +
				((sizeof(struct kvm_vcpu)+15) & ~15);
	p_ctx->ar[64] = 0x0; /*pfs*/
	p_ctx->cr[0] = 0x7e04UL;
	p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
	p_ctx->cr[8] = 0x3c;

	/* Initialize region registers */
	p_ctx->rr[0] = 0x30;
	p_ctx->rr[1] = 0x30;
	p_ctx->rr[2] = 0x30;
	p_ctx->rr[3] = 0x30;
	p_ctx->rr[4] = 0x30;
	p_ctx->rr[5] = 0x30;
	p_ctx->rr[7] = 0x30;

	/* Initialize branch register 0 */
	p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

	vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
	vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
	vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

	hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	vcpu->arch.hlt_timer.function = hlt_timer_fn;

	vcpu->arch.last_run_cpu = -1;
	vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
	vcpu->arch.vsa_base = kvm_vsa_base;
	vcpu->arch.__gp = kvm_vmm_gp;
	vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
	vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
	vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
	init_ptce_info(vcpu);

	r = 0;
out:
	return r;
}

static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
{
	unsigned long psr;
	int r;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	if (r)
		goto fail;
	r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
	if (r)
		goto fail;

	r = vti_init_vpd(vcpu);
	if (r) {
		printk(KERN_DEBUG"kvm: vpd init error!!\n");
		goto uninit;
	}

	r = vti_create_vp(vcpu);
	if (r)
		goto uninit;

	kvm_purge_vmm_mapping(vcpu);

	return 0;
uninit:
	kvm_vcpu_uninit(vcpu);
fail:
	return r;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
		unsigned int id)
{
	struct kvm_vcpu *vcpu;
	unsigned long vm_base = kvm->arch.vm_base;
	int r;
	int cpu;

	BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);

	r = -EINVAL;
	if (id >= KVM_MAX_VCPUS) {
		printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
				KVM_MAX_VCPUS);
		goto fail;
	}

	r = -ENOMEM;
	if (!vm_base) {
		printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
		goto fail;
	}
	vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
					vcpu_data[id].vcpu_struct));
	vcpu->kvm = kvm;

	cpu = get_cpu();
	r = vti_vcpu_setup(vcpu, id);
	put_cpu();

	if (r) {
		printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
		goto fail;
	}

	return vcpu;
fail:
	return ERR_PTR(r);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void free_kvm(struct kvm *kvm)
{
	unsigned long vm_base = kvm->arch.vm_base;

	if (vm_base) {
		memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
		free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
	}
}

static void kvm_release_vm_pages(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, j;
	unsigned long base_gfn;

	slots = kvm->memslots;
	for (i = 0; i < slots->nmemslots; i++) {
		memslot = &slots->memslots[i];
		base_gfn = memslot->base_gfn;

		for (j = 0; j < memslot->npages; j++) {
			if (memslot->rmap[j])
				put_page((struct page *)memslot->rmap[j]);
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
#ifdef  KVM_CAP_DEVICE_ASSIGNMENT
	kvm_free_all_assigned_devices(kvm);
#endif
	kfree(kvm->arch.vioapic);
	kvm_release_vm_pages(kvm);
	kvm_free_physmem(kvm);
	free_kvm(kvm);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (cpu != vcpu->cpu) {
		vcpu->cpu = cpu;
		if (vcpu->arch.ht_active)
			kvm_migrate_hlt_timer(vcpu);
	}
}

#define SAVE_REGS(_x)	regs->_x = vcpu->arch._x

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < 16; i++) {
		regs->vpd.vgr[i] = vpd->vgr[i];
		regs->vpd.vbgr[i] = vpd->vbgr[i];
	}
	for (i = 0; i < 128; i++)
		regs->vpd.vcr[i] = vpd->vcr[i];
	regs->vpd.vhpi = vpd->vhpi;
	regs->vpd.vnat = vpd->vnat;
	regs->vpd.vbnat = vpd->vbnat;
	regs->vpd.vpsr = vpd->vpsr;
	regs->vpd.vpr = vpd->vpr;

	memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));

	SAVE_REGS(mp_state);
	SAVE_REGS(vmm_rr);
	memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
	memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
	SAVE_REGS(itr_regions);
	SAVE_REGS(dtr_regions);
	SAVE_REGS(tc_regions);
	SAVE_REGS(irq_check);
	SAVE_REGS(itc_check);
	SAVE_REGS(timer_check);
	SAVE_REGS(timer_pending);
	SAVE_REGS(last_itc);
	for (i = 0; i < 8; i++) {
		regs->vrr[i] = vcpu->arch.vrr[i];
		regs->ibr[i] = vcpu->arch.ibr[i];
		regs->dbr[i] = vcpu->arch.dbr[i];
	}
	for (i = 0; i < 4; i++)
		regs->insvc[i] = vcpu->arch.insvc[i];
	regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
	SAVE_REGS(xtp);
	SAVE_REGS(metaphysical_rr0);
	SAVE_REGS(metaphysical_rr4);
	SAVE_REGS(metaphysical_saved_rr0);
	SAVE_REGS(metaphysical_saved_rr4);
	SAVE_REGS(fp_psr);
	SAVE_REGS(saved_gp);

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
				  struct kvm_ia64_vcpu_stack *stack)
{
	memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
				  struct kvm_ia64_vcpu_stack *stack)
{
	memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
	       sizeof(struct kvm_ia64_vcpu_stack) - sizeof(struct kvm_vcpu));

	vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.hlt_timer);
	kfree(vcpu->arch.apic);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
		unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_ia64_vcpu_stack *stack = NULL;
	long r;

	switch (ioctl) {
	case KVM_IA64_VCPU_GET_STACK: {
		struct kvm_ia64_vcpu_stack __user *user_stack;
		void __user *first_p = argp;

		r = -EFAULT;
		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
			goto out;

		if (!access_ok(VERIFY_WRITE, user_stack,
			       sizeof(struct kvm_ia64_vcpu_stack))) {
			printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
			       "Illegal user destination address for stack\n");
			goto out;
		}
		stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
		if (!stack) {
			r = -ENOMEM;
			goto out;
		}

		r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);
		if (r)
			goto out;

		r = -EFAULT;
		if (copy_to_user(user_stack, stack,
				 sizeof(struct kvm_ia64_vcpu_stack)))
			goto out;
		r = 0;

		break;
	}
	case KVM_IA64_VCPU_SET_STACK: {
		struct kvm_ia64_vcpu_stack __user *user_stack;
		void __user *first_p = argp;

		r = -EFAULT;
		if (copy_from_user(&user_stack, first_p, sizeof(void *)))
			goto out;

		if (!access_ok(VERIFY_READ, user_stack,
			       sizeof(struct kvm_ia64_vcpu_stack))) {
			printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
			       "Illegal user address for stack\n");
			goto out;
		}
		stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);
		if (!stack) {
			r = -ENOMEM;
			goto out;
		}
		if (copy_from_user(stack, user_stack,
				   sizeof(struct kvm_ia64_vcpu_stack)))
			goto out;

		r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
		break;
	}

	default:
		r = -EINVAL;
	}

out:
	kfree(stack);
	return r;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
		struct kvm_userspace_memory_region *mem,
		struct kvm_memory_slot old,
		int user_alloc)
{
	unsigned long i;
	unsigned long pfn;
	int npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot = &kvm->memslots->memslots[mem->slot];
	unsigned long base_gfn = memslot->base_gfn;

	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
		return -ENOMEM;

	for (i = 0; i < npages; i++) {
		pfn = gfn_to_pfn(kvm, base_gfn + i);
		if (!kvm_is_mmio_pfn(pfn)) {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					pfn << PAGE_SHIFT,
				_PAGE_AR_RWX | _PAGE_MA_WB);
			memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
		} else {
			kvm_set_pmt_entry(kvm, base_gfn + i,
					GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
					_PAGE_MA_UC);
			memslot->rmap[i] = 0;
		}
	}

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_flush_remote_tlbs(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
}

static int vti_cpu_has_kvm_support(void)
{
	long avail = 1, status = 1, control = 1;
	long ret;

	ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
	if (ret)
		goto out;

	if (!(avail & PAL_PROC_VM_BIT))
		goto out;

	printk(KERN_DEBUG"kvm: Hardware Supports VT\n");

	ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
	if (ret)
		goto out;
	printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);

	if (!(vp_env_info & VP_OPCODE)) {
		printk(KERN_WARNING"kvm: No opcode ability on hardware, "
				"vm_env_info:0x%lx\n", vp_env_info);
	}

	return 1;
out:
	return 0;
}

/*
 * On SN2, the ITC isn't stable, so copy in fast path code to use the
 * SN2 RTC, replacing the ITC based default version.
 */
static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
			  struct module *module)
{
	unsigned long new_ar, new_ar_sn2;
	unsigned long module_base;

	if (!ia64_platform_is("sn2"))
		return;

	module_base = (unsigned long)module->module_core;

	new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
	new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;

	printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
	       "as source\n");

	/*
	 * Copy the SN2 version of mov_ar into place.  They are both
	 * the same size, so 6 bundles is sufficient (6 * 0x10).
	 */
	memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
}
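
/*
 * Copy the VMM module image into the pinned kvm_vmm_base area and
 * recompute the IVT address and the ia64 function descriptors (ip/gp
 * pairs) of the entry points so they refer to the relocated copy
 * rather than to the module's original load address.
 */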
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
			    struct module *module)
{
	unsigned long module_base;
	unsigned long vmm_size;

	unsigned long vmm_offset, func_offset, fdesc_offset;
	struct fdesc *p_fdesc;

	BUG_ON(!module);

	if (!kvm_vmm_base) {
		printk("kvm: kvm area hasn't been initialized yet!!\n");
		return -EFAULT;
	}

	/* Calculate new position of relocated vmm module. */
	module_base = (unsigned long)module->module_core;
	vmm_size = module->core_size;
	if (unlikely(vmm_size > KVM_VMM_SIZE))
		return -EFAULT;

	memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
	kvm_patch_vmm(vmm_info, module);
	kvm_flush_icache(kvm_vmm_base, vmm_size);

	/* Recalculate kvm_vmm_info based on new VMM */
	vmm_offset = vmm_info->vmm_ivt - module_base;
	kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
	printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
			kvm_vmm_info->vmm_ivt);

	fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
	kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
							fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
			KVM_VMM_BASE+func_offset);

	fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
	kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
							fdesc_offset);
	func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
	p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
	p_fdesc->ip = KVM_VMM_BASE + func_offset;
	p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

	kvm_vmm_gp = p_fdesc->gp;

	printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
			kvm_vmm_info->vmm_entry);
	printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
			KVM_VMM_BASE + func_offset);

	return 0;
}

int kvm_arch_init(void *opaque)
{
	int r;
	struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

	if (!vti_cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
		r = -EOPNOTSUPP;
		goto out;
	}

	if (kvm_vmm_info) {
		printk(KERN_ERR "kvm: Already loaded VMM module!\n");
		r = -EEXIST;
		goto out;
	}

	r = -ENOMEM;
	kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
	if (!kvm_vmm_info)
		goto out;

	if (kvm_alloc_vmm_area())
		goto out_free0;

	r = kvm_relocate_vmm(vmm_info, vmm_info->module);
	if (r)
		goto out_free1;

	return 0;

out_free1:
	kvm_free_vmm_area();
out_free0:
	kfree(kvm_vmm_info);
out:
	return r;
}

void kvm_arch_exit(void)
{
	kvm_free_vmm_area();
	kfree(kvm_vmm_info);
	kvm_vmm_info = NULL;
}
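
/*
 * The VMM tracks dirty pages in a bitmap inside the shared VM data
 * area; syncing copies (and clears) the relevant words into the
 * memslot's dirty_bitmap before it is handed to userspace.
 */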
static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	long n, base;
	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	base = memslot->base_gfn / BITS_PER_LONG;

	for (i = 0; i < n/sizeof(long); ++i) {
		memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
		dirty_bitmap[base + i] = 0;
	}
	r = 0;
out:
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
		struct kvm_dirty_log *log)
{
	int r;
	int n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	spin_lock(&kvm->arch.dirty_log_lock);

	r = kvm_ia64_sync_dirty_log(kvm, log);
	if (r)
		goto out;

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		kvm_flush_remote_tlbs(kvm);
		memslot = &kvm->memslots->memslots[log->slot];
		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	spin_unlock(&kvm->arch.dirty_log_lock);
	return r;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);

	me = get_cpu();
	if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
		if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
			smp_send_reschedule(cpu);
	put_cpu();
}

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
{
	return __apic_accept_irq(vcpu, irq->vector);
}

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
{
	return apic->vcpu->vcpu_id == dest;
}

int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
{
	return 0;
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.xtp - vcpu2->arch.xtp;
}

int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
		int short_hand, int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	return (dest_mode == 0) ?
		kvm_apic_match_physical_addr(target, dest) :
		kvm_apic_match_logical_addr(target, dest);
}
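
/*
 * Scan a 256-bit interrupt vector bitmap from the top 32-bit word down
 * and return the highest set bit, or -1 if none is pending.
 */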
static int find_highest_bits(int *dat)
{
	u32 bits, bitnum;
	int i;

	/* loop for all 256 bits */
	for (i = 7; i >= 0 ; i--) {
		bits = dat[i];
		if (bits) {
			bitnum = fls(bits);
			return i * 32 + bitnum - 1;
		}
	}

	return -1;
}

int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

	if (vpd->irr[0] & (1UL << NMI_VECTOR))
		return NMI_VECTOR;
	if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return find_highest_bits((int *)&vpd->irr[0]);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.timer_fired;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
		(kvm_highest_pending_irq(vcpu) != -1);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	vcpu_load(vcpu);
	mp_state->mp_state = vcpu->arch.mp_state;
	vcpu_put(vcpu);
	return 0;
}

static int vcpu_reset(struct kvm_vcpu *vcpu)
{
	int r;
	long psr;

	local_irq_save(psr);
	r = kvm_insert_vmm_mapping(vcpu);
	local_irq_restore(psr);
	if (r)
		goto fail;

	vcpu->arch.launched = 0;
	kvm_arch_vcpu_uninit(vcpu);
	r = kvm_arch_vcpu_init(vcpu);
	if (r)
		goto fail;

	kvm_purge_vmm_mapping(vcpu);
	r = 0;
fail:
	return r;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int r = 0;

	vcpu_load(vcpu);
	vcpu->arch.mp_state = mp_state->mp_state;
	if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
		r = vcpu_reset(vcpu);
	vcpu_put(vcpu);
	return r;
}