KVM: s390: Fix RUNNING flag misinterpretation
arch/s390/kvm/kvm-s390.c
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

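/*
 * The table above is consumed by common KVM code, which exports each
 * counter through debugfs; with debugfs mounted at its usual location a
 * counter shows up as e.g. /sys/kernel/debug/kvm/exit_null.
 */
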
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

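/*
 * Illustrative sketch of the expected userspace side (an assumption for
 * illustration, not part of this file): the ioctl is issued on the
 * /dev/kvm device fd itself, before any VM is created:
 *
 *      int kvm_fd = open("/dev/kvm", O_RDWR);
 *      if (ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0) < 0)
 *              perror("KVM_S390_ENABLE_SIE");
 */
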
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm)
{
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        kvm->arch.gmap = gmap_alloc(current->mm);
        if (!kvm->arch.gmap)
                goto out_nogmap;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

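/*
 * Note the reverse-order unwind above: each out_* label releases only what
 * was successfully set up before the failing step (the debug facility
 * before the SCA page), so a failure at any point leaves nothing behind.
 */
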
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
            (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

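/*
 * As the patch title suggests, CPUSTAT_RUNNING only tracks whether the
 * vcpu is currently loaded on a host cpu (set in vcpu_load, cleared in
 * vcpu_put). The architected stopped/operating state of the guest cpu is
 * tracked separately via CPUSTAT_STOPPED, which is set at vcpu setup and
 * cleared when KVM_RUN starts executing the vcpu.
 */
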
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals the initial cpu reset in the POP, but we don't
           switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix = 0UL;
        vcpu->arch.sie_block->ihcpu = 0xffff;
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb = 6;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

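/*
 * A new vcpu therefore starts out CPUSTAT_STOPPED; the flag is only
 * cleared once userspace issues KVM_RUN, and ioctls such as
 * KVM_S390_SET_INITIAL_PSW (below) refuse to touch a cpu that is not
 * stopped.
 */
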
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->arch.guest_acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

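/*
 * Illustrative userspace call (an assumed flow, not part of this file);
 * the mask value is only an example and must describe a valid guest PSW:
 *
 *      psw_t psw = {
 *              .mask = 0x0000000180000000UL,   /+ 64-bit addressing mode +/
 *              .addr = start_address,
 *      };
 *      ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 *
 * This only succeeds while the vcpu is CPUSTAT_STOPPED, per the check
 * above.
 */
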
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

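/*
 * Interrupts are disabled around kvm_guest_enter()/kvm_guest_exit() so
 * the accounting transition between host and guest context cannot be
 * interleaved with an interrupt on this cpu; sie64a() itself runs with
 * interrupts enabled.
 */
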
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

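/*
 * Illustrative use (an assumed flow): userspace triggers this via the
 * KVM_S390_STORE_STATUS vcpu ioctl, passing either a real guest address
 * or one of the two magic values handled above, e.g.
 *
 *      ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 *
 * which stores the status at SAVE_AREA_BASE in absolute storage.
 */
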
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        default:
                r = -EINVAL;
        }
        return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
           The memory in userland is ok to be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

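/*
 * Example of a region that passes these checks (illustrative only):
 * slot 0, starting at guest physical 0, with userspace address and size
 * both 1 MB aligned (the 0xfffff masks above):
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot            = 0,
 *              .guest_phys_addr = 0,
 *              .userspace_addr  = (unsigned long) one_mb_aligned_buf,
 *              .memory_size     = 256 << 20,
 *      };
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */
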
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                              mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

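/*
 * Only the first two doublewords (16 bytes) of the host STFLE facility
 * list are copied and then masked, so guests never see host facilities
 * that KVM does not know how to virtualize.
 */
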
static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);