KVM: s390: Send program check on access error
[linux-2.6/mini2440.git] / arch/s390/kvm/kvm-s390.c (blob 6558b09ff579fa9ae3fe11de2191f783525adff1)
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

#include "kvm-s390.h"
#include "gaccess.h"
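/*
 * VCPU_STAT expands to an (offset, type) pair describing a counter in
 * struct kvm_vcpu's stat member, for the debugfs table below.
 */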
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctg", VCPU_STAT(instruction_lctg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};
/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

void decache_vcpus_on_cpu(int cpu)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        return 0;
}
/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -EINVAL;
        }

        return r;
}
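/*
 * VM creation: enable SIE for this mm, allocate the system control
 * area (SCA) page that links the virtual CPUs, and register a per-VM
 * s390dbf debug feature that backs the VM_EVENT/VCPU_EVENT macros.
 */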
struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        try_module_get(THIS_MODULE);

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        debug_unregister(kvm->arch.dbf);
        free_page((unsigned long)(kvm->arch.sca));
        kfree(kvm);
        module_put(THIS_MODULE);
}
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but doesn't call it */
        BUG();
}
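/*
 * On load/put the floating point and access registers are swapped
 * between host and guest context; the guest FPC is sanitized against
 * FPC_VALID_MASK before it is restored.
 */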
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}
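/*
 * Reset applies to the shadow SIE control block: PSW, prefix, timers
 * and control registers are cleared; gcr[0] and gcr[14] are set to
 * what appear to be the architected initial-reset values (see the
 * in-function comment referencing the Principles of Operation).
 */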
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix = 0UL;
        vcpu->arch.sie_block->ihcpu = 0xffff;
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
        vcpu->arch.sie_block->gmsor = 0x000000000000;
        vcpu->arch.sie_block->ecb = 2;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
                    (unsigned long) vcpu);
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xfe;
        return 0;
}
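/*
 * A new vcpu gets its own SIE control block, is entered into the SCA
 * slot matching its id, and is wired up to the per-VM floating
 * interrupt state before common code initialization runs.
 */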
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        BUG_ON(kvm->arch.sca->cpu[id].sda);
        kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock_bh(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock_bh(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_cpu;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        try_module_get(THIS_MODULE);

        return vcpu;
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
        free_page((unsigned long)(vcpu->arch.sie_block));
        kfree(vcpu);
        module_put(THIS_MODULE);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_s390_vcpu_initial_reset(vcpu);
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        vcpu_put(vcpu);
        return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        vcpu_load(vcpu);
        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else
                vcpu->arch.sie_block->gpsw = psw;
        vcpu_put(vcpu);
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                    struct kvm_debug_guest *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}
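/*
 * One round trip through SIE: deliver any pending interrupts, enter the
 * guest via sie64a(), and, if the host faults while executing the sie
 * instruction (i.e. the backing guest memory is not accessible), inject
 * an addressing program check into the guest rather than failing the
 * host-side run.
 */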
extern void s390_handle_mcck(void);

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
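/*
 * The run loop reenters SIE until either a signal is pending or an
 * intercept handler returns a non-zero rc: -ENOTSUPP means the
 * intercept must be passed to userspace (kvm_run is filled in here),
 * -EREMOTE means the handler already prepared kvm_run itself.
 */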
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
                vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
                vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
                break;
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        might_sleep();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (signal_pending(current) && !rc)
                rc = -EINTR;

        if (rc == -ENOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
                kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        vcpu->stat.exit_userspace++;
        return rc;
}
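/*
 * Copy to guest memory either through the prefixed address space or
 * through absolute addressing, depending on how the store status
 * address was specified by the caller.
 */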
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}
static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        int rc;

        vcpu_load(vcpu);
        rc = __kvm_s390_vcpu_store_status(vcpu, addr);
        vcpu_put(vcpu);
        return rc;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390int);
        }
        case KVM_S390_STORE_STATUS:
                return kvm_s390_vcpu_store_status(vcpu, arg);
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                if (copy_from_user(&psw, argp, sizeof(psw)))
                        return -EFAULT;
                return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
        }
        case KVM_S390_INITIAL_RESET:
                return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
        default:
                ;
        }
        return -EINVAL;
}
/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
           The memory in userland is ok to be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        kvm->arch.guest_origin = mem->userspace_addr;
        kvm->arch.guest_memsize = mem->memory_size;

        /* FIXME: we do want to interrupt running CPUs and update their memory
           configuration now to avoid race conditions. But hey, changing the
           memory layout while virtual CPUs are running is usually bad
           programming practice. */

        return 0;
}
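/* s390 has no memory aliases, so guest frame numbers map to themselves */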
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

static int __init kvm_s390_init(void)
{
        return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);